Update prebuilts to go1.8 ab/3753832 am: a727cd0454 am: 2519fc4d3d
am: 08d8c1128d

Change-Id: I9741c31873d75be1a2f90f5f4f9163f2e64b2df8
diff --git a/VERSION b/VERSION
index 4fded12..854106e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-go1.8rc2
\ No newline at end of file
+go1.8
\ No newline at end of file
diff --git a/bin/go b/bin/go
index 7a0de97..cde3cbb 100755
--- a/bin/go
+++ b/bin/go
Binary files differ
diff --git a/bin/gofmt b/bin/gofmt
index 6316934..8ff6eca 100755
--- a/bin/gofmt
+++ b/bin/gofmt
Binary files differ
diff --git a/doc/conduct.html b/doc/conduct.html
index 5b81681..c40b007 100644
--- a/doc/conduct.html
+++ b/doc/conduct.html
@@ -148,29 +148,26 @@
 
 <p>
 The Go spaces are not free speech venues; they are for discussion about Go.
-These spaces have moderators.
-The goal of the moderators is to facilitate civil discussion about Go.
+Each of these spaces has its own moderators.
 </p>
 
 <p>
 When using the official Go spaces you should act in the spirit of the “Gopher
 values”.
-If you conduct yourself in a way that is explicitly forbidden by the CoC,
-you will be warned and asked to stop.
-If you do not stop, you will be removed from our community spaces temporarily.
-Repeated, willful breaches of the CoC will result in a permanent ban.
+If a reported conflict cannot be resolved amicably, the CoC Working Group
+may make a recommendation to the relevant forum moderators.
 </p>
 
 <p>
-Moderators are held to a higher standard than other community members.
-If a moderator creates an inappropriate situation, they should expect less
-leeway than others, and should expect to be removed from their position if they
-cannot adhere to the CoC.
+CoC Working Group members and forum moderators are held to a higher standard than other community members.
+If a working group member or moderator creates an inappropriate situation, they
+should expect less leeway than others, and should expect to be removed from
+their position if they cannot adhere to the CoC.
 </p>
 
 <p>
-Complaints about moderator actions must be handled using the reporting process
-below.
+Complaints about working group member or moderator actions must be handled 
+using the reporting process below.
 </p>
 
 <h2 id="reporting">Reporting issues</h2>
@@ -185,8 +182,6 @@
 <ul>
 	<li>Aditya Mukerjee &lt;dev@chimeracoder.net&gt;
 	<li>Andrew Gerrand &lt;adg@golang.org&gt;
-	<li>Dave Cheney &lt;dave@cheney.net&gt;
-	<li>Jason Buberel &lt;jbuberel@google.com&gt;
 	<li>Peggy Li &lt;peggyli.224@gmail.com&gt;
 	<li>Sarah Adams &lt;sadams.codes@gmail.com&gt;
 	<li>Steve Francia &lt;steve.francia@gmail.com&gt;
@@ -201,13 +196,10 @@
 </p>
 
 <ul>
-<li>Mail <a href="mailto:conduct@golang.org">conduct@golang.org</a> or
-    <a href="https://golang.org/s/conduct-report">submit an anonymous report</a>.
+<li>Mail <a href="mailto:conduct@golang.org">conduct@golang.org</a>.
     <ul>
     <li>Your message will reach the Working Group.
     <li>Reports are confidential within the Working Group.
-    <li>Should you choose to remain anonymous then the Working Group cannot
-        notify you of the outcome of your report.
     <li>You may contact a member of the group directly if you do not feel
         comfortable contacting the group as a whole. That member will then raise
         the issue with the Working Group as a whole, preserving the privacy of the
@@ -229,11 +221,8 @@
 <li>The Working Group will reach a decision as to how to act. These may include:
     <ul>
     <li>Nothing.
-    <li>A request for a private or public apology.
-    <li>A private or public warning.
-    <li>An imposed vacation (for instance, asking someone to abstain for a week
-        from a mailing list or IRC).
-    <li>A permanent or temporary ban from some or all Go spaces.
+    <li>Passing the report along to the offender.
+    <li>A recommendation of action to the relevant forum moderators.
     </ul>
 <li>The Working Group will reach out to the original reporter to let them know
     the decision.
@@ -246,7 +235,6 @@
 conflicts in the most harmonious way possible.</b>
 We hope that in most cases issues may be resolved through polite discussion and
 mutual agreement.
-Bannings and other forceful measures are to be employed only as a last resort.
 </p>
 
 <p>
diff --git a/doc/devel/release.html b/doc/devel/release.html
index 51957df..d046149 100644
--- a/doc/devel/release.html
+++ b/doc/devel/release.html
@@ -30,6 +30,13 @@
 See the <a href="/security">security policy</a> for more details.
 </p>
 
+<h2 id="go1.8">go1.8 (released 2017/02/16)</h2>
+
+<p>
+Go 1.8 is a major release of Go.
+Read the <a href="/doc/go1.8">Go 1.8 Release Notes</a> for more information.
+</p>
+
 <h2 id="go1.7">go1.7 (released 2016/08/15)</h2>
 
 <p>
@@ -69,6 +76,13 @@
 1.7.4 milestone</a> on our issue tracker for details.
 </p>
 
+<p>
+go1.7.5 (released 2017/01/26) includes fixes to the compiler, runtime,
+and the <code>crypto/x509</code> and <code>time</code> packages.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.7.5">Go
+1.7.5 milestone</a> on our issue tracker for details.
+</p>
+
 <h2 id="go1.6">go1.6 (released 2016/02/17)</h2>
 
 <p>
diff --git a/doc/gccgo_install.html b/doc/gccgo_install.html
index ef27fd1..4f6a911 100644
--- a/doc/gccgo_install.html
+++ b/doc/gccgo_install.html
@@ -52,6 +52,19 @@
 should not be visible to Go programs.
 </p>
 
+<p>
+The GCC 6 releases include a complete implementation of the Go 1.6.1
+user libraries.  The Go 1.6 runtime is not fully merged, but that
+should not be visible to Go programs.
+</p>
+
+<p>
+The GCC 7 releases are expected to include a complete implementation
+of the Go 1.8 user libraries.  As with earlier releases, the Go 1.8
+runtime is not fully merged, but that should not be visible to Go
+programs.
+</p>
+
 <h2 id="Source_code">Source code</h2>
 
 <p>
@@ -160,23 +173,6 @@
 make install
 </pre>
 
-<h3 id="Ubuntu">A note on Ubuntu</h3>
-
-<p>
-Current versions of Ubuntu and versions of GCC before 4.8 disagree on
-where system libraries and header files are found.  This is not a
-gccgo issue.  When building older versions of GCC, setting these
-environment variables while configuring and building gccgo may fix the
-problem.
-</p>
-
-<pre>
-LIBRARY_PATH=/usr/lib/x86_64-linux-gnu
-C_INCLUDE_PATH=/usr/include/x86_64-linux-gnu
-CPLUS_INCLUDE_PATH=/usr/include/x86_64-linux-gnu
-export LIBRARY_PATH C_INCLUDE_PATH CPLUS_INCLUDE_PATH
-</pre>
-
 <h2 id="Using_gccgo">Using gccgo</h2>
 
 <p>
@@ -364,12 +360,15 @@
 <h3 id="Types">Types</h3>
 
 <p>
-Basic types map directly: an <code>int</code> in Go is an <code>int</code>
-in C, an <code>int32</code> is an <code>int32_t</code>,
-etc.  Go <code>byte</code> is equivalent to C <code>unsigned
-char</code>.
-Pointers in Go are pointers in C. A Go <code>struct</code> is the same as C
-<code>struct</code> with the same fields and types.
+Basic types map directly: an <code>int32</code> in Go is
+an <code>int32_t</code> in C, an <code>int64</code> is
+an <code>int64_t</code>, etc.
+The Go type <code>int</code> is an integer that is the same size as a
+pointer, and as such corresponds to the C type <code>intptr_t</code>.
+Go <code>byte</code> is equivalent to C <code>unsigned char</code>.
+Pointers in Go are pointers in C.
+A Go <code>struct</code> is the same as C <code>struct</code> with the
+same fields and types.
 </p>
 
 <p>
@@ -380,7 +379,7 @@
 <pre>
 struct __go_string {
   const unsigned char *__data;
-  int __length;
+  intptr_t __length;
 };
 </pre>
 
@@ -400,8 +399,8 @@
 <pre>
 struct __go_slice {
   void *__values;
-  int __count;
-  int __capacity;
+  intptr_t __count;
+  intptr_t __capacity;
 };
 </pre>
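
As an aside on the mapping this hunk documents: the key claim is that Go's int is pointer-sized, which is why it corresponds to C's intptr_t rather than C's int. A minimal sketch (plain Go, compilable with gc or gccgo) that just prints the relevant sizes:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Go's int is pointer-sized, so it lines up with C's intptr_t,
	// not with C's int, under the gccgo mapping described above.
	fmt.Println("sizeof(int)     =", unsafe.Sizeof(int(0)))
	fmt.Println("sizeof(uintptr) =", unsafe.Sizeof(uintptr(0)))
	// A string is a (data pointer, length) pair, matching the
	// struct __go_string layout shown above.
	fmt.Println("sizeof(string)  =", unsafe.Sizeof(""))
}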
 
@@ -526,15 +525,3 @@
 guarantee that it will not change in the future. It is more useful as a
 starting point for real Go code than as a regular procedure.
 </p>
-
-<h2 id="RTEMS_Port">RTEMS Port</h2>
-<p>
-The gccgo compiler has been ported to <a href="http://www.rtems.com/">
-<code>RTEMS</code></a>. <code>RTEMS</code> is a real-time executive
-that provides a high performance environment for embedded applications
-on a range of processors and embedded hardware. The current gccgo
-port is for x86. The goal is to extend the port to most of the
-<a href="http://www.rtems.org/wiki/index.php/SupportedCPUs">
-architectures supported by <code>RTEMS</code></a>. For more information on the port,
-as well as instructions on how to install it, please see this
-<a href="http://www.rtems.org/wiki/index.php/GCCGoRTEMS"><code>RTEMS</code> Wiki page</a>.
diff --git a/doc/go1.8.html b/doc/go1.8.html
index 337f13d..cf4c669 100644
--- a/doc/go1.8.html
+++ b/doc/go1.8.html
@@ -15,12 +15,7 @@
 ul li { margin: 0.5em 0; }
 </style>
 
-<h2 id="introduction">DRAFT RELEASE NOTES - Introduction to Go 1.8</h2>
-
-<p><strong>
-Go 1.8 is not yet released. These are work-in-progress
-release notes. Go 1.8 is expected to be released in February 2017.
-</strong></p>
+<h2 id="introduction">Introduction to Go 1.8</h2>
 
 <p>
 The latest Go release, version 1.8, arrives six months after <a href="go1.7">Go 1.7</a>.
@@ -435,11 +430,11 @@
 <h3 id="plugin">Plugins</h3>
 
 <p>
-  Go now supports a “<code>plugin</code>” build mode for generating
-  plugins written in Go, and a
+  Go now provides early support for plugins with a “<code>plugin</code>”
+  build mode for generating plugins written in Go, and a
   new <a href="/pkg/plugin/"><code>plugin</code></a> package for
-  loading such plugins at run time. Plugin support is only currently
-  available on Linux.
+  loading such plugins at run time. Plugin support is currently only
+  available on Linux. Please report any issues.
 </p>
 
 <h2 id="runtime">Runtime</h2>
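
Since this hunk only gestures at the new API, a minimal loader sketch for reference; the file name greeter.so and the exported symbol Hello are placeholders, not part of this patch:

package main

import (
	"fmt"
	"plugin"
)

func main() {
	// The plugin itself would be built separately, e.g.:
	//   go build -buildmode=plugin -o greeter.so greeter.go
	// where greeter.go exports: func Hello() string
	p, err := plugin.Open("greeter.so")
	if err != nil {
		panic(err)
	}
	sym, err := p.Lookup("Hello")
	if err != nil {
		panic(err)
	}
	hello := sym.(func() string) // plugin.Symbol is an interface{}; assert the concrete type
	fmt.Println(hello())
}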
@@ -799,9 +794,9 @@
       hardware support for AES-GCM is present.
     </p>
 
-    <p> <!-- CL 27315 -->
+    <p> <!-- CL 27315, CL 35290 -->
       AES-128-CBC cipher suites with SHA-256 are also
-      now supported.
+      now supported, but disabled by default.
     </p>
 
   </dd>
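
These SHA-256 CBC suites stay off unless a config lists them explicitly. A hedged sketch of opting in (constant names as exported by crypto/tls in Go 1.8; certificate handling is elided):

package main

import "crypto/tls"

// newServerConfig opts in to one of the default-disabled
// AES-128-CBC-SHA256 suites; listing CipherSuites explicitly
// replaces the default suite selection.
func newServerConfig(cert tls.Certificate) *tls.Config {
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_RSA_WITH_AES_128_CBC_SHA256, // off by default unless listed
		},
	}
}

func main() {
	// Usage sketch: load a key pair with tls.LoadX509KeyPair and pass
	// newServerConfig(cert) to tls.Listen or http.Server.TLSConfig.
	_ = newServerConfig
}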
@@ -859,11 +854,12 @@
     <p>
       The <a href="/pkg/database/sql#IsolationLevel"><code>IsolationLevel</code></a>
       can now be set when starting a transaction by setting the isolation level
-      on the <code>Context</code> then passing that <code>Context</code> to
-      <a href="/pkg/database/sql#DB.BeginContext"><code>DB.BeginContext</code></a>.
+      on <a href="/pkg/database/sql#TxOptions.Isolation"><code>TxOptions.Isolation</code></a> and passing
+      it to <a href="/pkg/database/sql#DB.BeginTx"><code>DB.BeginTx</code></a>.
       An error will be returned if an isolation level is selected that the driver
       does not support. A read-only attribute may also be set on the transaction
-      with <a href="/pkg/database/sql/#ReadOnlyContext"><code>ReadOnlyContext</code></a>.
+      by setting <a href="/pkg/database/sql/#TxOptions.ReadOnly"><code>TxOptions.ReadOnly</code></a>
+      to true.
     </p>
     <p>
       Queries now expose the SQL column type information for drivers that support it.
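
A minimal sketch of the BeginTx/TxOptions usage described just above (the driver import and connection string are placeholders; any driver that understands isolation levels will do):

package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq" // placeholder driver
)

func main() {
	db, err := sql.Open("postgres", "dbname=example sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The isolation level and the read-only flag travel in TxOptions,
	// not on the Context as earlier Go 1.8 drafts had it.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelSerializable,
		ReadOnly:  true,
	})
	if err != nil {
		log.Fatal(err) // also returned if the driver rejects the isolation level
	}
	defer tx.Rollback()
	// ... run read-only queries via tx.QueryContext ...
}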
@@ -1645,6 +1641,17 @@
       and only the overall execution of the test binary would fail.
     </p>
 
+    <p><!-- CL 32455 -->
+      The signature of the
+      <a href="/pkg/testing/#MainStart"><code>MainStart</code></a>
+      function has changed, as allowed by the documentation. It is an
+      internal detail and not part of the Go 1 compatibility promise.
+      If you're not calling <code>MainStart</code> directly but see
+      errors, that likely means you set the
+      normally-empty <code>GOROOT</code> environment variable and it
+      doesn't match the version of your <code>go</code> command's binary.
+    </p>
+
   </dd>
 </dl>
 
diff --git a/doc/install-source.html b/doc/install-source.html
index 4bf0ba3..45c5bbb 100644
--- a/doc/install-source.html
+++ b/doc/install-source.html
@@ -147,6 +147,9 @@
 which contains the Go 1.4 source code plus accumulated fixes
 to keep the tools running on newer operating systems.
 (Go 1.4 was the last distribution in which the tool chain was written in C.)
+After unpacking the Go 1.4 source, <code>cd</code> to
+the <code>src</code> subdirectory and run <code>make.bash</code> (or,
+on Windows, <code>make.bat</code>).
 </p>
 
 <p>
@@ -218,7 +221,7 @@
 Change to the directory that will be its parent
 and make sure the <code>go</code> directory does not exist.
 Then clone the repository and check out the latest release tag
-(<code class="versionTag">go1.7.4</code>, for example):</p>
+(<code class="versionTag">go1.8</code>, for example):</p>
 
 <pre>
 $ git clone https://go.googlesource.com/go
@@ -406,7 +409,7 @@
 <a href="//groups.google.com/group/golang-announce">golang-announce</a>
 mailing list.
 Each announcement mentions the latest release tag, for instance,
-<code class="versionTag">go1.7.4</code>.
+<code class="versionTag">go1.8</code>.
 </p>
 
 <p>
diff --git a/misc/cgo/test/issue18146.go b/misc/cgo/test/issue18146.go
index ffb04e9..3c60046 100644
--- a/misc/cgo/test/issue18146.go
+++ b/misc/cgo/test/issue18146.go
@@ -73,7 +73,7 @@
 		}
 		runtime.GOMAXPROCS(threads)
 		argv := append(os.Args, "-test.run=NoSuchTestExists")
-		if err := syscall.Exec(os.Args[0], argv, nil); err != nil {
+		if err := syscall.Exec(os.Args[0], argv, os.Environ()); err != nil {
 			t.Fatal(err)
 		}
 	}
diff --git a/misc/cgo/test/sigaltstack.go b/misc/cgo/test/sigaltstack.go
index b16adc7..2b7a1ec 100644
--- a/misc/cgo/test/sigaltstack.go
+++ b/misc/cgo/test/sigaltstack.go
@@ -17,7 +17,7 @@
 static stack_t oss;
 static char signalStack[SIGSTKSZ];
 
-static void changeSignalStack() {
+static void changeSignalStack(void) {
 	stack_t ss;
 	memset(&ss, 0, sizeof ss);
 	ss.ss_sp = signalStack;
@@ -29,7 +29,7 @@
 	}
 }
 
-static void restoreSignalStack() {
+static void restoreSignalStack(void) {
 #if (defined(__x86_64__) || defined(__i386__)) && defined(__APPLE__)
 	// The Darwin C library enforces a minimum that the kernel does not.
 	// This is OK since we allocated this much space in mpreinit,
@@ -42,7 +42,7 @@
 	}
 }
 
-static int zero() {
+static int zero(void) {
 	return 0;
 }
 */
diff --git a/misc/cgo/testsanitizers/msan_shared.go b/misc/cgo/testsanitizers/msan_shared.go
new file mode 100644
index 0000000..966947c
--- /dev/null
+++ b/misc/cgo/testsanitizers/msan_shared.go
@@ -0,0 +1,12 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program segfaulted during libpreinit when built with -msan:
+// http://golang.org/issue/18707
+
+package main
+
+import "C"
+
+func main() {}
diff --git a/misc/cgo/testsanitizers/test.bash b/misc/cgo/testsanitizers/test.bash
index dfc6d38..4da8502 100755
--- a/misc/cgo/testsanitizers/test.bash
+++ b/misc/cgo/testsanitizers/test.bash
@@ -68,6 +68,25 @@
 
 status=0
 
+testmsanshared() {
+  goos=$(go env GOOS)
+  suffix="-installsuffix testsanitizers"
+  libext="so"
+  if [ "$goos" == "darwin" ]; then
+    libext="dylib"
+  fi
+  go build -msan -buildmode=c-shared $suffix -o ${TMPDIR}/libmsanshared.$libext msan_shared.go
+
+  echo 'int main() { return 0; }' > ${TMPDIR}/testmsanshared.c
+  $CC $(go env GOGCCFLAGS) -fsanitize=memory -o ${TMPDIR}/testmsanshared ${TMPDIR}/testmsanshared.c ${TMPDIR}/libmsanshared.$libext
+
+  if ! LD_LIBRARY_PATH=. ${TMPDIR}/testmsanshared; then
+    echo "FAIL: msan_shared"
+    status=1
+  fi
+  rm -f ${TMPDIR}/{testmsanshared,testmsanshared.c,libmsanshared.$libext}
+}
+
 if test "$msan" = "yes"; then
     if ! go build -msan std; then
 	echo "FAIL: build -msan std"
@@ -108,6 +127,8 @@
 	echo "FAIL: msan_fail"
 	status=1
     fi
+
+    testmsanshared
 fi
 
 if test "$tsan" = "yes"; then
diff --git a/misc/cgo/testshared/src/depBase/dep.go b/misc/cgo/testshared/src/depBase/dep.go
index a518b4e..9f86710 100644
--- a/misc/cgo/testshared/src/depBase/dep.go
+++ b/misc/cgo/testshared/src/depBase/dep.go
@@ -5,6 +5,8 @@
 	"reflect"
 )
 
+var SlicePtr interface{} = &[]int{}
+
 var V int = 1
 
 var HasMask []string = []string{"hi"}
diff --git a/misc/cgo/testshared/src/exe/exe.go b/misc/cgo/testshared/src/exe/exe.go
index 4337271..84302a8 100644
--- a/misc/cgo/testshared/src/exe/exe.go
+++ b/misc/cgo/testshared/src/exe/exe.go
@@ -19,6 +19,8 @@
 	return nil
 }
 
+var slicePtr interface{} = &[]int{}
+
 func main() {
 	defer depBase.ImplementedInAsm()
 	// This code below causes various go.itab.* symbols to be generated in
@@ -32,4 +34,11 @@
 	if reflect.TypeOf(F).Out(0) != reflect.TypeOf(c) {
 		panic("bad reflection results, see golang.org/issue/18252")
 	}
+
+	sp := reflect.New(reflect.TypeOf(slicePtr).Elem())
+	s := sp.Interface()
+
+	if reflect.TypeOf(s) != reflect.TypeOf(slicePtr) {
+		panic("bad reflection results, see golang.org/issue/18729")
+	}
 }
diff --git a/pkg/bootstrap/bin/asm b/pkg/bootstrap/bin/asm
deleted file mode 100755
index d0abacd..0000000
--- a/pkg/bootstrap/bin/asm
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/bin/compile b/pkg/bootstrap/bin/compile
deleted file mode 100755
index 177f2df..0000000
--- a/pkg/bootstrap/bin/compile
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/bin/link b/pkg/bootstrap/bin/link
deleted file mode 100755
index 1ac8976..0000000
--- a/pkg/bootstrap/bin/link
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/arch.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/arch.a
deleted file mode 100644
index d9cb358..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/arch.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/asm.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/asm.a
deleted file mode 100644
index 7474d64..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/asm.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/flags.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/flags.a
deleted file mode 100644
index 038730b..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/flags.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/lex.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/lex.a
deleted file mode 100644
index 4c0b304..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/asm/internal/lex.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/amd64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/amd64.a
deleted file mode 100644
index 282afb5..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/amd64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/arm.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/arm.a
deleted file mode 100644
index ce9a707..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/arm.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/arm64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/arm64.a
deleted file mode 100644
index abac77c..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/arm64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/gc.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/gc.a
deleted file mode 100644
index d48baa5..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/gc.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/mips.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/mips.a
deleted file mode 100644
index 9d717ac..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/mips.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/mips64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/mips64.a
deleted file mode 100644
index 952eff8..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/mips64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/ppc64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/ppc64.a
deleted file mode 100644
index 12810e8..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/ppc64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/s390x.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/s390x.a
deleted file mode 100644
index a43931a..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/s390x.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/ssa.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/ssa.a
deleted file mode 100644
index f1f32c9..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/ssa.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/syntax.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/syntax.a
deleted file mode 100644
index e7c34f8..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/syntax.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/x86.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/x86.a
deleted file mode 100644
index e15b45e..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/compile/internal/x86.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/bio.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/bio.a
deleted file mode 100644
index aa58733..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/bio.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/dwarf.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/dwarf.a
deleted file mode 100644
index 8c507e4..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/dwarf.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/gcprog.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/gcprog.a
deleted file mode 100644
index c4ca64f..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/gcprog.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj.a
deleted file mode 100644
index d2e8e20..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/arm.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/arm.a
deleted file mode 100644
index fe48cf1..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/arm.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/arm64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/arm64.a
deleted file mode 100644
index cfb6b98..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/arm64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/mips.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/mips.a
deleted file mode 100644
index 781b8f7..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/mips.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/ppc64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/ppc64.a
deleted file mode 100644
index 6622a49..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/ppc64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/s390x.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/s390x.a
deleted file mode 100644
index a64930d..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/s390x.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/x86.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/x86.a
deleted file mode 100644
index 07765d6..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/obj/x86.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/sys.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/sys.a
deleted file mode 100644
index 8e6112d..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/internal/sys.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/amd64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/amd64.a
deleted file mode 100644
index d45e03f..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/amd64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/arm.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/arm.a
deleted file mode 100644
index 9aceb6c..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/arm.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/arm64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/arm64.a
deleted file mode 100644
index bb050f9..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/arm64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/ld.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/ld.a
deleted file mode 100644
index cfc561f..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/ld.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/mips.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/mips.a
deleted file mode 100644
index 1518352..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/mips.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/mips64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/mips64.a
deleted file mode 100644
index 7084720..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/mips64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/ppc64.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/ppc64.a
deleted file mode 100644
index 0e55ad2..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/ppc64.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/s390x.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/s390x.a
deleted file mode 100644
index 2a04916..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/s390x.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/x86.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/x86.a
deleted file mode 100644
index 422a372..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/cmd/link/internal/x86.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/debug/pe.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/debug/pe.a
deleted file mode 100644
index 8c40350..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/debug/pe.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/pkg/linux_amd64/bootstrap/math/big.a b/pkg/bootstrap/pkg/linux_amd64/bootstrap/math/big.a
deleted file mode 100644
index 4e995cc..0000000
--- a/pkg/bootstrap/pkg/linux_amd64/bootstrap/math/big.a
+++ /dev/null
Binary files differ
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/doc.go b/pkg/bootstrap/src/bootstrap/cmd/asm/doc.go
deleted file mode 100644
index 845107f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/doc.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/doc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/doc.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Asm, typically invoked as ``go tool asm'', assembles the source file into an object
-file named for the basename of the argument source file with a .o suffix. The
-object file can then be combined with other objects into a package archive.
-
-Command Line
-
-Usage:
-
-	go tool asm [flags] file
-
-The specified file must be a Go assembly file.
-The same assembler is used for all target operating systems and architectures.
-The GOOS and GOARCH environment variables set the desired target.
-
-Flags:
-
-	-D value
-		predefined symbol with optional simple value -D=identifier=value;
-		can be set multiple times
-	-I value
-		include directory; can be set multiple times
-	-S	print assembly and machine code
-	-debug
-		dump instructions as they are parsed
-	-dynlink
-		support references to Go symbols defined in other shared libraries
-	-o string
-		output file; default foo.o for /a/b/c/foo.s
-	-shared
-		generate code that can be linked into a shared library
-	-trimpath string
-		remove prefix from recorded source file paths
-
-Input language:
-
-The assembler uses mostly the same syntax for all architectures,
-the main variation having to do with addressing modes. Input is
-run through a simplified C preprocessor that implements #include,
-#define, #ifdef/endif, but not #if or ##.
-
-For more information, see https://golang.org/doc/asm.
-*/
-package main
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/amd64.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/amd64.go
deleted file mode 100644
index 1c9d73d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/amd64.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/amd64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/amd64.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the
-// AMD64 instruction set, to minimize its interaction
-// with the core of the assembler.
-
-package arch
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-// IsAMD4OP reports whether the op (as defined by an amd64.A* constant) is
-// a 4-operand instruction.
-func IsAMD4OP(op obj.As) bool {
-	switch op {
-	case x86.AVPERM2F128,
-		x86.AVPALIGNR,
-		x86.AVPERM2I128,
-		x86.AVINSERTI128,
-		x86.AVPBLENDD:
-		return true
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arch.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arch.go
deleted file mode 100644
index ee0da2e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arch.go
+++ /dev/null
@@ -1,552 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/arch.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/arch.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arch
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm"
-	"bootstrap/cmd/internal/obj/arm64"
-	"bootstrap/cmd/internal/obj/mips"
-	"bootstrap/cmd/internal/obj/ppc64"
-	"bootstrap/cmd/internal/obj/s390x"
-	"bootstrap/cmd/internal/obj/x86"
-	"fmt"
-	"strings"
-)
-
-// Pseudo-registers whose names are the constant name without the leading R.
-const (
-	RFP = -(iota + 1)
-	RSB
-	RSP
-	RPC
-)
-
-// Arch wraps the link architecture object with more architecture-specific information.
-type Arch struct {
-	*obj.LinkArch
-	// Map of instruction names to enumeration.
-	Instructions map[string]obj.As
-	// Map of register names to enumeration.
-	Register map[string]int16
-	// Table of register prefix names. These are things like R for R(0) and SPR for SPR(268).
-	RegisterPrefix map[string]bool
-	// RegisterNumber converts R(10) into arm.REG_R10.
-	RegisterNumber func(string, int16) (int16, bool)
-	// Instruction is a jump.
-	IsJump func(word string) bool
-}
-
-// nilRegisterNumber is the register number function for architectures
-// that do not accept the R(N) notation. It always returns failure.
-func nilRegisterNumber(name string, n int16) (int16, bool) {
-	return 0, false
-}
-
-// Set configures the architecture specified by GOARCH and returns its representation.
-// It returns nil if GOARCH is not recognized.
-func Set(GOARCH string) *Arch {
-	switch GOARCH {
-	case "386":
-		return archX86(&x86.Link386)
-	case "amd64":
-		return archX86(&x86.Linkamd64)
-	case "amd64p32":
-		return archX86(&x86.Linkamd64p32)
-	case "arm":
-		return archArm()
-	case "arm64":
-		return archArm64()
-	case "mips":
-		a := archMips()
-		a.LinkArch = &mips.Linkmips
-		return a
-	case "mipsle":
-		a := archMips()
-		a.LinkArch = &mips.Linkmipsle
-		return a
-	case "mips64":
-		a := archMips64()
-		a.LinkArch = &mips.Linkmips64
-		return a
-	case "mips64le":
-		a := archMips64()
-		a.LinkArch = &mips.Linkmips64le
-		return a
-	case "ppc64":
-		a := archPPC64()
-		a.LinkArch = &ppc64.Linkppc64
-		return a
-	case "ppc64le":
-		a := archPPC64()
-		a.LinkArch = &ppc64.Linkppc64le
-		return a
-	case "s390x":
-		a := archS390x()
-		a.LinkArch = &s390x.Links390x
-		return a
-	}
-	return nil
-}
-
-func jumpX86(word string) bool {
-	return word[0] == 'J' || word == "CALL" || strings.HasPrefix(word, "LOOP") || word == "XBEGIN"
-}
-
-func archX86(linkArch *obj.LinkArch) *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	for i, s := range x86.Register {
-		register[s] = int16(i + x86.REG_AL)
-	}
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	// Register prefix not used on this architecture.
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range x86.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABaseAMD64
-		}
-	}
-	// Annoying aliases.
-	instructions["JA"] = x86.AJHI   /* alternate */
-	instructions["JAE"] = x86.AJCC  /* alternate */
-	instructions["JB"] = x86.AJCS   /* alternate */
-	instructions["JBE"] = x86.AJLS  /* alternate */
-	instructions["JC"] = x86.AJCS   /* alternate */
-	instructions["JCC"] = x86.AJCC  /* carry clear (CF = 0) */
-	instructions["JCS"] = x86.AJCS  /* carry set (CF = 1) */
-	instructions["JE"] = x86.AJEQ   /* alternate */
-	instructions["JEQ"] = x86.AJEQ  /* equal (ZF = 1) */
-	instructions["JG"] = x86.AJGT   /* alternate */
-	instructions["JGE"] = x86.AJGE  /* greater than or equal (signed) (SF = OF) */
-	instructions["JGT"] = x86.AJGT  /* greater than (signed) (ZF = 0 && SF = OF) */
-	instructions["JHI"] = x86.AJHI  /* higher (unsigned) (CF = 0 && ZF = 0) */
-	instructions["JHS"] = x86.AJCC  /* alternate */
-	instructions["JL"] = x86.AJLT   /* alternate */
-	instructions["JLE"] = x86.AJLE  /* less than or equal (signed) (ZF = 1 || SF != OF) */
-	instructions["JLO"] = x86.AJCS  /* alternate */
-	instructions["JLS"] = x86.AJLS  /* lower or same (unsigned) (CF = 1 || ZF = 1) */
-	instructions["JLT"] = x86.AJLT  /* less than (signed) (SF != OF) */
-	instructions["JMI"] = x86.AJMI  /* negative (minus) (SF = 1) */
-	instructions["JNA"] = x86.AJLS  /* alternate */
-	instructions["JNAE"] = x86.AJCS /* alternate */
-	instructions["JNB"] = x86.AJCC  /* alternate */
-	instructions["JNBE"] = x86.AJHI /* alternate */
-	instructions["JNC"] = x86.AJCC  /* alternate */
-	instructions["JNE"] = x86.AJNE  /* not equal (ZF = 0) */
-	instructions["JNG"] = x86.AJLE  /* alternate */
-	instructions["JNGE"] = x86.AJLT /* alternate */
-	instructions["JNL"] = x86.AJGE  /* alternate */
-	instructions["JNLE"] = x86.AJGT /* alternate */
-	instructions["JNO"] = x86.AJOC  /* alternate */
-	instructions["JNP"] = x86.AJPC  /* alternate */
-	instructions["JNS"] = x86.AJPL  /* alternate */
-	instructions["JNZ"] = x86.AJNE  /* alternate */
-	instructions["JO"] = x86.AJOS   /* alternate */
-	instructions["JOC"] = x86.AJOC  /* overflow clear (OF = 0) */
-	instructions["JOS"] = x86.AJOS  /* overflow set (OF = 1) */
-	instructions["JP"] = x86.AJPS   /* alternate */
-	instructions["JPC"] = x86.AJPC  /* parity clear (PF = 0) */
-	instructions["JPE"] = x86.AJPS  /* alternate */
-	instructions["JPL"] = x86.AJPL  /* non-negative (plus) (SF = 0) */
-	instructions["JPO"] = x86.AJPC  /* alternate */
-	instructions["JPS"] = x86.AJPS  /* parity set (PF = 1) */
-	instructions["JS"] = x86.AJMI   /* alternate */
-	instructions["JZ"] = x86.AJEQ   /* alternate */
-	instructions["MASKMOVDQU"] = x86.AMASKMOVOU
-	instructions["MOVD"] = x86.AMOVQ
-	instructions["MOVDQ2Q"] = x86.AMOVQ
-	instructions["MOVNTDQ"] = x86.AMOVNTO
-	instructions["MOVOA"] = x86.AMOVO
-	instructions["PSLLDQ"] = x86.APSLLO
-	instructions["PSRLDQ"] = x86.APSRLO
-	instructions["PADDD"] = x86.APADDL
-
-	return &Arch{
-		LinkArch:       linkArch,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: nil,
-		RegisterNumber: nilRegisterNumber,
-		IsJump:         jumpX86,
-	}
-}
-
-func archArm() *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	// Note that there is no list of names as there is for x86.
-	for i := arm.REG_R0; i < arm.REG_SPSR; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	// Avoid unintentionally clobbering g using R10.
-	delete(register, "R10")
-	register["g"] = arm.REG_R10
-	for i := 0; i < 16; i++ {
-		register[fmt.Sprintf("C%d", i)] = int16(i)
-	}
-
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	register["SP"] = RSP
-	registerPrefix := map[string]bool{
-		"F": true,
-		"R": true,
-	}
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range arm.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABaseARM
-		}
-	}
-	// Annoying aliases.
-	instructions["B"] = obj.AJMP
-	instructions["BL"] = obj.ACALL
-	// MCR differs from MRC by the way fields of the word are encoded.
-	// (Details in arm.go). Here we add the instruction so parse will find
-	// it, but give it an opcode number known only to us.
-	instructions["MCR"] = aMCR
-
-	return &Arch{
-		LinkArch:       &arm.Linkarm,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: registerPrefix,
-		RegisterNumber: armRegisterNumber,
-		IsJump:         jumpArm,
-	}
-}
-
-func archArm64() *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	// Note that there is no list of names as there is for 386 and amd64.
-	register[arm64.Rconv(arm64.REGSP)] = int16(arm64.REGSP)
-	for i := arm64.REG_R0; i <= arm64.REG_R31; i++ {
-		register[arm64.Rconv(i)] = int16(i)
-	}
-	for i := arm64.REG_F0; i <= arm64.REG_F31; i++ {
-		register[arm64.Rconv(i)] = int16(i)
-	}
-	for i := arm64.REG_V0; i <= arm64.REG_V31; i++ {
-		register[arm64.Rconv(i)] = int16(i)
-	}
-	register["LR"] = arm64.REGLINK
-	register["DAIF"] = arm64.REG_DAIF
-	register["NZCV"] = arm64.REG_NZCV
-	register["FPSR"] = arm64.REG_FPSR
-	register["FPCR"] = arm64.REG_FPCR
-	register["SPSR_EL1"] = arm64.REG_SPSR_EL1
-	register["ELR_EL1"] = arm64.REG_ELR_EL1
-	register["SPSR_EL2"] = arm64.REG_SPSR_EL2
-	register["ELR_EL2"] = arm64.REG_ELR_EL2
-	register["CurrentEL"] = arm64.REG_CurrentEL
-	register["SP_EL0"] = arm64.REG_SP_EL0
-	register["SPSel"] = arm64.REG_SPSel
-	register["DAIFSet"] = arm64.REG_DAIFSet
-	register["DAIFClr"] = arm64.REG_DAIFClr
-	// Conditional operators, like EQ, NE, etc.
-	register["EQ"] = arm64.COND_EQ
-	register["NE"] = arm64.COND_NE
-	register["HS"] = arm64.COND_HS
-	register["CS"] = arm64.COND_HS
-	register["LO"] = arm64.COND_LO
-	register["CC"] = arm64.COND_LO
-	register["MI"] = arm64.COND_MI
-	register["PL"] = arm64.COND_PL
-	register["VS"] = arm64.COND_VS
-	register["VC"] = arm64.COND_VC
-	register["HI"] = arm64.COND_HI
-	register["LS"] = arm64.COND_LS
-	register["GE"] = arm64.COND_GE
-	register["LT"] = arm64.COND_LT
-	register["GT"] = arm64.COND_GT
-	register["LE"] = arm64.COND_LE
-	register["AL"] = arm64.COND_AL
-	register["NV"] = arm64.COND_NV
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	register["SP"] = RSP
-	// Avoid unintentionally clobbering g using R28.
-	delete(register, "R28")
-	register["g"] = arm64.REG_R28
-	registerPrefix := map[string]bool{
-		"F": true,
-		"R": true,
-		"V": true,
-	}
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range arm64.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABaseARM64
-		}
-	}
-	// Annoying aliases.
-	instructions["B"] = arm64.AB
-	instructions["BL"] = arm64.ABL
-
-	return &Arch{
-		LinkArch:       &arm64.Linkarm64,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: registerPrefix,
-		RegisterNumber: arm64RegisterNumber,
-		IsJump:         jumpArm64,
-	}
-
-}
-
-func archPPC64() *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	// Note that there is no list of names as there is for x86.
-	for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := ppc64.REG_V0; i <= ppc64.REG_V31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := ppc64.REG_VS0; i <= ppc64.REG_VS63; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := ppc64.REG_CR0; i <= ppc64.REG_CR7; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	register["CR"] = ppc64.REG_CR
-	register["XER"] = ppc64.REG_XER
-	register["LR"] = ppc64.REG_LR
-	register["CTR"] = ppc64.REG_CTR
-	register["FPSCR"] = ppc64.REG_FPSCR
-	register["MSR"] = ppc64.REG_MSR
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	// Avoid unintentionally clobbering g using R30.
-	delete(register, "R30")
-	register["g"] = ppc64.REG_R30
-	registerPrefix := map[string]bool{
-		"CR":  true,
-		"F":   true,
-		"R":   true,
-		"SPR": true,
-	}
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range ppc64.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABasePPC64
-		}
-	}
-	// Annoying aliases.
-	instructions["BR"] = ppc64.ABR
-	instructions["BL"] = ppc64.ABL
-
-	return &Arch{
-		LinkArch:       &ppc64.Linkppc64,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: registerPrefix,
-		RegisterNumber: ppc64RegisterNumber,
-		IsJump:         jumpPPC64,
-	}
-}
-
-func archMips() *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	// Note that there is no list of names as there is for x86.
-	for i := mips.REG_R0; i <= mips.REG_R31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-
-	for i := mips.REG_F0; i <= mips.REG_F31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := mips.REG_M0; i <= mips.REG_M31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	register["HI"] = mips.REG_HI
-	register["LO"] = mips.REG_LO
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	// Avoid unintentionally clobbering g using R30.
-	delete(register, "R30")
-	register["g"] = mips.REG_R30
-
-	registerPrefix := map[string]bool{
-		"F":   true,
-		"FCR": true,
-		"M":   true,
-		"R":   true,
-	}
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range mips.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABaseMIPS
-		}
-	}
-	// Annoying alias.
-	instructions["JAL"] = mips.AJAL
-
-	return &Arch{
-		LinkArch:       &mips.Linkmipsle,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: registerPrefix,
-		RegisterNumber: mipsRegisterNumber,
-		IsJump:         jumpMIPS,
-	}
-}
-
-func archMips64() *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	// Note that there is no list of names as there is for x86.
-	for i := mips.REG_R0; i <= mips.REG_R31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := mips.REG_F0; i <= mips.REG_F31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := mips.REG_M0; i <= mips.REG_M31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	register["HI"] = mips.REG_HI
-	register["LO"] = mips.REG_LO
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	// Avoid unintentionally clobbering g using R30.
-	delete(register, "R30")
-	register["g"] = mips.REG_R30
-	// Avoid unintentionally clobbering RSB using R28.
-	delete(register, "R28")
-	register["RSB"] = mips.REG_R28
-	registerPrefix := map[string]bool{
-		"F":   true,
-		"FCR": true,
-		"M":   true,
-		"R":   true,
-	}
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range mips.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABaseMIPS
-		}
-	}
-	// Annoying alias.
-	instructions["JAL"] = mips.AJAL
-
-	return &Arch{
-		LinkArch:       &mips.Linkmips64,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: registerPrefix,
-		RegisterNumber: mipsRegisterNumber,
-		IsJump:         jumpMIPS,
-	}
-}
-
-func archS390x() *Arch {
-	register := make(map[string]int16)
-	// Create maps for easy lookup of instruction names etc.
-	// Note that there is no list of names as there is for x86.
-	for i := s390x.REG_R0; i <= s390x.REG_R15; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := s390x.REG_F0; i <= s390x.REG_F15; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := s390x.REG_V0; i <= s390x.REG_V31; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ {
-		register[obj.Rconv(i)] = int16(i)
-	}
-	register["LR"] = s390x.REG_LR
-	// Pseudo-registers.
-	register["SB"] = RSB
-	register["FP"] = RFP
-	register["PC"] = RPC
-	// Avoid unintentionally clobbering g using R13.
-	delete(register, "R13")
-	register["g"] = s390x.REG_R13
-	registerPrefix := map[string]bool{
-		"AR": true,
-		"F":  true,
-		"R":  true,
-	}
-
-	instructions := make(map[string]obj.As)
-	for i, s := range obj.Anames {
-		instructions[s] = obj.As(i)
-	}
-	for i, s := range s390x.Anames {
-		if obj.As(i) >= obj.A_ARCHSPECIFIC {
-			instructions[s] = obj.As(i) + obj.ABaseS390X
-		}
-	}
-	// Annoying aliases.
-	instructions["BR"] = s390x.ABR
-	instructions["BL"] = s390x.ABL
-
-	return &Arch{
-		LinkArch:       &s390x.Links390x,
-		Instructions:   instructions,
-		Register:       register,
-		RegisterPrefix: registerPrefix,
-		RegisterNumber: s390xRegisterNumber,
-		IsJump:         jumpS390x,
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arm.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arm.go
deleted file mode 100644
index a681892..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arm.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/arm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/arm.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the ARM
-// instruction set, to minimize its interaction with the core of the
-// assembler.
-
-package arch
-
-import (
-	"strings"
-
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm"
-)
-
-var armLS = map[string]uint8{
-	"U":  arm.C_UBIT,
-	"S":  arm.C_SBIT,
-	"W":  arm.C_WBIT,
-	"P":  arm.C_PBIT,
-	"PW": arm.C_WBIT | arm.C_PBIT,
-	"WP": arm.C_WBIT | arm.C_PBIT,
-}
-
-var armSCOND = map[string]uint8{
-	"EQ":  arm.C_SCOND_EQ,
-	"NE":  arm.C_SCOND_NE,
-	"CS":  arm.C_SCOND_HS,
-	"HS":  arm.C_SCOND_HS,
-	"CC":  arm.C_SCOND_LO,
-	"LO":  arm.C_SCOND_LO,
-	"MI":  arm.C_SCOND_MI,
-	"PL":  arm.C_SCOND_PL,
-	"VS":  arm.C_SCOND_VS,
-	"VC":  arm.C_SCOND_VC,
-	"HI":  arm.C_SCOND_HI,
-	"LS":  arm.C_SCOND_LS,
-	"GE":  arm.C_SCOND_GE,
-	"LT":  arm.C_SCOND_LT,
-	"GT":  arm.C_SCOND_GT,
-	"LE":  arm.C_SCOND_LE,
-	"AL":  arm.C_SCOND_NONE,
-	"U":   arm.C_UBIT,
-	"S":   arm.C_SBIT,
-	"W":   arm.C_WBIT,
-	"P":   arm.C_PBIT,
-	"PW":  arm.C_WBIT | arm.C_PBIT,
-	"WP":  arm.C_WBIT | arm.C_PBIT,
-	"F":   arm.C_FBIT,
-	"IBW": arm.C_WBIT | arm.C_PBIT | arm.C_UBIT,
-	"IAW": arm.C_WBIT | arm.C_UBIT,
-	"DBW": arm.C_WBIT | arm.C_PBIT,
-	"DAW": arm.C_WBIT,
-	"IB":  arm.C_PBIT | arm.C_UBIT,
-	"IA":  arm.C_UBIT,
-	"DB":  arm.C_PBIT,
-	"DA":  0,
-}
-
-var armJump = map[string]bool{
-	"B":    true,
-	"BL":   true,
-	"BX":   true,
-	"BEQ":  true,
-	"BNE":  true,
-	"BCS":  true,
-	"BHS":  true,
-	"BCC":  true,
-	"BLO":  true,
-	"BMI":  true,
-	"BPL":  true,
-	"BVS":  true,
-	"BVC":  true,
-	"BHI":  true,
-	"BLS":  true,
-	"BGE":  true,
-	"BLT":  true,
-	"BGT":  true,
-	"BLE":  true,
-	"CALL": true,
-	"JMP":  true,
-}
-
-func jumpArm(word string) bool {
-	return armJump[word]
-}
-
-// IsARMCMP reports whether the op (as defined by an arm.A* constant) is
-// one of the comparison instructions that require special handling.
-func IsARMCMP(op obj.As) bool {
-	switch op {
-	case arm.ACMN, arm.ACMP, arm.ATEQ, arm.ATST:
-		return true
-	}
-	return false
-}
-
-// IsARMSTREX reports whether the op (as defined by an arm.A* constant) is
-// one of the STREX-like instructions that require special handling.
-func IsARMSTREX(op obj.As) bool {
-	switch op {
-	case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
-		return true
-	}
-	return false
-}
-
-// MCR is not defined by the obj/arm; instead we define it privately here.
-// It is encoded as an MRC with a bit inside the instruction word,
-// passed to arch.ARMMRCOffset.
-const aMCR = arm.ALAST + 1
-
-// IsARMMRC reports whether the op (as defined by an arm.A* constant) is
-// MRC or MCR
-func IsARMMRC(op obj.As) bool {
-	switch op {
-	case arm.AMRC, aMCR: // Note: aMCR is defined in this package.
-		return true
-	}
-	return false
-}
-
-// IsARMFloatCmp reports whether the op is a floating comparison instruction.
-func IsARMFloatCmp(op obj.As) bool {
-	switch op {
-	case arm.ACMPF, arm.ACMPD:
-		return true
-	}
-	return false
-}
-
-// ARMMRCOffset implements the peculiar encoding of the MRC and MCR instructions.
-// The difference between MRC and MCR is represented by a bit high in the word, not
-// in the usual way by the opcode itself. Asm must use AMRC for both instructions, so
-// we return the opcode for MRC so that asm doesn't need to import obj/arm.
-func ARMMRCOffset(op obj.As, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 obj.As, ok bool) {
-	op1 := int64(0)
-	if op == arm.AMRC {
-		op1 = 1
-	}
-	bits, ok := ParseARMCondition(cond)
-	if !ok {
-		return
-	}
-	offset = (0xe << 24) | // opcode
-		(op1 << 20) | // MCR/MRC
-		((int64(bits) ^ arm.C_SCOND_XOR) << 28) | // scond
-		((x0 & 15) << 8) | //coprocessor number
-		((x1 & 7) << 21) | // coprocessor operation
-		((x2 & 15) << 12) | // ARM register
-		((x3 & 15) << 16) | // Crn
-		((x4 & 15) << 0) | // Crm
-		((x5 & 7) << 5) | // coprocessor information
-		(1 << 4) /* must be set */
-	return offset, arm.AMRC, true
-}
-
-// IsARMMULA reports whether the op (as defined by an arm.A* constant) is
-// MULA, MULAWT or MULAWB, the 4-operand instructions.
-func IsARMMULA(op obj.As) bool {
-	switch op {
-	case arm.AMULA, arm.AMULAWB, arm.AMULAWT:
-		return true
-	}
-	return false
-}
-
-var bcode = []obj.As{
-	arm.ABEQ,
-	arm.ABNE,
-	arm.ABCS,
-	arm.ABCC,
-	arm.ABMI,
-	arm.ABPL,
-	arm.ABVS,
-	arm.ABVC,
-	arm.ABHI,
-	arm.ABLS,
-	arm.ABGE,
-	arm.ABLT,
-	arm.ABGT,
-	arm.ABLE,
-	arm.AB,
-	obj.ANOP,
-}
-
-// ARMConditionCodes handles the special condition code situation for the ARM.
-// It returns a boolean to indicate success; failure means cond was unrecognized.
-func ARMConditionCodes(prog *obj.Prog, cond string) bool {
-	if cond == "" {
-		return true
-	}
-	bits, ok := ParseARMCondition(cond)
-	if !ok {
-		return false
-	}
-	/* hack to make B.NE etc. work: turn it into the corresponding conditional */
-	if prog.As == arm.AB {
-		prog.As = bcode[(bits^arm.C_SCOND_XOR)&0xf]
-		bits = (bits &^ 0xf) | arm.C_SCOND_NONE
-	}
-	prog.Scond = bits
-	return true
-}
-
-// ParseARMCondition parses the conditions attached to an ARM instruction.
-// The input is a single string consisting of period-separated condition
-// codes, such as ".P.W". An initial period is ignored.
-func ParseARMCondition(cond string) (uint8, bool) {
-	return parseARMCondition(cond, armLS, armSCOND)
-}
-
-func parseARMCondition(cond string, ls, scond map[string]uint8) (uint8, bool) {
-	if strings.HasPrefix(cond, ".") {
-		cond = cond[1:]
-	}
-	if cond == "" {
-		return arm.C_SCOND_NONE, true
-	}
-	names := strings.Split(cond, ".")
-	bits := uint8(0)
-	for _, name := range names {
-		if b, present := ls[name]; present {
-			bits |= b
-			continue
-		}
-		if b, present := scond[name]; present {
-			bits = (bits &^ arm.C_SCOND) | b
-			continue
-		}
-		return 0, false
-	}
-	return bits, true
-}
-
-func armRegisterNumber(name string, n int16) (int16, bool) {
-	if n < 0 || 15 < n {
-		return 0, false
-	}
-	switch name {
-	case "R":
-		return arm.REG_R0 + n, true
-	case "F":
-		return arm.REG_F0 + n, true
-	}
-	return 0, false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arm64.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arm64.go
deleted file mode 100644
index d8f5d92..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/arm64.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/arm64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/arm64.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the ARM64
-// instruction set, to minimize its interaction with the core of the
-// assembler.
-
-package arch
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm64"
-)
-
-var arm64LS = map[string]uint8{
-	"P": arm64.C_XPOST,
-	"W": arm64.C_XPRE,
-}
-
-var arm64Jump = map[string]bool{
-	"B":     true,
-	"BL":    true,
-	"BEQ":   true,
-	"BNE":   true,
-	"BCS":   true,
-	"BHS":   true,
-	"BCC":   true,
-	"BLO":   true,
-	"BMI":   true,
-	"BPL":   true,
-	"BVS":   true,
-	"BVC":   true,
-	"BHI":   true,
-	"BLS":   true,
-	"BGE":   true,
-	"BLT":   true,
-	"BGT":   true,
-	"BLE":   true,
-	"CALL":  true,
-	"CBZ":   true,
-	"CBZW":  true,
-	"CBNZ":  true,
-	"CBNZW": true,
-	"JMP":   true,
-}
-
-func jumpArm64(word string) bool {
-	return arm64Jump[word]
-}
-
-// IsARM64CMP reports whether the op (as defined by an arm.A* constant) is
-// one of the comparison instructions that require special handling.
-func IsARM64CMP(op obj.As) bool {
-	switch op {
-	case arm64.ACMN, arm64.ACMP, arm64.ATST,
-		arm64.ACMNW, arm64.ACMPW, arm64.ATSTW:
-		return true
-	}
-	return false
-}
-
-// IsARM64STLXR reports whether the op (as defined by an arm64.A*
-// constant) is one of the STLXR-like instructions that require special
-// handling.
-func IsARM64STLXR(op obj.As) bool {
-	switch op {
-	case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR:
-		return true
-	}
-	return false
-}
-
-// ARM64Suffix handles the special suffix for the ARM64.
-// It returns a boolean to indicate success; failure means
-// cond was unrecognized.
-func ARM64Suffix(prog *obj.Prog, cond string) bool {
-	if cond == "" {
-		return true
-	}
-	bits, ok := ParseARM64Suffix(cond)
-	if !ok {
-		return false
-	}
-	prog.Scond = bits
-	return true
-}
-
-// ParseARM64Suffix parses the suffix attached to an ARM64 instruction.
-// The input is a single string consisting of period-separated condition
-// codes, such as ".P.W". An initial period is ignored.
-func ParseARM64Suffix(cond string) (uint8, bool) {
-	if cond == "" {
-		return 0, true
-	}
-	return parseARMCondition(cond, arm64LS, nil)
-}
-
-func arm64RegisterNumber(name string, n int16) (int16, bool) {
-	switch name {
-	case "F":
-		if 0 <= n && n <= 31 {
-			return arm64.REG_F0 + n, true
-		}
-	case "R":
-		if 0 <= n && n <= 30 { // not 31
-			return arm64.REG_R0 + n, true
-		}
-	case "V":
-		if 0 <= n && n <= 31 {
-			return arm64.REG_V0 + n, true
-		}
-	}
-	return 0, false
-}
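
The same register-name lookup pattern repeats in the arm64 file above and in the mips, ppc64, and s390x files removed below: validate the index against the register family's range, then offset it from that family's base constant. A small self-contained sketch of that pattern, using invented base values rather than the real REG_* constants:

package main

import "fmt"

// regClass describes one register family: its base value in the
// architecture's internal numbering and the highest legal index.
// The base values here are illustrative only.
type regClass struct {
	base int16
	max  int16
}

var classes = map[string]regClass{
	"R": {base: 1000, max: 30}, // R31 deliberately excluded, as in arm64RegisterNumber
	"F": {base: 2000, max: 31},
	"V": {base: 3000, max: 31},
}

// registerNumber mirrors the shape of the *RegisterNumber helpers:
// check the index against the family's range, then offset from the base.
func registerNumber(name string, n int16) (int16, bool) {
	c, ok := classes[name]
	if !ok || n < 0 || n > c.max {
		return 0, false
	}
	return c.base + n, true
}

func main() {
	fmt.Println(registerNumber("R", 29)) // 1029 true
	fmt.Println(registerNumber("R", 31)) // 0 false
}
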
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/mips.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/mips.go
deleted file mode 100644
index eec0108..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/mips.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/mips.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/mips.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the
-// MIPS (MIPS64) instruction set, to minimize its interaction
-// with the core of the assembler.
-
-package arch
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-func jumpMIPS(word string) bool {
-	switch word {
-	case "BEQ", "BFPF", "BFPT", "BGEZ", "BGEZAL", "BGTZ", "BLEZ", "BLTZ", "BLTZAL", "BNE", "JMP", "JAL", "CALL":
-		return true
-	}
-	return false
-}
-
-// IsMIPSCMP reports whether the op (as defined by a mips.A* constant) is
-// one of the CMP instructions that require special handling.
-func IsMIPSCMP(op obj.As) bool {
-	switch op {
-	case mips.ACMPEQF, mips.ACMPEQD, mips.ACMPGEF, mips.ACMPGED,
-		mips.ACMPGTF, mips.ACMPGTD:
-		return true
-	}
-	return false
-}
-
-// IsMIPSMUL reports whether the op (as defined by a mips.A* constant) is
-// one of the MUL/DIV/REM instructions that require special handling.
-func IsMIPSMUL(op obj.As) bool {
-	switch op {
-	case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU,
-		mips.ADIV, mips.ADIVU, mips.ADIVV, mips.ADIVVU,
-		mips.AREM, mips.AREMU, mips.AREMV, mips.AREMVU:
-		return true
-	}
-	return false
-}
-
-func mipsRegisterNumber(name string, n int16) (int16, bool) {
-	switch name {
-	case "F":
-		if 0 <= n && n <= 31 {
-			return mips.REG_F0 + n, true
-		}
-	case "FCR":
-		if 0 <= n && n <= 31 {
-			return mips.REG_FCR0 + n, true
-		}
-	case "M":
-		if 0 <= n && n <= 31 {
-			return mips.REG_M0 + n, true
-		}
-	case "R":
-		if 0 <= n && n <= 31 {
-			return mips.REG_R0 + n, true
-		}
-	}
-	return 0, false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/ppc64.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/ppc64.go
deleted file mode 100644
index 6b6a07b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/ppc64.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/ppc64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/ppc64.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the
-// 64-bit PowerPC (PPC64) instruction set, to minimize its interaction
-// with the core of the assembler.
-
-package arch
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/ppc64"
-)
-
-func jumpPPC64(word string) bool {
-	switch word {
-	case "BC", "BCL", "BEQ", "BGE", "BGT", "BL", "BLE", "BLT", "BNE", "BR", "BVC", "BVS", "CALL", "JMP":
-		return true
-	}
-	return false
-}
-
-// IsPPC64RLD reports whether the op (as defined by a ppc64.A* constant) is
-// one of the RLD-like instructions that require special handling.
-// The FMADD-like instructions behave similarly.
-func IsPPC64RLD(op obj.As) bool {
-	switch op {
-	case ppc64.ARLDC, ppc64.ARLDCCC, ppc64.ARLDCL, ppc64.ARLDCLCC,
-		ppc64.ARLDCR, ppc64.ARLDCRCC, ppc64.ARLDMI, ppc64.ARLDMICC,
-		ppc64.ARLWMI, ppc64.ARLWMICC, ppc64.ARLWNM, ppc64.ARLWNMCC:
-		return true
-	case ppc64.AFMADD, ppc64.AFMADDCC, ppc64.AFMADDS, ppc64.AFMADDSCC,
-		ppc64.AFMSUB, ppc64.AFMSUBCC, ppc64.AFMSUBS, ppc64.AFMSUBSCC,
-		ppc64.AFNMADD, ppc64.AFNMADDCC, ppc64.AFNMADDS, ppc64.AFNMADDSCC,
-		ppc64.AFNMSUB, ppc64.AFNMSUBCC, ppc64.AFNMSUBS, ppc64.AFNMSUBSCC:
-		return true
-	}
-	return false
-}
-
-func IsPPC64ISEL(op obj.As) bool {
-	return op == ppc64.AISEL
-}
-
-// IsPPC64CMP reports whether the op (as defined by a ppc64.A* constant) is
-// one of the CMP instructions that require special handling.
-func IsPPC64CMP(op obj.As) bool {
-	switch op {
-	case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU:
-		return true
-	}
-	return false
-}
-
-// IsPPC64NEG reports whether the op (as defined by a ppc64.A* constant) is
-// one of the NEG-like instructions that require special handling.
-func IsPPC64NEG(op obj.As) bool {
-	switch op {
-	case ppc64.AADDMECC, ppc64.AADDMEVCC, ppc64.AADDMEV, ppc64.AADDME,
-		ppc64.AADDZECC, ppc64.AADDZEVCC, ppc64.AADDZEV, ppc64.AADDZE,
-		ppc64.ACNTLZDCC, ppc64.ACNTLZD, ppc64.ACNTLZWCC, ppc64.ACNTLZW,
-		ppc64.AEXTSBCC, ppc64.AEXTSB, ppc64.AEXTSHCC, ppc64.AEXTSH,
-		ppc64.AEXTSWCC, ppc64.AEXTSW, ppc64.ANEGCC, ppc64.ANEGVCC,
-		ppc64.ANEGV, ppc64.ANEG, ppc64.ASLBMFEE, ppc64.ASLBMFEV,
-		ppc64.ASLBMTE, ppc64.ASUBMECC, ppc64.ASUBMEVCC, ppc64.ASUBMEV,
-		ppc64.ASUBME, ppc64.ASUBZECC, ppc64.ASUBZEVCC, ppc64.ASUBZEV,
-		ppc64.ASUBZE:
-		return true
-	}
-	return false
-}
-
-func ppc64RegisterNumber(name string, n int16) (int16, bool) {
-	switch name {
-	case "CR":
-		if 0 <= n && n <= 7 {
-			return ppc64.REG_CR0 + n, true
-		}
-	case "VS":
-		if 0 <= n && n <= 63 {
-			return ppc64.REG_VS0 + n, true
-		}
-	case "V":
-		if 0 <= n && n <= 31 {
-			return ppc64.REG_V0 + n, true
-		}
-	case "F":
-		if 0 <= n && n <= 31 {
-			return ppc64.REG_F0 + n, true
-		}
-	case "R":
-		if 0 <= n && n <= 31 {
-			return ppc64.REG_R0 + n, true
-		}
-	case "SPR":
-		if 0 <= n && n <= 1024 {
-			return ppc64.REG_SPR0 + n, true
-		}
-	}
-	return 0, false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/s390x.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/s390x.go
deleted file mode 100644
index 6c7104f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/arch/s390x.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/s390x.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/arch/s390x.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file encapsulates some of the odd characteristics of the
-// s390x instruction set, to minimize its interaction
-// with the core of the assembler.
-
-package arch
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/s390x"
-)
-
-func jumpS390x(word string) bool {
-	switch word {
-	case "BC",
-		"BCL",
-		"BEQ",
-		"BGE",
-		"BGT",
-		"BL",
-		"BLE",
-		"BLEU",
-		"BLT",
-		"BLTU",
-		"BNE",
-		"BR",
-		"BVC",
-		"BVS",
-		"CMPBEQ",
-		"CMPBGE",
-		"CMPBGT",
-		"CMPBLE",
-		"CMPBLT",
-		"CMPBNE",
-		"CMPUBEQ",
-		"CMPUBGE",
-		"CMPUBGT",
-		"CMPUBLE",
-		"CMPUBLT",
-		"CMPUBNE",
-		"CALL",
-		"JMP":
-		return true
-	}
-	return false
-}
-
-// IsS390xRLD reports whether the op (as defined by an s390x.A* constant) is
-// one of the RLD-like instructions that require special handling.
-// The FMADD-like instructions behave similarly.
-func IsS390xRLD(op obj.As) bool {
-	switch op {
-	case s390x.AFMADD,
-		s390x.AFMADDS,
-		s390x.AFMSUB,
-		s390x.AFMSUBS,
-		s390x.AFNMADD,
-		s390x.AFNMADDS,
-		s390x.AFNMSUB,
-		s390x.AFNMSUBS:
-		return true
-	}
-	return false
-}
-
-// IsS390xCMP reports whether the op (as defined by an s390x.A* constant) is
-// one of the CMP instructions that require special handling.
-func IsS390xCMP(op obj.As) bool {
-	switch op {
-	case s390x.ACMP, s390x.ACMPU, s390x.ACMPW, s390x.ACMPWU:
-		return true
-	}
-	return false
-}
-
-// IsS390xNEG reports whether the op (as defined by an s390x.A* constant) is
-// one of the NEG-like instructions that require special handling.
-func IsS390xNEG(op obj.As) bool {
-	switch op {
-	case s390x.ANEG, s390x.ANEGW:
-		return true
-	}
-	return false
-}
-
-// IsS390xWithLength reports whether the op (as defined by an s390x.A* constant)
-// refers to an instruction which takes a length as its first argument.
-func IsS390xWithLength(op obj.As) bool {
-	switch op {
-	case s390x.AMVC, s390x.ACLC, s390x.AXC, s390x.AOC, s390x.ANC:
-		return true
-	case s390x.AVLL, s390x.AVSTL:
-		return true
-	}
-	return false
-}
-
-// IsS390xWithIndex reports whether the op (as defined by an s390x.A* constant)
-// refers to an instruction which takes an index as its first argument.
-func IsS390xWithIndex(op obj.As) bool {
-	switch op {
-	case s390x.AVSCEG, s390x.AVSCEF, s390x.AVGEG, s390x.AVGEF:
-		return true
-	case s390x.AVGMG, s390x.AVGMF, s390x.AVGMH, s390x.AVGMB:
-		return true
-	case s390x.AVLEIG, s390x.AVLEIF, s390x.AVLEIH, s390x.AVLEIB:
-		return true
-	case s390x.AVLEG, s390x.AVLEF, s390x.AVLEH, s390x.AVLEB:
-		return true
-	case s390x.AVSTEG, s390x.AVSTEF, s390x.AVSTEH, s390x.AVSTEB:
-		return true
-	case s390x.AVPDI:
-		return true
-	}
-	return false
-}
-
-func s390xRegisterNumber(name string, n int16) (int16, bool) {
-	switch name {
-	case "AR":
-		if 0 <= n && n <= 15 {
-			return s390x.REG_AR0 + n, true
-		}
-	case "F":
-		if 0 <= n && n <= 15 {
-			return s390x.REG_F0 + n, true
-		}
-	case "R":
-		if 0 <= n && n <= 15 {
-			return s390x.REG_R0 + n, true
-		}
-	case "V":
-		if 0 <= n && n <= 31 {
-			return s390x.REG_V0 + n, true
-		}
-	}
-	return 0, false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/asm.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/asm.go
deleted file mode 100644
index da557dc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/asm.go
+++ /dev/null
@@ -1,813 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/asm.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asm
-
-import (
-	"bytes"
-	"fmt"
-	"text/scanner"
-
-	"bootstrap/cmd/asm/internal/arch"
-	"bootstrap/cmd/asm/internal/flags"
-	"bootstrap/cmd/asm/internal/lex"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-)
-
-// TODO: configure the architecture
-
-var testOut *bytes.Buffer // Gathers output when testing.
-
-// append adds the Prog to the end of the program-thus-far.
-// If doLabel is set, it also defines the labels collected for this Prog.
-func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
-	if cond != "" {
-		switch p.arch.Family {
-		case sys.ARM:
-			if !arch.ARMConditionCodes(prog, cond) {
-				p.errorf("unrecognized condition code .%q", cond)
-				return
-			}
-
-		case sys.ARM64:
-			if !arch.ARM64Suffix(prog, cond) {
-				p.errorf("unrecognized suffix .%q", cond)
-				return
-			}
-
-		default:
-			p.errorf("unrecognized suffix .%q", cond)
-			return
-		}
-	}
-	if p.firstProg == nil {
-		p.firstProg = prog
-	} else {
-		p.lastProg.Link = prog
-	}
-	p.lastProg = prog
-	if doLabel {
-		p.pc++
-		for _, label := range p.pendingLabels {
-			if p.labels[label] != nil {
-				p.errorf("label %q multiply defined", label)
-				return
-			}
-			p.labels[label] = prog
-		}
-		p.pendingLabels = p.pendingLabels[0:0]
-	}
-	prog.Pc = p.pc
-	if *flags.Debug {
-		fmt.Println(p.histLineNum, prog)
-	}
-	if testOut != nil {
-		fmt.Fprintln(testOut, prog)
-	}
-}
-
-// validSymbol checks that addr represents a valid name for a pseudo-op.
-func (p *Parser) validSymbol(pseudo string, addr *obj.Addr, offsetOk bool) bool {
-	if addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 {
-		p.errorf("%s symbol %q must be a symbol(SB)", pseudo, symbolName(addr))
-		return false
-	}
-	if !offsetOk && addr.Offset != 0 {
-		p.errorf("%s symbol %q must not be offset from SB", pseudo, symbolName(addr))
-		return false
-	}
-	return true
-}
-
-// evalInteger evaluates an integer constant for a pseudo-op.
-func (p *Parser) evalInteger(pseudo string, operands []lex.Token) int64 {
-	addr := p.address(operands)
-	return p.getConstantPseudo(pseudo, &addr)
-}
-
-// validImmediate checks that addr represents an immediate constant.
-func (p *Parser) validImmediate(pseudo string, addr *obj.Addr) bool {
-	if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
-		p.errorf("%s: expected immediate constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
-		return false
-	}
-	return true
-}
-
-// asmText assembles a TEXT pseudo-op.
-// TEXT runtime·sigtramp(SB),4,$0-0
-func (p *Parser) asmText(word string, operands [][]lex.Token) {
-	if len(operands) != 2 && len(operands) != 3 {
-		p.errorf("expect two or three operands for TEXT")
-		return
-	}
-
-	// Labels are function scoped. Patch existing labels and
-	// create a new label space for this TEXT.
-	p.patch()
-	p.labels = make(map[string]*obj.Prog)
-
-	// Operand 0 is the symbol name in the form foo(SB).
-	// That means symbol plus indirect on SB and no offset.
-	nameAddr := p.address(operands[0])
-	if !p.validSymbol("TEXT", &nameAddr, false) {
-		return
-	}
-	name := symbolName(&nameAddr)
-	next := 1
-
-	// Next operand is the optional text flag, a literal integer.
-	var flag = int64(0)
-	if len(operands) == 3 {
-		flag = p.evalInteger("TEXT", operands[1])
-		next++
-	}
-
-	// Next operand is the frame and arg size.
-	// Bizarre syntax: $frameSize-argSize is two words, not subtraction.
-	// Both frameSize and argSize must be simple integers; only frameSize
-	// can be negative.
-	// The "-argSize" may be missing; if so, set it to obj.ArgsSizeUnknown.
-	// Parse left to right.
-	op := operands[next]
-	if len(op) < 2 || op[0].ScanToken != '$' {
-		p.errorf("TEXT %s: frame size must be an immediate constant", name)
-		return
-	}
-	op = op[1:]
-	negative := false
-	if op[0].ScanToken == '-' {
-		negative = true
-		op = op[1:]
-	}
-	if len(op) == 0 || op[0].ScanToken != scanner.Int {
-		p.errorf("TEXT %s: frame size must be an immediate constant", name)
-		return
-	}
-	frameSize := p.positiveAtoi(op[0].String())
-	if negative {
-		frameSize = -frameSize
-	}
-	op = op[1:]
-	argSize := int64(obj.ArgsSizeUnknown)
-	if len(op) > 0 {
-		// There is an argument size. It must be a minus sign followed by a non-negative integer literal.
-		if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int {
-			p.errorf("TEXT %s: argument size must be of form -integer", name)
-			return
-		}
-		argSize = p.positiveAtoi(op[1].String())
-	}
-	prog := &obj.Prog{
-		Ctxt:   p.ctxt,
-		As:     obj.ATEXT,
-		Lineno: p.histLineNum,
-		From:   nameAddr,
-		From3: &obj.Addr{
-			Type:   obj.TYPE_CONST,
-			Offset: flag,
-		},
-		To: obj.Addr{
-			Type:   obj.TYPE_TEXTSIZE,
-			Offset: frameSize,
-			// Argsize set below.
-		},
-	}
-	prog.To.Val = int32(argSize)
-
-	p.append(prog, "", true)
-}
-
-// asmData assembles a DATA pseudo-op.
-// DATA masks<>+0x00(SB)/4, $0x00000000
-func (p *Parser) asmData(word string, operands [][]lex.Token) {
-	if len(operands) != 2 {
-		p.errorf("expect two operands for DATA")
-		return
-	}
-
-	// Operand 0 has the general form foo<>+0x04(SB)/4.
-	op := operands[0]
-	n := len(op)
-	if n < 3 || op[n-2].ScanToken != '/' || op[n-1].ScanToken != scanner.Int {
-		p.errorf("expect /size for DATA argument")
-		return
-	}
-	scale := p.parseScale(op[n-1].String())
-	op = op[:n-2]
-	nameAddr := p.address(op)
-	if !p.validSymbol("DATA", &nameAddr, true) {
-		return
-	}
-	name := symbolName(&nameAddr)
-
-	// Operand 1 is an immediate constant or address.
-	valueAddr := p.address(operands[1])
-	switch valueAddr.Type {
-	case obj.TYPE_CONST, obj.TYPE_FCONST, obj.TYPE_SCONST, obj.TYPE_ADDR:
-		// OK
-	default:
-		p.errorf("DATA value must be an immediate constant or address")
-		return
-	}
-
-	// The addresses must not overlap. Easiest test: require monotonicity.
-	if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr {
-		p.errorf("overlapping DATA entry for %s", name)
-		return
-	}
-	p.dataAddr[name] = nameAddr.Offset + int64(scale)
-
-	switch valueAddr.Type {
-	case obj.TYPE_CONST:
-		nameAddr.Sym.WriteInt(p.ctxt, nameAddr.Offset, int(scale), valueAddr.Offset)
-	case obj.TYPE_FCONST:
-		switch scale {
-		case 4:
-			nameAddr.Sym.WriteFloat32(p.ctxt, nameAddr.Offset, float32(valueAddr.Val.(float64)))
-		case 8:
-			nameAddr.Sym.WriteFloat64(p.ctxt, nameAddr.Offset, valueAddr.Val.(float64))
-		default:
-			panic("bad float scale")
-		}
-	case obj.TYPE_SCONST:
-		nameAddr.Sym.WriteString(p.ctxt, nameAddr.Offset, int(scale), valueAddr.Val.(string))
-	case obj.TYPE_ADDR:
-		nameAddr.Sym.WriteAddr(p.ctxt, nameAddr.Offset, int(scale), valueAddr.Sym, valueAddr.Offset)
-	}
-}
-
-// asmGlobl assembles a GLOBL pseudo-op.
-// GLOBL shifts<>(SB),8,$256
-// GLOBL shifts<>(SB),$256
-func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
-	if len(operands) != 2 && len(operands) != 3 {
-		p.errorf("expect two or three operands for GLOBL")
-		return
-	}
-
-	// Operand 0 has the general form foo<>+0x04(SB).
-	nameAddr := p.address(operands[0])
-	if !p.validSymbol("GLOBL", &nameAddr, false) {
-		return
-	}
-	next := 1
-
-	// Next operand is the optional flag, a literal integer.
-	var flag = int64(0)
-	if len(operands) == 3 {
-		flag = p.evalInteger("GLOBL", operands[1])
-		next++
-	}
-
-	// Final operand is an immediate constant.
-	addr := p.address(operands[next])
-	if !p.validImmediate("GLOBL", &addr) {
-		return
-	}
-
-	// log.Printf("GLOBL %s %d, $%d", name, flag, size)
-	p.ctxt.Globl(nameAddr.Sym, addr.Offset, int(flag))
-}
-
-// asmPCData assembles a PCDATA pseudo-op.
-// PCDATA $2, $705
-func (p *Parser) asmPCData(word string, operands [][]lex.Token) {
-	if len(operands) != 2 {
-		p.errorf("expect two operands for PCDATA")
-		return
-	}
-
-	// Operand 0 must be an immediate constant.
-	key := p.address(operands[0])
-	if !p.validImmediate("PCDATA", &key) {
-		return
-	}
-
-	// Operand 1 must be an immediate constant.
-	value := p.address(operands[1])
-	if !p.validImmediate("PCDATA", &value) {
-		return
-	}
-
-	// log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset)
-	prog := &obj.Prog{
-		Ctxt:   p.ctxt,
-		As:     obj.APCDATA,
-		Lineno: p.histLineNum,
-		From:   key,
-		To:     value,
-	}
-	p.append(prog, "", true)
-}
-
-// asmFuncData assembles a FUNCDATA pseudo-op.
-// FUNCDATA $1, funcdata<>+4(SB)
-func (p *Parser) asmFuncData(word string, operands [][]lex.Token) {
-	if len(operands) != 2 {
-		p.errorf("expect two operands for FUNCDATA")
-		return
-	}
-
-	// Operand 0 must be an immediate constant.
-	valueAddr := p.address(operands[0])
-	if !p.validImmediate("FUNCDATA", &valueAddr) {
-		return
-	}
-
-	// Operand 1 is a symbol name in the form foo(SB).
-	nameAddr := p.address(operands[1])
-	if !p.validSymbol("FUNCDATA", &nameAddr, true) {
-		return
-	}
-
-	prog := &obj.Prog{
-		Ctxt:   p.ctxt,
-		As:     obj.AFUNCDATA,
-		Lineno: p.histLineNum,
-		From:   valueAddr,
-		To:     nameAddr,
-	}
-	p.append(prog, "", true)
-}
-
-// asmJump assembles a jump instruction.
-// JMP	R1
-// JMP	exit
-// JMP	3(PC)
-func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
-	var target *obj.Addr
-	prog := &obj.Prog{
-		Ctxt:   p.ctxt,
-		Lineno: p.histLineNum,
-		As:     op,
-	}
-	switch len(a) {
-	case 1:
-		target = &a[0]
-	case 2:
-		// Special 2-operand jumps.
-		target = &a[1]
-		prog.From = a[0]
-	case 3:
-		if p.arch.Family == sys.PPC64 {
-			// Special 3-operand jumps.
-			// First two must be constants; a[1] is a register number.
-			target = &a[2]
-			prog.From = obj.Addr{
-				Type:   obj.TYPE_CONST,
-				Offset: p.getConstant(prog, op, &a[0]),
-			}
-			reg := int16(p.getConstant(prog, op, &a[1]))
-			reg, ok := p.arch.RegisterNumber("R", reg)
-			if !ok {
-				p.errorf("bad register number %d", reg)
-				return
-			}
-			prog.Reg = reg
-			break
-		}
-		if p.arch.Family == sys.MIPS || p.arch.Family == sys.MIPS64 {
-			// 3-operand jumps.
-			// First two must be registers
-			target = &a[2]
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			break
-		}
-		if p.arch.Family == sys.S390X {
-			// 3-operand jumps.
-			target = &a[2]
-			prog.From = a[0]
-			if a[1].Reg != 0 {
-				// Compare two registers and jump.
-				prog.Reg = p.getRegister(prog, op, &a[1])
-			} else {
-				// Compare register with immediate and jump.
-				prog.From3 = newAddr(a[1])
-			}
-			break
-		}
-
-		fallthrough
-	default:
-		p.errorf("wrong number of arguments to %s instruction", op)
-		return
-	}
-	switch {
-	case target.Type == obj.TYPE_BRANCH:
-		// JMP 4(PC)
-		prog.To = obj.Addr{
-			Type:   obj.TYPE_BRANCH,
-			Offset: p.pc + 1 + target.Offset, // +1 because p.pc is incremented in append, below.
-		}
-	case target.Type == obj.TYPE_REG:
-		// JMP R1
-		prog.To = *target
-	case target.Type == obj.TYPE_MEM && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
-		// JMP main·morestack(SB)
-		prog.To = *target
-	case target.Type == obj.TYPE_INDIR && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
-		// JMP *main·morestack(SB)
-		prog.To = *target
-		prog.To.Type = obj.TYPE_INDIR
-	case target.Type == obj.TYPE_MEM && target.Reg == 0 && target.Offset == 0:
-		// JMP exit
-		if target.Sym == nil {
-			// Parse error left name unset.
-			return
-		}
-		targetProg := p.labels[target.Sym.Name]
-		if targetProg == nil {
-			p.toPatch = append(p.toPatch, Patch{prog, target.Sym.Name})
-		} else {
-			p.branch(prog, targetProg)
-		}
-	case target.Type == obj.TYPE_MEM && target.Name == obj.NAME_NONE:
-		// JMP 4(R0)
-		prog.To = *target
-		// On the ppc64, 9a encodes BR (CTR) as BR CTR. We do the same.
-		if p.arch.Family == sys.PPC64 && target.Offset == 0 {
-			prog.To.Type = obj.TYPE_REG
-		}
-	case target.Type == obj.TYPE_CONST:
-		// JMP $4
-		prog.To = a[0]
-	default:
-		p.errorf("cannot assemble jump %+v", target)
-		return
-	}
-
-	p.append(prog, cond, true)
-}
-
-func (p *Parser) patch() {
-	for _, patch := range p.toPatch {
-		targetProg := p.labels[patch.label]
-		if targetProg == nil {
-			p.errorf("undefined label %s", patch.label)
-			return
-		}
-		p.branch(patch.prog, targetProg)
-	}
-	p.toPatch = p.toPatch[:0]
-}
-
-func (p *Parser) branch(jmp, target *obj.Prog) {
-	jmp.To = obj.Addr{
-		Type:  obj.TYPE_BRANCH,
-		Index: 0,
-	}
-	jmp.To.Val = target
-}
-
-// asmInstruction assembles an instruction.
-// MOVW R9, (R10)
-func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
-	// fmt.Printf("%s %+v\n", op, a)
-	prog := &obj.Prog{
-		Ctxt:   p.ctxt,
-		Lineno: p.histLineNum,
-		As:     op,
-	}
-	switch len(a) {
-	case 0:
-		// Nothing to do.
-	case 1:
-		if p.arch.UnaryDst[op] {
-			// prog.From is no address.
-			prog.To = a[0]
-		} else {
-			prog.From = a[0]
-			// prog.To is no address.
-		}
-		if p.arch.Family == sys.PPC64 && arch.IsPPC64NEG(op) {
-			// NEG: From and To are both a[0].
-			prog.To = a[0]
-			prog.From = a[0]
-			break
-		}
-	case 2:
-		if p.arch.Family == sys.ARM {
-			if arch.IsARMCMP(op) {
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				break
-			}
-			// Strange special cases.
-			if arch.IsARMSTREX(op) {
-				/*
-					STREX x, (y)
-						from=(y) reg=x to=x
-					STREX (x), y
-						from=(x) reg=y to=y
-				*/
-				if a[0].Type == obj.TYPE_REG && a[1].Type != obj.TYPE_REG {
-					prog.From = a[1]
-					prog.Reg = a[0].Reg
-					prog.To = a[0]
-					break
-				} else if a[0].Type != obj.TYPE_REG && a[1].Type == obj.TYPE_REG {
-					prog.From = a[0]
-					prog.Reg = a[1].Reg
-					prog.To = a[1]
-					break
-				}
-				p.errorf("unrecognized addressing for %s", op)
-				return
-			}
-			if arch.IsARMFloatCmp(op) {
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				break
-			}
-		} else if p.arch.Family == sys.ARM64 && arch.IsARM64CMP(op) {
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			break
-		} else if p.arch.Family == sys.MIPS || p.arch.Family == sys.MIPS64 {
-			if arch.IsMIPSCMP(op) || arch.IsMIPSMUL(op) {
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				break
-			}
-		}
-		prog.From = a[0]
-		prog.To = a[1]
-	case 3:
-		switch p.arch.Family {
-		case sys.MIPS, sys.MIPS64:
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			prog.To = a[2]
-		case sys.ARM:
-			// Special cases.
-			if arch.IsARMSTREX(op) {
-				/*
-					STREX x, (y), z
-						from=(y) reg=x to=z
-				*/
-				prog.From = a[1]
-				prog.Reg = p.getRegister(prog, op, &a[0])
-				prog.To = a[2]
-				break
-			}
-			// Otherwise the 2nd operand (a[1]) must be a register.
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			prog.To = a[2]
-		case sys.AMD64:
-			// Catch missing operand here, because we store immediate as part of From3, and can't distinguish
-			// missing operand from legal value 0 in obj/x86/asm6.
-			if arch.IsAMD4OP(op) {
-				p.errorf("4 operands required, but only 3 are provided for %s instruction", op)
-			}
-			prog.From = a[0]
-			prog.From3 = newAddr(a[1])
-			prog.To = a[2]
-		case sys.ARM64:
-			// ARM64 instructions with one input and two outputs.
-			if arch.IsARM64STLXR(op) {
-				prog.From = a[0]
-				prog.To = a[1]
-				if a[2].Type != obj.TYPE_REG {
-					p.errorf("invalid addressing modes for third operand to %s instruction, must be register", op)
-					return
-				}
-				prog.RegTo2 = a[2].Reg
-				break
-			}
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			prog.To = a[2]
-		case sys.I386:
-			prog.From = a[0]
-			prog.From3 = newAddr(a[1])
-			prog.To = a[2]
-		case sys.PPC64:
-			if arch.IsPPC64CMP(op) {
-				// CMPW etc.; third argument is a CR register that goes into prog.Reg.
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[2])
-				prog.To = a[1]
-				break
-			}
-			// Arithmetic. Choices are:
-			// reg reg reg
-			// imm reg reg
-			// reg imm reg
-			// If the immediate is the middle argument, use From3.
-			switch a[1].Type {
-			case obj.TYPE_REG:
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				prog.To = a[2]
-			case obj.TYPE_CONST:
-				prog.From = a[0]
-				prog.From3 = newAddr(a[1])
-				prog.To = a[2]
-			default:
-				p.errorf("invalid addressing modes for %s instruction", op)
-				return
-			}
-		case sys.S390X:
-			if arch.IsS390xWithLength(op) || arch.IsS390xWithIndex(op) {
-				prog.From = a[1]
-				prog.From3 = newAddr(a[0])
-			} else {
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				prog.From = a[0]
-			}
-			prog.To = a[2]
-		default:
-			p.errorf("TODO: implement three-operand instructions for this architecture")
-			return
-		}
-	case 4:
-		if p.arch.Family == sys.ARM && arch.IsARMMULA(op) {
-			// All must be registers.
-			p.getRegister(prog, op, &a[0])
-			r1 := p.getRegister(prog, op, &a[1])
-			p.getRegister(prog, op, &a[2])
-			r3 := p.getRegister(prog, op, &a[3])
-			prog.From = a[0]
-			prog.To = a[2]
-			prog.To.Type = obj.TYPE_REGREG2
-			prog.To.Offset = int64(r3)
-			prog.Reg = r1
-			break
-		}
-		if p.arch.Family == sys.AMD64 {
-			// 4-operand instructions have the form ymm1, ymm2, ymm3/m256, imm8.
-			// From3 is always just a register, so we store imm8 in the Offset field
-			// to avoid increasing the size of Prog.
-			prog.From = a[1]
-			prog.From3 = newAddr(a[2])
-			if a[0].Type != obj.TYPE_CONST {
-				p.errorf("first operand must be an immediate in %s instruction", op)
-			}
-			if prog.From3.Type != obj.TYPE_REG {
-				p.errorf("third operand must be a register in %s instruction", op)
-			}
-			prog.From3.Offset = int64(p.getImmediate(prog, op, &a[0]))
-			prog.To = a[3]
-			prog.RegTo2 = -1
-			break
-		}
-		if p.arch.Family == sys.ARM64 {
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			prog.From3 = newAddr(a[2])
-			prog.To = a[3]
-			break
-		}
-		if p.arch.Family == sys.PPC64 {
-			if arch.IsPPC64RLD(op) {
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				prog.From3 = newAddr(a[2])
-				prog.To = a[3]
-				break
-			} else if arch.IsPPC64ISEL(op) {
-				// ISEL BC,RB,RA,RT becomes isel rt,ra,rb,bc
-				prog.From3 = newAddr(a[2])                // ra
-				prog.From = a[0]                          // bc
-				prog.Reg = p.getRegister(prog, op, &a[1]) // rb
-				prog.To = a[3]                            // rt
-				break
-			}
-			// Else, it is a VA-form instruction
-			// reg reg reg reg
-			// imm reg reg reg
-			// Or a VX-form instruction
-			// imm imm reg reg
-			if a[1].Type == obj.TYPE_REG {
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[1])
-				prog.From3 = newAddr(a[2])
-				prog.To = a[3]
-				break
-			} else if a[1].Type == obj.TYPE_CONST {
-				prog.From = a[0]
-				prog.Reg = p.getRegister(prog, op, &a[2])
-				prog.From3 = newAddr(a[1])
-				prog.To = a[3]
-				break
-			} else {
-				p.errorf("invalid addressing modes for %s instruction", op)
-				return
-			}
-		}
-		if p.arch.Family == sys.S390X {
-			prog.From = a[1]
-			prog.Reg = p.getRegister(prog, op, &a[2])
-			prog.From3 = newAddr(a[0])
-			prog.To = a[3]
-			break
-		}
-		p.errorf("can't handle %s instruction with 4 operands", op)
-		return
-	case 5:
-		if p.arch.Family == sys.PPC64 && arch.IsPPC64RLD(op) {
-			// Always reg, reg, con, con, reg.  (con, con is a 'mask').
-			prog.From = a[0]
-			prog.Reg = p.getRegister(prog, op, &a[1])
-			mask1 := p.getConstant(prog, op, &a[2])
-			mask2 := p.getConstant(prog, op, &a[3])
-			var mask uint32
-			if mask1 < mask2 {
-				mask = (^uint32(0) >> uint(mask1)) & (^uint32(0) << uint(31-mask2))
-			} else {
-				mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1)))
-			}
-			prog.From3 = &obj.Addr{
-				Type:   obj.TYPE_CONST,
-				Offset: int64(mask),
-			}
-			prog.To = a[4]
-			break
-		}
-		p.errorf("can't handle %s instruction with 5 operands", op)
-		return
-	case 6:
-		if p.arch.Family == sys.ARM && arch.IsARMMRC(op) {
-			// Strange special case: MCR, MRC.
-			prog.To.Type = obj.TYPE_CONST
-			x0 := p.getConstant(prog, op, &a[0])
-			x1 := p.getConstant(prog, op, &a[1])
-			x2 := int64(p.getRegister(prog, op, &a[2]))
-			x3 := int64(p.getRegister(prog, op, &a[3]))
-			x4 := int64(p.getRegister(prog, op, &a[4]))
-			x5 := p.getConstant(prog, op, &a[5])
-			// Cond is handled specially for this instruction.
-			offset, MRC, ok := arch.ARMMRCOffset(op, cond, x0, x1, x2, x3, x4, x5)
-			if !ok {
-				p.errorf("unrecognized condition code .%q", cond)
-			}
-			prog.To.Offset = offset
-			cond = ""
-			prog.As = MRC // Both instructions are coded as MRC.
-			break
-		}
-		fallthrough
-	default:
-		p.errorf("can't handle %s instruction with %d operands", op, len(a))
-		return
-	}
-
-	p.append(prog, cond, true)
-}
-
-// newAddr returns a new(Addr) initialized to x.
-func newAddr(x obj.Addr) *obj.Addr {
-	p := new(obj.Addr)
-	*p = x
-	return p
-}
-
-// symbolName returns the symbol name, or an error string if none is available.
-func symbolName(addr *obj.Addr) string {
-	if addr.Sym != nil {
-		return addr.Sym.Name
-	}
-	return "<erroneous symbol>"
-}
-
-var emptyProg obj.Prog
-
-// getConstantPseudo checks that addr represents a plain constant and returns its value.
-func (p *Parser) getConstantPseudo(pseudo string, addr *obj.Addr) int64 {
-	if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
-		p.errorf("%s: expected integer constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
-	}
-	return addr.Offset
-}
-
-// getConstant checks that addr represents a plain constant and returns its value.
-func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
-	if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
-		p.errorf("%s: expected integer constant; found %s", op, obj.Dconv(prog, addr))
-	}
-	return addr.Offset
-}
-
-// getImmediate checks that addr represents an immediate constant and returns its value.
-func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
-	if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
-		p.errorf("%s: expected immediate constant; found %s", op, obj.Dconv(prog, addr))
-	}
-	return addr.Offset
-}
-
-// getRegister checks that addr represents a register and returns its value.
-func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
-	if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
-		p.errorf("%s: expected register; found %s", op, obj.Dconv(prog, addr))
-	}
-	return addr.Reg
-}
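
The TEXT size operand parsed by asmText above follows the "$frameSize-argSize" convention, where the minus sign separates two sizes rather than expressing subtraction. A rough standalone sketch of just that convention, working on plain strings instead of lexer tokens and using -1 in place of obj.ArgsSizeUnknown:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTextSize splits a "$frameSize-argSize" operand. The frame size may be
// negative; the "-argSize" part may be omitted, in which case args is -1
// (standing in for obj.ArgsSizeUnknown). This is a simplification of the
// token-based parsing in asmText and is illustrative only.
func parseTextSize(op string) (frame, args int64, err error) {
	if !strings.HasPrefix(op, "$") {
		return 0, 0, fmt.Errorf("frame size must be an immediate constant")
	}
	op = op[1:]
	negative := strings.HasPrefix(op, "-")
	op = strings.TrimPrefix(op, "-")
	parts := strings.SplitN(op, "-", 2)
	frame, err = strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("frame size must be an integer: %v", err)
	}
	if negative {
		frame = -frame
	}
	args = -1
	if len(parts) == 2 {
		args, err = strconv.ParseInt(parts[1], 10, 64)
		if err != nil || args < 0 {
			return 0, 0, fmt.Errorf("argument size must be of form -integer")
		}
	}
	return frame, args, nil
}

func main() {
	fmt.Println(parseTextSize("$24-16")) // 24 16 <nil>
	fmt.Println(parseTextSize("$-8-0"))  // -8 0 <nil>
	fmt.Println(parseTextSize("$0"))     // 0 -1 <nil>
}
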
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/endtoend_test.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/endtoend_test.go
deleted file mode 100644
index c84f8aa..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/endtoend_test.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/endtoend_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/endtoend_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asm
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"testing"
-
-	"bootstrap/cmd/asm/internal/lex"
-	"bootstrap/cmd/internal/obj"
-)
-
-// An end-to-end test for the assembler: Do we print what we parse?
-// Output is generated by, in effect, turning on -S and comparing the
-// result against a golden file.
-
-func testEndToEnd(t *testing.T, goarch, file string) {
-	lex.InitHist()
-	input := filepath.Join("testdata", file+".s")
-	architecture, ctxt := setArch(goarch)
-	lexer := lex.NewLexer(input, ctxt)
-	parser := NewParser(ctxt, architecture, lexer)
-	pList := obj.Linknewplist(ctxt)
-	var ok bool
-	testOut = new(bytes.Buffer) // The assembler writes test output to this buffer.
-	ctxt.Bso = bufio.NewWriter(os.Stdout)
-	defer ctxt.Bso.Flush()
-	failed := false
-	ctxt.DiagFunc = func(format string, args ...interface{}) {
-		failed = true
-		t.Errorf(format, args...)
-	}
-	pList.Firstpc, ok = parser.Parse()
-	if !ok || failed {
-		t.Errorf("asm: %s assembly failed", goarch)
-		return
-	}
-	output := strings.Split(testOut.String(), "\n")
-
-	// Reconstruct expected output by independently "parsing" the input.
-	data, err := ioutil.ReadFile(input)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	lineno := 0
-	seq := 0
-	hexByLine := map[string]string{}
-	lines := strings.SplitAfter(string(data), "\n")
-Diff:
-	for _, line := range lines {
-		lineno++
-
-		// The general form of a test input line is:
-		//	// comment
-		//	INST args [// printed form] [// hex encoding]
-		parts := strings.Split(line, "//")
-		printed := strings.TrimSpace(parts[0])
-		if printed == "" || strings.HasSuffix(printed, ":") { // empty or label
-			continue
-		}
-		seq++
-
-		var hexes string
-		switch len(parts) {
-		default:
-			t.Errorf("%s:%d: unable to understand comments: %s", input, lineno, line)
-		case 1:
-			// no comment
-		case 2:
-			// might be printed form or hex
-			note := strings.TrimSpace(parts[1])
-			if isHexes(note) {
-				hexes = note
-			} else {
-				printed = note
-			}
-		case 3:
-			// printed form, then hex
-			printed = strings.TrimSpace(parts[1])
-			hexes = strings.TrimSpace(parts[2])
-			if !isHexes(hexes) {
-				t.Errorf("%s:%d: malformed hex instruction encoding: %s", input, lineno, line)
-			}
-		}
-
-		if hexes != "" {
-			hexByLine[fmt.Sprintf("%s:%d", input, lineno)] = hexes
-		}
-
-		// Canonicalize spacing in printed form.
-		// First field is opcode, then tab, then arguments separated by spaces.
-		// Canonicalize spaces after commas first.
-		// Comma to separate argument gets a space; comma within does not.
-		var buf []byte
-		nest := 0
-		for i := 0; i < len(printed); i++ {
-			c := printed[i]
-			switch c {
-			case '{', '[':
-				nest++
-			case '}', ']':
-				nest--
-			case ',':
-				buf = append(buf, ',')
-				if nest == 0 {
-					buf = append(buf, ' ')
-				}
-				for i+1 < len(printed) && (printed[i+1] == ' ' || printed[i+1] == '\t') {
-					i++
-				}
-				continue
-			}
-			buf = append(buf, c)
-		}
-
-		f := strings.Fields(string(buf))
-
-		// Turn relative (PC) into absolute (PC) automatically,
-		// so that most branch instructions don't need comments
-		// giving the absolute form.
-		if len(f) > 0 && strings.HasSuffix(printed, "(PC)") {
-			last := f[len(f)-1]
-			n, err := strconv.Atoi(last[:len(last)-len("(PC)")])
-			if err == nil {
-				f[len(f)-1] = fmt.Sprintf("%d(PC)", seq+n)
-			}
-		}
-
-		if len(f) == 1 {
-			printed = f[0]
-		} else {
-			printed = f[0] + "\t" + strings.Join(f[1:], " ")
-		}
-
-		want := fmt.Sprintf("%05d (%s:%d)\t%s", seq, input, lineno, printed)
-		for len(output) > 0 && (output[0] < want || output[0] != want && len(output[0]) >= 5 && output[0][:5] == want[:5]) {
-			if len(output[0]) >= 5 && output[0][:5] == want[:5] {
-				t.Errorf("mismatched output:\nhave %s\nwant %s", output[0], want)
-				output = output[1:]
-				continue Diff
-			}
-			t.Errorf("unexpected output: %q", output[0])
-			output = output[1:]
-		}
-		if len(output) > 0 && output[0] == want {
-			output = output[1:]
-		} else {
-			t.Errorf("missing output: %q", want)
-		}
-	}
-	for len(output) > 0 {
-		if output[0] == "" {
-			// spurious blank caused by Split on "\n"
-			output = output[1:]
-			continue
-		}
-		t.Errorf("unexpected output: %q", output[0])
-		output = output[1:]
-	}
-
-	// Checked printing.
-	// Now check machine code layout.
-
-	top := pList.Firstpc
-	var text *obj.LSym
-	ok = true
-	ctxt.DiagFunc = func(format string, args ...interface{}) {
-		t.Errorf(format, args...)
-		ok = false
-	}
-	obj.FlushplistNoFree(ctxt)
-
-	for p := top; p != nil; p = p.Link {
-		if p.As == obj.ATEXT {
-			text = p.From.Sym
-		}
-		hexes := hexByLine[p.Line()]
-		if hexes == "" {
-			continue
-		}
-		delete(hexByLine, p.Line())
-		if text == nil {
-			t.Errorf("%s: instruction outside TEXT", p)
-		}
-		size := int64(len(text.P)) - p.Pc
-		if p.Link != nil {
-			size = p.Link.Pc - p.Pc
-		} else if p.Isize != 0 {
-			size = int64(p.Isize)
-		}
-		var code []byte
-		if p.Pc < int64(len(text.P)) {
-			code = text.P[p.Pc:]
-			if size < int64(len(code)) {
-				code = code[:size]
-			}
-		}
-		codeHex := fmt.Sprintf("%x", code)
-		if codeHex == "" {
-			codeHex = "empty"
-		}
-		ok := false
-		for _, hex := range strings.Split(hexes, " or ") {
-			if codeHex == hex {
-				ok = true
-				break
-			}
-		}
-		if !ok {
-			t.Errorf("%s: have encoding %s, want %s", p, codeHex, hexes)
-		}
-	}
-
-	if len(hexByLine) > 0 {
-		var missing []string
-		for key := range hexByLine {
-			missing = append(missing, key)
-		}
-		sort.Strings(missing)
-		for _, line := range missing {
-			t.Errorf("%s: did not find instruction encoding", line)
-		}
-	}
-
-}
-
-func isHexes(s string) bool {
-	if s == "" {
-		return false
-	}
-	if s == "empty" {
-		return true
-	}
-	for _, f := range strings.Split(s, " or ") {
-		if f == "" || len(f)%2 != 0 || strings.TrimLeft(f, "0123456789abcdef") != "" {
-			return false
-		}
-	}
-	return true
-}
-
-// It would be nice if the error messages began with
-// the standard file:line: prefix,
-// but that's not where we are today.
-// It might be at the beginning but it might be in the middle of the printed instruction.
-var fileLineRE = regexp.MustCompile(`(?:^|\()(testdata[/\\][0-9a-z]+\.s:[0-9]+)(?:$|\))`)
-
-// Same as in test/run.go
-var (
-	errRE       = regexp.MustCompile(`// ERROR ?(.*)`)
-	errQuotesRE = regexp.MustCompile(`"([^"]*)"`)
-)
-
-func testErrors(t *testing.T, goarch, file string) {
-	lex.InitHist()
-	input := filepath.Join("testdata", file+".s")
-	architecture, ctxt := setArch(goarch)
-	lexer := lex.NewLexer(input, ctxt)
-	parser := NewParser(ctxt, architecture, lexer)
-	pList := obj.Linknewplist(ctxt)
-	var ok bool
-	testOut = new(bytes.Buffer) // The assembler writes test output to this buffer.
-	ctxt.Bso = bufio.NewWriter(os.Stdout)
-	defer ctxt.Bso.Flush()
-	failed := false
-	var errBuf bytes.Buffer
-	ctxt.DiagFunc = func(format string, args ...interface{}) {
-		failed = true
-		s := fmt.Sprintf(format, args...)
-		if !strings.HasSuffix(s, "\n") {
-			s += "\n"
-		}
-		errBuf.WriteString(s)
-	}
-	pList.Firstpc, ok = parser.Parse()
-	obj.Flushplist(ctxt)
-	if ok && !failed {
-		t.Errorf("asm: %s had no errors", goarch)
-	}
-
-	errors := map[string]string{}
-	for _, line := range strings.Split(errBuf.String(), "\n") {
-		if line == "" || strings.HasPrefix(line, "\t") {
-			continue
-		}
-		m := fileLineRE.FindStringSubmatch(line)
-		if m == nil {
-			t.Errorf("unexpected error: %v", line)
-			continue
-		}
-		fileline := m[1]
-		if errors[fileline] != "" {
-			t.Errorf("multiple errors on %s:\n\t%s\n\t%s", fileline, errors[fileline], line)
-			continue
-		}
-		errors[fileline] = line
-	}
-
-	// Reconstruct expected errors by independently "parsing" the input.
-	data, err := ioutil.ReadFile(input)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-	lineno := 0
-	lines := strings.Split(string(data), "\n")
-	for _, line := range lines {
-		lineno++
-
-		fileline := fmt.Sprintf("%s:%d", input, lineno)
-		if m := errRE.FindStringSubmatch(line); m != nil {
-			all := m[1]
-			mm := errQuotesRE.FindAllStringSubmatch(all, -1)
-			if len(mm) != 1 {
-				t.Errorf("%s: invalid errorcheck line:\n%s", fileline, line)
-			} else if err := errors[fileline]; err == "" {
-				t.Errorf("%s: missing error, want %s", fileline, all)
-			} else if !strings.Contains(err, mm[0][1]) {
-				t.Errorf("%s: wrong error for %s:\n%s", fileline, all, err)
-			}
-		} else {
-			if errors[fileline] != "" {
-				t.Errorf("unexpected error on %s: %v", fileline, errors[fileline])
-			}
-		}
-		delete(errors, fileline)
-	}
-	var extra []string
-	for key := range errors {
-		extra = append(extra, key)
-	}
-	sort.Strings(extra)
-	for _, fileline := range extra {
-		t.Errorf("unexpected error on %s: %v", fileline, errors[fileline])
-	}
-}
-
-func Test386EndToEnd(t *testing.T) {
-	defer os.Setenv("GO386", os.Getenv("GO386"))
-
-	for _, go386 := range []string{"387", "sse"} {
-		os.Setenv("GO386", go386)
-		t.Logf("GO386=%v", os.Getenv("GO386"))
-		testEndToEnd(t, "386", "386")
-	}
-}
-
-func TestARMEndToEnd(t *testing.T) {
-	defer os.Setenv("GOARM", os.Getenv("GOARM"))
-
-	for _, goarm := range []string{"5", "6", "7"} {
-		os.Setenv("GOARM", goarm)
-		t.Logf("GOARM=%v", os.Getenv("GOARM"))
-		testEndToEnd(t, "arm", "arm")
-	}
-}
-
-func TestARM64EndToEnd(t *testing.T) {
-	testEndToEnd(t, "arm64", "arm64")
-}
-
-func TestAMD64EndToEnd(t *testing.T) {
-	testEndToEnd(t, "amd64", "amd64")
-}
-
-func TestAMD64Encoder(t *testing.T) {
-	testEndToEnd(t, "amd64", "amd64enc")
-}
-
-func TestAMD64Errors(t *testing.T) {
-	testErrors(t, "amd64", "amd64error")
-}
-
-func TestMIPSEndToEnd(t *testing.T) {
-	testEndToEnd(t, "mips", "mips")
-	testEndToEnd(t, "mips64", "mips64")
-}
-
-func TestPPC64EndToEnd(t *testing.T) {
-	testEndToEnd(t, "ppc64", "ppc64")
-}
-
-func TestS390XEndToEnd(t *testing.T) {
-	testEndToEnd(t, "s390x", "s390x")
-}
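
The end-to-end test deleted above relies on a comment convention in its testdata files: INST args [// printed form] [// hex encoding]. The sketch below shows how such a line splits apart; isHexes restates the check from the deleted test, and the example instructions and encodings are illustrative.

package main

import (
	"fmt"
	"strings"
)

// isHexes reports whether s looks like an encoding comment: "empty" or one
// or more even-length lower-case hex strings joined by " or ".
func isHexes(s string) bool {
	if s == "" {
		return false
	}
	if s == "empty" {
		return true
	}
	for _, f := range strings.Split(s, " or ") {
		if f == "" || len(f)%2 != 0 || strings.TrimLeft(f, "0123456789abcdef") != "" {
			return false
		}
	}
	return true
}

// splitTestLine pulls a testdata line apart into the printed form and the
// expected hex encoding, following the convention
//	INST args [// printed form] [// hex encoding]
func splitTestLine(line string) (printed, hexes string) {
	parts := strings.Split(line, "//")
	printed = strings.TrimSpace(parts[0])
	switch len(parts) {
	case 2: // one comment: either the printed form or the encoding
		if note := strings.TrimSpace(parts[1]); isHexes(note) {
			hexes = note
		} else {
			printed = note
		}
	case 3: // printed form, then encoding
		printed = strings.TrimSpace(parts[1])
		hexes = strings.TrimSpace(parts[2])
	}
	return printed, hexes
}

func main() {
	fmt.Println(splitTestLine("MOVQ AX, BX // MOVQ AX, BX // 4889c3"))
	fmt.Println(splitTestLine("RET // c3"))
}
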
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/expr_test.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/expr_test.go
deleted file mode 100644
index 6346830..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/expr_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/expr_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/expr_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asm
-
-import (
-	"bootstrap/cmd/asm/internal/lex"
-	"strings"
-	"testing"
-	"text/scanner"
-)
-
-type exprTest struct {
-	input  string
-	output int64
-	atEOF  bool
-}
-
-var exprTests = []exprTest{
-	// Simple
-	{"0", 0, true},
-	{"3", 3, true},
-	{"070", 8 * 7, true},
-	{"0x0f", 15, true},
-	{"0xFF", 255, true},
-	{"9223372036854775807", 9223372036854775807, true}, // max int64
-	// Unary
-	{"-0", 0, true},
-	{"~0", -1, true},
-	{"~0*0", 0, true},
-	{"+3", 3, true},
-	{"-3", -3, true},
-	{"-9223372036854775808", -9223372036854775808, true}, // min int64
-	// Binary
-	{"3+4", 3 + 4, true},
-	{"3-4", 3 - 4, true},
-	{"2|5", 2 | 5, true},
-	{"3^4", 3 ^ 4, true},
-	{"3*4", 3 * 4, true},
-	{"14/4", 14 / 4, true},
-	{"3<<4", 3 << 4, true},
-	{"48>>3", 48 >> 3, true},
-	{"3&9", 3 & 9, true},
-	// General
-	{"3*2+3", 3*2 + 3, true},
-	{"3+2*3", 3 + 2*3, true},
-	{"3*(2+3)", 3 * (2 + 3), true},
-	{"3*-(2+3)", 3 * -(2 + 3), true},
-	{"3<<2+4", 3<<2 + 4, true},
-	{"3<<2+4", 3<<2 + 4, true},
-	{"3<<(2+4)", 3 << (2 + 4), true},
-	// Junk at EOF.
-	{"3 x", 3, false},
-	// Big number
-	{"4611686018427387904", 4611686018427387904, true},
-}
-
-func TestExpr(t *testing.T) {
-	p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
-	for i, test := range exprTests {
-		p.start(lex.Tokenize(test.input))
-		result := int64(p.expr())
-		if result != test.output {
-			t.Errorf("%d: %q evaluated to %d; expected %d", i, test.input, result, test.output)
-		}
-		tok := p.next()
-		if test.atEOF && tok.ScanToken != scanner.EOF {
-			t.Errorf("%d: %q: at EOF got %s", i, test.input, tok)
-		} else if !test.atEOF && tok.ScanToken == scanner.EOF {
-			t.Errorf("%d: %q: expected not EOF but at EOF", i, test.input)
-		}
-	}
-}
-
-type badExprTest struct {
-	input string
-	error string // Empty means no error.
-}
-
-var badExprTests = []badExprTest{
-	{"0/0", "division by zero"},
-	{"3/0", "division by zero"},
-	{"(1<<63)/0", "divide of value with high bit set"},
-	{"3%0", "modulo by zero"},
-	{"(1<<63)%0", "modulo of value with high bit set"},
-	{"3<<-4", "negative left shift count"},
-	{"3<<(1<<63)", "negative left shift count"},
-	{"3>>-4", "negative right shift count"},
-	{"3>>(1<<63)", "negative right shift count"},
-	{"(1<<63)>>2", "right shift of value with high bit set"},
-	{"(1<<62)>>2", ""},
-	{`'\x80'`, "illegal UTF-8 encoding for character constant"},
-	{"(23*4", "missing closing paren"},
-	{")23*4", "unexpected ) evaluating expression"},
-	{"18446744073709551616", "value out of range"},
-}
-
-func TestBadExpr(t *testing.T) {
-	panicOnError = true
-	defer func() {
-		panicOnError = false
-	}()
-	for i, test := range badExprTests {
-		err := runBadTest(i, test, t)
-		if err == nil {
-			if test.error != "" {
-				t.Errorf("#%d: %q: expected error %q; got none", i, test.input, test.error)
-			}
-			continue
-		}
-		if !strings.Contains(err.Error(), test.error) {
-			t.Errorf("#%d: expected error %q; got %q", i, test.error, err)
-			continue
-		}
-	}
-}
-
-func runBadTest(i int, test badExprTest, t *testing.T) (err error) {
-	p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
-	p.start(lex.Tokenize(test.input))
-	defer func() {
-		e := recover()
-		var ok bool
-		if err, ok = e.(error); e != nil && !ok {
-			t.Fatal(e)
-		}
-	}()
-	p.expr()
-	return nil
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/operand_test.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/operand_test.go
deleted file mode 100644
index 9e6f817..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/operand_test.go
+++ /dev/null
@@ -1,817 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/operand_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/operand_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asm
-
-import (
-	"testing"
-
-	"bootstrap/cmd/asm/internal/arch"
-	"bootstrap/cmd/asm/internal/lex"
-	"bootstrap/cmd/internal/obj"
-)
-
-// A simple in-out test: Do we print what we parse?
-
-func setArch(goarch string) (*arch.Arch, *obj.Link) {
-	obj.GOOS = "linux" // obj can handle this OS for all architectures.
-	obj.GOARCH = goarch
-	architecture := arch.Set(goarch)
-	if architecture == nil {
-		panic("asm: unrecognized architecture " + goarch)
-	}
-	return architecture, obj.Linknew(architecture.LinkArch)
-}
-
-func newParser(goarch string) *Parser {
-	architecture, ctxt := setArch(goarch)
-	return NewParser(ctxt, architecture, nil)
-}
-
-func testOperandParser(t *testing.T, parser *Parser, tests []operandTest) {
-	for _, test := range tests {
-		parser.start(lex.Tokenize(test.input))
-		addr := obj.Addr{}
-		parser.operand(&addr)
-		result := obj.Dconv(&emptyProg, &addr)
-		if result != test.output {
-			t.Errorf("fail at %s: got %s; expected %s\n", test.input, result, test.output)
-		}
-	}
-}
-
-func TestAMD64OperandParser(t *testing.T) {
-	parser := newParser("amd64")
-	testOperandParser(t, parser, amd64OperandTests)
-}
-
-func Test386OperandParser(t *testing.T) {
-	parser := newParser("386")
-	testOperandParser(t, parser, x86OperandTests)
-}
-
-func TestARMOperandParser(t *testing.T) {
-	parser := newParser("arm")
-	testOperandParser(t, parser, armOperandTests)
-}
-func TestARM64OperandParser(t *testing.T) {
-	parser := newParser("arm64")
-	testOperandParser(t, parser, arm64OperandTests)
-}
-
-func TestPPC64OperandParser(t *testing.T) {
-	parser := newParser("ppc64")
-	testOperandParser(t, parser, ppc64OperandTests)
-}
-
-func TestMIPSOperandParser(t *testing.T) {
-	parser := newParser("mips")
-	testOperandParser(t, parser, mipsOperandTests)
-}
-
-func TestMIPS64OperandParser(t *testing.T) {
-	parser := newParser("mips64")
-	testOperandParser(t, parser, mips64OperandTests)
-}
-
-func TestS390XOperandParser(t *testing.T) {
-	parser := newParser("s390x")
-	testOperandParser(t, parser, s390xOperandTests)
-}
-
-type operandTest struct {
-	input, output string
-}
-
-// Examples collected by scanning all the assembly in the standard repo.
-
-var amd64OperandTests = []operandTest{
-	{"$(-1.0)", "$(-1.0)"},
-	{"$(0.0)", "$(0.0)"},
-	{"$(0x2000000+116)", "$33554548"},
-	{"$(0x3F<<7)", "$8064"},
-	{"$(112+8)", "$120"},
-	{"$(1<<63)", "$-9223372036854775808"},
-	{"$-1", "$-1"},
-	{"$0", "$0"},
-	{"$0-0", "$0"},
-	{"$0-16", "$-16"},
-	{"$0x000FFFFFFFFFFFFF", "$4503599627370495"},
-	{"$0x01", "$1"},
-	{"$0x02", "$2"},
-	{"$0x04", "$4"},
-	{"$0x3FE", "$1022"},
-	{"$0x7fffffe00000", "$140737486258176"},
-	{"$0xfffffffffffff001", "$-4095"},
-	{"$1", "$1"},
-	{"$1.0", "$(1.0)"},
-	{"$10", "$10"},
-	{"$1000", "$1000"},
-	{"$1000000", "$1000000"},
-	{"$1000000000", "$1000000000"},
-	{"$__tsan_func_enter(SB)", "$__tsan_func_enter(SB)"},
-	{"$main(SB)", "$main(SB)"},
-	{"$masks<>(SB)", "$masks<>(SB)"},
-	{"$setg_gcc<>(SB)", "$setg_gcc<>(SB)"},
-	{"$shifts<>(SB)", "$shifts<>(SB)"},
-	{"$~(1<<63)", "$9223372036854775807"},
-	{"$~0x3F", "$-64"},
-	{"$~15", "$-16"},
-	{"(((8)&0xf)*4)(SP)", "32(SP)"},
-	{"(((8-14)&0xf)*4)(SP)", "40(SP)"},
-	{"(6+8)(AX)", "14(AX)"},
-	{"(8*4)(BP)", "32(BP)"},
-	{"(AX)", "(AX)"},
-	{"(AX)(CX*8)", "(AX)(CX*8)"},
-	{"(BP)(CX*4)", "(BP)(CX*4)"},
-	{"(BP)(DX*4)", "(BP)(DX*4)"},
-	{"(BP)(R8*4)", "(BP)(R8*4)"},
-	{"(BX)", "(BX)"},
-	{"(DI)", "(DI)"},
-	{"(DI)(BX*1)", "(DI)(BX*1)"},
-	{"(DX)", "(DX)"},
-	{"(R9)", "(R9)"},
-	{"(R9)(BX*8)", "(R9)(BX*8)"},
-	{"(SI)", "(SI)"},
-	{"(SI)(BX*1)", "(SI)(BX*1)"},
-	{"(SI)(DX*1)", "(SI)(DX*1)"},
-	{"(SP)", "(SP)"},
-	{"(SP)(AX*4)", "(SP)(AX*4)"},
-	{"32(SP)(BX*2)", "32(SP)(BX*2)"},
-	{"32323(SP)(R8*4)", "32323(SP)(R8*4)"},
-	{"+3(PC)", "3(PC)"},
-	{"-1(DI)(BX*1)", "-1(DI)(BX*1)"},
-	{"-3(PC)", "-3(PC)"},
-	{"-64(SI)(BX*1)", "-64(SI)(BX*1)"},
-	{"-96(SI)(BX*1)", "-96(SI)(BX*1)"},
-	{"AL", "AL"},
-	{"AX", "AX"},
-	{"BP", "BP"},
-	{"BX", "BX"},
-	{"CX", "CX"},
-	{"DI", "DI"},
-	{"DX", "DX"},
-	{"R10", "R10"},
-	{"R10", "R10"},
-	{"R11", "R11"},
-	{"R12", "R12"},
-	{"R13", "R13"},
-	{"R14", "R14"},
-	{"R15", "R15"},
-	{"R8", "R8"},
-	{"R9", "R9"},
-	{"SI", "SI"},
-	{"SP", "SP"},
-	{"X0", "X0"},
-	{"X1", "X1"},
-	{"X10", "X10"},
-	{"X11", "X11"},
-	{"X12", "X12"},
-	{"X13", "X13"},
-	{"X14", "X14"},
-	{"X15", "X15"},
-	{"X2", "X2"},
-	{"X3", "X3"},
-	{"X4", "X4"},
-	{"X5", "X5"},
-	{"X6", "X6"},
-	{"X7", "X7"},
-	{"X8", "X8"},
-	{"X9", "X9"},
-	{"_expand_key_128<>(SB)", "_expand_key_128<>(SB)"},
-	{"_seek<>(SB)", "_seek<>(SB)"},
-	{"a2+16(FP)", "a2+16(FP)"},
-	{"addr2+24(FP)", "addr2+24(FP)"},
-	{"asmcgocall<>(SB)", "asmcgocall<>(SB)"},
-	{"b+24(FP)", "b+24(FP)"},
-	{"b_len+32(FP)", "b_len+32(FP)"},
-	{"racecall<>(SB)", "racecall<>(SB)"},
-	{"rcv_name+20(FP)", "rcv_name+20(FP)"},
-	{"retoffset+28(FP)", "retoffset+28(FP)"},
-	{"runtime·_GetStdHandle(SB)", "runtime._GetStdHandle(SB)"},
-	{"sync\u2215atomic·AddInt64(SB)", "sync/atomic.AddInt64(SB)"},
-	{"timeout+20(FP)", "timeout+20(FP)"},
-	{"ts+16(FP)", "ts+16(FP)"},
-	{"x+24(FP)", "x+24(FP)"},
-	{"x·y(SB)", "x.y(SB)"},
-	{"x·y(SP)", "x.y(SP)"},
-	{"x·y+8(SB)", "x.y+8(SB)"},
-	{"x·y+8(SP)", "x.y+8(SP)"},
-	{"y+56(FP)", "y+56(FP)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·callReflect(SB)", "\"\".callReflect(SB)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
-
-var x86OperandTests = []operandTest{
-	{"$(2.928932188134524e-01)", "$(0.29289321881345243)"},
-	{"$-1", "$-1"},
-	{"$0", "$0"},
-	{"$0x00000000", "$0"},
-	{"$runtime·badmcall(SB)", "$runtime.badmcall(SB)"},
-	{"$setg_gcc<>(SB)", "$setg_gcc<>(SB)"},
-	{"$~15", "$-16"},
-	{"(-64*1024+104)(SP)", "-65432(SP)"},
-	{"(0*4)(BP)", "(BP)"},
-	{"(1*4)(DI)", "4(DI)"},
-	{"(4*4)(BP)", "16(BP)"},
-	{"(AX)", "(AX)"},
-	{"(BP)(CX*4)", "(BP)(CX*4)"},
-	{"(BP*8)", "0(BP*8)"},
-	{"(BX)", "(BX)"},
-	{"(SP)", "(SP)"},
-	{"*AX", "AX"}, // TODO: Should make * illegal here; a simple alias for JMP AX.
-	{"*runtime·_GetStdHandle(SB)", "*runtime._GetStdHandle(SB)"},
-	{"-(4+12)(DI)", "-16(DI)"},
-	{"-1(DI)(BX*1)", "-1(DI)(BX*1)"},
-	{"-96(DI)(BX*1)", "-96(DI)(BX*1)"},
-	{"0(AX)", "(AX)"},
-	{"0(BP)", "(BP)"},
-	{"0(BX)", "(BX)"},
-	{"4(AX)", "4(AX)"},
-	{"AL", "AL"},
-	{"AX", "AX"},
-	{"BP", "BP"},
-	{"BX", "BX"},
-	{"CX", "CX"},
-	{"DI", "DI"},
-	{"DX", "DX"},
-	{"F0", "F0"},
-	{"GS", "GS"},
-	{"SI", "SI"},
-	{"SP", "SP"},
-	{"X0", "X0"},
-	{"X1", "X1"},
-	{"X2", "X2"},
-	{"X3", "X3"},
-	{"X4", "X4"},
-	{"X5", "X5"},
-	{"X6", "X6"},
-	{"X7", "X7"},
-	{"asmcgocall<>(SB)", "asmcgocall<>(SB)"},
-	{"ax+4(FP)", "ax+4(FP)"},
-	{"ptime-12(SP)", "ptime-12(SP)"},
-	{"runtime·_NtWaitForSingleObject(SB)", "runtime._NtWaitForSingleObject(SB)"},
-	{"s(FP)", "s(FP)"},
-	{"sec+4(FP)", "sec+4(FP)"},
-	{"shifts<>(SB)(CX*8)", "shifts<>(SB)(CX*8)"},
-	{"x+4(FP)", "x+4(FP)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·reflectcall(SB)", "\"\".reflectcall(SB)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
-
-var armOperandTests = []operandTest{
-	{"$0", "$0"},
-	{"$256", "$256"},
-	{"(R0)", "(R0)"},
-	{"(R11)", "(R11)"},
-	{"(g)", "(g)"},
-	{"-12(R4)", "-12(R4)"},
-	{"0(PC)", "0(PC)"},
-	{"1024", "1024"},
-	{"12(R(1))", "12(R1)"},
-	{"12(R13)", "12(R13)"},
-	{"R0", "R0"},
-	{"R0->(32-1)", "R0->31"},
-	{"R0<<R1", "R0<<R1"},
-	{"R0>>R(1)", "R0>>R1"},
-	{"R0@>(32-1)", "R0@>31"},
-	{"R1", "R1"},
-	{"R11", "R11"},
-	{"R12", "R12"},
-	{"R13", "R13"},
-	{"R14", "R14"},
-	{"R15", "R15"},
-	{"R1<<2(R3)", "R1<<2(R3)"},
-	{"R(1)<<2(R(3))", "R1<<2(R3)"},
-	{"R2", "R2"},
-	{"R3", "R3"},
-	{"R4", "R4"},
-	{"R(4)", "R4"},
-	{"R5", "R5"},
-	{"R6", "R6"},
-	{"R7", "R7"},
-	{"R8", "R8"},
-	{"[R0,R1,g,R15]", "[R0,R1,g,R15]"},
-	{"[R0-R7]", "[R0,R1,R2,R3,R4,R5,R6,R7]"},
-	{"[R(0)-R(7)]", "[R0,R1,R2,R3,R4,R5,R6,R7]"},
-	{"[R0]", "[R0]"},
-	{"[R1-R12]", "[R1,R2,R3,R4,R5,R6,R7,R8,R9,g,R11,R12]"},
-	{"armCAS64(SB)", "armCAS64(SB)"},
-	{"asmcgocall<>(SB)", "asmcgocall<>(SB)"},
-	{"c+28(FP)", "c+28(FP)"},
-	{"g", "g"},
-	{"gosave<>(SB)", "gosave<>(SB)"},
-	{"retlo+12(FP)", "retlo+12(FP)"},
-	{"runtime·_sfloat2(SB)", "runtime._sfloat2(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"(R1, R3)", "(R1, R3)"},
-	{"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists.
-	{"[):[o-FP", ""},     // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks.
-	{"[):[R0-FP", ""},
-	{"(", ""}, // Issue 12466 - backed up before beginning of line.
-}
-
-var ppc64OperandTests = []operandTest{
-	{"$((1<<63)-1)", "$9223372036854775807"},
-	{"$(-64*1024)", "$-65536"},
-	{"$(1024 * 8)", "$8192"},
-	{"$-1", "$-1"},
-	{"$-24(R4)", "$-24(R4)"},
-	{"$0", "$0"},
-	{"$0(R1)", "$(R1)"},
-	{"$0.5", "$(0.5)"},
-	{"$0x7000", "$28672"},
-	{"$0x88888eef", "$2290650863"},
-	{"$1", "$1"},
-	{"$_main<>(SB)", "$_main<>(SB)"},
-	{"$argframe(FP)", "$argframe(FP)"},
-	{"$runtime·tlsg(SB)", "$runtime.tlsg(SB)"},
-	{"$~3", "$-4"},
-	{"(-288-3*8)(R1)", "-312(R1)"},
-	{"(16)(R7)", "16(R7)"},
-	{"(8)(g)", "8(g)"},
-	{"(CTR)", "(CTR)"},
-	{"(R0)", "(R0)"},
-	{"(R3)", "(R3)"},
-	{"(R4)", "(R4)"},
-	{"(R5)", "(R5)"},
-	{"(R5)(R6*1)", "(R5)(R6*1)"},
-	{"(R5+R6)", "(R5)(R6*1)"}, // Old syntax.
-	{"-1(R4)", "-1(R4)"},
-	{"-1(R5)", "-1(R5)"},
-	{"6(PC)", "6(PC)"},
-	{"CR7", "CR7"},
-	{"CTR", "CTR"},
-	{"VS0", "VS0"},
-	{"VS1", "VS1"},
-	{"VS2", "VS2"},
-	{"VS3", "VS3"},
-	{"VS4", "VS4"},
-	{"VS5", "VS5"},
-	{"VS6", "VS6"},
-	{"VS7", "VS7"},
-	{"VS8", "VS8"},
-	{"VS9", "VS9"},
-	{"VS10", "VS10"},
-	{"VS11", "VS11"},
-	{"VS12", "VS12"},
-	{"VS13", "VS13"},
-	{"VS14", "VS14"},
-	{"VS15", "VS15"},
-	{"VS16", "VS16"},
-	{"VS17", "VS17"},
-	{"VS18", "VS18"},
-	{"VS19", "VS19"},
-	{"VS20", "VS20"},
-	{"VS21", "VS21"},
-	{"VS22", "VS22"},
-	{"VS23", "VS23"},
-	{"VS24", "VS24"},
-	{"VS25", "VS25"},
-	{"VS26", "VS26"},
-	{"VS27", "VS27"},
-	{"VS28", "VS28"},
-	{"VS29", "VS29"},
-	{"VS30", "VS30"},
-	{"VS31", "VS31"},
-	{"VS32", "VS32"},
-	{"VS33", "VS33"},
-	{"VS34", "VS34"},
-	{"VS35", "VS35"},
-	{"VS36", "VS36"},
-	{"VS37", "VS37"},
-	{"VS38", "VS38"},
-	{"VS39", "VS39"},
-	{"VS40", "VS40"},
-	{"VS41", "VS41"},
-	{"VS42", "VS42"},
-	{"VS43", "VS43"},
-	{"VS44", "VS44"},
-	{"VS45", "VS45"},
-	{"VS46", "VS46"},
-	{"VS47", "VS47"},
-	{"VS48", "VS48"},
-	{"VS49", "VS49"},
-	{"VS50", "VS50"},
-	{"VS51", "VS51"},
-	{"VS52", "VS52"},
-	{"VS53", "VS53"},
-	{"VS54", "VS54"},
-	{"VS55", "VS55"},
-	{"VS56", "VS56"},
-	{"VS57", "VS57"},
-	{"VS58", "VS58"},
-	{"VS59", "VS59"},
-	{"VS60", "VS60"},
-	{"VS61", "VS61"},
-	{"VS62", "VS62"},
-	{"VS63", "VS63"},
-	{"V0", "V0"},
-	{"V1", "V1"},
-	{"V2", "V2"},
-	{"V3", "V3"},
-	{"V4", "V4"},
-	{"V5", "V5"},
-	{"V6", "V6"},
-	{"V7", "V7"},
-	{"V8", "V8"},
-	{"V9", "V9"},
-	{"V10", "V10"},
-	{"V11", "V11"},
-	{"V12", "V12"},
-	{"V13", "V13"},
-	{"V14", "V14"},
-	{"V15", "V15"},
-	{"V16", "V16"},
-	{"V17", "V17"},
-	{"V18", "V18"},
-	{"V19", "V19"},
-	{"V20", "V20"},
-	{"V21", "V21"},
-	{"V22", "V22"},
-	{"V23", "V23"},
-	{"V24", "V24"},
-	{"V25", "V25"},
-	{"V26", "V26"},
-	{"V27", "V27"},
-	{"V28", "V28"},
-	{"V29", "V29"},
-	{"V30", "V30"},
-	{"V31", "V31"},
-	{"F14", "F14"},
-	{"F15", "F15"},
-	{"F16", "F16"},
-	{"F17", "F17"},
-	{"F18", "F18"},
-	{"F19", "F19"},
-	{"F20", "F20"},
-	{"F21", "F21"},
-	{"F22", "F22"},
-	{"F23", "F23"},
-	{"F24", "F24"},
-	{"F25", "F25"},
-	{"F26", "F26"},
-	{"F27", "F27"},
-	{"F28", "F28"},
-	{"F29", "F29"},
-	{"F30", "F30"},
-	{"F31", "F31"},
-	{"LR", "LR"},
-	{"R0", "R0"},
-	{"R1", "R1"},
-	{"R11", "R11"},
-	{"R12", "R12"},
-	{"R13", "R13"},
-	{"R14", "R14"},
-	{"R15", "R15"},
-	{"R16", "R16"},
-	{"R17", "R17"},
-	{"R18", "R18"},
-	{"R19", "R19"},
-	{"R2", "R2"},
-	{"R20", "R20"},
-	{"R21", "R21"},
-	{"R22", "R22"},
-	{"R23", "R23"},
-	{"R24", "R24"},
-	{"R25", "R25"},
-	{"R26", "R26"},
-	{"R27", "R27"},
-	{"R28", "R28"},
-	{"R29", "R29"},
-	{"R3", "R3"},
-	{"R31", "R31"},
-	{"R4", "R4"},
-	{"R5", "R5"},
-	{"R6", "R6"},
-	{"R7", "R7"},
-	{"R8", "R8"},
-	{"R9", "R9"},
-	{"SPR(269)", "SPR(269)"},
-	{"a(FP)", "a(FP)"},
-	{"g", "g"},
-	{"ret+8(FP)", "ret+8(FP)"},
-	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
-
-var arm64OperandTests = []operandTest{
-	{"$0", "$0"},
-	{"$0.5", "$(0.5)"},
-	{"0(R26)", "(R26)"},
-	{"0(RSP)", "(RSP)"},
-	{"$1", "$1"},
-	{"$-1", "$-1"},
-	{"$1000", "$1000"},
-	{"$1000000000", "$1000000000"},
-	{"$0x7fff3c000", "$34358935552"},
-	{"$1234", "$1234"},
-	{"$~15", "$-16"},
-	{"$16", "$16"},
-	{"-16(RSP)", "-16(RSP)"},
-	{"16(RSP)", "16(RSP)"},
-	{"1(R1)", "1(R1)"},
-	{"-1(R4)", "-1(R4)"},
-	{"18740(R5)", "18740(R5)"},
-	{"$2", "$2"},
-	{"$-24(R4)", "$-24(R4)"},
-	{"-24(RSP)", "-24(RSP)"},
-	{"$24(RSP)", "$24(RSP)"},
-	{"-32(RSP)", "-32(RSP)"},
-	{"$48", "$48"},
-	{"$(-64*1024)(R7)", "$-65536(R7)"},
-	{"$(8-1)", "$7"},
-	{"a+0(FP)", "a(FP)"},
-	{"a1+8(FP)", "a1+8(FP)"},
-	{"·AddInt32(SB)", `"".AddInt32(SB)`},
-	{"runtime·divWVW(SB)", "runtime.divWVW(SB)"},
-	{"$argframe+0(FP)", "$argframe(FP)"},
-	{"$asmcgocall<>(SB)", "$asmcgocall<>(SB)"},
-	{"EQ", "EQ"},
-	{"F29", "F29"},
-	{"F3", "F3"},
-	{"F30", "F30"},
-	{"g", "g"},
-	{"LR", "R30"},
-	{"(LR)", "(R30)"},
-	{"R0", "R0"},
-	{"R10", "R10"},
-	{"R11", "R11"},
-	{"$4503601774854144.0", "$(4503601774854144.0)"},
-	{"$runtime·badsystemstack(SB)", "$runtime.badsystemstack(SB)"},
-	{"ZR", "ZR"},
-	{"(ZR)", "(ZR)"},
-	{"(R29, RSP)", "(R29, RSP)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
-
-var mips64OperandTests = []operandTest{
-	{"$((1<<63)-1)", "$9223372036854775807"},
-	{"$(-64*1024)", "$-65536"},
-	{"$(1024 * 8)", "$8192"},
-	{"$-1", "$-1"},
-	{"$-24(R4)", "$-24(R4)"},
-	{"$0", "$0"},
-	{"$0(R1)", "$(R1)"},
-	{"$0.5", "$(0.5)"},
-	{"$0x7000", "$28672"},
-	{"$0x88888eef", "$2290650863"},
-	{"$1", "$1"},
-	{"$_main<>(SB)", "$_main<>(SB)"},
-	{"$argframe(FP)", "$argframe(FP)"},
-	{"$~3", "$-4"},
-	{"(-288-3*8)(R1)", "-312(R1)"},
-	{"(16)(R7)", "16(R7)"},
-	{"(8)(g)", "8(g)"},
-	{"(R0)", "(R0)"},
-	{"(R3)", "(R3)"},
-	{"(R4)", "(R4)"},
-	{"(R5)", "(R5)"},
-	{"-1(R4)", "-1(R4)"},
-	{"-1(R5)", "-1(R5)"},
-	{"6(PC)", "6(PC)"},
-	{"F14", "F14"},
-	{"F15", "F15"},
-	{"F16", "F16"},
-	{"F17", "F17"},
-	{"F18", "F18"},
-	{"F19", "F19"},
-	{"F20", "F20"},
-	{"F21", "F21"},
-	{"F22", "F22"},
-	{"F23", "F23"},
-	{"F24", "F24"},
-	{"F25", "F25"},
-	{"F26", "F26"},
-	{"F27", "F27"},
-	{"F28", "F28"},
-	{"F29", "F29"},
-	{"F30", "F30"},
-	{"F31", "F31"},
-	{"R0", "R0"},
-	{"R1", "R1"},
-	{"R11", "R11"},
-	{"R12", "R12"},
-	{"R13", "R13"},
-	{"R14", "R14"},
-	{"R15", "R15"},
-	{"R16", "R16"},
-	{"R17", "R17"},
-	{"R18", "R18"},
-	{"R19", "R19"},
-	{"R2", "R2"},
-	{"R20", "R20"},
-	{"R21", "R21"},
-	{"R22", "R22"},
-	{"R23", "R23"},
-	{"R24", "R24"},
-	{"R25", "R25"},
-	{"R26", "R26"},
-	{"R27", "R27"},
-	{"R29", "R29"},
-	{"R3", "R3"},
-	{"R31", "R31"},
-	{"R4", "R4"},
-	{"R5", "R5"},
-	{"R6", "R6"},
-	{"R7", "R7"},
-	{"R8", "R8"},
-	{"R9", "R9"},
-	{"LO", "LO"},
-	{"a(FP)", "a(FP)"},
-	{"g", "g"},
-	{"RSB", "RSB"},
-	{"ret+8(FP)", "ret+8(FP)"},
-	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
-
-var mipsOperandTests = []operandTest{
-	{"$((1<<63)-1)", "$9223372036854775807"},
-	{"$(-64*1024)", "$-65536"},
-	{"$(1024 * 8)", "$8192"},
-	{"$-1", "$-1"},
-	{"$-24(R4)", "$-24(R4)"},
-	{"$0", "$0"},
-	{"$0(R1)", "$(R1)"},
-	{"$0.5", "$(0.5)"},
-	{"$0x7000", "$28672"},
-	{"$0x88888eef", "$2290650863"},
-	{"$1", "$1"},
-	{"$_main<>(SB)", "$_main<>(SB)"},
-	{"$argframe(FP)", "$argframe(FP)"},
-	{"$~3", "$-4"},
-	{"(-288-3*8)(R1)", "-312(R1)"},
-	{"(16)(R7)", "16(R7)"},
-	{"(8)(g)", "8(g)"},
-	{"(R0)", "(R0)"},
-	{"(R3)", "(R3)"},
-	{"(R4)", "(R4)"},
-	{"(R5)", "(R5)"},
-	{"-1(R4)", "-1(R4)"},
-	{"-1(R5)", "-1(R5)"},
-	{"6(PC)", "6(PC)"},
-	{"F14", "F14"},
-	{"F15", "F15"},
-	{"F16", "F16"},
-	{"F17", "F17"},
-	{"F18", "F18"},
-	{"F19", "F19"},
-	{"F20", "F20"},
-	{"F21", "F21"},
-	{"F22", "F22"},
-	{"F23", "F23"},
-	{"F24", "F24"},
-	{"F25", "F25"},
-	{"F26", "F26"},
-	{"F27", "F27"},
-	{"F28", "F28"},
-	{"F29", "F29"},
-	{"F30", "F30"},
-	{"F31", "F31"},
-	{"R0", "R0"},
-	{"R1", "R1"},
-	{"R11", "R11"},
-	{"R12", "R12"},
-	{"R13", "R13"},
-	{"R14", "R14"},
-	{"R15", "R15"},
-	{"R16", "R16"},
-	{"R17", "R17"},
-	{"R18", "R18"},
-	{"R19", "R19"},
-	{"R2", "R2"},
-	{"R20", "R20"},
-	{"R21", "R21"},
-	{"R22", "R22"},
-	{"R23", "R23"},
-	{"R24", "R24"},
-	{"R25", "R25"},
-	{"R26", "R26"},
-	{"R27", "R27"},
-	{"R29", "R29"},
-	{"R3", "R3"},
-	{"R31", "R31"},
-	{"R4", "R4"},
-	{"R5", "R5"},
-	{"R6", "R6"},
-	{"R7", "R7"},
-	{"R8", "R8"},
-	{"R9", "R9"},
-	{"LO", "LO"},
-	{"a(FP)", "a(FP)"},
-	{"g", "g"},
-	{"ret+8(FP)", "ret+8(FP)"},
-	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
-
-var s390xOperandTests = []operandTest{
-	{"$((1<<63)-1)", "$9223372036854775807"},
-	{"$(-64*1024)", "$-65536"},
-	{"$(1024 * 8)", "$8192"},
-	{"$-1", "$-1"},
-	{"$-24(R4)", "$-24(R4)"},
-	{"$0", "$0"},
-	{"$0(R1)", "$(R1)"},
-	{"$0.5", "$(0.5)"},
-	{"$0x7000", "$28672"},
-	{"$0x88888eef", "$2290650863"},
-	{"$1", "$1"},
-	{"$_main<>(SB)", "$_main<>(SB)"},
-	{"$argframe(FP)", "$argframe(FP)"},
-	{"$~3", "$-4"},
-	{"(-288-3*8)(R1)", "-312(R1)"},
-	{"(16)(R7)", "16(R7)"},
-	{"(8)(g)", "8(g)"},
-	{"(R0)", "(R0)"},
-	{"(R3)", "(R3)"},
-	{"(R4)", "(R4)"},
-	{"(R5)", "(R5)"},
-	{"-1(R4)", "-1(R4)"},
-	{"-1(R5)", "-1(R5)"},
-	{"6(PC)", "6(PC)"},
-	{"R0", "R0"},
-	{"R1", "R1"},
-	{"R2", "R2"},
-	{"R3", "R3"},
-	{"R4", "R4"},
-	{"R5", "R5"},
-	{"R6", "R6"},
-	{"R7", "R7"},
-	{"R8", "R8"},
-	{"R9", "R9"},
-	{"R10", "R10"},
-	{"R11", "R11"},
-	{"R12", "R12"},
-	// {"R13", "R13"}, R13 is g
-	{"R14", "R14"},
-	{"R15", "R15"},
-	{"F0", "F0"},
-	{"F1", "F1"},
-	{"F2", "F2"},
-	{"F3", "F3"},
-	{"F4", "F4"},
-	{"F5", "F5"},
-	{"F6", "F6"},
-	{"F7", "F7"},
-	{"F8", "F8"},
-	{"F9", "F9"},
-	{"F10", "F10"},
-	{"F11", "F11"},
-	{"F12", "F12"},
-	{"F13", "F13"},
-	{"F14", "F14"},
-	{"F15", "F15"},
-	{"V0", "V0"},
-	{"V1", "V1"},
-	{"V2", "V2"},
-	{"V3", "V3"},
-	{"V4", "V4"},
-	{"V5", "V5"},
-	{"V6", "V6"},
-	{"V7", "V7"},
-	{"V8", "V8"},
-	{"V9", "V9"},
-	{"V10", "V10"},
-	{"V11", "V11"},
-	{"V12", "V12"},
-	{"V13", "V13"},
-	{"V14", "V14"},
-	{"V15", "V15"},
-	{"V16", "V16"},
-	{"V17", "V17"},
-	{"V18", "V18"},
-	{"V19", "V19"},
-	{"V20", "V20"},
-	{"V21", "V21"},
-	{"V22", "V22"},
-	{"V23", "V23"},
-	{"V24", "V24"},
-	{"V25", "V25"},
-	{"V26", "V26"},
-	{"V27", "V27"},
-	{"V28", "V28"},
-	{"V29", "V29"},
-	{"V30", "V30"},
-	{"V31", "V31"},
-	{"a(FP)", "a(FP)"},
-	{"g", "g"},
-	{"ret+8(FP)", "ret+8(FP)"},
-	{"runtime·abort(SB)", "runtime.abort(SB)"},
-	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
-	{"·trunc(SB)", "\"\".trunc(SB)"},
-	{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/parse.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/parse.go
deleted file mode 100644
index 287b1fa..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/parse.go
+++ /dev/null
@@ -1,1017 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/parse.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/parse.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package asm implements the parser and instruction generator for the assembler.
-// TODO: Split apart?
-package asm
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"strconv"
-	"text/scanner"
-	"unicode/utf8"
-
-	"bootstrap/cmd/asm/internal/arch"
-	"bootstrap/cmd/asm/internal/flags"
-	"bootstrap/cmd/asm/internal/lex"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-)
-
-type Parser struct {
-	lex           lex.TokenReader
-	lineNum       int   // Line number in source file.
-	histLineNum   int32 // Cumulative line number across source files.
-	errorLine     int32 // (Cumulative) line number of last error.
-	errorCount    int   // Number of errors.
-	pc            int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA.
-	input         []lex.Token
-	inputPos      int
-	pendingLabels []string // Labels to attach to next instruction.
-	labels        map[string]*obj.Prog
-	toPatch       []Patch
-	addr          []obj.Addr
-	arch          *arch.Arch
-	ctxt          *obj.Link
-	firstProg     *obj.Prog
-	lastProg      *obj.Prog
-	dataAddr      map[string]int64 // Most recent address for DATA for this symbol.
-	isJump        bool             // Instruction being assembled is a jump.
-	errorWriter   io.Writer
-}
-
-type Patch struct {
-	prog  *obj.Prog
-	label string
-}
-
-func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser {
-	return &Parser{
-		ctxt:        ctxt,
-		arch:        ar,
-		lex:         lexer,
-		labels:      make(map[string]*obj.Prog),
-		dataAddr:    make(map[string]int64),
-		errorWriter: os.Stderr,
-	}
-}
-
-// panicOnError is enabled when testing to abort execution on the first error
-// and turn it into a recoverable panic.
-var panicOnError bool
-
-func (p *Parser) errorf(format string, args ...interface{}) {
-	if panicOnError {
-		panic(fmt.Errorf(format, args...))
-	}
-	if p.histLineNum == p.errorLine {
-		// Only one error per line.
-		return
-	}
-	p.errorLine = p.histLineNum
-	if p.lex != nil {
-		// Put file and line information on head of message.
-		format = "%s:%d: " + format + "\n"
-		args = append([]interface{}{p.lex.File(), p.lineNum}, args...)
-	}
-	fmt.Fprintf(p.errorWriter, format, args...)
-	p.errorCount++
-	if p.errorCount > 10 && !*flags.AllErrors {
-		log.Fatal("too many errors")
-	}
-}
-
-func (p *Parser) Parse() (*obj.Prog, bool) {
-	for p.line() {
-	}
-	if p.errorCount > 0 {
-		return nil, false
-	}
-	p.patch()
-	return p.firstProg, true
-}
-
-// WORD [ arg {, arg} ] (';' | '\n')
-func (p *Parser) line() bool {
-	// Skip newlines.
-	var tok lex.ScanToken
-	for {
-		tok = p.lex.Next()
-		// We save the line number here so error messages from this instruction
-		// are labeled with this line. Otherwise we complain after we've absorbed
-		// the terminating newline and the line numbers are off by one in errors.
-		p.lineNum = p.lex.Line()
-		p.histLineNum = lex.HistLine()
-		switch tok {
-		case '\n', ';':
-			continue
-		case scanner.EOF:
-			return false
-		}
-		break
-	}
-	// First item must be an identifier.
-	if tok != scanner.Ident {
-		p.errorf("expected identifier, found %q", p.lex.Text())
-		return false // Might as well stop now.
-	}
-	word := p.lex.Text()
-	var cond string
-	operands := make([][]lex.Token, 0, 3)
-	// Zero or more comma-separated operands, one per loop.
-	nesting := 0
-	colon := -1
-	for tok != '\n' && tok != ';' {
-		// Process one operand.
-		items := make([]lex.Token, 0, 3)
-		for {
-			tok = p.lex.Next()
-			if len(operands) == 0 && len(items) == 0 {
-				if p.arch.InFamily(sys.ARM, sys.ARM64) && tok == '.' {
-					// ARM conditionals.
-					tok = p.lex.Next()
-					str := p.lex.Text()
-					if tok != scanner.Ident {
-						p.errorf("ARM condition expected identifier, found %s", str)
-					}
-					cond = cond + "." + str
-					continue
-				}
-				if tok == ':' {
-					// Labels.
-					p.pendingLabels = append(p.pendingLabels, word)
-					return true
-				}
-			}
-			if tok == scanner.EOF {
-				p.errorf("unexpected EOF")
-				return false
-			}
-			// Split operands on comma. Also, the old syntax on x86 for a "register pair"
-			// was AX:DX, for which the new syntax is DX, AX. Note the reordering.
-			if tok == '\n' || tok == ';' || (nesting == 0 && (tok == ',' || tok == ':')) {
-				if tok == ':' {
-					// Remember this location so we can swap the operands below.
-					if colon >= 0 {
-						p.errorf("invalid ':' in operand")
-						return true
-					}
-					colon = len(operands)
-				}
-				break
-			}
-			if tok == '(' || tok == '[' {
-				nesting++
-			}
-			if tok == ')' || tok == ']' {
-				nesting--
-			}
-			items = append(items, lex.Make(tok, p.lex.Text()))
-		}
-		if len(items) > 0 {
-			operands = append(operands, items)
-			if colon >= 0 && len(operands) == colon+2 {
-				// AX:DX becomes DX, AX.
-				operands[colon], operands[colon+1] = operands[colon+1], operands[colon]
-				colon = -1
-			}
-		} else if len(operands) > 0 || tok == ',' || colon >= 0 {
-			// Had a separator with nothing after.
-			p.errorf("missing operand")
-		}
-	}
-	if p.pseudo(word, operands) {
-		return true
-	}
-	i, present := p.arch.Instructions[word]
-	if present {
-		p.instruction(i, word, cond, operands)
-		return true
-	}
-	p.errorf("unrecognized instruction %q", word)
-	return true
-}
-
-func (p *Parser) instruction(op obj.As, word, cond string, operands [][]lex.Token) {
-	p.addr = p.addr[0:0]
-	p.isJump = p.arch.IsJump(word)
-	for _, op := range operands {
-		addr := p.address(op)
-		if !p.isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo.
-			p.errorf("illegal use of pseudo-register in %s", word)
-		}
-		p.addr = append(p.addr, addr)
-	}
-	if p.isJump {
-		p.asmJump(op, cond, p.addr)
-		return
-	}
-	p.asmInstruction(op, cond, p.addr)
-}
-
-func (p *Parser) pseudo(word string, operands [][]lex.Token) bool {
-	switch word {
-	case "DATA":
-		p.asmData(word, operands)
-	case "FUNCDATA":
-		p.asmFuncData(word, operands)
-	case "GLOBL":
-		p.asmGlobl(word, operands)
-	case "PCDATA":
-		p.asmPCData(word, operands)
-	case "TEXT":
-		p.asmText(word, operands)
-	default:
-		return false
-	}
-	return true
-}
-
-func (p *Parser) start(operand []lex.Token) {
-	p.input = operand
-	p.inputPos = 0
-}
-
-// address parses the operand into a link address structure.
-func (p *Parser) address(operand []lex.Token) obj.Addr {
-	p.start(operand)
-	addr := obj.Addr{}
-	p.operand(&addr)
-	return addr
-}
-
-// parseScale converts a decimal string into a valid scale factor.
-func (p *Parser) parseScale(s string) int8 {
-	switch s {
-	case "1", "2", "4", "8":
-		return int8(s[0] - '0')
-	}
-	p.errorf("bad scale: %s", s)
-	return 0
-}
-
-// operand parses a general operand and stores the result in *a.
-func (p *Parser) operand(a *obj.Addr) bool {
-	//fmt.Printf("Operand: %v\n", p.input)
-	if len(p.input) == 0 {
-		p.errorf("empty operand: cannot happen")
-		return false
-	}
-	// General address (with a few exceptions) looks like
-	//	$sym±offset(SB)(reg)(index*scale)
-	// Exceptions are:
-	//
-	//	R1
-	//	offset
-	//	$offset
-	// Every piece is optional, so we scan left to right and what
-	// we discover tells us where we are.
-
-	// Prefix: $.
-	var prefix rune
-	switch tok := p.peek(); tok {
-	case '$', '*':
-		prefix = rune(tok)
-		p.next()
-	}
-
-	// Symbol: sym±offset(SB)
-	tok := p.next()
-	name := tok.String()
-	if tok.ScanToken == scanner.Ident && !p.atStartOfRegister(name) {
-		// We have a symbol. Parse $sym±offset(symkind)
-		p.symbolReference(a, name, prefix)
-		// fmt.Printf("SYM %s\n", obj.Dconv(&emptyProg, 0, a))
-		if p.peek() == scanner.EOF {
-			return true
-		}
-	}
-
-	// Special register list syntax for arm: [R1,R3-R7]
-	if tok.ScanToken == '[' {
-		if prefix != 0 {
-			p.errorf("illegal use of register list")
-		}
-		p.registerList(a)
-		p.expectOperandEnd()
-		return true
-	}
-
-	// Register: R1
-	if tok.ScanToken == scanner.Ident && p.atStartOfRegister(name) {
-		if p.atRegisterShift() {
-			// ARM shifted register such as R1<<R2 or R1>>2.
-			a.Type = obj.TYPE_SHIFT
-			a.Offset = p.registerShift(tok.String(), prefix)
-			if p.peek() == '(' {
-				// Can only be a literal register here.
-				p.next()
-				tok := p.next()
-				name := tok.String()
-				if !p.atStartOfRegister(name) {
-					p.errorf("expected register; found %s", name)
-				}
-				a.Reg, _ = p.registerReference(name)
-				p.get(')')
-			}
-		} else if r1, r2, scale, ok := p.register(tok.String(), prefix); ok {
-			if scale != 0 {
-				p.errorf("expected simple register reference")
-			}
-			a.Type = obj.TYPE_REG
-			a.Reg = r1
-			if r2 != 0 {
-				// Form is R1:R2. It is on RHS and the second register
-				// needs to go into the LHS.
-				panic("cannot happen (Addr.Reg2)")
-			}
-		}
-		// fmt.Printf("REG %s\n", obj.Dconv(&emptyProg, 0, a))
-		p.expectOperandEnd()
-		return true
-	}
-
-	// Constant.
-	haveConstant := false
-	switch tok.ScanToken {
-	case scanner.Int, scanner.Float, scanner.String, scanner.Char, '+', '-', '~':
-		haveConstant = true
-	case '(':
-		// Could be parenthesized expression or (R). Must be something, though.
-		tok := p.next()
-		if tok.ScanToken == scanner.EOF {
-			p.errorf("missing right parenthesis")
-			return false
-		}
-		rname := tok.String()
-		p.back()
-		haveConstant = !p.atStartOfRegister(rname)
-		if !haveConstant {
-			p.back() // Put back the '('.
-		}
-	}
-	if haveConstant {
-		p.back()
-		if p.have(scanner.Float) {
-			if prefix != '$' {
-				p.errorf("floating-point constant must be an immediate")
-			}
-			a.Type = obj.TYPE_FCONST
-			a.Val = p.floatExpr()
-			// fmt.Printf("FCONST %s\n", obj.Dconv(&emptyProg, 0, a))
-			p.expectOperandEnd()
-			return true
-		}
-		if p.have(scanner.String) {
-			if prefix != '$' {
-				p.errorf("string constant must be an immediate")
-				return false
-			}
-			str, err := strconv.Unquote(p.get(scanner.String).String())
-			if err != nil {
-				p.errorf("string parse error: %s", err)
-			}
-			a.Type = obj.TYPE_SCONST
-			a.Val = str
-			// fmt.Printf("SCONST %s\n", obj.Dconv(&emptyProg, 0, a))
-			p.expectOperandEnd()
-			return true
-		}
-		a.Offset = int64(p.expr())
-		if p.peek() != '(' {
-			switch prefix {
-			case '$':
-				a.Type = obj.TYPE_CONST
-			case '*':
-				a.Type = obj.TYPE_INDIR // Can appear but is illegal, will be rejected by the linker.
-			default:
-				a.Type = obj.TYPE_MEM
-			}
-			// fmt.Printf("CONST %d %s\n", a.Offset, obj.Dconv(&emptyProg, 0, a))
-			p.expectOperandEnd()
-			return true
-		}
-		// fmt.Printf("offset %d \n", a.Offset)
-	}
-
-	// Register indirection: (reg) or (index*scale). We are on the opening paren.
-	p.registerIndirect(a, prefix)
-	// fmt.Printf("DONE %s\n", p.arch.Dconv(&emptyProg, 0, a))
-
-	p.expectOperandEnd()
-	return true
-}
-
-// atStartOfRegister reports whether the parser is at the start of a register definition.
-func (p *Parser) atStartOfRegister(name string) bool {
-	// Simple register: R10.
-	_, present := p.arch.Register[name]
-	if present {
-		return true
-	}
-	// Parenthesized register: R(10).
-	return p.arch.RegisterPrefix[name] && p.peek() == '('
-}
-
-// atRegisterShift reports whether we are at the start of an ARM shifted register.
-// We have consumed the register or R prefix.
-func (p *Parser) atRegisterShift() bool {
-	// ARM only.
-	if p.arch.Family != sys.ARM {
-		return false
-	}
-	// R1<<...
-	if lex.IsRegisterShift(p.peek()) {
-		return true
-	}
-	// R(1)<<...   Ugly check. TODO: Rethink how we handle ARM register shifts to be
-	// less special.
-	if p.peek() != '(' || len(p.input)-p.inputPos < 4 {
-		return false
-	}
-	return p.at('(', scanner.Int, ')') && lex.IsRegisterShift(p.input[p.inputPos+3].ScanToken)
-}
-
-// registerReference parses a register given either the name, R10, or a parenthesized form, SPR(10).
-func (p *Parser) registerReference(name string) (int16, bool) {
-	r, present := p.arch.Register[name]
-	if present {
-		return r, true
-	}
-	if !p.arch.RegisterPrefix[name] {
-		p.errorf("expected register; found %s", name)
-		return 0, false
-	}
-	p.get('(')
-	tok := p.get(scanner.Int)
-	num, err := strconv.ParseInt(tok.String(), 10, 16)
-	p.get(')')
-	if err != nil {
-		p.errorf("parsing register list: %s", err)
-		return 0, false
-	}
-	r, ok := p.arch.RegisterNumber(name, int16(num))
-	if !ok {
-		p.errorf("illegal register %s(%d)", name, r)
-		return 0, false
-	}
-	return r, true
-}
-
-// register parses a full register reference where there is no symbol present (as in 4(R0) or R(10) but not sym(SB))
-// including forms involving multiple registers such as R1:R2.
-func (p *Parser) register(name string, prefix rune) (r1, r2 int16, scale int8, ok bool) {
-	// R1 or R(1) R1:R2 R1,R2 R1+R2, or R1*scale.
-	r1, ok = p.registerReference(name)
-	if !ok {
-		return
-	}
-	if prefix != 0 && prefix != '*' { // *AX is OK.
-		p.errorf("prefix %c not allowed for register: %c%s", prefix, prefix, name)
-	}
-	c := p.peek()
-	if c == ':' || c == ',' || c == '+' {
-		// 2nd register; syntax (R1+R2) etc. No two architectures agree.
-		// Check the architectures match the syntax.
-		switch p.next().ScanToken {
-		case ',':
-			if !p.arch.InFamily(sys.ARM, sys.ARM64) {
-				p.errorf("(register,register) not supported on this architecture")
-				return
-			}
-		case '+':
-			if p.arch.Family != sys.PPC64 {
-				p.errorf("(register+register) not supported on this architecture")
-				return
-			}
-		}
-		name := p.next().String()
-		r2, ok = p.registerReference(name)
-		if !ok {
-			return
-		}
-	}
-	if p.peek() == '*' {
-		// Scale
-		p.next()
-		scale = p.parseScale(p.next().String())
-	}
-	return r1, r2, scale, true
-}
-
-// registerShift parses an ARM shifted register reference and returns the encoded representation.
-// There is known to be a register (current token) and a shift operator (peeked token).
-func (p *Parser) registerShift(name string, prefix rune) int64 {
-	if prefix != 0 {
-		p.errorf("prefix %c not allowed for shifted register: $%s", prefix, name)
-	}
-	// R1 op R2 or r1 op constant.
-	// op is:
-	//	"<<" == 0
-	//	">>" == 1
-	//	"->" == 2
-	//	"@>" == 3
-	r1, ok := p.registerReference(name)
-	if !ok {
-		return 0
-	}
-	var op int16
-	switch p.next().ScanToken {
-	case lex.LSH:
-		op = 0
-	case lex.RSH:
-		op = 1
-	case lex.ARR:
-		op = 2
-	case lex.ROT:
-		op = 3
-	}
-	tok := p.next()
-	str := tok.String()
-	var count int16
-	switch tok.ScanToken {
-	case scanner.Ident:
-		r2, ok := p.registerReference(str)
-		if !ok {
-			p.errorf("rhs of shift must be register or integer: %s", str)
-		}
-		count = (r2&15)<<8 | 1<<4
-	case scanner.Int, '(':
-		p.back()
-		x := int64(p.expr())
-		if x >= 32 {
-			p.errorf("register shift count too large: %s", str)
-		}
-		count = int16((x & 31) << 7)
-	default:
-		p.errorf("unexpected %s in register shift", tok.String())
-	}
-	return int64((r1 & 15) | op<<5 | count)
-}
-
-// symbolReference parses a symbol that is known not to be a register.
-func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
-	// Identifier is a name.
-	switch prefix {
-	case 0:
-		a.Type = obj.TYPE_MEM
-	case '$':
-		a.Type = obj.TYPE_ADDR
-	case '*':
-		a.Type = obj.TYPE_INDIR
-	}
-	// Weirdness with statics: Might now have "<>".
-	isStatic := 0 // TODO: Really a boolean, but Linklookup wants a "version" integer.
-	if p.peek() == '<' {
-		isStatic = 1
-		p.next()
-		p.get('>')
-	}
-	if p.peek() == '+' || p.peek() == '-' {
-		a.Offset = int64(p.expr())
-	}
-	a.Sym = obj.Linklookup(p.ctxt, name, isStatic)
-	if p.peek() == scanner.EOF {
-		if prefix == 0 && p.isJump {
-			// Symbols without prefix or suffix are jump labels.
-			return
-		}
-		p.errorf("illegal or missing addressing mode for symbol %s", name)
-		return
-	}
-	// Expect (SB), (FP), (PC), or (SP)
-	p.get('(')
-	reg := p.get(scanner.Ident).String()
-	p.get(')')
-	p.setPseudoRegister(a, reg, isStatic != 0, prefix)
-}
-
-// setPseudoRegister sets the NAME field of addr for a pseudo-register reference such as (SB).
-func (p *Parser) setPseudoRegister(addr *obj.Addr, reg string, isStatic bool, prefix rune) {
-	if addr.Reg != 0 {
-		p.errorf("internal error: reg %s already set in pseudo", reg)
-	}
-	switch reg {
-	case "FP":
-		addr.Name = obj.NAME_PARAM
-	case "PC":
-		if prefix != 0 {
-			p.errorf("illegal addressing mode for PC")
-		}
-		addr.Type = obj.TYPE_BRANCH // We set the type and leave NAME untouched. See asmJump.
-	case "SB":
-		addr.Name = obj.NAME_EXTERN
-		if isStatic {
-			addr.Name = obj.NAME_STATIC
-		}
-	case "SP":
-		addr.Name = obj.NAME_AUTO // The pseudo-stack.
-	default:
-		p.errorf("expected pseudo-register; found %s", reg)
-	}
-	if prefix == '$' {
-		addr.Type = obj.TYPE_ADDR
-	}
-}
-
-// registerIndirect parses the general form of a register indirection.
-// It can be (R1), (R2*scale), or (R1)(R2*scale) where R1 may be a simple
-// register or register pair R:R or (R, R) or (R+R).
-// Or it might be a pseudo-indirection like (FP).
-// We are sitting on the opening parenthesis.
-func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) {
-	p.get('(')
-	tok := p.next()
-	name := tok.String()
-	r1, r2, scale, ok := p.register(name, 0)
-	if !ok {
-		p.errorf("indirect through non-register %s", tok)
-	}
-	p.get(')')
-	a.Type = obj.TYPE_MEM
-	if r1 < 0 {
-		// Pseudo-register reference.
-		if r2 != 0 {
-			p.errorf("cannot use pseudo-register in pair")
-			return
-		}
-		// For SB, SP, and FP, there must be a name here. 0(FP) is not legal.
-		if name != "PC" && a.Name == obj.NAME_NONE {
-			p.errorf("cannot reference %s without a symbol", name)
-		}
-		p.setPseudoRegister(a, name, false, prefix)
-		return
-	}
-	a.Reg = r1
-	if r2 != 0 {
-		// TODO: Consistency in the encoding would be nice here.
-		if p.arch.InFamily(sys.ARM, sys.ARM64) {
-			// Special form
-			// ARM: destination register pair (R1, R2).
-			// ARM64: register pair (R1, R2) for LDP/STP.
-			if prefix != 0 || scale != 0 {
-				p.errorf("illegal address mode for register pair")
-				return
-			}
-			a.Type = obj.TYPE_REGREG
-			a.Offset = int64(r2)
-			// Nothing may follow
-			return
-		}
-		if p.arch.Family == sys.PPC64 {
-			// Special form for PPC64: (R1+R2); alias for (R1)(R2*1).
-			if prefix != 0 || scale != 0 {
-				p.errorf("illegal address mode for register+register")
-				return
-			}
-			a.Type = obj.TYPE_MEM
-			a.Scale = 1
-			a.Index = r2
-			// Nothing may follow.
-			return
-		}
-	}
-	if r2 != 0 {
-		p.errorf("indirect through register pair")
-	}
-	if prefix == '$' {
-		a.Type = obj.TYPE_ADDR
-	}
-	if r1 == arch.RPC && prefix != 0 {
-		p.errorf("illegal addressing mode for PC")
-	}
-	if scale == 0 && p.peek() == '(' {
-		// General form (R)(R*scale).
-		p.next()
-		tok := p.next()
-		r1, r2, scale, ok = p.register(tok.String(), 0)
-		if !ok {
-			p.errorf("indirect through non-register %s", tok)
-		}
-		if r2 != 0 {
-			p.errorf("unimplemented two-register form")
-		}
-		a.Index = r1
-		a.Scale = int16(scale)
-		p.get(')')
-	} else if scale != 0 {
-		// First (R) was missing, all we have is (R*scale).
-		a.Reg = 0
-		a.Index = r1
-		a.Scale = int16(scale)
-	}
-}
-
-// registerList parses an ARM register list expression, a list of registers in [].
-// There may be comma-separated ranges or individual registers, as in
-// [R1,R3-R5]. Only R0 through R15 may appear.
-// The opening bracket has been consumed.
-func (p *Parser) registerList(a *obj.Addr) {
-	// One range per loop.
-	const maxReg = 16
-	var bits uint16
-ListLoop:
-	for {
-		tok := p.next()
-		switch tok.ScanToken {
-		case ']':
-			break ListLoop
-		case scanner.EOF:
-			p.errorf("missing ']' in register list")
-			return
-		}
-		// Parse the upper and lower bounds.
-		lo := p.registerNumber(tok.String())
-		hi := lo
-		if p.peek() == '-' {
-			p.next()
-			hi = p.registerNumber(p.next().String())
-		}
-		if hi < lo {
-			lo, hi = hi, lo
-		}
-		// Check there are no duplicates in the register list.
-		for i := 0; lo <= hi && i < maxReg; i++ {
-			if bits&(1<<lo) != 0 {
-				p.errorf("register R%d already in list", lo)
-			}
-			bits |= 1 << lo
-			lo++
-		}
-		if p.peek() != ']' {
-			p.get(',')
-		}
-	}
-	a.Type = obj.TYPE_REGLIST
-	a.Offset = int64(bits)
-}
-
-// registerNumber is ARM-specific. It returns the number of the specified register.
-func (p *Parser) registerNumber(name string) uint16 {
-	if p.arch.Family == sys.ARM && name == "g" {
-		return 10
-	}
-	if name[0] != 'R' {
-		p.errorf("expected g or R0 through R15; found %s", name)
-		return 0
-	}
-	r, ok := p.registerReference(name)
-	if !ok {
-		return 0
-	}
-	reg := r - p.arch.Register["R0"]
-	if reg < 0 {
-		// Could happen for an architecture having other registers prefixed by R
-		p.errorf("expected g or R0 through R15; found %s", name)
-		return 0
-	}
-	return uint16(reg)
-}
-
-// Note: There are two changes in the expression handling here
-// compared to the old yacc/C implementations. Neither has
-// much practical consequence because the expressions we
-// see in assembly code are simple, but for the record:
-//
-// 1) Evaluation uses uint64; the old one used int64.
-// 2) Precedence uses Go rules not C rules.
-
-// expr = term | term ('+' | '-' | '|' | '^') term.
-func (p *Parser) expr() uint64 {
-	value := p.term()
-	for {
-		switch p.peek() {
-		case '+':
-			p.next()
-			value += p.term()
-		case '-':
-			p.next()
-			value -= p.term()
-		case '|':
-			p.next()
-			value |= p.term()
-		case '^':
-			p.next()
-			value ^= p.term()
-		default:
-			return value
-		}
-	}
-}
-
-// floatExpr = fconst | '-' floatExpr | '+' floatExpr | '(' floatExpr ')'
-func (p *Parser) floatExpr() float64 {
-	tok := p.next()
-	switch tok.ScanToken {
-	case '(':
-		v := p.floatExpr()
-		if p.next().ScanToken != ')' {
-			p.errorf("missing closing paren")
-		}
-		return v
-	case '+':
-		return +p.floatExpr()
-	case '-':
-		return -p.floatExpr()
-	case scanner.Float:
-		return p.atof(tok.String())
-	}
-	p.errorf("unexpected %s evaluating float expression", tok)
-	return 0
-}
-
-// term = factor | factor ('*' | '/' | '%' | '>>' | '<<' | '&') factor
-func (p *Parser) term() uint64 {
-	value := p.factor()
-	for {
-		switch p.peek() {
-		case '*':
-			p.next()
-			value *= p.factor()
-		case '/':
-			p.next()
-			if int64(value) < 0 {
-				p.errorf("divide of value with high bit set")
-			}
-			divisor := p.factor()
-			if divisor == 0 {
-				p.errorf("division by zero")
-			} else {
-				value /= divisor
-			}
-		case '%':
-			p.next()
-			divisor := p.factor()
-			if int64(value) < 0 {
-				p.errorf("modulo of value with high bit set")
-			}
-			if divisor == 0 {
-				p.errorf("modulo by zero")
-			} else {
-				value %= divisor
-			}
-		case lex.LSH:
-			p.next()
-			shift := p.factor()
-			if int64(shift) < 0 {
-				p.errorf("negative left shift count")
-			}
-			return value << shift
-		case lex.RSH:
-			p.next()
-			shift := p.term()
-			if int64(shift) < 0 {
-				p.errorf("negative right shift count")
-			}
-			if int64(value) < 0 {
-				p.errorf("right shift of value with high bit set")
-			}
-			value >>= shift
-		case '&':
-			p.next()
-			value &= p.factor()
-		default:
-			return value
-		}
-	}
-}
-
-// factor = const | '+' factor | '-' factor | '~' factor | '(' expr ')'
-func (p *Parser) factor() uint64 {
-	tok := p.next()
-	switch tok.ScanToken {
-	case scanner.Int:
-		return p.atoi(tok.String())
-	case scanner.Char:
-		str, err := strconv.Unquote(tok.String())
-		if err != nil {
-			p.errorf("%s", err)
-		}
-		r, w := utf8.DecodeRuneInString(str)
-		if w == 1 && r == utf8.RuneError {
-			p.errorf("illegal UTF-8 encoding for character constant")
-		}
-		return uint64(r)
-	case '+':
-		return +p.factor()
-	case '-':
-		return -p.factor()
-	case '~':
-		return ^p.factor()
-	case '(':
-		v := p.expr()
-		if p.next().ScanToken != ')' {
-			p.errorf("missing closing paren")
-		}
-		return v
-	}
-	p.errorf("unexpected %s evaluating expression", tok)
-	return 0
-}
-
-// positiveAtoi returns an int64 that must be >= 0.
-func (p *Parser) positiveAtoi(str string) int64 {
-	value, err := strconv.ParseInt(str, 0, 64)
-	if err != nil {
-		p.errorf("%s", err)
-	}
-	if value < 0 {
-		p.errorf("%s overflows int64", str)
-	}
-	return value
-}
-
-func (p *Parser) atoi(str string) uint64 {
-	value, err := strconv.ParseUint(str, 0, 64)
-	if err != nil {
-		p.errorf("%s", err)
-	}
-	return value
-}
-
-func (p *Parser) atof(str string) float64 {
-	value, err := strconv.ParseFloat(str, 64)
-	if err != nil {
-		p.errorf("%s", err)
-	}
-	return value
-}
-
-// EOF represents the end of input.
-var EOF = lex.Make(scanner.EOF, "EOF")
-
-func (p *Parser) next() lex.Token {
-	if !p.more() {
-		return EOF
-	}
-	tok := p.input[p.inputPos]
-	p.inputPos++
-	return tok
-}
-
-func (p *Parser) back() {
-	if p.inputPos == 0 {
-		p.errorf("internal error: backing up before BOL")
-	} else {
-		p.inputPos--
-	}
-}
-
-func (p *Parser) peek() lex.ScanToken {
-	if p.more() {
-		return p.input[p.inputPos].ScanToken
-	}
-	return scanner.EOF
-}
-
-func (p *Parser) more() bool {
-	return p.inputPos < len(p.input)
-}
-
-// get verifies that the next item has the expected type and returns it.
-func (p *Parser) get(expected lex.ScanToken) lex.Token {
-	p.expect(expected, expected.String())
-	return p.next()
-}
-
-// expectOperandEnd verifies that the parsing state is properly at the end of an operand.
-func (p *Parser) expectOperandEnd() {
-	p.expect(scanner.EOF, "end of operand")
-}
-
-// expect verifies that the next item has the expected type. It does not consume it.
-func (p *Parser) expect(expectedToken lex.ScanToken, expectedMessage string) {
-	if p.peek() != expectedToken {
-		p.errorf("expected %s, found %s", expectedMessage, p.next())
-	}
-}
-
-// have reports whether the remaining tokens (including the current one) contain the specified token.
-func (p *Parser) have(token lex.ScanToken) bool {
-	for i := p.inputPos; i < len(p.input); i++ {
-		if p.input[i].ScanToken == token {
-			return true
-		}
-	}
-	return false
-}
-
-// at reports whether the next tokens are as requested.
-func (p *Parser) at(next ...lex.ScanToken) bool {
-	if len(p.input)-p.inputPos < len(next) {
-		return false
-	}
-	for i, r := range next {
-		if p.input[p.inputPos+i].ScanToken != r {
-			return false
-		}
-	}
-	return true
-}
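
The parse.go removed above evaluates constant operand expressions (for example $(1024 * 8) folding to $8192 in the operand tests) with a two-level recursive-descent scheme: expr handles + - | ^, term handles * / % << >> &, and all arithmetic is uint64 with Go precedence rules. The following is a minimal standalone sketch of that scheme, not part of the deleted file; the exprParser type and its token handling are invented here, and only a subset of the operators is shown.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// exprParser is a toy recursive-descent evaluator with the same two
// precedence levels as the deleted expr/term/factor functions:
// expr = term {('+'|'-') term}, term = factor {('*'|'/') factor}.
// All arithmetic is uint64, as in the deleted parser.
type exprParser struct {
	toks []string
	pos  int
}

func (p *exprParser) peek() string {
	if p.pos < len(p.toks) {
		return p.toks[p.pos]
	}
	return ""
}

// expr handles the low-precedence level; only + and - of the four
// expr-level operators are shown.
func (p *exprParser) expr() uint64 {
	v := p.term()
	for {
		switch p.peek() {
		case "+":
			p.pos++
			v += p.term()
		case "-":
			p.pos++
			v -= p.term()
		default:
			return v
		}
	}
}

// term handles the high-precedence level; only * and / of the six
// term-level operators are shown. The deleted parser additionally rejects
// division by zero and operands with the high bit set.
func (p *exprParser) term() uint64 {
	v := p.factor()
	for {
		switch p.peek() {
		case "*":
			p.pos++
			v *= p.factor()
		case "/":
			p.pos++
			v /= p.factor()
		default:
			return v
		}
	}
}

// factor accepts only integer literals here; the deleted parser also accepts
// character constants, unary + - ~, and parenthesized subexpressions.
func (p *exprParser) factor() uint64 {
	n, err := strconv.ParseUint(p.toks[p.pos], 0, 64)
	if err != nil {
		panic(err)
	}
	p.pos++
	return n
}

func main() {
	p := &exprParser{toks: strings.Fields("1024 * 8 + 16")}
	fmt.Println(p.expr()) // 8208: '*' binds more tightly than '+'
}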
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/pseudo_test.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/pseudo_test.go
deleted file mode 100644
index df37464..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/asm/pseudo_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/pseudo_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/asm/pseudo_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package asm
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-
-	"bootstrap/cmd/asm/internal/lex"
-)
-
-func tokenize(s string) [][]lex.Token {
-	res := [][]lex.Token{}
-	if len(s) == 0 {
-		return res
-	}
-	for _, o := range strings.Split(s, ",") {
-		res = append(res, lex.Tokenize(o))
-	}
-	return res
-}
-
-func TestErroneous(t *testing.T) {
-
-	tests := []struct {
-		pseudo   string
-		operands string
-		expected string
-	}{
-		{"TEXT", "", "expect two or three operands for TEXT"},
-		{"TEXT", "%", "expect two or three operands for TEXT"},
-		{"TEXT", "1, 1", "TEXT symbol \"<erroneous symbol>\" must be a symbol(SB)"},
-		{"TEXT", "$\"foo\", 0, $1", "TEXT symbol \"<erroneous symbol>\" must be a symbol(SB)"},
-		{"TEXT", "$0É:0, 0, $1", "expected end of operand, found É"}, // Issue #12467.
-		{"TEXT", "$:0:(SB, 0, $1", "expected '(', found 0"},          // Issue 12468.
-		{"FUNCDATA", "", "expect two operands for FUNCDATA"},
-		{"FUNCDATA", "(SB ", "expect two operands for FUNCDATA"},
-		{"DATA", "", "expect two operands for DATA"},
-		{"DATA", "0", "expect two operands for DATA"},
-		{"DATA", "(0), 1", "expect /size for DATA argument"},
-		{"GLOBL", "", "expect two or three operands for GLOBL"},
-		{"GLOBL", "0,1", "GLOBL symbol \"<erroneous symbol>\" must be a symbol(SB)"},
-		{"PCDATA", "", "expect two operands for PCDATA"},
-		{"PCDATA", "1", "expect two operands for PCDATA"},
-	}
-
-	// Note these errors should be independent of the architecture.
-	// Just run the test with amd64.
-	parser := newParser("amd64")
-	var buf bytes.Buffer
-	parser.errorWriter = &buf
-
-	for _, test := range tests {
-		parser.errorCount = 0
-		parser.lineNum++
-		parser.histLineNum++
-		if !parser.pseudo(test.pseudo, tokenize(test.operands)) {
-			t.Fatalf("Wrong pseudo-instruction: %s", test.pseudo)
-		}
-		errorLine := buf.String()
-		if test.expected != errorLine {
-			t.Errorf("Unexpected error %q; expected %q", errorLine, test.expected)
-		}
-		buf.Reset()
-	}
-
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/flags/flags.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/flags/flags.go
deleted file mode 100644
index 0c32024..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/flags/flags.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/flags/flags.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/flags/flags.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package flags implements top-level flags and the usage message for the assembler.
-package flags
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-)
-
-var (
-	Debug      = flag.Bool("debug", false, "dump instructions as they are parsed")
-	OutputFile = flag.String("o", "", "output file; default foo.o for /a/b/c/foo.s as first argument")
-	PrintOut   = flag.Bool("S", false, "print assembly and machine code")
-	TrimPath   = flag.String("trimpath", "", "remove prefix from recorded source file paths")
-	Shared     = flag.Bool("shared", false, "generate code that can be linked into a shared library")
-	Dynlink    = flag.Bool("dynlink", false, "support references to Go symbols defined in other shared libraries")
-	AllErrors  = flag.Bool("e", false, "no limit on number of errors reported")
-)
-
-var (
-	D MultiFlag
-	I MultiFlag
-)
-
-func init() {
-	flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times")
-	flag.Var(&I, "I", "include directory; can be set multiple times")
-}
-
-// MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2.
-type MultiFlag []string
-
-func (m *MultiFlag) String() string {
-	if len(*m) == 0 {
-		return ""
-	}
-	return fmt.Sprint(*m)
-}
-
-func (m *MultiFlag) Set(val string) error {
-	(*m) = append(*m, val)
-	return nil
-}
-
-func Usage() {
-	fmt.Fprintf(os.Stderr, "usage: asm [options] file.s ...\n")
-	fmt.Fprintf(os.Stderr, "Flags:\n")
-	flag.PrintDefaults()
-	os.Exit(2)
-}
-
-func Parse() {
-	flag.Usage = Usage
-	flag.Parse()
-	if flag.NArg() == 0 {
-		flag.Usage()
-	}
-
-	// Flag refinement.
-	if *OutputFile == "" {
-		if flag.NArg() != 1 {
-			flag.Usage()
-		}
-		input := filepath.Base(flag.Arg(0))
-		if strings.HasSuffix(input, ".s") {
-			input = input[:len(input)-2]
-		}
-		*OutputFile = fmt.Sprintf("%s.o", input)
-	}
-}
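
The flags.go removed above collects the repeatable -D and -I options through its MultiFlag type, which implements flag.Value so each occurrence is appended to a list. Below is a minimal standalone sketch of that pattern; the repeatFlag type and the simulated command line are invented for illustration and are not part of the deleted file.

package main

import (
	"flag"
	"fmt"
)

// repeatFlag collects every occurrence of a flag into a list, in the spirit
// of the MultiFlag type in the deleted flags package. It satisfies flag.Value.
type repeatFlag []string

func (m *repeatFlag) String() string { return fmt.Sprint(*m) }

func (m *repeatFlag) Set(val string) error {
	*m = append(*m, val)
	return nil
}

func main() {
	var includes repeatFlag
	flag.Var(&includes, "I", "include directory; can be set multiple times")
	// Parse a simulated command line instead of os.Args so the example is
	// deterministic: asm -I=dir1 -I=dir2 file.s
	flag.CommandLine.Parse([]string{"-I=dir1", "-I=dir2", "file.s"})
	fmt.Println(includes)    // [dir1 dir2]
	fmt.Println(flag.Args()) // [file.s]
}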
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/input.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/input.go
deleted file mode 100644
index f8298aa..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/input.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/input.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/input.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lex
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"text/scanner"
-
-	"bootstrap/cmd/asm/internal/flags"
-)
-
-// Input is the main input: a stack of readers and some macro definitions.
-// It also handles #include processing (by pushing onto the input stack)
-// and parses and instantiates macro definitions.
-type Input struct {
-	Stack
-	includes        []string
-	beginningOfLine bool
-	ifdefStack      []bool
-	macros          map[string]*Macro
-	text            string // Text of last token returned by Next.
-	peek            bool
-	peekToken       ScanToken
-	peekText        string
-}
-
-// NewInput returns an Input from the given path.
-func NewInput(name string) *Input {
-	return &Input{
-		// include directories: look in source dir, then -I directories.
-		includes:        append([]string{filepath.Dir(name)}, flags.I...),
-		beginningOfLine: true,
-		macros:          predefine(flags.D),
-	}
-}
-
-// predefine installs the macros set by the -D flag on the command line.
-func predefine(defines flags.MultiFlag) map[string]*Macro {
-	macros := make(map[string]*Macro)
-	for _, name := range defines {
-		value := "1"
-		i := strings.IndexRune(name, '=')
-		if i > 0 {
-			name, value = name[:i], name[i+1:]
-		}
-		tokens := Tokenize(name)
-		if len(tokens) != 1 || tokens[0].ScanToken != scanner.Ident {
-			fmt.Fprintf(os.Stderr, "asm: parsing -D: %q is not a valid identifier name\n", tokens[0])
-			flags.Usage()
-		}
-		macros[name] = &Macro{
-			name:   name,
-			args:   nil,
-			tokens: Tokenize(value),
-		}
-	}
-	return macros
-}
-
-var panicOnError bool // For testing.
-
-func (in *Input) Error(args ...interface{}) {
-	if panicOnError {
-		panic(fmt.Errorf("%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...)))
-	}
-	fmt.Fprintf(os.Stderr, "%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...))
-	os.Exit(1)
-}
-
-// expectText is like Error but adds "got XXX" where XXX is a quoted representation of the most recent token.
-func (in *Input) expectText(args ...interface{}) {
-	in.Error(append(args, "; got", strconv.Quote(in.Stack.Text()))...)
-}
-
-// enabled reports whether the input is enabled by an ifdef, or is at the top level.
-func (in *Input) enabled() bool {
-	return len(in.ifdefStack) == 0 || in.ifdefStack[len(in.ifdefStack)-1]
-}
-
-func (in *Input) expectNewline(directive string) {
-	tok := in.Stack.Next()
-	if tok != '\n' {
-		in.expectText("expected newline after", directive)
-	}
-}
-
-func (in *Input) Next() ScanToken {
-	if in.peek {
-		in.peek = false
-		tok := in.peekToken
-		in.text = in.peekText
-		return tok
-	}
-	// If we cannot generate a token after 100 macro invocations, we're in trouble.
-	// The usual case is caught by Push, below, but be safe.
-	for nesting := 0; nesting < 100; {
-		tok := in.Stack.Next()
-		switch tok {
-		case '#':
-			if !in.beginningOfLine {
-				in.Error("'#' must be first item on line")
-			}
-			in.beginningOfLine = in.hash()
-		case scanner.Ident:
-			// Is it a macro name?
-			name := in.Stack.Text()
-			macro := in.macros[name]
-			if macro != nil {
-				nesting++
-				in.invokeMacro(macro)
-				continue
-			}
-			fallthrough
-		default:
-			if tok == scanner.EOF && len(in.ifdefStack) > 0 {
-				// We're skipping text but have run out of input with no #endif.
-				in.Error("unclosed #ifdef or #ifndef")
-			}
-			in.beginningOfLine = tok == '\n'
-			if in.enabled() {
-				in.text = in.Stack.Text()
-				return tok
-			}
-		}
-	}
-	in.Error("recursive macro invocation")
-	return 0
-}
-
-func (in *Input) Text() string {
-	return in.text
-}
-
-// hash processes a # preprocessor directive. It returns true iff it completes.
-func (in *Input) hash() bool {
-	// We have a '#'; it must be followed by a known word (define, include, etc.).
-	tok := in.Stack.Next()
-	if tok != scanner.Ident {
-		in.expectText("expected identifier after '#'")
-	}
-	if !in.enabled() {
-		// Can only start including again if we are at #else or #endif but also
-		// need to keep track of nested #if[n]defs.
-		// We let #line through because it might affect errors.
-		switch in.Stack.Text() {
-		case "else", "endif", "ifdef", "ifndef", "line":
-			// Press on.
-		default:
-			return false
-		}
-	}
-	switch in.Stack.Text() {
-	case "define":
-		in.define()
-	case "else":
-		in.else_()
-	case "endif":
-		in.endif()
-	case "ifdef":
-		in.ifdef(true)
-	case "ifndef":
-		in.ifdef(false)
-	case "include":
-		in.include()
-	case "line":
-		in.line()
-	case "undef":
-		in.undef()
-	default:
-		in.Error("unexpected token after '#':", in.Stack.Text())
-	}
-	return true
-}
-
-// macroName returns the name for the macro being referenced.
-func (in *Input) macroName() string {
-	// We use the Stack's input method; no macro processing at this stage.
-	tok := in.Stack.Next()
-	if tok != scanner.Ident {
-		in.expectText("expected identifier after # directive")
-	}
-	// Name is alphanumeric by definition.
-	return in.Stack.Text()
-}
-
-// #define processing.
-func (in *Input) define() {
-	name := in.macroName()
-	args, tokens := in.macroDefinition(name)
-	in.defineMacro(name, args, tokens)
-}
-
-// defineMacro stores the macro definition in the Input.
-func (in *Input) defineMacro(name string, args []string, tokens []Token) {
-	if in.macros[name] != nil {
-		in.Error("redefinition of macro:", name)
-	}
-	in.macros[name] = &Macro{
-		name:   name,
-		args:   args,
-		tokens: tokens,
-	}
-}
-
-// macroDefinition returns the list of formals and the tokens of the definition.
-// The argument list is nil for no parens on the definition; otherwise a list of
-// formal argument names.
-func (in *Input) macroDefinition(name string) ([]string, []Token) {
-	prevCol := in.Stack.Col()
-	tok := in.Stack.Next()
-	if tok == '\n' || tok == scanner.EOF {
-		return nil, nil // No definition for macro
-	}
-	var args []string
-	// The C preprocessor treats
-	//	#define A(x)
-	// and
-	//	#define A (x)
-	// distinctly: the first is a macro with arguments, the second without.
-	// Distinguish these cases using the column number, since we don't
-	// see the space itself. Note that text/scanner reports the position at the
-	// end of the token. It's where you are now, and you just read this token.
-	if tok == '(' && in.Stack.Col() == prevCol+1 {
-		// Macro has arguments. Scan list of formals.
-		acceptArg := true
-		args = []string{} // Zero length but not nil.
-	Loop:
-		for {
-			tok = in.Stack.Next()
-			switch tok {
-			case ')':
-				tok = in.Stack.Next() // First token of macro definition.
-				break Loop
-			case ',':
-				if acceptArg {
-					in.Error("bad syntax in definition for macro:", name)
-				}
-				acceptArg = true
-			case scanner.Ident:
-				if !acceptArg {
-					in.Error("bad syntax in definition for macro:", name)
-				}
-				arg := in.Stack.Text()
-				if i := lookup(args, arg); i >= 0 {
-					in.Error("duplicate argument", arg, "in definition for macro:", name)
-				}
-				args = append(args, arg)
-				acceptArg = false
-			default:
-				in.Error("bad definition for macro:", name)
-			}
-		}
-	}
-	var tokens []Token
-	// Scan to newline. Backslashes escape newlines.
-	for tok != '\n' {
-		if tok == scanner.EOF {
-			in.Error("missing newline in definition for macro:", name)
-		}
-		if tok == '\\' {
-			tok = in.Stack.Next()
-			if tok != '\n' && tok != '\\' {
-				in.Error(`can only escape \ or \n in definition for macro:`, name)
-			}
-		}
-		tokens = append(tokens, Make(tok, in.Stack.Text()))
-		tok = in.Stack.Next()
-	}
-	return args, tokens
-}
-
-func lookup(args []string, arg string) int {
-	for i, a := range args {
-		if a == arg {
-			return i
-		}
-	}
-	return -1
-}
-
-// invokeMacro pushes onto the input Stack a Slice that holds the macro definition with the actual
-// parameters substituted for the formals.
-// Invoking a macro does not touch the PC/line history.
-func (in *Input) invokeMacro(macro *Macro) {
-	// If the macro has no arguments, just substitute the text.
-	if macro.args == nil {
-		in.Push(NewSlice(in.File(), in.Line(), macro.tokens))
-		return
-	}
-	tok := in.Stack.Next()
-	if tok != '(' {
-		// If the macro has arguments but is invoked without them, all we push is the macro name.
-		// First, put back the token.
-		in.peekToken = tok
-		in.peekText = in.text
-		in.peek = true
-		in.Push(NewSlice(in.File(), in.Line(), []Token{Make(macroName, macro.name)}))
-		return
-	}
-	actuals := in.argsFor(macro)
-	var tokens []Token
-	for _, tok := range macro.tokens {
-		if tok.ScanToken != scanner.Ident {
-			tokens = append(tokens, tok)
-			continue
-		}
-		substitution := actuals[tok.text]
-		if substitution == nil {
-			tokens = append(tokens, tok)
-			continue
-		}
-		tokens = append(tokens, substitution...)
-	}
-	in.Push(NewSlice(in.File(), in.Line(), tokens))
-}
-
-// argsFor returns a map from formal name to actual value for this argumented macro invocation.
-// The opening parenthesis has been absorbed.
-func (in *Input) argsFor(macro *Macro) map[string][]Token {
-	var args [][]Token
-	// One macro argument per iteration. Collect them all and check counts afterwards.
-	for argNum := 0; ; argNum++ {
-		tokens, tok := in.collectArgument(macro)
-		args = append(args, tokens)
-		if tok == ')' {
-			break
-		}
-	}
-	// Zero-argument macros are tricky.
-	if len(macro.args) == 0 && len(args) == 1 && args[0] == nil {
-		args = nil
-	} else if len(args) != len(macro.args) {
-		in.Error("wrong arg count for macro", macro.name)
-	}
-	argMap := make(map[string][]Token)
-	for i, arg := range args {
-		argMap[macro.args[i]] = arg
-	}
-	return argMap
-}
-
-// collectArgument returns the actual tokens for a single argument of a macro.
-// It also returns the token that terminated the argument, which will always
-// be either ',' or ')'. The starting '(' has been scanned.
-func (in *Input) collectArgument(macro *Macro) ([]Token, ScanToken) {
-	nesting := 0
-	var tokens []Token
-	for {
-		tok := in.Stack.Next()
-		if tok == scanner.EOF || tok == '\n' {
-			in.Error("unterminated arg list invoking macro:", macro.name)
-		}
-		if nesting == 0 && (tok == ')' || tok == ',') {
-			return tokens, tok
-		}
-		if tok == '(' {
-			nesting++
-		}
-		if tok == ')' {
-			nesting--
-		}
-		tokens = append(tokens, Make(tok, in.Stack.Text()))
-	}
-}
-
-// #ifdef and #ifndef processing.
-func (in *Input) ifdef(truth bool) {
-	name := in.macroName()
-	in.expectNewline("#if[n]def")
-	if !in.enabled() {
-		truth = false
-	} else if _, defined := in.macros[name]; !defined {
-		truth = !truth
-	}
-	in.ifdefStack = append(in.ifdefStack, truth)
-}
-
-// #else processing
-func (in *Input) else_() {
-	in.expectNewline("#else")
-	if len(in.ifdefStack) == 0 {
-		in.Error("unmatched #else")
-	}
-	if len(in.ifdefStack) == 1 || in.ifdefStack[len(in.ifdefStack)-2] {
-		in.ifdefStack[len(in.ifdefStack)-1] = !in.ifdefStack[len(in.ifdefStack)-1]
-	}
-}
-
-// #endif processing.
-func (in *Input) endif() {
-	in.expectNewline("#endif")
-	if len(in.ifdefStack) == 0 {
-		in.Error("unmatched #endif")
-	}
-	in.ifdefStack = in.ifdefStack[:len(in.ifdefStack)-1]
-}
-
-// #include processing.
-func (in *Input) include() {
-	// Find and parse string.
-	tok := in.Stack.Next()
-	if tok != scanner.String {
-		in.expectText("expected string after #include")
-	}
-	name, err := strconv.Unquote(in.Stack.Text())
-	if err != nil {
-		in.Error("unquoting include file name: ", err)
-	}
-	in.expectNewline("#include")
-	// Push tokenizer for file onto stack.
-	fd, err := os.Open(name)
-	if err != nil {
-		for _, dir := range in.includes {
-			fd, err = os.Open(filepath.Join(dir, name))
-			if err == nil {
-				break
-			}
-		}
-		if err != nil {
-			in.Error("#include:", err)
-		}
-	}
-	in.Push(NewTokenizer(name, fd, fd))
-}
-
-// #line processing.
-func (in *Input) line() {
-	// Only need to handle Plan 9 format: #line 337 "filename"
-	tok := in.Stack.Next()
-	if tok != scanner.Int {
-		in.expectText("expected line number after #line")
-	}
-	line, err := strconv.Atoi(in.Stack.Text())
-	if err != nil {
-		in.Error("error parsing #line (cannot happen):", err)
-	}
-	tok = in.Stack.Next()
-	if tok != scanner.String {
-		in.expectText("expected file name in #line")
-	}
-	file, err := strconv.Unquote(in.Stack.Text())
-	if err != nil {
-		in.Error("unquoting #line file name: ", err)
-	}
-	tok = in.Stack.Next()
-	if tok != '\n' {
-		in.Error("unexpected token at end of #line: ", tok)
-	}
-	linkCtxt.LineHist.Update(histLine, file, line)
-	in.Stack.SetPos(line, file)
-}
-
-// #undef processing
-func (in *Input) undef() {
-	name := in.macroName()
-	if in.macros[name] == nil {
-		in.Error("#undef for undefined macro:", name)
-	}
-	// Newline must be next.
-	tok := in.Stack.Next()
-	if tok != '\n' {
-		in.Error("syntax error in #undef for macro:", name)
-	}
-	delete(in.macros, name)
-}
-
-func (in *Input) Push(r TokenReader) {
-	if len(in.tr) > 100 {
-		in.Error("input recursion")
-	}
-	in.Stack.Push(r)
-}
-
-func (in *Input) Close() {
-}
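The preprocessor code deleted above splits a macro invocation's actuals by walking tokens and treating ',' and ')' as terminators only at nesting depth zero, so a nested call such as LOAD(8, AX) stays inside a single argument. A minimal standalone sketch of that splitting logic, using a hypothetical splitArgs helper over a plain string rather than the lexer's token stream:

	package main

	import "fmt"

	// splitArgs splits the text between a macro call's '(' and its matching ')'
	// into comma-separated actual arguments, keeping nested parentheses intact.
	// Illustrative only: it mirrors the depth counting in collectArgument above,
	// but over a string instead of scanner tokens.
	func splitArgs(s string) []string {
		var args []string
		depth, start := 0, 0
		for i, r := range s {
			switch r {
			case '(':
				depth++
			case ')':
				depth--
			case ',':
				if depth == 0 {
					args = append(args, s[start:i])
					start = i + 1
				}
			}
		}
		return append(args, s[start:])
	}

	func main() {
		// Three arguments; the comma inside LOAD(8, AX) does not split.
		fmt.Println(splitArgs("(off+1)*4, LOAD(8, AX), DX"))
	}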
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/lex.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/lex.go
deleted file mode 100644
index cc594b7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/lex.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/lex.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/lex.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lex implements lexical analysis for the assembler.
-package lex
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"strings"
-	"text/scanner"
-
-	"bootstrap/cmd/internal/obj"
-)
-
-// A ScanToken represents an input item. It is a simple wrapping of rune, as
-// returned by text/scanner.Scanner, plus a couple of extra values.
-type ScanToken rune
-
-const (
-	// Asm defines some two-character lexemes. We make up
-	// a rune/ScanToken value for them - ugly but simple.
-	LSH       ScanToken = -1000 - iota // << Left shift.
-	RSH                                // >> Logical right shift.
-	ARR                                // -> Used on ARM for shift type 3, arithmetic right shift.
-	ROT                                // @> Used on ARM for shift type 4, rotate right.
-	macroName                          // name of macro that should not be expanded
-)
-
-// IsRegisterShift reports whether the token is one of the ARM register shift operators.
-func IsRegisterShift(r ScanToken) bool {
-	return ROT <= r && r <= LSH // Order looks backwards because these are negative.
-}
-
-func (t ScanToken) String() string {
-	switch t {
-	case scanner.EOF:
-		return "EOF"
-	case scanner.Ident:
-		return "identifier"
-	case scanner.Int:
-		return "integer constant"
-	case scanner.Float:
-		return "float constant"
-	case scanner.Char:
-		return "rune constant"
-	case scanner.String:
-		return "string constant"
-	case scanner.RawString:
-		return "raw string constant"
-	case scanner.Comment:
-		return "comment"
-	default:
-		return fmt.Sprintf("%q", rune(t))
-	}
-}
-
-var (
-	// It might be nice if these weren't global.
-	linkCtxt *obj.Link     // The link context for all instructions.
-	histLine int       = 1 // The cumulative count of lines processed.
-)
-
-// HistLine reports the cumulative source line number of the token,
-// for use in the Prog structure for the linker. (It's always handling the
-// instruction from the current lex line.)
-// It returns int32 because that's what type ../asm prefers.
-func HistLine() int32 {
-	return int32(histLine)
-}
-
-// NewLexer returns a lexer for the named file and the given link context.
-func NewLexer(name string, ctxt *obj.Link) TokenReader {
-	linkCtxt = ctxt
-	input := NewInput(name)
-	fd, err := os.Open(name)
-	if err != nil {
-		log.Fatalf("%s\n", err)
-	}
-	input.Push(NewTokenizer(name, fd, fd))
-	return input
-}
-
-// InitHist sets the line count to 1, for reproducible testing.
-func InitHist() {
-	histLine = 1
-}
-
-// The other files in this directory each contain an implementation of TokenReader.
-
-// A TokenReader is like a reader, but returns lex tokens of type Token. It also can tell you what
-// the text of the most recently returned token is, and where it was found.
-// The underlying scanner elides all spaces except newline, so the input looks like a  stream of
-// Tokens; original spacing is lost but we don't need it.
-type TokenReader interface {
-	// Next returns the next token.
-	Next() ScanToken
-	// The following methods all refer to the most recent token returned by Next.
-	// Text returns the original string representation of the token.
-	Text() string
-	// File reports the source file name of the token.
-	File() string
-	// Line reports the source line number of the token.
-	Line() int
-	// Col reports the source column number of the token.
-	Col() int
-	// SetPos sets the file and line number.
-	SetPos(line int, file string)
-	// Close does any teardown required.
-	Close()
-}
-
-// A Token is a scan token plus its string value.
-// A macro is stored as a sequence of Tokens with spaces stripped.
-type Token struct {
-	ScanToken
-	text string
-}
-
-// Make returns a Token with the given rune (ScanToken) and text representation.
-func Make(token ScanToken, text string) Token {
-	// If the symbol starts with center dot, as in ·x, rewrite it as ""·x
-	if token == scanner.Ident && strings.HasPrefix(text, "\u00B7") {
-		text = `""` + text
-	}
-	// Substitute the substitutes for . and /.
-	text = strings.Replace(text, "\u00B7", ".", -1)
-	text = strings.Replace(text, "\u2215", "/", -1)
-	return Token{ScanToken: token, text: text}
-}
-
-func (l Token) String() string {
-	return l.text
-}
-
-// A Macro represents the definition of a #defined macro.
-type Macro struct {
-	name   string   // The #define name.
-	args   []string // Formal arguments.
-	tokens []Token  // Body of macro.
-}
-
-// Tokenize turns a string into a list of Tokens; used to parse the -D flag and in tests.
-func Tokenize(str string) []Token {
-	t := NewTokenizer("command line", strings.NewReader(str), nil)
-	var tokens []Token
-	for {
-		tok := t.Next()
-		if tok == scanner.EOF {
-			break
-		}
-		tokens = append(tokens, Make(tok, t.Text()))
-	}
-	return tokens
-}
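Make, above, is why assembly symbols written with the Unicode middle dot (·) and division slash (∕) end up with plain '.' and '/' in object-file symbol names, and why a leading middle dot stands for the current package. A standalone sketch of just that substitution, with a hypothetical rewriteSym helper:

	package main

	import (
		"fmt"
		"strings"
	)

	// rewriteSym sketches what Make does to identifier text: a leading middle
	// dot means "symbol in the current package", so it gets an empty quoted
	// package prefix, and the Unicode stand-ins are mapped back to '.' and '/'.
	func rewriteSym(text string) string {
		if strings.HasPrefix(text, "\u00B7") {
			text = `""` + text
		}
		text = strings.Replace(text, "\u00B7", ".", -1)
		text = strings.Replace(text, "\u2215", "/", -1)
		return text
	}

	func main() {
		fmt.Println(rewriteSym("·gcd"))            // "".gcd
		fmt.Println(rewriteSym("runtime∕debug·x")) // runtime/debug.x
	}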
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/lex_test.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/lex_test.go
deleted file mode 100644
index a9058f6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/lex_test.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/lex_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/lex_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lex
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-	"text/scanner"
-)
-
-type lexTest struct {
-	name   string
-	input  string
-	output string
-}
-
-var lexTests = []lexTest{
-	{
-		"empty",
-		"",
-		"",
-	},
-	{
-		"simple",
-		"1 (a)",
-		"1.(.a.)",
-	},
-	{
-		"simple define",
-		lines(
-			"#define A 1234",
-			"A",
-		),
-		"1234.\n",
-	},
-	{
-		"define without value",
-		"#define A",
-		"",
-	},
-	{
-		"macro without arguments",
-		"#define A() 1234\n" + "A()\n",
-		"1234.\n",
-	},
-	{
-		"macro with just parens as body",
-		"#define A () \n" + "A\n",
-		"(.).\n",
-	},
-	{
-		"macro with parens but no arguments",
-		"#define A (x) \n" + "A\n",
-		"(.x.).\n",
-	},
-	{
-		"macro with arguments",
-		"#define A(x, y, z) x+z+y\n" + "A(1, 2, 3)\n",
-		"1.+.3.+.2.\n",
-	},
-	{
-		"argumented macro invoked without arguments",
-		lines(
-			"#define X() foo ",
-			"X()",
-			"X",
-		),
-		"foo.\n.X.\n",
-	},
-	{
-		"multiline macro without arguments",
-		lines(
-			"#define A 1\\",
-			"\t2\\",
-			"\t3",
-			"before",
-			"A",
-			"after",
-		),
-		"before.\n.1.\n.2.\n.3.\n.after.\n",
-	},
-	{
-		"multiline macro with arguments",
-		lines(
-			"#define A(a, b, c) a\\",
-			"\tb\\",
-			"\tc",
-			"before",
-			"A(1, 2, 3)",
-			"after",
-		),
-		"before.\n.1.\n.2.\n.3.\n.after.\n",
-	},
-	{
-		"LOAD macro",
-		lines(
-			"#define LOAD(off, reg) \\",
-			"\tMOVBLZX	(off*4)(R12),	reg \\",
-			"\tADDB	reg,		DX",
-			"",
-			"LOAD(8, AX)",
-		),
-		"\n.\n.MOVBLZX.(.8.*.4.).(.R12.).,.AX.\n.ADDB.AX.,.DX.\n",
-	},
-	{
-		"nested multiline macro",
-		lines(
-			"#define KEYROUND(xmm, load, off, r1, r2, index) \\",
-			"\tMOVBLZX	(BP)(DX*4),	R8 \\",
-			"\tload((off+1), r2) \\",
-			"\tMOVB	R8,		(off*4)(R12) \\",
-			"\tPINSRW	$index, (BP)(R8*4), xmm",
-			"#define LOAD(off, reg) \\",
-			"\tMOVBLZX	(off*4)(R12),	reg \\",
-			"\tADDB	reg,		DX",
-			"KEYROUND(X0, LOAD, 8, AX, BX, 0)",
-		),
-		"\n.MOVBLZX.(.BP.).(.DX.*.4.).,.R8.\n.\n.MOVBLZX.(.(.8.+.1.).*.4.).(.R12.).,.BX.\n.ADDB.BX.,.DX.\n.MOVB.R8.,.(.8.*.4.).(.R12.).\n.PINSRW.$.0.,.(.BP.).(.R8.*.4.).,.X0.\n",
-	},
-	{
-		"taken #ifdef",
-		lines(
-			"#define A",
-			"#ifdef A",
-			"#define B 1234",
-			"#endif",
-			"B",
-		),
-		"1234.\n",
-	},
-	{
-		"not taken #ifdef",
-		lines(
-			"#ifdef A",
-			"#define B 1234",
-			"#endif",
-			"B",
-		),
-		"B.\n",
-	},
-	{
-		"taken #ifdef with else",
-		lines(
-			"#define A",
-			"#ifdef A",
-			"#define B 1234",
-			"#else",
-			"#define B 5678",
-			"#endif",
-			"B",
-		),
-		"1234.\n",
-	},
-	{
-		"not taken #ifdef with else",
-		lines(
-			"#ifdef A",
-			"#define B 1234",
-			"#else",
-			"#define B 5678",
-			"#endif",
-			"B",
-		),
-		"5678.\n",
-	},
-	{
-		"nested taken/taken #ifdef",
-		lines(
-			"#define A",
-			"#define B",
-			"#ifdef A",
-			"#ifdef B",
-			"#define C 1234",
-			"#else",
-			"#define C 5678",
-			"#endif",
-			"#endif",
-			"C",
-		),
-		"1234.\n",
-	},
-	{
-		"nested taken/not-taken #ifdef",
-		lines(
-			"#define A",
-			"#ifdef A",
-			"#ifdef B",
-			"#define C 1234",
-			"#else",
-			"#define C 5678",
-			"#endif",
-			"#endif",
-			"C",
-		),
-		"5678.\n",
-	},
-	{
-		"nested not-taken/would-be-taken #ifdef",
-		lines(
-			"#define B",
-			"#ifdef A",
-			"#ifdef B",
-			"#define C 1234",
-			"#else",
-			"#define C 5678",
-			"#endif",
-			"#endif",
-			"C",
-		),
-		"C.\n",
-	},
-	{
-		"nested not-taken/not-taken #ifdef",
-		lines(
-			"#ifdef A",
-			"#ifdef B",
-			"#define C 1234",
-			"#else",
-			"#define C 5678",
-			"#endif",
-			"#endif",
-			"C",
-		),
-		"C.\n",
-	},
-	{
-		"nested #define",
-		lines(
-			"#define A #define B THIS",
-			"A",
-			"B",
-		),
-		"THIS.\n",
-	},
-	{
-		"nested #define with args",
-		lines(
-			"#define A #define B(x) x",
-			"A",
-			"B(THIS)",
-		),
-		"THIS.\n",
-	},
-	/* This one fails. See comment in Slice.Col.
-	{
-		"nested #define with args",
-		lines(
-			"#define A #define B (x) x",
-			"A",
-			"B(THIS)",
-		),
-		"x.\n",
-	},
-	*/
-}
-
-func TestLex(t *testing.T) {
-	for _, test := range lexTests {
-		input := NewInput(test.name)
-		input.Push(NewTokenizer(test.name, strings.NewReader(test.input), nil))
-		result := drain(input)
-		if result != test.output {
-			t.Errorf("%s: got %q expected %q", test.name, result, test.output)
-		}
-	}
-}
-
-// lines joins the arguments together as complete lines.
-func lines(a ...string) string {
-	return strings.Join(a, "\n") + "\n"
-}
-
-// drain returns a single string representing the processed input tokens.
-func drain(input *Input) string {
-	var buf bytes.Buffer
-	for {
-		tok := input.Next()
-		if tok == scanner.EOF {
-			return buf.String()
-		}
-		if buf.Len() > 0 {
-			buf.WriteByte('.')
-		}
-		buf.WriteString(input.Text())
-	}
-}
-
-type badLexTest struct {
-	input string
-	error string
-}
-
-var badLexTests = []badLexTest{
-	{
-		"3 #define foo bar\n",
-		"'#' must be first item on line",
-	},
-	{
-		"#ifdef foo\nhello",
-		"unclosed #ifdef or #ifndef",
-	},
-	{
-		"#ifndef foo\nhello",
-		"unclosed #ifdef or #ifndef",
-	},
-	{
-		"#ifdef foo\nhello\n#else\nbye",
-		"unclosed #ifdef or #ifndef",
-	},
-	{
-		"#define A() A()\nA()",
-		"recursive macro invocation",
-	},
-	{
-		"#define A a\n#define A a\n",
-		"redefinition of macro",
-	},
-	{
-		"#define A a",
-		"no newline after macro definition",
-	},
-}
-
-func TestBadLex(t *testing.T) {
-	for _, test := range badLexTests {
-		input := NewInput(test.error)
-		input.Push(NewTokenizer(test.error, strings.NewReader(test.input), nil))
-		err := firstError(input)
-		if err == nil {
-			t.Errorf("%s: got no error", test.error)
-			continue
-		}
-		if !strings.Contains(err.Error(), test.error) {
-			t.Errorf("got error %q expected %q", err.Error(), test.error)
-		}
-	}
-}
-
-// firstError returns the first error value triggered by the input.
-func firstError(input *Input) (err error) {
-	panicOnError = true
-	defer func() {
-		panicOnError = false
-		switch e := recover(); e := e.(type) {
-		case nil:
-		case error:
-			err = e
-		default:
-			panic(e)
-		}
-	}()
-
-	for {
-		tok := input.Next()
-		if tok == scanner.EOF {
-			return
-		}
-	}
-}
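firstError, above, works because the lexer's panicOnError switch turns reported errors into panics; the deferred recover converts the first of those back into an ordinary error value. The same capture idiom in isolation, as a hypothetical capturePanicError helper:

	package main

	import (
		"errors"
		"fmt"
	)

	// capturePanicError runs fn and converts a panic that carries an error back
	// into an ordinary return value; any other panic is re-raised unchanged.
	func capturePanicError(fn func()) (err error) {
		defer func() {
			switch e := recover().(type) {
			case nil:
				// fn returned normally
			case error:
				err = e
			default:
				panic(e)
			}
		}()
		fn()
		return nil
	}

	func main() {
		err := capturePanicError(func() {
			panic(errors.New("unclosed #ifdef or #ifndef"))
		})
		fmt.Println(err)
	}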
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/slice.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/slice.go
deleted file mode 100644
index d40fc7f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/slice.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/slice.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/slice.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lex
-
-import "text/scanner"
-
-// A Slice reads from a slice of Tokens.
-type Slice struct {
-	tokens   []Token
-	fileName string
-	line     int
-	pos      int
-}
-
-func NewSlice(fileName string, line int, tokens []Token) *Slice {
-	return &Slice{
-		tokens:   tokens,
-		fileName: fileName,
-		line:     line,
-		pos:      -1, // Next will advance to zero.
-	}
-}
-
-func (s *Slice) Next() ScanToken {
-	s.pos++
-	if s.pos >= len(s.tokens) {
-		return scanner.EOF
-	}
-	return s.tokens[s.pos].ScanToken
-}
-
-func (s *Slice) Text() string {
-	return s.tokens[s.pos].text
-}
-
-func (s *Slice) File() string {
-	return s.fileName
-}
-
-func (s *Slice) Line() int {
-	return s.line
-}
-
-func (s *Slice) Col() int {
-	// TODO: Col is only called when defining a macro and all it cares about is increasing
-	// position to discover whether there is a blank before the parenthesis.
-	// We only get here if defining a macro inside a macro.
-	// This imperfect implementation means we cannot tell the difference between
-	//	#define A #define B(x) x
-	// and
-	//	#define A #define B (x) x
-	// In the first, the definition of B takes an argument; in the second it doesn't. Because we let
-	// text/scanner strip the blanks for us, this is extremely rare, hard to fix, and not worth it.
-	return s.pos
-}
-
-func (s *Slice) SetPos(line int, file string) {
-	// Cannot happen because we only have slices of already-scanned
-	// text, but be prepared.
-	s.line = line
-	s.fileName = file
-}
-
-func (s *Slice) Close() {
-}
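The Col comment above refers to the blank-before-parenthesis check that decides whether a #define takes arguments: roughly, the definition scanner compares column positions to see whether '(' immediately follows the macro name. A standalone sketch of that check with a hypothetical hasArgs helper built directly on text/scanner:

	package main

	import (
		"fmt"
		"strings"
		"text/scanner"
	)

	// hasArgs reports whether a '(' follows the macro name with no blank in
	// between. Illustrative only; the real lexer does this over its own token
	// stream, which is what Slice.Col exists to support.
	func hasArgs(def string) bool {
		var s scanner.Scanner
		s.Init(strings.NewReader(def))
		s.Scan()                  // the macro name
		nameEnd := s.Pos().Column // column just past the name
		tok := s.Scan()
		return tok == '(' && s.Position.Column == nameEnd
	}

	func main() {
		fmt.Println(hasArgs("B(x) x"))  // true: B takes an argument
		fmt.Println(hasArgs("B (x) x")) // false: the body starts with a parenthesis
	}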
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/stack.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/stack.go
deleted file mode 100644
index 4586e63..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/stack.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/stack.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/stack.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lex
-
-import "text/scanner"
-
-// A Stack is a stack of TokenReaders. As the top TokenReader hits EOF,
-// it resumes reading the next one down.
-type Stack struct {
-	tr []TokenReader
-}
-
-// Push adds tr to the top (end) of the input stack. (Popping happens automatically.)
-func (s *Stack) Push(tr TokenReader) {
-	s.tr = append(s.tr, tr)
-}
-
-func (s *Stack) Next() ScanToken {
-	tos := s.tr[len(s.tr)-1]
-	tok := tos.Next()
-	for tok == scanner.EOF && len(s.tr) > 1 {
-		tos.Close()
-		// Pop the topmost item from the stack and resume with the next one down.
-		s.tr = s.tr[:len(s.tr)-1]
-		tok = s.Next()
-	}
-	return tok
-}
-
-func (s *Stack) Text() string {
-	return s.tr[len(s.tr)-1].Text()
-}
-
-func (s *Stack) File() string {
-	return s.tr[len(s.tr)-1].File()
-}
-
-func (s *Stack) Line() int {
-	return s.tr[len(s.tr)-1].Line()
-}
-
-func (s *Stack) Col() int {
-	return s.tr[len(s.tr)-1].Col()
-}
-
-func (s *Stack) SetPos(line int, file string) {
-	s.tr[len(s.tr)-1].SetPos(line, file)
-}
-
-func (s *Stack) Close() { // Unused.
-}
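Stack, above, is what lets #include work: the included file's tokenizer is pushed on top, drained to EOF, then popped so the outer input resumes. A byte-oriented sketch of the same pop-on-EOF behaviour, using a hypothetical readerStack type:

	package main

	import (
		"bufio"
		"fmt"
		"io"
		"strings"
	)

	// readerStack resumes reading the next reader down whenever the top one is
	// exhausted, the way Stack does for TokenReaders.
	type readerStack struct {
		readers []*bufio.Reader
	}

	func (s *readerStack) push(r io.Reader) {
		s.readers = append(s.readers, bufio.NewReader(r))
	}

	func (s *readerStack) readLine() (string, error) {
		for len(s.readers) > 0 {
			top := s.readers[len(s.readers)-1]
			line, err := top.ReadString('\n')
			if err != nil && err != io.EOF {
				return "", err
			}
			if line == "" { // top reader is exhausted: pop, resume below
				s.readers = s.readers[:len(s.readers)-1]
				continue
			}
			return strings.TrimRight(line, "\n"), nil
		}
		return "", io.EOF
	}

	func main() {
		var s readerStack
		s.push(strings.NewReader("RET\n"))         // outer input
		s.push(strings.NewReader("MOVQ AX, BX\n")) // "included" input, drained first
		for {
			line, err := s.readLine()
			if err == io.EOF {
				break
			}
			fmt.Println(line)
		}
	}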
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/tokenizer.go b/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/tokenizer.go
deleted file mode 100644
index 07a9d13..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/internal/lex/tokenizer.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/tokenizer.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/internal/lex/tokenizer.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package lex
-
-import (
-	"io"
-	"os"
-	"strings"
-	"text/scanner"
-	"unicode"
-)
-
-// A Tokenizer is a simple wrapping of text/scanner.Scanner, configured
-// for our purposes and made a TokenReader. It forms the lowest level,
-// turning text from readers into tokens.
-type Tokenizer struct {
-	tok      ScanToken
-	s        *scanner.Scanner
-	line     int
-	fileName string
-	file     *os.File // If non-nil, file descriptor to close.
-}
-
-func NewTokenizer(name string, r io.Reader, file *os.File) *Tokenizer {
-	var s scanner.Scanner
-	s.Init(r)
-	// Newline is like a semicolon; other space characters are fine.
-	s.Whitespace = 1<<'\t' | 1<<'\r' | 1<<' '
-	// Don't skip comments: we need to count newlines.
-	s.Mode = scanner.ScanChars |
-		scanner.ScanFloats |
-		scanner.ScanIdents |
-		scanner.ScanInts |
-		scanner.ScanStrings |
-		scanner.ScanComments
-	s.Position.Filename = name
-	s.IsIdentRune = isIdentRune
-	if file != nil {
-		linkCtxt.LineHist.Push(histLine, name)
-	}
-	return &Tokenizer{
-		s:        &s,
-		line:     1,
-		fileName: name,
-		file:     file,
-	}
-}
-
-// We want center dot (·) and division slash (∕) to work as identifier characters.
-func isIdentRune(ch rune, i int) bool {
-	if unicode.IsLetter(ch) {
-		return true
-	}
-	switch ch {
-	case '_': // Underscore; traditional.
-		return true
-	case '\u00B7': // Represents the period in runtime.exit. U+00B7 '·' middle dot
-		return true
-	case '\u2215': // Represents the slash in runtime/debug.setGCPercent. U+2215 '∕' division slash
-		return true
-	}
-	// Digits are OK only after the first character.
-	return i > 0 && unicode.IsDigit(ch)
-}
-
-func (t *Tokenizer) Text() string {
-	switch t.tok {
-	case LSH:
-		return "<<"
-	case RSH:
-		return ">>"
-	case ARR:
-		return "->"
-	case ROT:
-		return "@>"
-	}
-	return t.s.TokenText()
-}
-
-func (t *Tokenizer) File() string {
-	return t.fileName
-}
-
-func (t *Tokenizer) Line() int {
-	return t.line
-}
-
-func (t *Tokenizer) Col() int {
-	return t.s.Pos().Column
-}
-
-func (t *Tokenizer) SetPos(line int, file string) {
-	t.line = line
-	t.fileName = file
-}
-
-func (t *Tokenizer) Next() ScanToken {
-	s := t.s
-	for {
-		t.tok = ScanToken(s.Scan())
-		if t.tok != scanner.Comment {
-			break
-		}
-		length := strings.Count(s.TokenText(), "\n")
-		t.line += length
-		histLine += length
-		// TODO: If we ever have //go: comments in assembly, will need to keep them here.
-		// For now, just discard all comments.
-	}
-	switch t.tok {
-	case '\n':
-		if t.file != nil {
-			histLine++
-		}
-		t.line++
-	case '-':
-		if s.Peek() == '>' {
-			s.Next()
-			t.tok = ARR
-			return ARR
-		}
-	case '@':
-		if s.Peek() == '>' {
-			s.Next()
-			t.tok = ROT
-			return ROT
-		}
-	case '<':
-		if s.Peek() == '<' {
-			s.Next()
-			t.tok = LSH
-			return LSH
-		}
-	case '>':
-		if s.Peek() == '>' {
-			s.Next()
-			t.tok = RSH
-			return RSH
-		}
-	}
-	return t.tok
-}
-
-func (t *Tokenizer) Close() {
-	if t.file != nil {
-		t.file.Close()
-		// It's an open file, so pop the line history.
-		linkCtxt.LineHist.Pop(histLine)
-	}
-}
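The Tokenizer above is a thin layer over text/scanner; the two knobs that matter are keeping '\n' out of the Whitespace set (so newlines surface as statement-ending tokens) and accepting · and ∕ as identifier runes. A minimal standalone use of those same knobs, independent of the deleted package:

	package main

	import (
		"fmt"
		"strings"
		"text/scanner"
	)

	func main() {
		var s scanner.Scanner
		s.Init(strings.NewReader("MOVQ runtime·x(SB), AX"))
		// Newline is significant to the assembler, so leave it out of Whitespace.
		s.Whitespace = 1<<'\t' | 1<<'\r' | 1<<' '
		// Accept middle dot and division slash as identifier characters,
		// a simplified version of isIdentRune above.
		s.IsIdentRune = func(ch rune, i int) bool {
			return ch == '_' || ch == '\u00B7' || ch == '\u2215' ||
				(ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') ||
				(i > 0 && ch >= '0' && ch <= '9')
		}
		for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
			fmt.Printf("%q ", s.TokenText())
		}
		fmt.Println()
		// Prints: "MOVQ" "runtime·x" "(" "SB" ")" "," "AX"
	}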
diff --git a/pkg/bootstrap/src/bootstrap/cmd/asm/main.go b/pkg/bootstrap/src/bootstrap/cmd/asm/main.go
deleted file mode 100644
index a0af02e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/asm/main.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/main.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/asm/main.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"bufio"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-
-	"bootstrap/cmd/asm/internal/arch"
-	"bootstrap/cmd/asm/internal/asm"
-	"bootstrap/cmd/asm/internal/flags"
-	"bootstrap/cmd/asm/internal/lex"
-
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-)
-
-func main() {
-	log.SetFlags(0)
-	log.SetPrefix("asm: ")
-
-	GOARCH := obj.GOARCH
-
-	architecture := arch.Set(GOARCH)
-	if architecture == nil {
-		log.Fatalf("unrecognized architecture %s", GOARCH)
-	}
-
-	flags.Parse()
-
-	ctxt := obj.Linknew(architecture.LinkArch)
-	if *flags.PrintOut {
-		ctxt.Debugasm = 1
-	}
-	ctxt.LineHist.TrimPathPrefix = *flags.TrimPath
-	ctxt.Flag_dynlink = *flags.Dynlink
-	ctxt.Flag_shared = *flags.Shared || *flags.Dynlink
-	ctxt.Bso = bufio.NewWriter(os.Stdout)
-	defer ctxt.Bso.Flush()
-
-	// Create object file, write header.
-	out, err := os.Create(*flags.OutputFile)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer bio.MustClose(out)
-	buf := bufio.NewWriter(bio.MustWriter(out))
-
-	fmt.Fprintf(buf, "go object %s %s %s\n", obj.GOOS, obj.GOARCH, obj.Version)
-	fmt.Fprintf(buf, "!\n")
-
-	var ok, diag bool
-	var failedFile string
-	for _, f := range flag.Args() {
-		lexer := lex.NewLexer(f, ctxt)
-		parser := asm.NewParser(ctxt, architecture, lexer)
-		ctxt.DiagFunc = func(format string, args ...interface{}) {
-			diag = true
-			log.Printf(format, args...)
-		}
-		pList := obj.Linknewplist(ctxt)
-		pList.Firstpc, ok = parser.Parse()
-		if !ok {
-			failedFile = f
-			break
-		}
-	}
-	if ok {
-		// reports errors to parser.Errorf
-		obj.Writeobjdirect(ctxt, buf)
-	}
-	if !ok || diag {
-		if failedFile != "" {
-			log.Printf("assembly of %s failed", failedFile)
-		} else {
-			log.Print("assembly failed")
-		}
-		out.Close()
-		os.Remove(*flags.OutputFile)
-		os.Exit(1)
-	}
-	buf.Flush()
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/doc.go b/pkg/bootstrap/src/bootstrap/cmd/compile/doc.go
deleted file mode 100644
index 9521377..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/doc.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/doc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/doc.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Compile, typically invoked as ``go tool compile,'' compiles a single Go package
-comprising the files named on the command line. It then writes a single
-object file named for the basename of the first source file with a .o suffix.
-The object file can then be combined with other objects into a package archive
-or passed directly to the linker (``go tool link''). If invoked with -pack, the compiler
-writes an archive directly, bypassing the intermediate object file.
-
-The generated files contain type information about the symbols exported by
-the package and about types used by symbols imported by the package from
-other packages. It is therefore not necessary when compiling client C of
-package P to read the files of P's dependencies, only the compiled output of P.
-
-Command Line
-
-Usage:
-
-	go tool compile [flags] file...
-
-The specified files must be Go source files and all part of the same package.
-The same compiler is used for all target operating systems and architectures.
-The GOOS and GOARCH environment variables set the desired target.
-
-Flags:
-
-	-D path
-		Set relative path for local imports.
-	-I dir1 -I dir2
-		Search for imported packages in dir1, dir2, etc,
-		after consulting $GOROOT/pkg/$GOOS_$GOARCH.
-	-L
-		Show complete file path in error messages.
-	-N
-		Disable optimizations.
-	-S
-		Print assembly listing to standard output (code only).
-	-S -S
-		Print assembly listing to standard output (code and data).
-	-V
-		Print compiler version and exit.
-	-asmhdr file
-		Write assembly header to file.
-	-complete
-		Assume package has no non-Go components.
-	-cpuprofile file
-		Write a CPU profile for the compilation to file.
-	-dynlink
-		Allow references to Go symbols in shared libraries (experimental).
-	-e
-		Remove the limit on the number of errors reported (default limit is 10).
-	-h
-		Halt with a stack trace at the first error detected.
-	-importmap old=new
-		Interpret import "old" as import "new" during compilation.
-		The option may be repeated to add multiple mappings.
-	-installsuffix suffix
-		Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix
-		instead of $GOROOT/pkg/$GOOS_$GOARCH.
-	-l
-		Disable inlining.
-	-largemodel
-		Generate code that assumes a large memory model.
-	-linkobj file
-		Write linker-specific object to file and compiler-specific
-		object to usual output file (as specified by -o).
-		Without this flag, the -o output is a combination of both
-		linker and compiler input.
-	-memprofile file
-		Write memory profile for the compilation to file.
-	-memprofilerate rate
-		Set runtime.MemProfileRate for the compilation to rate.
-	-msan
-		Insert calls to C/C++ memory sanitizer.
-	-nolocalimports
-		Disallow local (relative) imports.
-	-o file
-		Write object to file (default file.o or, with -pack, file.a).
-	-p path
-		Set expected package import path for the code being compiled,
-		and diagnose imports that would cause a circular dependency.
-	-pack
-		Write a package (archive) file rather than an object file
-	-race
-		Compile with race detector enabled.
-	-trimpath prefix
-		Remove prefix from recorded source file paths.
-	-u
-		Disallow importing packages not marked as safe; implies -nolocalimports.
-
-There are also a number of debugging flags; run the command with no arguments
-for a usage message.
-
-Compiler Directives
-
-The compiler accepts compiler directives in the form of // comments at the
-beginning of a line. To distinguish them from non-directive comments, the directives
-require no space between the slashes and the name of the directive. However, since
-they are comments, tools unaware of the directive convention or of a particular
-directive can skip over a directive like any other comment.
-
-	//line path/to/file:linenumber
-
-The //line directive specifies that the source line that follows should be recorded
-as having come from the given file path and line number. Successive lines are
-recorded using increasing line numbers, until the next directive. This directive
-typically appears in machine-generated code, so that compilers and debuggers
-will show lines in the original input to the generator.
-
-The //line directive is an historical special case; all other directives are of the form
-//go:name, indicating that the directive is defined by the Go toolchain.
-
-	//go:noescape
-
-The //go:noescape directive specifies that the next declaration in the file, which
-must be a func without a body (meaning that it has an implementation not written
-in Go) does not allow any of the pointers passed as arguments to escape into the
-heap or into the values returned from the function. This information can be used
-during the compiler's escape analysis of Go code calling the function.
-
-	//go:nosplit
-
-The //go:nosplit directive specifies that the next function declared in the file must
-not include a stack overflow check. This is most commonly used by low-level
-runtime sources invoked at times when it is unsafe for the calling goroutine to be
-preempted.
-
-	//go:linkname localname importpath.name
-
-The //go:linkname directive instructs the compiler to use ``importpath.name'' as the
-object file symbol name for the variable or function declared as ``localname'' in the
-source code. Because this directive can subvert the type system and package
-modularity, it is only enabled in files that have imported "unsafe".
-*/
-package main
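To make the //go:linkname description concrete, below is a minimal sketch of the pull direction, assuming the package also contains an empty .s file so the compiler accepts the bodiless declaration. runtime.nanotime is an internal runtime symbol commonly reached this way; the package and local names here are otherwise hypothetical.

	package clock

	import _ "unsafe" // importing unsafe is what enables //go:linkname in this file

	// nanotime has no Go body; //go:linkname resolves it to the runtime's
	// internal monotonic clock. An empty .s file in the package is assumed so
	// the compiler accepts the missing body.
	//
	//go:linkname nanotime runtime.nanotime
	func nanotime() int64

	// Now returns monotonic nanoseconds from an arbitrary start point.
	func Now() int64 { return nanotime() }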
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/fmt_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/fmt_test.go
deleted file mode 100644
index cd220d9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/fmt_test.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/fmt_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/fmt_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements TestFormats; a test that verifies
-// format strings in the compiler (this directory and all
-// subdirectories, recursively).
-//
-// TestFormats finds potential (Printf, etc.) format strings.
-// If they are used in a call, the format verbs are verified
-// based on the matching argument type against a precomputed
-// table of valid formats. The knownFormats table can be used
-// to automatically rewrite format strings with the -u flag.
-//
-// A new knownFormats table based on the found formats is printed
-// when the test is run in verbose mode (-v flag). The table
-// needs to be updated whenever a new (type, format) combination
-// is found and the format verb is not 'v' or 'T' (as in "%v" or
-// "%T").
-//
-// Run as: go test -run Formats [-u][-v]
-//
-// Known bugs:
-// - indexed format strings ("%[2]s", etc.) are not supported
-//   (the test will fail)
-// - format strings that are not simple string literals cannot
-//   be updated automatically
-//   (the test will fail with respective warnings)
-// - format strings in _test packages outside the current
-//   package are not processed
-//   (the test will report those files)
-//
-package main_test
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/ast"
-	"go/build"
-	"go/constant"
-	"go/format"
-	"go/importer"
-	"go/parser"
-	"go/token"
-	"go/types"
-	"internal/testenv"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"testing"
-	"unicode/utf8"
-)
-
-var update = flag.Bool("u", false, "update format strings")
-
-// The following variables collect information across all processed files.
-var (
-	fset          = token.NewFileSet()
-	formatStrings = make(map[*ast.BasicLit]bool)      // set of all potential format strings found
-	foundFormats  = make(map[string]bool)             // set of all formats found
-	callSites     = make(map[*ast.CallExpr]*callSite) // map of all calls
-)
-
-// A File is a corresponding (filename, ast) pair.
-type File struct {
-	name string
-	ast  *ast.File
-}
-
-func TestFormats(t *testing.T) {
-	testenv.MustHaveGoBuild(t) // more restrictive than necessary, but that's ok
-
-	// process all directories
-	filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
-		if info.IsDir() {
-			if info.Name() == "testdata" {
-				return filepath.SkipDir
-			}
-
-			importPath := filepath.Join("cmd/compile", path)
-			if blacklistedPackages[filepath.ToSlash(importPath)] {
-				return filepath.SkipDir
-			}
-
-			pkg, err := build.Import(importPath, path, 0)
-			if err != nil {
-				if _, ok := err.(*build.NoGoError); ok {
-					return nil // nothing to do here
-				}
-				t.Fatal(err)
-			}
-			collectPkgFormats(t, pkg)
-		}
-		return nil
-	})
-
-	// test and rewrite formats
-	updatedFiles := make(map[string]File) // files that were rewritten
-	for _, p := range callSites {
-		// test current format literal and determine updated one
-		out := formatReplace(p.str, func(index int, in string) string {
-			if in == "*" {
-				return in // cannot rewrite '*' (as in "%*d")
-			}
-			// in != '*'
-			typ := p.types[index]
-			format := typ + " " + in // e.g., "*Node %n"
-
-			// check if format is known
-			out, known := knownFormats[format]
-
-			// record format if not yet found
-			_, found := foundFormats[format]
-			if !found {
-				foundFormats[format] = true
-			}
-
-			// report an error if the format is unknown and this is the first
-			// time we see it; ignore "%v" and "%T" which are always valid
-			if !known && !found && in != "%v" && in != "%T" {
-				t.Errorf("%s: unknown format %q for %s argument", posString(p.arg), in, typ)
-			}
-
-			if out == "" {
-				out = in
-			}
-			return out
-		})
-
-		// replace existing format literal if it changed
-		if out != p.str {
-			// we cannot replace the argument if it's not a string literal for now
-			// (e.g., it may be "foo" + "bar")
-			lit, ok := p.arg.(*ast.BasicLit)
-			if !ok {
-				delete(callSites, p.call) // treat as if we hadn't found this site
-				continue
-			}
-
-			if testing.Verbose() {
-				fmt.Printf("%s:\n\t- %q\n\t+ %q\n", posString(p.arg), p.str, out)
-			}
-
-			// find argument index of format argument
-			index := -1
-			for i, arg := range p.call.Args {
-				if p.arg == arg {
-					index = i
-					break
-				}
-			}
-			if index < 0 {
-				// we may have processed the same call site twice,
-				// but that shouldn't happen
-				panic("internal error: matching argument not found")
-			}
-
-			// replace literal
-			new := *lit                    // make a copy
-			new.Value = strconv.Quote(out) // this may introduce "-quotes where there were `-quotes
-			p.call.Args[index] = &new
-			updatedFiles[p.file.name] = p.file
-		}
-	}
-
-	// write dirty files back
-	var filesUpdated bool
-	if len(updatedFiles) > 0 && *update {
-		for _, file := range updatedFiles {
-			var buf bytes.Buffer
-			if err := format.Node(&buf, fset, file.ast); err != nil {
-				t.Errorf("WARNING: formatting %s failed: %v", file.name, err)
-				continue
-			}
-			if err := ioutil.WriteFile(file.name, buf.Bytes(), 0666); err != nil {
-				t.Errorf("WARNING: writing %s failed: %v", file.name, err)
-				continue
-			}
-			fmt.Printf("updated %s\n", file.name)
-			filesUpdated = true
-		}
-	}
-
-	// report all function names containing a format string
-	if len(callSites) > 0 && testing.Verbose() {
-		set := make(map[string]bool)
-		for _, p := range callSites {
-			set[nodeString(p.call.Fun)] = true
-		}
-		var list []string
-		for s := range set {
-			list = append(list, s)
-		}
-		fmt.Println("\nFunctions")
-		printList(list)
-	}
-
-	// report all formats found
-	if len(foundFormats) > 0 && testing.Verbose() {
-		var list []string
-		for s := range foundFormats {
-			list = append(list, fmt.Sprintf("%q: \"\",", s))
-		}
-		fmt.Println("\nvar knownFormats = map[string]string{")
-		printList(list)
-		fmt.Println("}")
-	}
-
-	// check that knownFormats is up to date
-	if !testing.Verbose() && !*update {
-		var mismatch bool
-		for s := range foundFormats {
-			if _, ok := knownFormats[s]; !ok {
-				mismatch = true
-				break
-			}
-		}
-		if !mismatch {
-			for s := range knownFormats {
-				if _, ok := foundFormats[s]; !ok {
-					mismatch = true
-					break
-				}
-			}
-		}
-		if mismatch {
-			t.Errorf("knownFormats is out of date; please run with -v to regenerate")
-		}
-	}
-
-	// all format strings of calls must be in the formatStrings set (self-verification)
-	for _, p := range callSites {
-		if lit, ok := p.arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
-			if formatStrings[lit] {
-				// ok
-				delete(formatStrings, lit)
-			} else {
-				// this should never happen
-				panic(fmt.Sprintf("internal error: format string not found (%s)", posString(lit)))
-			}
-		}
-	}
-
-	// if we have any strings left, we may need to update them manually
-	if len(formatStrings) > 0 && filesUpdated {
-		var list []string
-		for lit := range formatStrings {
-			list = append(list, fmt.Sprintf("%s: %s", posString(lit), nodeString(lit)))
-		}
-		fmt.Println("\nWARNING: Potentially missed format strings")
-		printList(list)
-		t.Fail()
-	}
-
-	fmt.Println()
-}
-
-// A callSite describes a function call that appears to contain
-// a format string.
-type callSite struct {
-	file  File
-	call  *ast.CallExpr // call containing the format string
-	arg   ast.Expr      // format argument (string literal or constant)
-	str   string        // unquoted format string
-	types []string      // argument types
-}
-
-func collectPkgFormats(t *testing.T, pkg *build.Package) {
-	// collect all files
-	var filenames []string
-	filenames = append(filenames, pkg.GoFiles...)
-	filenames = append(filenames, pkg.CgoFiles...)
-	filenames = append(filenames, pkg.TestGoFiles...)
-
-	// TODO(gri) verify _test files outside package
-	for _, name := range pkg.XTestGoFiles {
-		// don't process this test itself
-		if name != "fmt_test.go" && testing.Verbose() {
-			fmt.Printf("WARNING: %s not processed\n", filepath.Join(pkg.Dir, name))
-		}
-	}
-
-	// make filenames relative to .
-	for i, name := range filenames {
-		filenames[i] = filepath.Join(pkg.Dir, name)
-	}
-
-	// parse all files
-	files := make([]*ast.File, len(filenames))
-	for i, filename := range filenames {
-		f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
-		if err != nil {
-			t.Fatal(err)
-		}
-		files[i] = f
-	}
-
-	// typecheck package
-	conf := types.Config{Importer: importer.Default()}
-	etypes := make(map[ast.Expr]types.TypeAndValue)
-	if _, err := conf.Check(pkg.ImportPath, fset, files, &types.Info{Types: etypes}); err != nil {
-		t.Fatal(err)
-	}
-
-	// collect all potential format strings (for extra verification later)
-	for _, file := range files {
-		ast.Inspect(file, func(n ast.Node) bool {
-			if s, ok := stringLit(n); ok && isFormat(s) {
-				formatStrings[n.(*ast.BasicLit)] = true
-			}
-			return true
-		})
-	}
-
-	// collect all formats/arguments of calls with format strings
-	for index, file := range files {
-		ast.Inspect(file, func(n ast.Node) bool {
-			if call, ok := n.(*ast.CallExpr); ok {
-				// ignore blacklisted functions
-				if blacklistedFunctions[nodeString(call.Fun)] {
-					return true
-				}
-				// look for an argument that might be a format string
-				for i, arg := range call.Args {
-					if s, ok := stringVal(etypes[arg]); ok && isFormat(s) {
-						// make sure we have enough arguments
-						n := numFormatArgs(s)
-						if i+1+n > len(call.Args) {
-							t.Errorf("%s: not enough format args (blacklist %s?)", posString(call), nodeString(call.Fun))
-							break // ignore this call
-						}
-						// assume last n arguments are to be formatted;
-						// determine their types
-						argTypes := make([]string, n)
-						for i, arg := range call.Args[len(call.Args)-n:] {
-							if tv, ok := etypes[arg]; ok {
-								argTypes[i] = typeString(tv.Type)
-							}
-						}
-						// collect call site
-						if callSites[call] != nil {
-							panic("internal error: file processed twice?")
-						}
-						callSites[call] = &callSite{
-							file:  File{filenames[index], file},
-							call:  call,
-							arg:   arg,
-							str:   s,
-							types: argTypes,
-						}
-						break // at most one format per argument list
-					}
-				}
-			}
-			return true
-		})
-	}
-}
-
-// printList prints list in sorted order.
-func printList(list []string) {
-	sort.Strings(list)
-	for _, s := range list {
-		fmt.Println("\t", s)
-	}
-}
-
-// posString returns a string representation of n's position
-// in the form filename:line:col: .
-func posString(n ast.Node) string {
-	if n == nil {
-		return ""
-	}
-	return fset.Position(n.Pos()).String()
-}
-
-// nodeString returns a string representation of n.
-func nodeString(n ast.Node) string {
-	var buf bytes.Buffer
-	if err := format.Node(&buf, fset, n); err != nil {
-		log.Fatal(err) // should always succeed
-	}
-	return buf.String()
-}
-
-// typeString returns a string representation of n.
-func typeString(typ types.Type) string {
-	return filepath.ToSlash(typ.String())
-}
-
-// stringLit returns the unquoted string value and true if
-// n represents a string literal; otherwise it returns ""
-// and false.
-func stringLit(n ast.Node) (string, bool) {
-	if lit, ok := n.(*ast.BasicLit); ok && lit.Kind == token.STRING {
-		s, err := strconv.Unquote(lit.Value)
-		if err != nil {
-			log.Fatal(err) // should not happen with correct ASTs
-		}
-		return s, true
-	}
-	return "", false
-}
-
-// stringVal returns the (unquoted) string value and true if
-// tv is a string constant; otherwise it returns "" and false.
-func stringVal(tv types.TypeAndValue) (string, bool) {
-	if tv.IsValue() && tv.Value != nil && tv.Value.Kind() == constant.String {
-		return constant.StringVal(tv.Value), true
-	}
-	return "", false
-}
-
-// formatIter iterates through the string s in increasing
-// index order and calls f for each format specifier '%..v'.
-// The arguments for f describe the specifier's index range.
-// If a format specifier contains a  "*", f is called with
-// the index range for "*" alone, before being called for
-// the entire specifier. The result of f is the index of
-// the rune at which iteration continues.
-func formatIter(s string, f func(i, j int) int) {
-	i := 0     // index after current rune
-	var r rune // current rune
-
-	next := func() {
-		r1, w := utf8.DecodeRuneInString(s[i:])
-		if w == 0 {
-			r1 = -1 // signal end-of-string
-		}
-		r = r1
-		i += w
-	}
-
-	flags := func() {
-		for r == ' ' || r == '#' || r == '+' || r == '-' || r == '0' {
-			next()
-		}
-	}
-
-	index := func() {
-		if r == '[' {
-			log.Fatalf("cannot handle indexed arguments: %s", s)
-		}
-	}
-
-	digits := func() {
-		index()
-		if r == '*' {
-			i = f(i-1, i)
-			next()
-			return
-		}
-		for '0' <= r && r <= '9' {
-			next()
-		}
-	}
-
-	for next(); r >= 0; next() {
-		if r == '%' {
-			i0 := i
-			next()
-			flags()
-			digits()
-			if r == '.' {
-				next()
-				digits()
-			}
-			index()
-			// accept any letter (a-z, A-Z) as format verb;
-			// ignore anything else
-			if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' {
-				i = f(i0-1, i)
-			}
-		}
-	}
-}
-
-// isFormat reports whether s contains format specifiers.
-func isFormat(s string) (yes bool) {
-	formatIter(s, func(i, j int) int {
-		yes = true
-		return len(s) // stop iteration
-	})
-	return
-}
-
-// oneFormat reports whether s is exactly one format specifier.
-func oneFormat(s string) (yes bool) {
-	formatIter(s, func(i, j int) int {
-		yes = i == 0 && j == len(s)
-		return j
-	})
-	return
-}
-
-// numFormatArgs returns the number of format specifiers in s.
-func numFormatArgs(s string) int {
-	count := 0
-	formatIter(s, func(i, j int) int {
-		count++
-		return j
-	})
-	return count
-}
-
-// formatReplace replaces the i'th format specifier s in the incoming
-// string in with the result of f(i, s) and returns the new string.
-func formatReplace(in string, f func(i int, s string) string) string {
-	var buf []byte
-	i0 := 0
-	index := 0
-	formatIter(in, func(i, j int) int {
-		if sub := in[i:j]; sub != "*" { // ignore calls for "*" width/length specifiers
-			buf = append(buf, in[i0:i]...)
-			buf = append(buf, f(index, sub)...)
-			i0 = j
-		}
-		index++
-		return j
-	})
-	return string(append(buf, in[i0:]...))
-}
-
-// blacklistedPackages is the set of packages which can
-// be ignored.
-var blacklistedPackages = map[string]bool{}
-
-// blacklistedFunctions is the set of functions which may have
-// format-like arguments but which don't do any formatting and
-// thus may be ignored.
-var blacklistedFunctions = map[string]bool{}
-
-func init() {
-	// verify that knownFormats entries are correctly formatted
-	for key, val := range knownFormats {
-		// key must be "typename format", and format starts with a '%'
-		// (formats containing '*' alone are not collected in this table)
-		i := strings.Index(key, "%")
-		if i < 0 || !oneFormat(key[i:]) {
-			log.Fatalf("incorrect knownFormats key: %q", key)
-		}
-		// val must be "format" or ""
-		if val != "" && !oneFormat(val) {
-			log.Fatalf("incorrect knownFormats value: %q (key = %q)", val, key)
-		}
-	}
-}
-
-// knownFormats entries are of the form "typename format" -> "newformat".
-// An absent entry means that the format is not recognized as valid.
-// An empty new format means that the format should remain unchanged.
-// To print out a new table, run: go test -run Formats -v.
-var knownFormats = map[string]string{
-	"*bytes.Buffer %s":                                "",
-	"*cmd/compile/internal/gc.Field %p":               "",
-	"*cmd/compile/internal/gc.Field %v":               "",
-	"*cmd/compile/internal/gc.Mpflt %v":               "",
-	"*cmd/compile/internal/gc.Mpint %v":               "",
-	"*cmd/compile/internal/gc.Node %#v":               "",
-	"*cmd/compile/internal/gc.Node %+S":               "",
-	"*cmd/compile/internal/gc.Node %+v":               "",
-	"*cmd/compile/internal/gc.Node %0j":               "",
-	"*cmd/compile/internal/gc.Node %L":                "",
-	"*cmd/compile/internal/gc.Node %S":                "",
-	"*cmd/compile/internal/gc.Node %j":                "",
-	"*cmd/compile/internal/gc.Node %p":                "",
-	"*cmd/compile/internal/gc.Node %v":                "",
-	"*cmd/compile/internal/gc.Sym %+v":                "",
-	"*cmd/compile/internal/gc.Sym %-v":                "",
-	"*cmd/compile/internal/gc.Sym %0S":                "",
-	"*cmd/compile/internal/gc.Sym %S":                 "",
-	"*cmd/compile/internal/gc.Sym %p":                 "",
-	"*cmd/compile/internal/gc.Sym %v":                 "",
-	"*cmd/compile/internal/gc.Type %#v":               "",
-	"*cmd/compile/internal/gc.Type %+v":               "",
-	"*cmd/compile/internal/gc.Type %-S":               "",
-	"*cmd/compile/internal/gc.Type %0S":               "",
-	"*cmd/compile/internal/gc.Type %L":                "",
-	"*cmd/compile/internal/gc.Type %S":                "",
-	"*cmd/compile/internal/gc.Type %p":                "",
-	"*cmd/compile/internal/gc.Type %v":                "",
-	"*cmd/compile/internal/ssa.Block %s":              "",
-	"*cmd/compile/internal/ssa.Block %v":              "",
-	"*cmd/compile/internal/ssa.Func %s":               "",
-	"*cmd/compile/internal/ssa.SparseTreeNode %v":     "",
-	"*cmd/compile/internal/ssa.Value %s":              "",
-	"*cmd/compile/internal/ssa.Value %v":              "",
-	"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
-	"*cmd/internal/obj.Addr %v":                       "",
-	"*cmd/internal/obj.Prog %p":                       "",
-	"*cmd/internal/obj.Prog %s":                       "",
-	"*cmd/internal/obj.Prog %v":                       "",
-	"*math/big.Int %#x":                               "",
-	"[16]byte %x":                                     "",
-	"[]*cmd/compile/internal/gc.Node %v":              "",
-	"[]*cmd/compile/internal/gc.Sig %#v":              "",
-	"[]*cmd/compile/internal/ssa.Value %v":            "",
-	"[]byte %s":                                       "",
-	"[]byte %x":                                       "",
-	"[]cmd/compile/internal/ssa.Edge %v":              "",
-	"[]cmd/compile/internal/ssa.ID %v":                "",
-	"[]string %v":                                     "",
-	"bool %v":                                         "",
-	"byte %02x":                                       "",
-	"byte %08b":                                       "",
-	"byte %c":                                         "",
-	"cmd/compile/internal/arm.shift %d":               "",
-	"cmd/compile/internal/gc.Class %d":                "",
-	"cmd/compile/internal/gc.Ctype %d":                "",
-	"cmd/compile/internal/gc.Ctype %v":                "",
-	"cmd/compile/internal/gc.EType %d":                "",
-	"cmd/compile/internal/gc.EType %s":                "",
-	"cmd/compile/internal/gc.EType %v":                "",
-	"cmd/compile/internal/gc.Level %d":                "",
-	"cmd/compile/internal/gc.Level %v":                "",
-	"cmd/compile/internal/gc.Node %#v":                "",
-	"cmd/compile/internal/gc.Nodes %#v":               "",
-	"cmd/compile/internal/gc.Nodes %+v":               "",
-	"cmd/compile/internal/gc.Nodes %.v":               "",
-	"cmd/compile/internal/gc.Nodes %v":                "",
-	"cmd/compile/internal/gc.Op %#v":                  "",
-	"cmd/compile/internal/gc.Op %v":                   "",
-	"cmd/compile/internal/gc.Val %#v":                 "",
-	"cmd/compile/internal/gc.Val %T":                  "",
-	"cmd/compile/internal/gc.Val %v":                  "",
-	"cmd/compile/internal/gc.initKind %d":             "",
-	"cmd/compile/internal/ssa.BranchPrediction %d":    "",
-	"cmd/compile/internal/ssa.Edge %v":                "",
-	"cmd/compile/internal/ssa.GCNode %v":              "",
-	"cmd/compile/internal/ssa.ID %d":                  "",
-	"cmd/compile/internal/ssa.LocalSlot %v":           "",
-	"cmd/compile/internal/ssa.Location %v":            "",
-	"cmd/compile/internal/ssa.Op %s":                  "",
-	"cmd/compile/internal/ssa.Op %v":                  "",
-	"cmd/compile/internal/ssa.SizeAndAlign %s":        "",
-	"cmd/compile/internal/ssa.Type %s":                "",
-	"cmd/compile/internal/ssa.Type %v":                "",
-	"cmd/compile/internal/ssa.ValAndOff %s":           "",
-	"cmd/compile/internal/ssa.markKind %d":            "",
-	"cmd/compile/internal/ssa.rbrank %d":              "",
-	"cmd/compile/internal/ssa.regMask %d":             "",
-	"cmd/compile/internal/ssa.register %d":            "",
-	"cmd/compile/internal/syntax.Expr %#v":            "",
-	"cmd/compile/internal/syntax.Expr %s":             "",
-	"cmd/compile/internal/syntax.Node %T":             "",
-	"cmd/compile/internal/syntax.Operator %d":         "",
-	"cmd/compile/internal/syntax.Operator %s":         "",
-	"cmd/compile/internal/syntax.token %d":            "",
-	"cmd/compile/internal/syntax.token %q":            "",
-	"cmd/compile/internal/syntax.token %s":            "",
-	"cmd/internal/obj.As %v":                          "",
-	"error %v":                                        "",
-	"float64 %.2f":                                    "",
-	"float64 %.3f":                                    "",
-	"float64 %.6g":                                    "",
-	"float64 %g":                                      "",
-	"fmt.Stringer %T":                                 "",
-	"int %-12d":                                       "",
-	"int %-6d":                                        "",
-	"int %-8o":                                        "",
-	"int %5d":                                         "",
-	"int %6d":                                         "",
-	"int %c":                                          "",
-	"int %d":                                          "",
-	"int %v":                                          "",
-	"int %x":                                          "",
-	"int16 %d":                                        "",
-	"int16 %x":                                        "",
-	"int32 %d":                                        "",
-	"int32 %v":                                        "",
-	"int32 %x":                                        "",
-	"int64 %+d":                                       "",
-	"int64 %-10d":                                     "",
-	"int64 %X":                                        "",
-	"int64 %d":                                        "",
-	"int64 %v":                                        "",
-	"int64 %x":                                        "",
-	"int8 %d":                                         "",
-	"int8 %x":                                         "",
-	"interface{} %#v":                                 "",
-	"interface{} %T":                                  "",
-	"interface{} %q":                                  "",
-	"interface{} %s":                                  "",
-	"interface{} %v":                                  "",
-	"map[*cmd/compile/internal/gc.Node]*cmd/compile/internal/ssa.Value %v": "",
-	"reflect.Type %s":  "",
-	"rune %#U":         "",
-	"rune %c":          "",
-	"string %-16s":     "",
-	"string %.*s":      "",
-	"string %q":        "",
-	"string %s":        "",
-	"string %v":        "",
-	"time.Duration %d": "",
-	"time.Duration %v": "",
-	"uint %04x":        "",
-	"uint %d":          "",
-	"uint16 %d":        "",
-	"uint16 %v":        "",
-	"uint16 %x":        "",
-	"uint32 %08x":      "",
-	"uint32 %d":        "",
-	"uint32 %x":        "",
-	"uint64 %016x":     "",
-	"uint64 %08x":      "",
-	"uint64 %d":        "",
-	"uint64 %x":        "",
-	"uint8 %d":         "",
-	"uint8 %x":         "",
-	"uintptr %d":       "",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/galign.go
deleted file mode 100644
index 4eb3a41..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/galign.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/galign.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package amd64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-var leaptr = x86.ALEAQ
-
-func Init() {
-	gc.Thearch.LinkArch = &x86.Linkamd64
-	if obj.GOARCH == "amd64p32" {
-		gc.Thearch.LinkArch = &x86.Linkamd64p32
-		leaptr = x86.ALEAL
-	}
-	gc.Thearch.REGSP = x86.REGSP
-	gc.Thearch.MAXWIDTH = 1 << 50
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = ssaMarkMoves
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/ggen.go
deleted file mode 100644
index 9d15766..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/ggen.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/ggen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package amd64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-// no floating point in note handlers on Plan 9
-var isPlan9 = obj.GOOS == "plan9"
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-	ax := uint32(0)
-	x0 := uint32(0)
-
-	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi, &ax, &x0)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi, &ax, &x0)
-}
-
-// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
-// See runtime/mkduff.go.
-const (
-	dzBlocks    = 16 // number of MOV/ADD blocks
-	dzBlockLen  = 4  // number of clears per block
-	dzBlockSize = 19 // size of instructions in a single block
-	dzMovSize   = 4  // size of single MOV instruction w/ offset
-	dzAddSize   = 4  // size of single ADD instruction
-	dzClearStep = 16 // number of bytes cleared by each MOV instruction
-
-	dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
-	dzSize     = dzBlocks * dzBlockSize
-)
-
-// dzOff returns the offset for a jump into DUFFZERO.
-// b is the number of bytes to zero.
-func dzOff(b int64) int64 {
-	off := int64(dzSize)
-	off -= b / dzClearLen * dzBlockSize
-	tailLen := b % dzClearLen
-	if tailLen >= dzClearStep {
-		off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
-	}
-	return off
-}
-
-// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
-// b is the number of bytes to zero.
-func dzDI(b int64) int64 {
-	tailLen := b % dzClearLen
-	if tailLen < dzClearStep {
-		return 0
-	}
-	tailSteps := tailLen / dzClearStep
-	return -dzClearStep * (dzBlockLen - tailSteps)
-}
-
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-
-	if cnt%int64(gc.Widthreg) != 0 {
-		// should only happen with nacl
-		if cnt%int64(gc.Widthptr) != 0 {
-			gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
-		}
-		if *ax == 0 {
-			p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
-			*ax = 1
-		}
-		p = gc.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
-		lo += int64(gc.Widthptr)
-		cnt -= int64(gc.Widthptr)
-	}
-
-	if cnt == 8 {
-		if *ax == 0 {
-			p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
-			*ax = 1
-		}
-		p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
-	} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
-		if *x0 == 0 {
-			p = gc.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
-			*x0 = 1
-		}
-
-		for i := int64(0); i < cnt/16; i++ {
-			p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
-		}
-
-		if cnt%16 != 0 {
-			p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
-		}
-	} else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
-		if *x0 == 0 {
-			p = gc.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
-			*x0 = 1
-		}
-		p = gc.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
-		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-
-		if cnt%16 != 0 {
-			p = gc.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
-		}
-	} else {
-		if *ax == 0 {
-			p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
-			*ax = 1
-		}
-
-		p = gc.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
-		p = gc.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
-		p = gc.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = gc.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	// This is actually not the x86 NOP anymore,
-	// but at the point where it gets used, AX is dead
-	// so it's okay if we lose the high bits.
-	p := gc.Prog(x86.AXCHGL)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = x86.REG_AX
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = x86.REG_AX
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/prog.go
deleted file mode 100644
index ac9c734..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/prog.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/prog.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package amd64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-const (
-	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
-	RightRdwr uint32 = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about instruction
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-var progtable = [x86.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the Intel opcode.
-	obj.ANOP:               {Flags: gc.LeftRead | gc.RightWrite},
-	x86.AADCL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.AADCQ & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.AADCW & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.AADDB & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDW & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDQ & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDSD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.AADDSS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.AANDB & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AANDL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AANDQ & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AANDW & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-
-	x86.ABSFL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.SetCarry},
-	x86.ABSFQ & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.SetCarry},
-	x86.ABSFW & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.SetCarry},
-	x86.ABSRL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.SetCarry},
-	x86.ABSRQ & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.SetCarry},
-	x86.ABSRW & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.SetCarry},
-	x86.ABSWAPL & obj.AMask: {Flags: gc.SizeL | RightRdwr},
-	x86.ABSWAPQ & obj.AMask: {Flags: gc.SizeQ | RightRdwr},
-
-	obj.ACALL & obj.AMask: {Flags: gc.RightAddr | gc.Call | gc.KillCarry},
-	x86.ACDQ & obj.AMask:  {Flags: gc.OK},
-	x86.ACQO & obj.AMask:  {Flags: gc.OK},
-	x86.ACWD & obj.AMask:  {Flags: gc.OK},
-	x86.ACLD & obj.AMask:  {Flags: gc.OK},
-	x86.ASTD & obj.AMask:  {Flags: gc.OK},
-
-	x86.ACMOVLEQ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.UseCarry},
-	x86.ACMOVLNE & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.UseCarry},
-	x86.ACMOVQEQ & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.UseCarry},
-	x86.ACMOVQNE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.UseCarry},
-	x86.ACMOVWEQ & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.UseCarry},
-	x86.ACMOVWNE & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.UseCarry},
-
-	x86.ACMPB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACMPL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACMPQ & obj.AMask:      {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACMPW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACMPXCHGL & obj.AMask:  {Flags: gc.SizeL | LeftRdwr | RightRdwr | gc.SetCarry},
-	x86.ACMPXCHGQ & obj.AMask:  {Flags: gc.SizeQ | LeftRdwr | RightRdwr | gc.SetCarry},
-	x86.ACOMISD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACOMISS & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACVTSD2SL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSD2SQ & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSD2SS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSL2SD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSL2SS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSQ2SD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSQ2SS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSS2SD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSS2SL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSS2SQ & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTTSD2SL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTTSD2SQ & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTTSS2SL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTTSS2SQ & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ADECB & obj.AMask:      {Flags: gc.SizeB | RightRdwr},
-	x86.ADECL & obj.AMask:      {Flags: gc.SizeL | RightRdwr},
-	x86.ADECQ & obj.AMask:      {Flags: gc.SizeQ | RightRdwr},
-	x86.ADECW & obj.AMask:      {Flags: gc.SizeW | RightRdwr},
-	x86.ADIVB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.ADIVL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | gc.SetCarry},
-	x86.ADIVQ & obj.AMask:      {Flags: gc.SizeQ | gc.LeftRead | gc.SetCarry},
-	x86.ADIVW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | gc.SetCarry},
-	x86.ADIVSD & obj.AMask:     {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ADIVSS & obj.AMask:     {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.AIDIVB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.AIDIVL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | gc.SetCarry},
-	x86.AIDIVQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | gc.SetCarry},
-	x86.AIDIVW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | gc.SetCarry},
-	x86.AIMULB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.AIMULL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry},
-	x86.AIMULQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | gc.ImulAXDX | gc.SetCarry},
-	x86.AIMULW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry},
-	x86.AINCB & obj.AMask:      {Flags: gc.SizeB | RightRdwr},
-	x86.AINCL & obj.AMask:      {Flags: gc.SizeL | RightRdwr},
-	x86.AINCQ & obj.AMask:      {Flags: gc.SizeQ | RightRdwr},
-	x86.AINCW & obj.AMask:      {Flags: gc.SizeW | RightRdwr},
-	x86.AJCC & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJCS & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJEQ & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJGE & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJGT & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJHI & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJLE & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJLS & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJLT & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJMI & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJNE & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJOC & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJOS & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJPC & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJPL & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJPS & obj.AMask:       {Flags: gc.Cjmp | gc.UseCarry},
-	obj.AJMP & obj.AMask:       {Flags: gc.Jump | gc.Break | gc.KillCarry},
-	x86.ALEAW & obj.AMask:      {Flags: gc.LeftAddr | gc.RightWrite},
-	x86.ALEAL & obj.AMask:      {Flags: gc.LeftAddr | gc.RightWrite},
-	x86.ALEAQ & obj.AMask:      {Flags: gc.LeftAddr | gc.RightWrite},
-	x86.ALOCK & obj.AMask:      {Flags: gc.OK},
-	x86.AMOVBLSX & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBLZX & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBQSX & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBQZX & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBWSX & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBWZX & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVLQSX & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVLQZX & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVWLSX & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVWLZX & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVWQSX & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVWQZX & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVQL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVQ & obj.AMask:      {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVUPS & obj.AMask:    {Flags: gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVSB & obj.AMask:     {Flags: gc.OK},
-	x86.AMOVSL & obj.AMask:     {Flags: gc.OK},
-	x86.AMOVSQ & obj.AMask:     {Flags: gc.OK},
-	x86.AMOVSW & obj.AMask:     {Flags: gc.OK},
-	obj.ADUFFCOPY & obj.AMask:  {Flags: gc.OK},
-	x86.AMOVSD & obj.AMask:     {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVSS & obj.AMask:     {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// We use&obj.AMask MOVAPD as a faster synonym for MOVSD.
-	x86.AMOVAPD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMULB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.AMULL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | gc.SetCarry},
-	x86.AMULQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | gc.SetCarry},
-	x86.AMULW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | gc.SetCarry},
-	x86.AMULSD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.AMULSS & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.ANEGB & obj.AMask:     {Flags: gc.SizeB | RightRdwr | gc.SetCarry},
-	x86.ANEGL & obj.AMask:     {Flags: gc.SizeL | RightRdwr | gc.SetCarry},
-	x86.ANEGQ & obj.AMask:     {Flags: gc.SizeQ | RightRdwr | gc.SetCarry},
-	x86.ANEGW & obj.AMask:     {Flags: gc.SizeW | RightRdwr | gc.SetCarry},
-	x86.ANOTB & obj.AMask:     {Flags: gc.SizeB | RightRdwr},
-	x86.ANOTL & obj.AMask:     {Flags: gc.SizeL | RightRdwr},
-	x86.ANOTQ & obj.AMask:     {Flags: gc.SizeQ | RightRdwr},
-	x86.ANOTW & obj.AMask:     {Flags: gc.SizeW | RightRdwr},
-	x86.AORB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AORL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AORQ & obj.AMask:      {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AORW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.APOPQ & obj.AMask:     {Flags: gc.SizeQ | gc.RightWrite},
-	x86.APUSHQ & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead},
-	x86.APXOR & obj.AMask:     {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ARCLB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCLL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCLQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCLW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.AREP & obj.AMask:      {Flags: gc.OK},
-	x86.AREPN & obj.AMask:     {Flags: gc.OK},
-	obj.ARET & obj.AMask:      {Flags: gc.Break | gc.KillCarry},
-	x86.AROLB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.AROLL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.AROLQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.AROLW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASALB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASALL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASALQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASALW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASBBB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASBBL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASBBQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASBBW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASETCC & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETCS & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETEQ & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETGE & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETGT & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETHI & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETLE & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETLS & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETLT & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETMI & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETNE & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETOC & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETOS & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETPC & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETPL & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASETPS & obj.AMask:    {Flags: gc.SizeB | gc.RightWrite | gc.UseCarry},
-	x86.ASHLB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHLL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHLQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHLW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASQRTSD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ASTOSB & obj.AMask:    {Flags: gc.OK},
-	x86.ASTOSL & obj.AMask:    {Flags: gc.OK},
-	x86.ASTOSQ & obj.AMask:    {Flags: gc.OK},
-	x86.ASTOSW & obj.AMask:    {Flags: gc.OK},
-	obj.ADUFFZERO & obj.AMask: {Flags: gc.OK},
-	x86.ASUBB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBSD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ASUBSS & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.ATESTB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ATESTL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ATESTQ & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ATESTW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.AUCOMISD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	x86.AUCOMISS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
-	x86.AXADDL & obj.AMask:    {Flags: gc.SizeL | LeftRdwr | RightRdwr | gc.KillCarry},
-	x86.AXADDQ & obj.AMask:    {Flags: gc.SizeQ | LeftRdwr | RightRdwr | gc.KillCarry},
-	x86.AXCHGB & obj.AMask:    {Flags: gc.SizeB | LeftRdwr | RightRdwr},
-	x86.AXCHGL & obj.AMask:    {Flags: gc.SizeL | LeftRdwr | RightRdwr},
-	x86.AXCHGQ & obj.AMask:    {Flags: gc.SizeQ | LeftRdwr | RightRdwr},
-	x86.AXCHGW & obj.AMask:    {Flags: gc.SizeW | LeftRdwr | RightRdwr},
-	x86.AXORB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AXORL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AXORQ & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AXORW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AXORPS & obj.AMask:    {Flags: gc.LeftRead | RightRdwr},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("unknown instruction %v", p)
-	}
-
-	if info.Flags&gc.ImulAXDX != 0 && p.To.Type != obj.TYPE_NONE {
-		info.Flags |= RightRdwr
-	}
-
-	return info
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/ssa.go
deleted file mode 100644
index ed09932..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/amd64/ssa.go
+++ /dev/null
@@ -1,1053 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package amd64
-
-import (
-	"fmt"
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
-	flive := b.FlagsLiveAtEnd
-	if b.Control != nil && b.Control.Type.IsFlags() {
-		flive = true
-	}
-	for i := len(b.Values) - 1; i >= 0; i-- {
-		v := b.Values[i]
-		if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
-			// The "mark" is any non-nil Aux value.
-			v.Aux = v
-		}
-		if v.Type.IsFlags() {
-			flive = false
-		}
-		for _, a := range v.Args {
-			if a.Type.IsFlags() {
-				flive = true
-			}
-		}
-	}
-}
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
-	// Avoid partial register write
-	if !t.IsFloat() && t.Size() <= 2 {
-		if t.Size() == 1 {
-			return x86.AMOVBLZX
-		} else {
-			return x86.AMOVWLZX
-		}
-	}
-	// Otherwise, there's no difference between load and store opcodes.
-	return storeByType(t)
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
-	width := t.Size()
-	if t.IsFloat() {
-		switch width {
-		case 4:
-			return x86.AMOVSS
-		case 8:
-			return x86.AMOVSD
-		}
-	} else {
-		switch width {
-		case 1:
-			return x86.AMOVB
-		case 2:
-			return x86.AMOVW
-		case 4:
-			return x86.AMOVL
-		case 8:
-			return x86.AMOVQ
-		}
-	}
-	panic("bad store type")
-}
-
-// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		// Moving the whole sse2 register is faster
-		// than moving just the correct low portion of it.
-		// There is no xmm->xmm move with 1 byte opcode,
-		// so use movups, which has 2 byte opcode.
-		return x86.AMOVUPS
-	} else {
-		switch t.Size() {
-		case 1:
-			// Avoids partial register write
-			return x86.AMOVL
-		case 2:
-			return x86.AMOVL
-		case 4:
-			return x86.AMOVL
-		case 8:
-			return x86.AMOVQ
-		case 16:
-			return x86.AMOVUPS // int128s are in SSE registers
-		default:
-			panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
-		}
-	}
-}
-
-// opregreg emits instructions for
-//     dest := dest(To) op src(From)
-// and also returns the created obj.Prog so it
-// may be further adjusted (offset, scale, etc).
-func opregreg(op obj.As, dest, src int16) *obj.Prog {
-	p := gc.Prog(op)
-	p.From.Type = obj.TYPE_REG
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = dest
-	p.From.Reg = src
-	return p
-}
-
-// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
-// See runtime/mkduff.go.
-func duffStart(size int64) int64 {
-	x, _ := duff(size)
-	return x
-}
-func duffAdj(size int64) int64 {
-	_, x := duff(size)
-	return x
-}
-
-// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
-// required to use the duffzero mechanism for a block of the given size.
-func duff(size int64) (int64, int64) {
-	if size < 32 || size > 1024 || size%dzClearStep != 0 {
-		panic("bad duffzero size")
-	}
-	steps := size / dzClearStep
-	blocks := steps / dzBlockLen
-	steps %= dzBlockLen
-	off := dzBlockSize * (dzBlocks - blocks)
-	var adj int64
-	if steps != 0 {
-		off -= dzAddSize
-		off -= dzMovSize * steps
-		adj -= dzClearStep * (dzBlockLen - steps)
-	}
-	return off, adj
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		switch {
-		case r == r1:
-			p := gc.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = r2
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		case r == r2:
-			p := gc.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = r1
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		default:
-			var asm obj.As
-			if v.Op == ssa.OpAMD64ADDQ {
-				asm = x86.ALEAQ
-			} else {
-				asm = x86.ALEAL
-			}
-			p := gc.Prog(asm)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = r1
-			p.From.Scale = 1
-			p.From.Index = r2
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		}
-	// 2-address opcode arithmetic
-	case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL,
-		ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
-		ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
-		ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
-		ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
-		ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
-		ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
-		ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB,
-		ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
-		ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
-		ssa.OpAMD64PXOR:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		opregreg(v.Op.Asm(), r, v.Args[1].Reg())
-
-	case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
-		// Arg[0] (the dividend) is in AX.
-		// Arg[1] (the divisor) can be in any other register.
-		// Result[0] (the quotient) is in AX.
-		// Result[1] (the remainder) is in DX.
-		r := v.Args[1].Reg()
-
-		// Zero extend dividend.
-		c := gc.Prog(x86.AXORL)
-		c.From.Type = obj.TYPE_REG
-		c.From.Reg = x86.REG_DX
-		c.To.Type = obj.TYPE_REG
-		c.To.Reg = x86.REG_DX
-
-		// Issue divide.
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-
-	case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW:
-		// Arg[0] (the dividend) is in AX.
-		// Arg[1] (the divisor) can be in any other register.
-		// Result[0] (the quotient) is in AX.
-		// Result[1] (the remainder) is in DX.
-		r := v.Args[1].Reg()
-
-		// CPU faults upon signed overflow, which occurs when the most
-		// negative int is divided by -1. Handle divide by -1 as a special case.
-		var c *obj.Prog
-		switch v.Op {
-		case ssa.OpAMD64DIVQ:
-			c = gc.Prog(x86.ACMPQ)
-		case ssa.OpAMD64DIVL:
-			c = gc.Prog(x86.ACMPL)
-		case ssa.OpAMD64DIVW:
-			c = gc.Prog(x86.ACMPW)
-		}
-		c.From.Type = obj.TYPE_REG
-		c.From.Reg = r
-		c.To.Type = obj.TYPE_CONST
-		c.To.Offset = -1
-		j1 := gc.Prog(x86.AJEQ)
-		j1.To.Type = obj.TYPE_BRANCH
-
-		// Sign extend dividend.
-		switch v.Op {
-		case ssa.OpAMD64DIVQ:
-			gc.Prog(x86.ACQO)
-		case ssa.OpAMD64DIVL:
-			gc.Prog(x86.ACDQ)
-		case ssa.OpAMD64DIVW:
-			gc.Prog(x86.ACWD)
-		}
-
-		// Issue divide.
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-
-		// Skip over -1 fixup code.
-		j2 := gc.Prog(obj.AJMP)
-		j2.To.Type = obj.TYPE_BRANCH
-
-		// Issue -1 fixup code.
-		// n / -1 = -n
-		n1 := gc.Prog(x86.ANEGQ)
-		n1.To.Type = obj.TYPE_REG
-		n1.To.Reg = x86.REG_AX
-
-		// n % -1 == 0
-		n2 := gc.Prog(x86.AXORL)
-		n2.From.Type = obj.TYPE_REG
-		n2.From.Reg = x86.REG_DX
-		n2.To.Type = obj.TYPE_REG
-		n2.To.Reg = x86.REG_DX
-
-		// TODO(khr): issue only the -1 fixup code we need.
-		// For instance, if only the quotient is used, no point in zeroing the remainder.
-
-		j1.To.Val = n1
-		j2.To.Val = s.Pc()
-
-	case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULW, ssa.OpAMD64HMULB,
-		ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU, ssa.OpAMD64HMULWU, ssa.OpAMD64HMULBU:
-		// the frontend rewrites constant division by 8/16/32 bit integers into
-		// HMUL by a constant
-		// SSA rewrites generate the 64 bit versions
-
-		// Arg[0] is already in AX as it's the only register we allow
-		// and DX is the only output we care about (the high bits)
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-
-		// IMULB puts the high portion in AH instead of DL,
-		// so move it to DL for consistency
-		if v.Type.Size() == 1 {
-			m := gc.Prog(x86.AMOVB)
-			m.From.Type = obj.TYPE_REG
-			m.From.Reg = x86.REG_AH
-			m.To.Type = obj.TYPE_REG
-			m.To.Reg = x86.REG_DX
-		}
-
-	case ssa.OpAMD64MULQU2:
-		// Arg[0] is already in AX as it's the only register we allow
-		// results hi in DX, lo in AX
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-
-	case ssa.OpAMD64DIVQU2:
-		// Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow
-		// results q in AX, r in DX
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-
-	case ssa.OpAMD64AVGQU:
-		// compute (x+y)/2 unsigned.
-		// Do a 64-bit add, the overflow goes into the carry.
-		// Shift right once and pull the carry back into the 63rd bit.
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(x86.AADDQ)
-		p.From.Type = obj.TYPE_REG
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		p.From.Reg = v.Args[1].Reg()
-		p = gc.Prog(x86.ARCRQ)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
-		r := v.Reg()
-		a := v.Args[0].Reg()
-		if r == a {
-			if v.AuxInt == 1 {
-				var asm obj.As
-				// Software optimization manual recommends add $1,reg.
-				// But inc/dec is 1 byte smaller. ICC always uses inc
-				// Clang/GCC choose depending on flags, but prefer add.
-				// Experiments show that inc/dec is both a little faster
-				// and make a binary a little smaller.
-				if v.Op == ssa.OpAMD64ADDQconst {
-					asm = x86.AINCQ
-				} else {
-					asm = x86.AINCL
-				}
-				p := gc.Prog(asm)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = r
-				return
-			}
-			if v.AuxInt == -1 {
-				var asm obj.As
-				if v.Op == ssa.OpAMD64ADDQconst {
-					asm = x86.ADECQ
-				} else {
-					asm = x86.ADECL
-				}
-				p := gc.Prog(asm)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = r
-				return
-			}
-			p := gc.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = v.AuxInt
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-			return
-		}
-		var asm obj.As
-		if v.Op == ssa.OpAMD64ADDQconst {
-			asm = x86.ALEAQ
-		} else {
-			asm = x86.ALEAL
-		}
-		p := gc.Prog(asm)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = a
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		// TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2
-		// then we don't need to use resultInArg0 for these ops.
-		//p.From3 = new(obj.Addr)
-		//p.From3.Type = obj.TYPE_REG
-		//p.From3.Reg = v.Args[0].Reg()
-
-	case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
-		ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst,
-		ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
-		ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
-		ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
-		ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
-		ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
-		ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		p := gc.Prog(x86.ALEAQ)
-		switch v.Op {
-		case ssa.OpAMD64LEAQ1:
-			p.From.Scale = 1
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-		case ssa.OpAMD64LEAQ2:
-			p.From.Scale = 2
-		case ssa.OpAMD64LEAQ4:
-			p.From.Scale = 4
-		case ssa.OpAMD64LEAQ8:
-			p.From.Scale = 8
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r
-		p.From.Index = i
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
-		ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB:
-		opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
-	case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
-		// Go assembler has swapped operands for UCOMISx relative to CMP,
-		// must account for that right here.
-		opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
-	case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = v.AuxInt
-	case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
-		x := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x
-		// If flags are live at this instruction, suppress the
-		// MOV $0,AX -> XOR AX,AX optimization.
-		if v.Aux != nil {
-			p.Mark |= x86.PRESERVEFLAGS
-		}
-	case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
-		x := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x
-	case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.From.Scale = 8
-		p.From.Index = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.From.Scale = 4
-		p.From.Index = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64MOVWloadidx2:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.From.Scale = 2
-		p.From.Index = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		if i == x86.REG_SP {
-			r, i = i, r
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r
-		p.From.Scale = 1
-		p.From.Index = i
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Scale = 8
-		p.To.Index = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Scale = 4
-		p.To.Index = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64MOVWstoreidx2:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Scale = 2
-		p.To.Index = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		if i == x86.REG_SP {
-			r, i = i, r
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = r
-		p.To.Scale = 1
-		p.To.Index = i
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		sc := v.AuxValAndOff()
-		p.From.Offset = sc.Val()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		sc := v.AuxValAndOff()
-		p.From.Offset = sc.Val()
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		switch v.Op {
-		case ssa.OpAMD64MOVBstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx1:
-			p.To.Scale = 1
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-		case ssa.OpAMD64MOVWstoreconstidx2:
-			p.To.Scale = 2
-		case ssa.OpAMD64MOVLstoreconstidx4:
-			p.To.Scale = 4
-		case ssa.OpAMD64MOVQstoreconstidx8:
-			p.To.Scale = 8
-		}
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = r
-		p.To.Index = i
-		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
-		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
-		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
-		opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
-	case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
-		r := v.Reg()
-		// Break false dependency on destination register.
-		opregreg(x86.AXORPS, r, r)
-		opregreg(v.Op.Asm(), r, v.Args[0].Reg())
-	case ssa.OpAMD64DUFFZERO:
-		off := duffStart(v.AuxInt)
-		adj := duffAdj(v.AuxInt)
-		var p *obj.Prog
-		if adj != 0 {
-			p = gc.Prog(x86.AADDQ)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = adj
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = x86.REG_DI
-		}
-		p = gc.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-		p.To.Offset = off
-	case ssa.OpAMD64MOVOconst:
-		if v.AuxInt != 0 {
-			v.Fatalf("MOVOconst can only do constant=0")
-		}
-		r := v.Reg()
-		opregreg(x86.AXORPS, r, r)
-	case ssa.OpAMD64DUFFCOPY:
-		p := gc.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-
-	case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
-		if v.Type.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x != y {
-			opregreg(moveByType(v.Type), y, x)
-		}
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(storeByType(v.Type))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpAMD64LoweredGetClosurePtr:
-		// Closure pointer is DX.
-		gc.CheckLoweredGetClosurePtr(v)
-	case ssa.OpAMD64LoweredGetG:
-		r := v.Reg()
-		// See the comments in cmd/internal/obj/x86/obj6.go
-		// near CanUse1InsnTLS for a detailed explanation of these instructions.
-		if x86.CanUse1InsnTLS(gc.Ctxt) {
-			// MOVQ (TLS), r
-			p := gc.Prog(x86.AMOVQ)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		} else {
-			// MOVQ TLS, r
-			// MOVQ (r)(TLS*1), r
-			p := gc.Prog(x86.AMOVQ)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-			q := gc.Prog(x86.AMOVQ)
-			q.From.Type = obj.TYPE_MEM
-			q.From.Reg = r
-			q.From.Index = x86.REG_TLS
-			q.From.Scale = 1
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = r
-		}
-	case ssa.OpAMD64CALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64CALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64CALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
-		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
-		ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpAMD64BSFQ, ssa.OpAMD64BSFL:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-	case ssa.OpAMD64SQRTSD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpSP, ssa.OpSB:
-		// nothing to do
-	case ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
-		ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
-		ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
-		ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
-		ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
-		ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
-		ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpAMD64SETNEF:
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		q := gc.Prog(x86.ASETPS)
-		q.To.Type = obj.TYPE_REG
-		q.To.Reg = x86.REG_AX
-		// ORL avoids partial register write and is smaller than ORQ, used by old compiler
-		opregreg(x86.AORL, v.Reg(), x86.REG_AX)
-
-	case ssa.OpAMD64SETEQF:
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		q := gc.Prog(x86.ASETPC)
-		q.To.Type = obj.TYPE_REG
-		q.To.Reg = x86.REG_AX
-		// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
-		opregreg(x86.AANDL, v.Reg(), x86.REG_AX)
-
-	case ssa.OpAMD64InvertFlags:
-		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
-		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
-	case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
-		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
-	case ssa.OpAMD64REPSTOSQ:
-		gc.Prog(x86.AREP)
-		gc.Prog(x86.ASTOSQ)
-	case ssa.OpAMD64REPMOVSQ:
-		gc.Prog(x86.AREP)
-		gc.Prog(x86.AMOVSQ)
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpAMD64LoweredNilCheck:
-		// Issue a load which will fault if the input is nil.
-		// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
-		// Should we use the 3-byte TESTB $0, (reg) instead?  It is larger
-		// but it doesn't have false dependency on AX.
-		// Or maybe allocate an output register and use MOVL (reg),reg2 ?
-		// That trades clobbering flags for clobbering a register.
-		p := gc.Prog(x86.ATESTB)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_AX
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-	case ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
-		r := v.Reg0()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
-		r := v.Reg0()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
-		}
-		gc.Prog(x86.ALOCK)
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
-		if v.Args[1].Reg() != x86.REG_AX {
-			v.Fatalf("input[1] not in AX %s", v.LongString())
-		}
-		gc.Prog(x86.ALOCK)
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-		p = gc.Prog(x86.ASETEQ)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-	case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
-		gc.Prog(x86.ALOCK)
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
-
-var blockJump = [...]struct {
-	asm, invasm obj.As
-}{
-	ssa.BlockAMD64EQ:  {x86.AJEQ, x86.AJNE},
-	ssa.BlockAMD64NE:  {x86.AJNE, x86.AJEQ},
-	ssa.BlockAMD64LT:  {x86.AJLT, x86.AJGE},
-	ssa.BlockAMD64GE:  {x86.AJGE, x86.AJLT},
-	ssa.BlockAMD64LE:  {x86.AJLE, x86.AJGT},
-	ssa.BlockAMD64GT:  {x86.AJGT, x86.AJLE},
-	ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
-	ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
-	ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
-	ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
-	ssa.BlockAMD64ORD: {x86.AJPC, x86.AJPS},
-	ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
-}
-
-var eqfJumps = [2][2]gc.FloatingEQNEJump{
-	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
-	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
-}
-var nefJumps = [2][2]gc.FloatingEQNEJump{
-	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
-	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockDefer:
-		// defer returns in rax:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(x86.ATESTL)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_AX
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_AX
-		p = gc.Prog(x86.AJNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.AJMP)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-
-	case ssa.BlockAMD64EQF:
-		gc.SSAGenFPJump(s, b, next, &eqfJumps)
-
-	case ssa.BlockAMD64NEF:
-		gc.SSAGenFPJump(s, b, next, &nefJumps)
-
-	case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
-		ssa.BlockAMD64LT, ssa.BlockAMD64GE,
-		ssa.BlockAMD64LE, ssa.BlockAMD64GT,
-		ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
-		ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
-		jmp := blockJump[b.Kind]
-		likely := b.Likely
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			likely *= -1
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-
-		// liblink reorders the instruction stream as it sees fit.
-		// Pass along what we know so liblink can make use of it.
-		// TODO: Once we've fully switched to SSA,
-		// make liblink leave our output alone.
-		switch likely {
-		case ssa.BranchUnlikely:
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 0
-		case ssa.BranchLikely:
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 1
-		}
-
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/galign.go
deleted file mode 100644
index d42de1b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/galign.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/galign.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj/arm"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &arm.Linkarm
-	gc.Thearch.REGSP = arm.REGSP
-	gc.Thearch.MAXWIDTH = (1 << 32) - 1
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/ggen.go
deleted file mode 100644
index d5e2639..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/ggen.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/ggen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm"
-)
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to contain ambiguously live variables
-	// so that garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-	r0 := uint32(0)
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
-			// merge with range we already have
-			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi, &r0)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi, &r0)
-}
-
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-	if *r0 == 0 {
-		p = gc.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
-		*r0 = 1
-	}
-
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = gc.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+frame+lo+i)
-		}
-	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
-		p = gc.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+frame+lo, obj.TYPE_REG, arm.REG_R1, 0)
-		p.Reg = arm.REGSP
-		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		gc.Naddr(&p.To, gc.Sysfunc("duffzero"))
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
-	} else {
-		p = gc.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+frame+lo, obj.TYPE_REG, arm.REG_R1, 0)
-		p.Reg = arm.REGSP
-		p = gc.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
-		p.Reg = arm.REG_R1
-		p = gc.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
-		p1 := p
-		p.Scond |= arm.C_PBIT
-		p = gc.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
-		p.Reg = arm.REG_R2
-		p = gc.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(arm.AAND)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = arm.REG_R0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = arm.REG_R0
-	p.Scond = arm.C_SCOND_EQ
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/prog.go
deleted file mode 100644
index 1823a98..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/prog.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/prog.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm"
-)
-
-const (
-	RightRdwr = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about instruction
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-var progtable = [arm.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the Intel opcode.
-	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
-
-	// Integer.
-	arm.AADC & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AADD & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AAND & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ABIC & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ACMN & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	arm.ACMP & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	arm.ADIVU & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ADIV & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AEOR & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMODU & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMOD & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMULALU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
-	arm.AMULAL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
-	arm.AMULA & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
-	arm.AMULU & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMUL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMULL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMULLU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.AMVN & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-	arm.AORR & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ARSB & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ARSC & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ASBC & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ASLL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ASRA & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ASRL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ASUB & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm.ACLZ & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-	arm.ATEQ & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	arm.ATST & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-
-	// Floating point.
-	arm.AADDD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	arm.AADDF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	arm.ACMPD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	arm.ACMPF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
-	arm.ADIVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	arm.ADIVF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	arm.AMULD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	arm.AMULF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	arm.ASUBD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	arm.ASUBF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	arm.ANEGD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	arm.ANEGF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	arm.ASQRTD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-
-	// Conversions.
-	arm.AMOVWD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVWF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVDF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVDW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVFD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVFW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// Moves.
-	arm.AMOVB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm.AMOVD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm.AMOVF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm.AMOVH & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm.AMOVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// In addition, duffzero reads R0,R1 and writes R1.  This fact is
-	// encoded in peep.c
-	obj.ADUFFZERO: {Flags: gc.Call},
-
-	// In addition, duffcopy reads R1,R2 and writes R0,R1,R2.  This fact is
-	// encoded in peep.c
-	obj.ADUFFCOPY: {Flags: gc.Call},
-
-	// These should be split into the two different conversions instead
-	// of overloading the one.
-	arm.AMOVBS & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVBU & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVHS & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm.AMOVHU & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// Jumps.
-	arm.AB & obj.AMask:   {Flags: gc.Jump | gc.Break},
-	arm.ABL & obj.AMask:  {Flags: gc.Call},
-	arm.ABEQ & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABNE & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABCS & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABHS & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABCC & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABLO & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABMI & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABPL & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABVS & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABVC & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABHI & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABLS & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABGE & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABLT & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABGT & obj.AMask: {Flags: gc.Cjmp},
-	arm.ABLE & obj.AMask: {Flags: gc.Cjmp},
-	obj.ARET:             {Flags: gc.Break},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("unknown instruction %v", p)
-	}
-
-	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
-		info.Flags &^= gc.LeftRead
-		info.Flags |= gc.LeftAddr
-	}
-
-	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
-		info.Flags &^= gc.RegRead
-		info.Flags |= gc.CanRegRead | gc.RightRead
-	}
-
-	if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
-		info.Flags |= gc.RightRead
-	}
-
-	return info
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/ssa.go
deleted file mode 100644
index 5e1aa85..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm/ssa.go
+++ /dev/null
@@ -1,934 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-import (
-	"fmt"
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm"
-)
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return arm.AMOVF
-		case 8:
-			return arm.AMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return arm.AMOVB
-			} else {
-				return arm.AMOVBU
-			}
-		case 2:
-			if t.IsSigned() {
-				return arm.AMOVH
-			} else {
-				return arm.AMOVHU
-			}
-		case 4:
-			return arm.AMOVW
-		}
-	}
-	panic("bad load type")
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return arm.AMOVF
-		case 8:
-			return arm.AMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			return arm.AMOVB
-		case 2:
-			return arm.AMOVH
-		case 4:
-			return arm.AMOVW
-		}
-	}
-	panic("bad store type")
-}
-
-// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
-type shift int64
-
-// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
-func (v shift) String() string {
-	op := "<<>>->@>"[((v>>5)&3)<<1:]
-	if v&(1<<4) != 0 {
-		// register shift
-		return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
-	} else {
-		// constant shift
-		return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
-	}
-}
-
-// makeshift encodes a register shifted by a constant
-func makeshift(reg int16, typ int64, s int64) shift {
-	return shift(int64(reg&0xf) | typ | (s&31)<<7)
-}
-
-// genshift generates a Prog for r = r0 op (r1 shifted by s)
-func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
-	p := gc.Prog(as)
-	p.From.Type = obj.TYPE_SHIFT
-	p.From.Offset = int64(makeshift(r1, typ, s))
-	p.Reg = r0
-	if r != 0 {
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	}
-	return p
-}
-
-// makeregshift encodes a register shifted by a register
-func makeregshift(r1 int16, typ int64, r2 int16) shift {
-	return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
-}
-
-// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
-func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
-	p := gc.Prog(as)
-	p.From.Type = obj.TYPE_SHIFT
-	p.From.Offset = int64(makeregshift(r1, typ, r2))
-	p.Reg = r0
-	if r != 0 {
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	}
-	return p
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
-		// nothing to do
-	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
-		if v.Type.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x == y {
-			return
-		}
-		as := arm.AMOVW
-		if v.Type.IsFloat() {
-			switch v.Type.Size() {
-			case 4:
-				as = arm.AMOVF
-			case 8:
-				as = arm.AMOVD
-			default:
-				panic("bad float size")
-			}
-		}
-		p := gc.Prog(as)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = y
-	case ssa.OpARMMOVWnop:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		// nothing to do
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(storeByType(v.Type))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpARMUDIVrtcall:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = obj.Linklookup(gc.Ctxt, "udiv", 0)
-	case ssa.OpARMADD,
-		ssa.OpARMADC,
-		ssa.OpARMSUB,
-		ssa.OpARMSBC,
-		ssa.OpARMRSB,
-		ssa.OpARMAND,
-		ssa.OpARMOR,
-		ssa.OpARMXOR,
-		ssa.OpARMBIC,
-		ssa.OpARMMUL,
-		ssa.OpARMADDF,
-		ssa.OpARMADDD,
-		ssa.OpARMSUBF,
-		ssa.OpARMSUBD,
-		ssa.OpARMMULF,
-		ssa.OpARMMULD,
-		ssa.OpARMDIVF,
-		ssa.OpARMDIVD:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r2
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpARMADDS,
-		ssa.OpARMSUBS:
-		r := v.Reg0()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.Scond = arm.C_SBIT
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r2
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpARMSLL,
-		ssa.OpARMSRL,
-		ssa.OpARMSRA:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r2
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpARMSRAcond:
-		// ARM shift instructions uses only the low-order byte of the shift amount
-		// generate conditional instructions to deal with large shifts
-		// flag is already set
-		// SRA.HS	$31, Rarg0, Rdst // shift 31 bits to get the sign bit
-		// SRA.LO	Rarg1, Rarg0, Rdst
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := gc.Prog(arm.ASRA)
-		p.Scond = arm.C_SCOND_HS
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 31
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		p = gc.Prog(arm.ASRA)
-		p.Scond = arm.C_SCOND_LO
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r2
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpARMADDconst,
-		ssa.OpARMADCconst,
-		ssa.OpARMSUBconst,
-		ssa.OpARMSBCconst,
-		ssa.OpARMRSBconst,
-		ssa.OpARMRSCconst,
-		ssa.OpARMANDconst,
-		ssa.OpARMORconst,
-		ssa.OpARMXORconst,
-		ssa.OpARMBICconst,
-		ssa.OpARMSLLconst,
-		ssa.OpARMSRLconst,
-		ssa.OpARMSRAconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMADDSconst,
-		ssa.OpARMSUBSconst,
-		ssa.OpARMRSBSconst:
-		p := gc.Prog(v.Op.Asm())
-		p.Scond = arm.C_SBIT
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-	case ssa.OpARMSRRconst:
-		genshift(arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
-	case ssa.OpARMADDshiftLL,
-		ssa.OpARMADCshiftLL,
-		ssa.OpARMSUBshiftLL,
-		ssa.OpARMSBCshiftLL,
-		ssa.OpARMRSBshiftLL,
-		ssa.OpARMRSCshiftLL,
-		ssa.OpARMANDshiftLL,
-		ssa.OpARMORshiftLL,
-		ssa.OpARMXORshiftLL,
-		ssa.OpARMBICshiftLL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
-	case ssa.OpARMADDSshiftLL,
-		ssa.OpARMSUBSshiftLL,
-		ssa.OpARMRSBSshiftLL:
-		p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
-		p.Scond = arm.C_SBIT
-	case ssa.OpARMADDshiftRL,
-		ssa.OpARMADCshiftRL,
-		ssa.OpARMSUBshiftRL,
-		ssa.OpARMSBCshiftRL,
-		ssa.OpARMRSBshiftRL,
-		ssa.OpARMRSCshiftRL,
-		ssa.OpARMANDshiftRL,
-		ssa.OpARMORshiftRL,
-		ssa.OpARMXORshiftRL,
-		ssa.OpARMBICshiftRL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
-	case ssa.OpARMADDSshiftRL,
-		ssa.OpARMSUBSshiftRL,
-		ssa.OpARMRSBSshiftRL:
-		p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
-		p.Scond = arm.C_SBIT
-	case ssa.OpARMADDshiftRA,
-		ssa.OpARMADCshiftRA,
-		ssa.OpARMSUBshiftRA,
-		ssa.OpARMSBCshiftRA,
-		ssa.OpARMRSBshiftRA,
-		ssa.OpARMRSCshiftRA,
-		ssa.OpARMANDshiftRA,
-		ssa.OpARMORshiftRA,
-		ssa.OpARMXORshiftRA,
-		ssa.OpARMBICshiftRA:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
-	case ssa.OpARMADDSshiftRA,
-		ssa.OpARMSUBSshiftRA,
-		ssa.OpARMRSBSshiftRA:
-		p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
-		p.Scond = arm.C_SBIT
-	case ssa.OpARMXORshiftRR:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
-	case ssa.OpARMMVNshiftLL:
-		genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
-	case ssa.OpARMMVNshiftRL:
-		genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
-	case ssa.OpARMMVNshiftRA:
-		genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
-	case ssa.OpARMMVNshiftLLreg:
-		genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
-	case ssa.OpARMMVNshiftRLreg:
-		genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
-	case ssa.OpARMMVNshiftRAreg:
-		genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
-	case ssa.OpARMADDshiftLLreg,
-		ssa.OpARMADCshiftLLreg,
-		ssa.OpARMSUBshiftLLreg,
-		ssa.OpARMSBCshiftLLreg,
-		ssa.OpARMRSBshiftLLreg,
-		ssa.OpARMRSCshiftLLreg,
-		ssa.OpARMANDshiftLLreg,
-		ssa.OpARMORshiftLLreg,
-		ssa.OpARMXORshiftLLreg,
-		ssa.OpARMBICshiftLLreg:
-		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
-	case ssa.OpARMADDSshiftLLreg,
-		ssa.OpARMSUBSshiftLLreg,
-		ssa.OpARMRSBSshiftLLreg:
-		p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
-		p.Scond = arm.C_SBIT
-	case ssa.OpARMADDshiftRLreg,
-		ssa.OpARMADCshiftRLreg,
-		ssa.OpARMSUBshiftRLreg,
-		ssa.OpARMSBCshiftRLreg,
-		ssa.OpARMRSBshiftRLreg,
-		ssa.OpARMRSCshiftRLreg,
-		ssa.OpARMANDshiftRLreg,
-		ssa.OpARMORshiftRLreg,
-		ssa.OpARMXORshiftRLreg,
-		ssa.OpARMBICshiftRLreg:
-		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
-	case ssa.OpARMADDSshiftRLreg,
-		ssa.OpARMSUBSshiftRLreg,
-		ssa.OpARMRSBSshiftRLreg:
-		p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
-		p.Scond = arm.C_SBIT
-	case ssa.OpARMADDshiftRAreg,
-		ssa.OpARMADCshiftRAreg,
-		ssa.OpARMSUBshiftRAreg,
-		ssa.OpARMSBCshiftRAreg,
-		ssa.OpARMRSBshiftRAreg,
-		ssa.OpARMRSCshiftRAreg,
-		ssa.OpARMANDshiftRAreg,
-		ssa.OpARMORshiftRAreg,
-		ssa.OpARMXORshiftRAreg,
-		ssa.OpARMBICshiftRAreg:
-		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
-	case ssa.OpARMADDSshiftRAreg,
-		ssa.OpARMSUBSshiftRAreg,
-		ssa.OpARMRSBSshiftRAreg:
-		p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
-		p.Scond = arm.C_SBIT
-	case ssa.OpARMHMUL,
-		ssa.OpARMHMULU:
-		// 32-bit high multiplication
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REGREG
-		p.To.Reg = v.Reg()
-		p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
-	case ssa.OpARMMULLU:
-		// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REGREG
-		p.To.Reg = v.Reg0()           // high 32-bit
-		p.To.Offset = int64(v.Reg1()) // low 32-bit
-	case ssa.OpARMMULA:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REGREG2
-		p.To.Reg = v.Reg()                   // result
-		p.To.Offset = int64(v.Args[2].Reg()) // addend
-	case ssa.OpARMMOVWconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMMOVFconst,
-		ssa.OpARMMOVDconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMCMP,
-		ssa.OpARMCMN,
-		ssa.OpARMTST,
-		ssa.OpARMTEQ,
-		ssa.OpARMCMPF,
-		ssa.OpARMCMPD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		// Special layout in ARM assembly
-		// Comparing to x86, the operands of ARM's CMP are reversed.
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[0].Reg()
-	case ssa.OpARMCMPconst,
-		ssa.OpARMCMNconst,
-		ssa.OpARMTSTconst,
-		ssa.OpARMTEQconst:
-		// Special layout in ARM assembly
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-	case ssa.OpARMCMPF0,
-		ssa.OpARMCMPD0:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-	case ssa.OpARMCMPshiftLL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
-	case ssa.OpARMCMPshiftRL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
-	case ssa.OpARMCMPshiftRA:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
-	case ssa.OpARMCMPshiftLLreg:
-		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
-	case ssa.OpARMCMPshiftRLreg:
-		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
-	case ssa.OpARMCMPshiftRAreg:
-		genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
-	case ssa.OpARMMOVWaddr:
-		p := gc.Prog(arm.AMOVW)
-		p.From.Type = obj.TYPE_ADDR
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-		var wantreg string
-		// MOVW $sym+off(base), R
-		// the assembler expands it as the following:
-		// - base is SP: add constant offset to SP (R13)
-		//               when constant is large, tmp register (R11) may be used
-		// - base is SB: load external address from constant pool (use relocation)
-		switch v.Aux.(type) {
-		default:
-			v.Fatalf("aux is of unknown type %T", v.Aux)
-		case *ssa.ExternSymbol:
-			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *ssa.ArgSymbol, *ssa.AutoSymbol:
-			wantreg = "SP"
-			gc.AddAux(&p.From, v)
-		case nil:
-			// No sym, just MOVW $off(SP), R
-			wantreg = "SP"
-			p.From.Reg = arm.REGSP
-			p.From.Offset = v.AuxInt
-		}
-		if reg := v.Args[0].RegName(); reg != wantreg {
-			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
-		}
-
-	case ssa.OpARMMOVBload,
-		ssa.OpARMMOVBUload,
-		ssa.OpARMMOVHload,
-		ssa.OpARMMOVHUload,
-		ssa.OpARMMOVWload,
-		ssa.OpARMMOVFload,
-		ssa.OpARMMOVDload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMMOVBstore,
-		ssa.OpARMMOVHstore,
-		ssa.OpARMMOVWstore,
-		ssa.OpARMMOVFstore,
-		ssa.OpARMMOVDstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpARMMOVWloadidx:
-		// this is just shift 0 bits
-		fallthrough
-	case ssa.OpARMMOVWloadshiftLL:
-		p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
-		p.From.Reg = v.Args[0].Reg()
-	case ssa.OpARMMOVWloadshiftRL:
-		p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
-		p.From.Reg = v.Args[0].Reg()
-	case ssa.OpARMMOVWloadshiftRA:
-		p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
-		p.From.Reg = v.Args[0].Reg()
-	case ssa.OpARMMOVWstoreidx:
-		// this is just shift 0 bits
-		fallthrough
-	case ssa.OpARMMOVWstoreshiftLL:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_SHIFT
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
-	case ssa.OpARMMOVWstoreshiftRL:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_SHIFT
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
-	case ssa.OpARMMOVWstoreshiftRA:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_SHIFT
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
-	case ssa.OpARMMOVBreg,
-		ssa.OpARMMOVBUreg,
-		ssa.OpARMMOVHreg,
-		ssa.OpARMMOVHUreg:
-		a := v.Args[0]
-		for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
-			a = a.Args[0]
-		}
-		if a.Op == ssa.OpLoadReg {
-			t := a.Type
-			switch {
-			case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
-				v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
-				v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
-				v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
-				// arg is a proper-typed load, already zero/sign-extended, don't extend again
-				if v.Reg() == v.Args[0].Reg() {
-					return
-				}
-				p := gc.Prog(arm.AMOVW)
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = v.Args[0].Reg()
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = v.Reg()
-				return
-			default:
-			}
-		}
-		fallthrough
-	case ssa.OpARMMVN,
-		ssa.OpARMCLZ,
-		ssa.OpARMSQRTD,
-		ssa.OpARMNEGF,
-		ssa.OpARMNEGD,
-		ssa.OpARMMOVWF,
-		ssa.OpARMMOVWD,
-		ssa.OpARMMOVFW,
-		ssa.OpARMMOVDW,
-		ssa.OpARMMOVFD,
-		ssa.OpARMMOVDF:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMMOVWUF,
-		ssa.OpARMMOVWUD,
-		ssa.OpARMMOVFWU,
-		ssa.OpARMMOVDWU:
-		p := gc.Prog(v.Op.Asm())
-		p.Scond = arm.C_UBIT
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMCMOVWHSconst:
-		p := gc.Prog(arm.AMOVW)
-		p.Scond = arm.C_SCOND_HS
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMCMOVWLSconst:
-		p := gc.Prog(arm.AMOVW)
-		p.Scond = arm.C_SCOND_LS
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARMCALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMCALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMCALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMCALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMCALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMDUFFZERO:
-		p := gc.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-	case ssa.OpARMDUFFCOPY:
-		p := gc.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-	case ssa.OpARMLoweredNilCheck:
-		// Issue a load which will fault if arg is nil.
-		p := gc.Prog(arm.AMOVB)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm.REGTMP
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.OpARMLoweredZero:
-		// MOVW.P	Rarg2, 4(R1)
-		// CMP	Rarg1, R1
-		// BLE	-2(PC)
-		// arg1 is the address of the last element to zero
-		// arg2 is known to be zero
-		// auxint is alignment
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = arm.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = arm.AMOVH
-		default:
-			sz = 1
-			mov = arm.AMOVB
-		}
-		p := gc.Prog(mov)
-		p.Scond = arm.C_PBIT
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = arm.REG_R1
-		p.To.Offset = sz
-		p2 := gc.Prog(arm.ACMP)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = v.Args[1].Reg()
-		p2.Reg = arm.REG_R1
-		p3 := gc.Prog(arm.ABLE)
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-	case ssa.OpARMLoweredMove:
-		// MOVW.P	4(R1), Rtmp
-		// MOVW.P	Rtmp, 4(R2)
-		// CMP	Rarg2, R1
-		// BLE	-3(PC)
-		// arg2 is the address of the last element of src
-		// auxint is alignment
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = arm.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = arm.AMOVH
-		default:
-			sz = 1
-			mov = arm.AMOVB
-		}
-		p := gc.Prog(mov)
-		p.Scond = arm.C_PBIT
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = arm.REG_R1
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm.REGTMP
-		p2 := gc.Prog(mov)
-		p2.Scond = arm.C_PBIT
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = arm.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = arm.REG_R2
-		p2.To.Offset = sz
-		p3 := gc.Prog(arm.ACMP)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = v.Args[2].Reg()
-		p3.Reg = arm.REG_R1
-		p4 := gc.Prog(arm.ABLE)
-		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpARMEqual,
-		ssa.OpARMNotEqual,
-		ssa.OpARMLessThan,
-		ssa.OpARMLessEqual,
-		ssa.OpARMGreaterThan,
-		ssa.OpARMGreaterEqual,
-		ssa.OpARMLessThanU,
-		ssa.OpARMLessEqualU,
-		ssa.OpARMGreaterThanU,
-		ssa.OpARMGreaterEqualU:
-		// generate boolean values
-		// use conditional move
-		p := gc.Prog(arm.AMOVW)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		p = gc.Prog(arm.AMOVW)
-		p.Scond = condBits[v.Op]
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.OpARMLoweredGetClosurePtr:
-		// Closure pointer is R7 (arm.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
-	case ssa.OpARMFlagEQ,
-		ssa.OpARMFlagLT_ULT,
-		ssa.OpARMFlagLT_UGT,
-		ssa.OpARMFlagGT_ULT,
-		ssa.OpARMFlagGT_UGT:
-		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
-	case ssa.OpARMInvertFlags:
-		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
-
-var condBits = map[ssa.Op]uint8{
-	ssa.OpARMEqual:         arm.C_SCOND_EQ,
-	ssa.OpARMNotEqual:      arm.C_SCOND_NE,
-	ssa.OpARMLessThan:      arm.C_SCOND_LT,
-	ssa.OpARMLessThanU:     arm.C_SCOND_LO,
-	ssa.OpARMLessEqual:     arm.C_SCOND_LE,
-	ssa.OpARMLessEqualU:    arm.C_SCOND_LS,
-	ssa.OpARMGreaterThan:   arm.C_SCOND_GT,
-	ssa.OpARMGreaterThanU:  arm.C_SCOND_HI,
-	ssa.OpARMGreaterEqual:  arm.C_SCOND_GE,
-	ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
-}
-
-var blockJump = map[ssa.BlockKind]struct {
-	asm, invasm obj.As
-}{
-	ssa.BlockARMEQ:  {arm.ABEQ, arm.ABNE},
-	ssa.BlockARMNE:  {arm.ABNE, arm.ABEQ},
-	ssa.BlockARMLT:  {arm.ABLT, arm.ABGE},
-	ssa.BlockARMGE:  {arm.ABGE, arm.ABLT},
-	ssa.BlockARMLE:  {arm.ABLE, arm.ABGT},
-	ssa.BlockARMGT:  {arm.ABGT, arm.ABLE},
-	ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
-	ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
-	ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
-	ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-
-	case ssa.BlockDefer:
-		// defer returns in R0:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(arm.ACMP)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 0
-		p.Reg = arm.REG_R0
-		p = gc.Prog(arm.ABNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.ARET)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-
-	case ssa.BlockARMEQ, ssa.BlockARMNE,
-		ssa.BlockARMLT, ssa.BlockARMGE,
-		ssa.BlockARMLE, ssa.BlockARMGT,
-		ssa.BlockARMULT, ssa.BlockARMUGT,
-		ssa.BlockARMULE, ssa.BlockARMUGE:
-		jmp := blockJump[b.Kind]
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/galign.go
deleted file mode 100644
index 546fefc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/galign.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/galign.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj/arm64"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &arm64.Linkarm64
-	gc.Thearch.REGSP = arm64.REGSP
-	gc.Thearch.MAXWIDTH = 1 << 50
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/ggen.go
deleted file mode 100644
index 62dc486..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/ggen.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/ggen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm64"
-)
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-
-	// arm64 requires that the frame size (not counting saved LR)
-	// be empty or be 8 mod 16. If not, pad it.
-	if frame != 0 && frame%16 != 8 {
-		frame += 8
-	}
-
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-
-	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi)
-}
-
-var darwin = obj.GOOS == "darwin"
-
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+frame+lo+i)
-		}
-	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
-		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
-		p = gc.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGRT1, 0)
-		p.Reg = arm64.REGRT1
-		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		gc.Naddr(&p.To, gc.Sysfunc("duffzero"))
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
-	} else {
-		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGTMP, 0)
-		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
-		p = gc.Appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
-		p.Reg = arm64.REGRT1
-		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm64.REGTMP, 0)
-		p = gc.Appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT2, 0)
-		p.Reg = arm64.REGRT1
-		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
-		p.Scond = arm64.C_XPRE
-		p1 := p
-		p = gc.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
-		p.Reg = arm64.REGRT2
-		p = gc.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(arm64.AHINT)
-	p.From.Type = obj.TYPE_CONST
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/prog.go
deleted file mode 100644
index af0ae01..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/prog.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/prog.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm64"
-)
-
-const (
-	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
-	RightRdwr uint32 = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about instruction
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-//
-// The table is formatted for 8-space tabs.
-var progtable = [arm64.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the Power opcode.
-	obj.ANOP:                {Flags: gc.LeftRead | gc.RightWrite},
-	arm64.AHINT & obj.AMask: {Flags: gc.OK},
-
-	// Integer
-	arm64.AADD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ASUB & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ANEG & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, // why RegRead? revisit once the old backend gone
-	arm64.AAND & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AORR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AEOR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ABIC & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AMVN & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
-	arm64.AMUL & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AMULW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ASMULL & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AUMULL & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ASMULH & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AUMULH & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ASDIV & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AUDIV & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ASDIVW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AUDIVW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AREM & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AUREM & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AREMW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AUREMW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ALSL & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ALSR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AASR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ACMP & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	arm64.ACMPW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	arm64.AADC & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite | gc.UseCarry},
-	arm64.AROR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.ARORW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AADDS & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite | gc.SetCarry},
-	arm64.ACSET & obj.AMask:   {Flags: gc.SizeQ | gc.RightWrite},
-	arm64.ACSEL & obj.AMask:   {Flags: gc.SizeQ | gc.RegRead | gc.RightWrite},
-	arm64.AREV & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
-	arm64.AREVW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-	arm64.AREV16W & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-	arm64.ARBIT & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
-	arm64.ARBITW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-	arm64.ACLZ & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
-	arm64.ACLZW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-
-	// Floating point.
-	arm64.AFADDD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFADDS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFSUBD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFSUBS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFNEGD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	arm64.AFNEGS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-	arm64.AFSQRTD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	arm64.AFMULD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFMULS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFDIVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFDIVS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	arm64.AFCMPD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	arm64.AFCMPS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-
-	// float -> integer
-	arm64.AFCVTZSD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZSS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZSDW & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZSSW & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZUD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZUS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZUDW & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTZUSW & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// float -> float
-	arm64.AFCVTSD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AFCVTDS & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// integer -> float
-	arm64.ASCVTFD & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.ASCVTFS & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.ASCVTFWD & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.ASCVTFWS & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AUCVTFD & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AUCVTFS & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AUCVTFWD & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	arm64.AUCVTFWS & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// Moves
-	arm64.AMOVB & obj.AMask:   {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AMOVBU & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AMOVH & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AMOVHU & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AMOVW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AMOVWU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AMOVD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.AFMOVS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	arm64.AFMOVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ALDARW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ALDAR & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ALDAXRB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ALDAXRW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ALDAXR & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ASTLRW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ASTLR & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ASTLXRB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ASTLXRW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-	arm64.ASTLXR & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// Jumps
-	arm64.AB & obj.AMask:     {Flags: gc.Jump | gc.Break},
-	arm64.ABL & obj.AMask:    {Flags: gc.Call},
-	arm64.ABEQ & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABNE & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABGE & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABLT & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABGT & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABLE & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABLO & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABLS & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABHI & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ABHS & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ACBZ & obj.AMask:   {Flags: gc.Cjmp},
-	arm64.ACBNZ & obj.AMask:  {Flags: gc.Cjmp},
-	arm64.ACBZW & obj.AMask:  {Flags: gc.Cjmp},
-	arm64.ACBNZW & obj.AMask: {Flags: gc.Cjmp},
-	obj.ARET:                 {Flags: gc.Break},
-	obj.ADUFFZERO:            {Flags: gc.Call},
-	obj.ADUFFCOPY:            {Flags: gc.Call},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("proginfo: unknown instruction %v", p)
-	}
-
-	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
-		info.Flags &^= gc.RegRead
-		info.Flags |= gc.RightRead /*CanRegRead |*/
-	}
-
-	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
-		info.Flags &^= gc.LeftRead
-		info.Flags |= gc.LeftAddr
-	}
-
-	return info
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/ssa.go
deleted file mode 100644
index df344ec..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/arm64/ssa.go
+++ /dev/null
@@ -1,847 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-import (
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm64"
-)
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return arm64.AFMOVS
-		case 8:
-			return arm64.AFMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return arm64.AMOVB
-			} else {
-				return arm64.AMOVBU
-			}
-		case 2:
-			if t.IsSigned() {
-				return arm64.AMOVH
-			} else {
-				return arm64.AMOVHU
-			}
-		case 4:
-			if t.IsSigned() {
-				return arm64.AMOVW
-			} else {
-				return arm64.AMOVWU
-			}
-		case 8:
-			return arm64.AMOVD
-		}
-	}
-	panic("bad load type")
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return arm64.AFMOVS
-		case 8:
-			return arm64.AFMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			return arm64.AMOVB
-		case 2:
-			return arm64.AMOVH
-		case 4:
-			return arm64.AMOVW
-		case 8:
-			return arm64.AMOVD
-		}
-	}
-	panic("bad store type")
-}
-
-// makeshift encodes a register shifted by a constant, used as an Offset in Prog
-func makeshift(reg int16, typ int64, s int64) int64 {
-	return int64(reg&31)<<16 | typ | (s&63)<<10
-}
-
-// genshift generates a Prog for r = r0 op (r1 shifted by s)
-func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
-	p := gc.Prog(as)
-	p.From.Type = obj.TYPE_SHIFT
-	p.From.Offset = makeshift(r1, typ, s)
-	p.Reg = r0
-	if r != 0 {
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	}
-	return p
-}
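
For illustration, a minimal standalone sketch of the bit packing makeshift performs, assuming hypothetical values for the SHIFT_* constants (the real ones live in cmd/internal/obj/arm64 and may use different bit positions): the register number lands in bits 16-20 and the shift amount in bits 10-15 of the Prog Offset.

package main

import "fmt"

// Hypothetical stand-ins for arm64.SHIFT_LL / SHIFT_LR / SHIFT_AR.
const (
	shiftLL int64 = 0 << 22
	shiftLR int64 = 1 << 22
	shiftAR int64 = 2 << 22
)

// pack mirrors makeshift: register number in bits 16-20, shift amount in
// bits 10-15, shift kind carried by typ.
func pack(reg int16, typ int64, s int64) int64 {
	return int64(reg&31)<<16 | typ | (s&63)<<10
}

func main() {
	off := pack(3, shiftLL, 4) // encodes "R3 << 4"
	fmt.Printf("reg=R%d amount=%d\n", (off>>16)&31, (off>>10)&63)
}
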
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
-		// nothing to do
-	case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg:
-		if v.Type.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x == y {
-			return
-		}
-		as := arm64.AMOVD
-		if v.Type.IsFloat() {
-			switch v.Type.Size() {
-			case 4:
-				as = arm64.AFMOVS
-			case 8:
-				as = arm64.AFMOVD
-			default:
-				panic("bad float size")
-			}
-		}
-		p := gc.Prog(as)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = y
-	case ssa.OpARM64MOVDnop:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		// nothing to do
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(storeByType(v.Type))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpARM64ADD,
-		ssa.OpARM64SUB,
-		ssa.OpARM64AND,
-		ssa.OpARM64OR,
-		ssa.OpARM64XOR,
-		ssa.OpARM64BIC,
-		ssa.OpARM64MUL,
-		ssa.OpARM64MULW,
-		ssa.OpARM64MULH,
-		ssa.OpARM64UMULH,
-		ssa.OpARM64MULL,
-		ssa.OpARM64UMULL,
-		ssa.OpARM64DIV,
-		ssa.OpARM64UDIV,
-		ssa.OpARM64DIVW,
-		ssa.OpARM64UDIVW,
-		ssa.OpARM64MOD,
-		ssa.OpARM64UMOD,
-		ssa.OpARM64MODW,
-		ssa.OpARM64UMODW,
-		ssa.OpARM64SLL,
-		ssa.OpARM64SRL,
-		ssa.OpARM64SRA,
-		ssa.OpARM64FADDS,
-		ssa.OpARM64FADDD,
-		ssa.OpARM64FSUBS,
-		ssa.OpARM64FSUBD,
-		ssa.OpARM64FMULS,
-		ssa.OpARM64FMULD,
-		ssa.OpARM64FDIVS,
-		ssa.OpARM64FDIVD:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r2
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpARM64ADDconst,
-		ssa.OpARM64SUBconst,
-		ssa.OpARM64ANDconst,
-		ssa.OpARM64ORconst,
-		ssa.OpARM64XORconst,
-		ssa.OpARM64BICconst,
-		ssa.OpARM64SLLconst,
-		ssa.OpARM64SRLconst,
-		ssa.OpARM64SRAconst,
-		ssa.OpARM64RORconst,
-		ssa.OpARM64RORWconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARM64ADDshiftLL,
-		ssa.OpARM64SUBshiftLL,
-		ssa.OpARM64ANDshiftLL,
-		ssa.OpARM64ORshiftLL,
-		ssa.OpARM64XORshiftLL,
-		ssa.OpARM64BICshiftLL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
-	case ssa.OpARM64ADDshiftRL,
-		ssa.OpARM64SUBshiftRL,
-		ssa.OpARM64ANDshiftRL,
-		ssa.OpARM64ORshiftRL,
-		ssa.OpARM64XORshiftRL,
-		ssa.OpARM64BICshiftRL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
-	case ssa.OpARM64ADDshiftRA,
-		ssa.OpARM64SUBshiftRA,
-		ssa.OpARM64ANDshiftRA,
-		ssa.OpARM64ORshiftRA,
-		ssa.OpARM64XORshiftRA,
-		ssa.OpARM64BICshiftRA:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
-	case ssa.OpARM64MOVDconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARM64FMOVSconst,
-		ssa.OpARM64FMOVDconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARM64CMP,
-		ssa.OpARM64CMPW,
-		ssa.OpARM64CMN,
-		ssa.OpARM64CMNW,
-		ssa.OpARM64FCMPS,
-		ssa.OpARM64FCMPD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[0].Reg()
-	case ssa.OpARM64CMPconst,
-		ssa.OpARM64CMPWconst,
-		ssa.OpARM64CMNconst,
-		ssa.OpARM64CMNWconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-	case ssa.OpARM64CMPshiftLL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
-	case ssa.OpARM64CMPshiftRL:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
-	case ssa.OpARM64CMPshiftRA:
-		genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
-	case ssa.OpARM64MOVDaddr:
-		p := gc.Prog(arm64.AMOVD)
-		p.From.Type = obj.TYPE_ADDR
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-		var wantreg string
-		// MOVD $sym+off(base), R
-		// the assembler expands it as the following:
-		// - base is SP: add constant offset to SP
-		//               when constant is large, the tmp register may be used
-		// - base is SB: load external address from constant pool (use relocation)
-		switch v.Aux.(type) {
-		default:
-			v.Fatalf("aux is of unknown type %T", v.Aux)
-		case *ssa.ExternSymbol:
-			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *ssa.ArgSymbol, *ssa.AutoSymbol:
-			wantreg = "SP"
-			gc.AddAux(&p.From, v)
-		case nil:
-			// No sym, just MOVD $off(SP), R
-			wantreg = "SP"
-			p.From.Reg = arm64.REGSP
-			p.From.Offset = v.AuxInt
-		}
-		if reg := v.Args[0].RegName(); reg != wantreg {
-			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
-		}
-	case ssa.OpARM64MOVBload,
-		ssa.OpARM64MOVBUload,
-		ssa.OpARM64MOVHload,
-		ssa.OpARM64MOVHUload,
-		ssa.OpARM64MOVWload,
-		ssa.OpARM64MOVWUload,
-		ssa.OpARM64MOVDload,
-		ssa.OpARM64FMOVSload,
-		ssa.OpARM64FMOVDload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARM64LDAR,
-		ssa.OpARM64LDARW:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-	case ssa.OpARM64MOVBstore,
-		ssa.OpARM64MOVHstore,
-		ssa.OpARM64MOVWstore,
-		ssa.OpARM64MOVDstore,
-		ssa.OpARM64FMOVSstore,
-		ssa.OpARM64FMOVDstore,
-		ssa.OpARM64STLR,
-		ssa.OpARM64STLRW:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpARM64MOVBstorezero,
-		ssa.OpARM64MOVHstorezero,
-		ssa.OpARM64MOVWstorezero,
-		ssa.OpARM64MOVDstorezero:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = arm64.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpARM64LoweredAtomicExchange64,
-		ssa.OpARM64LoweredAtomicExchange32:
-		// LDAXR	(Rarg0), Rout
-		// STLXR	Rarg1, (Rarg0), Rtmp
-		// CBNZ		Rtmp, -2(PC)
-		ld := arm64.ALDAXR
-		st := arm64.ASTLXR
-		if v.Op == ssa.OpARM64LoweredAtomicExchange32 {
-			ld = arm64.ALDAXRW
-			st = arm64.ASTLXRW
-		}
-		r0 := v.Args[0].Reg()
-		r1 := v.Args[1].Reg()
-		out := v.Reg0()
-		p := gc.Prog(ld)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = out
-		p1 := gc.Prog(st)
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = r1
-		p1.To.Type = obj.TYPE_MEM
-		p1.To.Reg = r0
-		p1.RegTo2 = arm64.REGTMP
-		p2 := gc.Prog(arm64.ACBNZ)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = arm64.REGTMP
-		p2.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p2, p)
-	case ssa.OpARM64LoweredAtomicAdd64,
-		ssa.OpARM64LoweredAtomicAdd32:
-		// LDAXR	(Rarg0), Rout
-		// ADD		Rarg1, Rout
-		// STLXR	Rout, (Rarg0), Rtmp
-		// CBNZ		Rtmp, -3(PC)
-		ld := arm64.ALDAXR
-		st := arm64.ASTLXR
-		if v.Op == ssa.OpARM64LoweredAtomicAdd32 {
-			ld = arm64.ALDAXRW
-			st = arm64.ASTLXRW
-		}
-		r0 := v.Args[0].Reg()
-		r1 := v.Args[1].Reg()
-		out := v.Reg0()
-		p := gc.Prog(ld)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = out
-		p1 := gc.Prog(arm64.AADD)
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = r1
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = out
-		p2 := gc.Prog(st)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = out
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = r0
-		p2.RegTo2 = arm64.REGTMP
-		p3 := gc.Prog(arm64.ACBNZ)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = arm64.REGTMP
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-	case ssa.OpARM64LoweredAtomicCas64,
-		ssa.OpARM64LoweredAtomicCas32:
-		// LDAXR	(Rarg0), Rtmp
-		// CMP		Rarg1, Rtmp
-		// BNE		3(PC)
-		// STLXR	Rarg2, (Rarg0), Rtmp
-		// CBNZ		Rtmp, -4(PC)
-		// CSET		EQ, Rout
-		ld := arm64.ALDAXR
-		st := arm64.ASTLXR
-		cmp := arm64.ACMP
-		if v.Op == ssa.OpARM64LoweredAtomicCas32 {
-			ld = arm64.ALDAXRW
-			st = arm64.ASTLXRW
-			cmp = arm64.ACMPW
-		}
-		r0 := v.Args[0].Reg()
-		r1 := v.Args[1].Reg()
-		r2 := v.Args[2].Reg()
-		out := v.Reg0()
-		p := gc.Prog(ld)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm64.REGTMP
-		p1 := gc.Prog(cmp)
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = r1
-		p1.Reg = arm64.REGTMP
-		p2 := gc.Prog(arm64.ABNE)
-		p2.To.Type = obj.TYPE_BRANCH
-		p3 := gc.Prog(st)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = r2
-		p3.To.Type = obj.TYPE_MEM
-		p3.To.Reg = r0
-		p3.RegTo2 = arm64.REGTMP
-		p4 := gc.Prog(arm64.ACBNZ)
-		p4.From.Type = obj.TYPE_REG
-		p4.From.Reg = arm64.REGTMP
-		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
-		p5 := gc.Prog(arm64.ACSET)
-		p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
-		p5.From.Reg = arm64.COND_EQ
-		p5.To.Type = obj.TYPE_REG
-		p5.To.Reg = out
-		gc.Patch(p2, p5)
-	case ssa.OpARM64LoweredAtomicAnd8,
-		ssa.OpARM64LoweredAtomicOr8:
-		// LDAXRB	(Rarg0), Rtmp
-		// AND/OR	Rarg1, Rtmp
-		// STLXRB	Rtmp, (Rarg0), Rtmp
-		// CBNZ		Rtmp, -3(PC)
-		r0 := v.Args[0].Reg()
-		r1 := v.Args[1].Reg()
-		p := gc.Prog(arm64.ALDAXRB)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm64.REGTMP
-		p1 := gc.Prog(v.Op.Asm())
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = r1
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = arm64.REGTMP
-		p2 := gc.Prog(arm64.ASTLXRB)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = arm64.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = r0
-		p2.RegTo2 = arm64.REGTMP
-		p3 := gc.Prog(arm64.ACBNZ)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = arm64.REGTMP
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-	case ssa.OpARM64MOVBreg,
-		ssa.OpARM64MOVBUreg,
-		ssa.OpARM64MOVHreg,
-		ssa.OpARM64MOVHUreg,
-		ssa.OpARM64MOVWreg,
-		ssa.OpARM64MOVWUreg:
-		a := v.Args[0]
-		for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
-			a = a.Args[0]
-		}
-		if a.Op == ssa.OpLoadReg {
-			t := a.Type
-			switch {
-			case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
-				v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
-				v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
-				v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
-				v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
-				v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
-				// arg is a proper-typed load, already zero/sign-extended, don't extend again
-				if v.Reg() == v.Args[0].Reg() {
-					return
-				}
-				p := gc.Prog(arm64.AMOVD)
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = v.Args[0].Reg()
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = v.Reg()
-				return
-			default:
-			}
-		}
-		fallthrough
-	case ssa.OpARM64MVN,
-		ssa.OpARM64NEG,
-		ssa.OpARM64FNEGS,
-		ssa.OpARM64FNEGD,
-		ssa.OpARM64FSQRTD,
-		ssa.OpARM64FCVTZSSW,
-		ssa.OpARM64FCVTZSDW,
-		ssa.OpARM64FCVTZUSW,
-		ssa.OpARM64FCVTZUDW,
-		ssa.OpARM64FCVTZSS,
-		ssa.OpARM64FCVTZSD,
-		ssa.OpARM64FCVTZUS,
-		ssa.OpARM64FCVTZUD,
-		ssa.OpARM64SCVTFWS,
-		ssa.OpARM64SCVTFWD,
-		ssa.OpARM64SCVTFS,
-		ssa.OpARM64SCVTFD,
-		ssa.OpARM64UCVTFWS,
-		ssa.OpARM64UCVTFWD,
-		ssa.OpARM64UCVTFS,
-		ssa.OpARM64UCVTFD,
-		ssa.OpARM64FCVTSD,
-		ssa.OpARM64FCVTDS,
-		ssa.OpARM64REV,
-		ssa.OpARM64REVW,
-		ssa.OpARM64REV16W,
-		ssa.OpARM64RBIT,
-		ssa.OpARM64RBITW,
-		ssa.OpARM64CLZ,
-		ssa.OpARM64CLZW:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARM64CSELULT,
-		ssa.OpARM64CSELULT0:
-		r1 := int16(arm64.REGZERO)
-		if v.Op == ssa.OpARM64CSELULT {
-			r1 = v.Args[1].Reg()
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
-		p.From.Reg = arm64.COND_LO
-		p.Reg = v.Args[0].Reg()
-		p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1}
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpARM64DUFFZERO:
-		// runtime.duffzero expects start address - 8 in R16
-		p := gc.Prog(arm64.ASUB)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm64.REG_R16
-		p = gc.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-	case ssa.OpARM64LoweredZero:
-		// MOVD.P	ZR, 8(R16)
-		// CMP	Rarg1, R16
-		// BLE	-2(PC)
-		// arg1 is the address of the last element to zero
-		p := gc.Prog(arm64.AMOVD)
-		p.Scond = arm64.C_XPOST
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = arm64.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = arm64.REG_R16
-		p.To.Offset = 8
-		p2 := gc.Prog(arm64.ACMP)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = v.Args[1].Reg()
-		p2.Reg = arm64.REG_R16
-		p3 := gc.Prog(arm64.ABLE)
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-	case ssa.OpARM64DUFFCOPY:
-		p := gc.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-	case ssa.OpARM64LoweredMove:
-		// MOVD.P	8(R16), Rtmp
-		// MOVD.P	Rtmp, 8(R17)
-		// CMP	Rarg2, R16
-		// BLE	-3(PC)
-		// arg2 is the address of the last element of src
-		p := gc.Prog(arm64.AMOVD)
-		p.Scond = arm64.C_XPOST
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = arm64.REG_R16
-		p.From.Offset = 8
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm64.REGTMP
-		p2 := gc.Prog(arm64.AMOVD)
-		p2.Scond = arm64.C_XPOST
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = arm64.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = arm64.REG_R17
-		p2.To.Offset = 8
-		p3 := gc.Prog(arm64.ACMP)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = v.Args[2].Reg()
-		p3.Reg = arm64.REG_R16
-		p4 := gc.Prog(arm64.ABLE)
-		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
-	case ssa.OpARM64CALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64CALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64CALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64LoweredNilCheck:
-		// Issue a load which will fault if arg is nil.
-		p := gc.Prog(arm64.AMOVB)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = arm64.REGTMP
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpARM64Equal,
-		ssa.OpARM64NotEqual,
-		ssa.OpARM64LessThan,
-		ssa.OpARM64LessEqual,
-		ssa.OpARM64GreaterThan,
-		ssa.OpARM64GreaterEqual,
-		ssa.OpARM64LessThanU,
-		ssa.OpARM64LessEqualU,
-		ssa.OpARM64GreaterThanU,
-		ssa.OpARM64GreaterEqualU:
-		// generate boolean values using CSET
-		p := gc.Prog(arm64.ACSET)
-		p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
-		p.From.Reg = condBits[v.Op]
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.OpARM64LoweredGetClosurePtr:
-		// Closure pointer is R26 (arm64.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
-	case ssa.OpARM64FlagEQ,
-		ssa.OpARM64FlagLT_ULT,
-		ssa.OpARM64FlagLT_UGT,
-		ssa.OpARM64FlagGT_ULT,
-		ssa.OpARM64FlagGT_UGT:
-		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
-	case ssa.OpARM64InvertFlags:
-		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
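
The LoweredAtomic* cases above emit LDAXR/STLXR retry loops. As a hedged sketch, this is the ordinary Go code expected to reach those lowerings on arm64 through the sync/atomic intrinsics; whether a particular call is intrinsified depends on the compiler version and GOARCH.

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uint64
	atomic.AddUint64(&n, 5)                      // add loop (LoweredAtomicAdd64-style)
	old := atomic.SwapUint64(&n, 42)             // exchange loop (LoweredAtomicExchange64-style)
	ok := atomic.CompareAndSwapUint64(&n, 42, 7) // compare-and-swap loop (LoweredAtomicCas64-style)
	fmt.Println(old, ok, atomic.LoadUint64(&n))  // 5 true 7
}
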
-
-var condBits = map[ssa.Op]int16{
-	ssa.OpARM64Equal:         arm64.COND_EQ,
-	ssa.OpARM64NotEqual:      arm64.COND_NE,
-	ssa.OpARM64LessThan:      arm64.COND_LT,
-	ssa.OpARM64LessThanU:     arm64.COND_LO,
-	ssa.OpARM64LessEqual:     arm64.COND_LE,
-	ssa.OpARM64LessEqualU:    arm64.COND_LS,
-	ssa.OpARM64GreaterThan:   arm64.COND_GT,
-	ssa.OpARM64GreaterThanU:  arm64.COND_HI,
-	ssa.OpARM64GreaterEqual:  arm64.COND_GE,
-	ssa.OpARM64GreaterEqualU: arm64.COND_HS,
-}
-
-var blockJump = map[ssa.BlockKind]struct {
-	asm, invasm obj.As
-}{
-	ssa.BlockARM64EQ:  {arm64.ABEQ, arm64.ABNE},
-	ssa.BlockARM64NE:  {arm64.ABNE, arm64.ABEQ},
-	ssa.BlockARM64LT:  {arm64.ABLT, arm64.ABGE},
-	ssa.BlockARM64GE:  {arm64.ABGE, arm64.ABLT},
-	ssa.BlockARM64LE:  {arm64.ABLE, arm64.ABGT},
-	ssa.BlockARM64GT:  {arm64.ABGT, arm64.ABLE},
-	ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
-	ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
-	ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
-	ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
-	ssa.BlockARM64Z:   {arm64.ACBZ, arm64.ACBNZ},
-	ssa.BlockARM64NZ:  {arm64.ACBNZ, arm64.ACBZ},
-	ssa.BlockARM64ZW:  {arm64.ACBZW, arm64.ACBNZW},
-	ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW},
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-
-	case ssa.BlockDefer:
-		// defer returns in R0:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(arm64.ACMP)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 0
-		p.Reg = arm64.REG_R0
-		p = gc.Prog(arm64.ABNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.ARET)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-
-	case ssa.BlockARM64EQ, ssa.BlockARM64NE,
-		ssa.BlockARM64LT, ssa.BlockARM64GE,
-		ssa.BlockARM64LE, ssa.BlockARM64GT,
-		ssa.BlockARM64ULT, ssa.BlockARM64UGT,
-		ssa.BlockARM64ULE, ssa.BlockARM64UGE,
-		ssa.BlockARM64Z, ssa.BlockARM64NZ,
-		ssa.BlockARM64ZW, ssa.BlockARM64NZW:
-		jmp := blockJump[b.Kind]
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-		if !b.Control.Type.IsFlags() {
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = b.Control.Reg()
-		}
-
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
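
The two-successor cases at the end of ssaGenBlock choose between a conditional branch and its inverse depending on which successor is laid out next. A standalone sketch of that selection policy, using plain strings in place of the compiler's Prog and Block types:

package main

import "fmt"

// branchPair mirrors one blockJump entry: a conditional branch and its inverse.
type branchPair struct{ asm, invasm string }

// emitCond mirrors the final switch in ssaGenBlock: fall through to whichever
// successor is laid out next, inverting the condition when the "true"
// successor is the fallthrough block, and emitting an extra JMP otherwise.
func emitCond(jmp branchPair, succ0, succ1, next string) []string {
	switch next {
	case succ0:
		return []string{jmp.invasm + " " + succ1}
	case succ1:
		return []string{jmp.asm + " " + succ0}
	default:
		return []string{jmp.asm + " " + succ0, "JMP " + succ1}
	}
}

func main() {
	eq := branchPair{asm: "BEQ", invasm: "BNE"}
	fmt.Println(emitCond(eq, "b2", "b3", "b2")) // [BNE b3]
	fmt.Println(emitCond(eq, "b2", "b3", "b3")) // [BEQ b2]
	fmt.Println(emitCond(eq, "b2", "b3", "b9")) // [BEQ b2 JMP b3]
}
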
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/alg.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/alg.go
deleted file mode 100644
index 118079d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/alg.go
+++ /dev/null
@@ -1,603 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/alg.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/alg.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "fmt"
-
-// AlgKind describes the kind of algorithms used for comparing and
-// hashing a Type.
-type AlgKind int
-
-const (
-	// These values are known by runtime.
-	ANOEQ AlgKind = iota
-	AMEM0
-	AMEM8
-	AMEM16
-	AMEM32
-	AMEM64
-	AMEM128
-	ASTRING
-	AINTER
-	ANILINTER
-	AFLOAT32
-	AFLOAT64
-	ACPLX64
-	ACPLX128
-
-	// Type can be compared/hashed as regular memory.
-	AMEM AlgKind = 100
-
-	// Type needs special comparison/hashing functions.
-	ASPECIAL AlgKind = -1
-)
-
-// IsComparable reports whether t is a comparable type.
-func (t *Type) IsComparable() bool {
-	a, _ := algtype1(t)
-	return a != ANOEQ
-}
-
-// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func (t *Type) IsRegularMemory() bool {
-	a, _ := algtype1(t)
-	return a == AMEM
-}
-
-// IncomparableField returns an incomparable Field of struct Type t, if any.
-func (t *Type) IncomparableField() *Field {
-	for _, f := range t.FieldSlice() {
-		if !f.Type.IsComparable() {
-			return f
-		}
-	}
-	return nil
-}
-
-// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
-// instead of the general AMEM kind when possible.
-func algtype(t *Type) AlgKind {
-	a, _ := algtype1(t)
-	if a == AMEM {
-		switch t.Width {
-		case 0:
-			return AMEM0
-		case 1:
-			return AMEM8
-		case 2:
-			return AMEM16
-		case 4:
-			return AMEM32
-		case 8:
-			return AMEM64
-		case 16:
-			return AMEM128
-		}
-	}
-
-	return a
-}
-
-// algtype1 returns the AlgKind used for comparing and hashing Type t.
-// If it returns ANOEQ, it also returns the component type of t that
-// makes it incomparable.
-func algtype1(t *Type) (AlgKind, *Type) {
-	if t.Broke {
-		return AMEM, nil
-	}
-	if t.Noalg {
-		return ANOEQ, t
-	}
-
-	switch t.Etype {
-	case TANY, TFORW:
-		// will be defined later.
-		return ANOEQ, t
-
-	case TINT8, TUINT8, TINT16, TUINT16,
-		TINT32, TUINT32, TINT64, TUINT64,
-		TINT, TUINT, TUINTPTR,
-		TBOOL, TPTR32, TPTR64,
-		TCHAN, TUNSAFEPTR:
-		return AMEM, nil
-
-	case TFUNC, TMAP:
-		return ANOEQ, t
-
-	case TFLOAT32:
-		return AFLOAT32, nil
-
-	case TFLOAT64:
-		return AFLOAT64, nil
-
-	case TCOMPLEX64:
-		return ACPLX64, nil
-
-	case TCOMPLEX128:
-		return ACPLX128, nil
-
-	case TSTRING:
-		return ASTRING, nil
-
-	case TINTER:
-		if t.IsEmptyInterface() {
-			return ANILINTER, nil
-		}
-		return AINTER, nil
-
-	case TSLICE:
-		return ANOEQ, t
-
-	case TARRAY:
-		a, bad := algtype1(t.Elem())
-		switch a {
-		case AMEM:
-			return AMEM, nil
-		case ANOEQ:
-			return ANOEQ, bad
-		}
-
-		switch t.NumElem() {
-		case 0:
-			// We checked above that the element type is comparable.
-			return AMEM, nil
-		case 1:
-			// Single-element array is same as its lone element.
-			return a, nil
-		}
-
-		return ASPECIAL, nil
-
-	case TSTRUCT:
-		fields := t.FieldSlice()
-
-		// One-field struct is same as that one field alone.
-		if len(fields) == 1 && !isblanksym(fields[0].Sym) {
-			return algtype1(fields[0].Type)
-		}
-
-		ret := AMEM
-		for i, f := range fields {
-			// All fields must be comparable.
-			a, bad := algtype1(f.Type)
-			if a == ANOEQ {
-				return ANOEQ, bad
-			}
-
-			// Blank fields, padded fields, fields with non-memory
-			// equality need special compare.
-			if a != AMEM || isblanksym(f.Sym) || ispaddedfield(t, i) {
-				ret = ASPECIAL
-			}
-		}
-
-		return ret, nil
-	}
-
-	Fatalf("algtype1: unexpected type %v", t)
-	return 0, nil
-}
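
The classification above mirrors the language's comparability rules. A small source-level illustration; the AMEM/ASPECIAL/ANOEQ labels in the comments are informal annotations, not compiler output.

package main

import "fmt"

type memOnly struct{ a, b int64 } // AMEM-style: plain memory comparison

type needsSpecial struct { // ASPECIAL-style: string and float fields need helpers
	s string
	f float64
}

type notComparable struct{ m map[int]int } // ANOEQ-style: contains a map

func main() {
	fmt.Println(memOnly{1, 2} == memOnly{1, 2})               // true
	fmt.Println(needsSpecial{"x", 1} == needsSpecial{"x", 1}) // true
	// The next line would not compile: a struct containing a map is not comparable.
	// fmt.Println(notComparable{} == notComparable{})
	_ = notComparable{}
}
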
-
-// genhash generates a helper function to compute the hash of a value of type t.
-func genhash(sym *Sym, t *Type) {
-	if Debug['r'] != 0 {
-		fmt.Printf("genhash %v %v\n", sym, t)
-	}
-
-	lineno = 1 // less confusing than end of input
-	dclcontext = PEXTERN
-	markdcl()
-
-	// func sym(p *T, h uintptr) uintptr
-	fn := nod(ODCLFUNC, nil, nil)
-
-	fn.Func.Nname = newname(sym)
-	fn.Func.Nname.Class = PFUNC
-	tfn := nod(OTFUNC, nil, nil)
-	fn.Func.Nname.Name.Param.Ntype = tfn
-
-	n := nod(ODCLFIELD, newname(lookup("p")), typenod(ptrto(t)))
-	tfn.List.Append(n)
-	np := n.Left
-	n = nod(ODCLFIELD, newname(lookup("h")), typenod(Types[TUINTPTR]))
-	tfn.List.Append(n)
-	nh := n.Left
-	n = nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
-	tfn.Rlist.Append(n)
-
-	funchdr(fn)
-	fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, Etype)
-
-	// genhash is only called for types that have equality but
-	// cannot be handled by the standard algorithms,
-	// so t must be either an array or a struct.
-	switch t.Etype {
-	default:
-		Fatalf("genhash %v", t)
-
-	case TARRAY:
-		// An array of pure memory would be handled by the
-		// standard algorithm, so the element type must not be
-		// pure memory.
-		hashel := hashfor(t.Elem())
-
-		n := nod(ORANGE, nil, nod(OIND, np, nil))
-		ni := newname(lookup("i"))
-		ni.Type = Types[TINT]
-		n.List.Set1(ni)
-		n.Colas = true
-		colasdefn(n.List.Slice(), n)
-		ni = n.List.First()
-
-		// h = hashel(&p[i], h)
-		call := nod(OCALL, hashel, nil)
-
-		nx := nod(OINDEX, np, ni)
-		nx.Bounded = true
-		na := nod(OADDR, nx, nil)
-		na.Etype = 1 // no escape to heap
-		call.List.Append(na)
-		call.List.Append(nh)
-		n.Nbody.Append(nod(OAS, nh, call))
-
-		fn.Nbody.Append(n)
-
-	case TSTRUCT:
-		// Walk the struct using memhash for runs of AMEM
-		// and calling specific hash functions for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
-			f := fields[i]
-
-			// Skip blank fields.
-			if isblanksym(f.Sym) {
-				i++
-				continue
-			}
-
-			// Hash non-memory fields with appropriate hash function.
-			if !f.Type.IsRegularMemory() {
-				hashel := hashfor(f.Type)
-				call := nod(OCALL, hashel, nil)
-				nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
-				na := nod(OADDR, nx, nil)
-				na.Etype = 1 // no escape to heap
-				call.List.Append(na)
-				call.List.Append(nh)
-				fn.Nbody.Append(nod(OAS, nh, call))
-				i++
-				continue
-			}
-
-			// Otherwise, hash a maximal length run of raw memory.
-			size, next := memrun(t, i)
-
-			// h = hashel(&p.first, size, h)
-			hashel := hashmem(f.Type)
-			call := nod(OCALL, hashel, nil)
-			nx := nodSym(OXDOT, np, f.Sym) // TODO: fields from other packages?
-			na := nod(OADDR, nx, nil)
-			na.Etype = 1 // no escape to heap
-			call.List.Append(na)
-			call.List.Append(nh)
-			call.List.Append(nodintconst(size))
-			fn.Nbody.Append(nod(OAS, nh, call))
-
-			i = next
-		}
-	}
-
-	r := nod(ORETURN, nil, nil)
-	r.List.Append(nh)
-	fn.Nbody.Append(r)
-
-	if Debug['r'] != 0 {
-		dumplist("genhash body", fn.Nbody)
-	}
-
-	funcbody(fn)
-	Curfn = fn
-	fn.Func.Dupok = true
-	fn = typecheck(fn, Etop)
-	typecheckslice(fn.Nbody.Slice(), Etop)
-	Curfn = nil
-	popdcl()
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	// Disable safemode while compiling this code: the code we
-	// generate internally can refer to unsafe.Pointer.
-	// In this case it can happen if we need to generate an ==
-	// for a struct containing a reflect.Value, which itself has
-	// an unexported field of type unsafe.Pointer.
-	old_safemode := safemode
-	safemode = false
-
-	disable_checknil++
-	funccompile(fn)
-	disable_checknil--
-
-	safemode = old_safemode
-}
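
A hand-written sketch, not compiler output, of the shape of the helper genhash builds for a struct: one memhash-style call per maximal run of plain memory, and a type-specific hash for the rest. memhashStub and strhashStub are hypothetical stand-ins for the runtime helpers the real generated code calls.

package main

import "unsafe"

func memhashStub(p unsafe.Pointer, h, size uintptr) uintptr { return h ^ size } // placeholder
func strhashStub(p unsafe.Pointer, h uintptr) uintptr       { return h + 1 }    // placeholder

type T struct {
	a, b int64  // a maximal run of plain memory
	s    string // non-memory field: hashed with a string-specific helper
}

// hashT mirrors the shape of the generated func sym(p *T, h uintptr) uintptr.
func hashT(p *T, h uintptr) uintptr {
	h = memhashStub(unsafe.Pointer(&p.a), h, 16) // run covering a and b (16 bytes)
	h = strhashStub(unsafe.Pointer(&p.s), h)
	return h
}

func main() { _ = hashT(&T{}, 0) }
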
-
-func hashfor(t *Type) *Node {
-	var sym *Sym
-
-	switch a, _ := algtype1(t); a {
-	case AMEM:
-		Fatalf("hashfor with AMEM type")
-	case AINTER:
-		sym = Pkglookup("interhash", Runtimepkg)
-	case ANILINTER:
-		sym = Pkglookup("nilinterhash", Runtimepkg)
-	case ASTRING:
-		sym = Pkglookup("strhash", Runtimepkg)
-	case AFLOAT32:
-		sym = Pkglookup("f32hash", Runtimepkg)
-	case AFLOAT64:
-		sym = Pkglookup("f64hash", Runtimepkg)
-	case ACPLX64:
-		sym = Pkglookup("c64hash", Runtimepkg)
-	case ACPLX128:
-		sym = Pkglookup("c128hash", Runtimepkg)
-	default:
-		sym = typesymprefix(".hash", t)
-	}
-
-	n := newname(sym)
-	n.Class = PFUNC
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Append(nod(ODCLFIELD, nil, typenod(ptrto(t))))
-	tfn.List.Append(nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
-	tfn.Rlist.Append(nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
-	tfn = typecheck(tfn, Etype)
-	n.Type = tfn.Type
-	return n
-}
-
-// geneq generates a helper function to
-// check equality of two values of type t.
-func geneq(sym *Sym, t *Type) {
-	if Debug['r'] != 0 {
-		fmt.Printf("geneq %v %v\n", sym, t)
-	}
-
-	lineno = 1 // less confusing than end of input
-	dclcontext = PEXTERN
-	markdcl()
-
-	// func sym(p, q *T) bool
-	fn := nod(ODCLFUNC, nil, nil)
-
-	fn.Func.Nname = newname(sym)
-	fn.Func.Nname.Class = PFUNC
-	tfn := nod(OTFUNC, nil, nil)
-	fn.Func.Nname.Name.Param.Ntype = tfn
-
-	n := nod(ODCLFIELD, newname(lookup("p")), typenod(ptrto(t)))
-	tfn.List.Append(n)
-	np := n.Left
-	n = nod(ODCLFIELD, newname(lookup("q")), typenod(ptrto(t)))
-	tfn.List.Append(n)
-	nq := n.Left
-	n = nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
-	tfn.Rlist.Append(n)
-
-	funchdr(fn)
-	fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, Etype)
-
-	// geneq is only called for types that have equality but
-	// cannot be handled by the standard algorithms,
-	// so t must be either an array or a struct.
-	switch t.Etype {
-	default:
-		Fatalf("geneq %v", t)
-
-	case TARRAY:
-		// An array of pure memory would be handled by the
-		// standard memequal, so the element type must not be
-		// pure memory. Even if we unrolled the range loop,
-		// each iteration would be a function call, so don't bother
-		// unrolling.
-		nrange := nod(ORANGE, nil, nod(OIND, np, nil))
-
-		ni := newname(lookup("i"))
-		ni.Type = Types[TINT]
-		nrange.List.Set1(ni)
-		nrange.Colas = true
-		colasdefn(nrange.List.Slice(), nrange)
-		ni = nrange.List.First()
-
-		// if p[i] != q[i] { return false }
-		nx := nod(OINDEX, np, ni)
-
-		nx.Bounded = true
-		ny := nod(OINDEX, nq, ni)
-		ny.Bounded = true
-
-		nif := nod(OIF, nil, nil)
-		nif.Left = nod(ONE, nx, ny)
-		r := nod(ORETURN, nil, nil)
-		r.List.Append(nodbool(false))
-		nif.Nbody.Append(r)
-		nrange.Nbody.Append(nif)
-		fn.Nbody.Append(nrange)
-
-		// return true
-		ret := nod(ORETURN, nil, nil)
-		ret.List.Append(nodbool(true))
-		fn.Nbody.Append(ret)
-
-	case TSTRUCT:
-		var cond *Node
-		and := func(n *Node) {
-			if cond == nil {
-				cond = n
-				return
-			}
-			cond = nod(OANDAND, cond, n)
-		}
-
-		// Walk the struct using memequal for runs of AMEM
-		// and calling specific equality tests for the others.
-		for i, fields := 0, t.FieldSlice(); i < len(fields); {
-			f := fields[i]
-
-			// Skip blank-named fields.
-			if isblanksym(f.Sym) {
-				i++
-				continue
-			}
-
-			// Compare non-memory fields with field equality.
-			if !f.Type.IsRegularMemory() {
-				and(eqfield(np, nq, f.Sym))
-				i++
-				continue
-			}
-
-			// Find maximal length run of memory-only fields.
-			size, next := memrun(t, i)
-
-			// TODO(rsc): All the calls to newname are wrong for
-			// cross-package unexported fields.
-			if s := fields[i:next]; len(s) <= 2 {
-				// Two or fewer fields: use plain field equality.
-				for _, f := range s {
-					and(eqfield(np, nq, f.Sym))
-				}
-			} else {
-				// More than two fields: use memequal.
-				and(eqmem(np, nq, f.Sym, size))
-			}
-			i = next
-		}
-
-		if cond == nil {
-			cond = nodbool(true)
-		}
-
-		ret := nod(ORETURN, nil, nil)
-		ret.List.Append(cond)
-		fn.Nbody.Append(ret)
-	}
-
-	if Debug['r'] != 0 {
-		dumplist("geneq body", fn.Nbody)
-	}
-
-	funcbody(fn)
-	Curfn = fn
-	fn.Func.Dupok = true
-	fn = typecheck(fn, Etop)
-	typecheckslice(fn.Nbody.Slice(), Etop)
-	Curfn = nil
-	popdcl()
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	// Disable safemode while compiling this code: the code we
-	// generate internally can refer to unsafe.Pointer.
-	// In this case it can happen if we need to generate an ==
-	// for a struct containing a reflect.Value, which itself has
-	// an unexported field of type unsafe.Pointer.
-	old_safemode := safemode
-	safemode = false
-
-	// Disable checknils while compiling this code.
-	// We are comparing a struct or an array,
-	// neither of which can be nil, and our comparisons
-	// are shallow.
-	disable_checknil++
-
-	funccompile(fn)
-
-	safemode = old_safemode
-	disable_checknil--
-}
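
Likewise, a hand-written sketch of the shape of the helper geneq builds: a memequal-style call for a run of more than two memory-only fields, and plain == for non-memory fields such as strings. memequalStub is a hypothetical stand-in for the runtime helper.

package main

import "unsafe"

func memequalStub(x, y unsafe.Pointer, size uintptr) bool { return true } // placeholder

type S struct {
	a, b, c int32  // three memory-only fields: compared as one memequal run
	s       string // non-memory field: compared with ==
}

// eqS mirrors the shape of the generated func sym(p, q *S) bool.
func eqS(p, q *S) bool {
	return memequalStub(unsafe.Pointer(&p.a), unsafe.Pointer(&q.a), 12) &&
		p.s == q.s
}

func main() { _ = eqS(&S{}, &S{}) }
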
-
-// eqfield returns the node
-// 	p.field == q.field
-func eqfield(p *Node, q *Node, field *Sym) *Node {
-	nx := nodSym(OXDOT, p, field)
-	ny := nodSym(OXDOT, q, field)
-	ne := nod(OEQ, nx, ny)
-	return ne
-}
-
-// eqmem returns the node
-// 	memequal(&p.field, &q.field [, size])
-func eqmem(p *Node, q *Node, field *Sym, size int64) *Node {
-	nx := nod(OADDR, nodSym(OXDOT, p, field), nil)
-	nx.Etype = 1 // does not escape
-	ny := nod(OADDR, nodSym(OXDOT, q, field), nil)
-	ny.Etype = 1 // does not escape
-	nx = typecheck(nx, Erv)
-	ny = typecheck(ny, Erv)
-
-	fn, needsize := eqmemfunc(size, nx.Type.Elem())
-	call := nod(OCALL, fn, nil)
-	call.List.Append(nx)
-	call.List.Append(ny)
-	if needsize {
-		call.List.Append(nodintconst(size))
-	}
-
-	return call
-}
-
-func eqmemfunc(size int64, t *Type) (fn *Node, needsize bool) {
-	switch size {
-	default:
-		fn = syslook("memequal")
-		needsize = true
-	case 1, 2, 4, 8, 16:
-		buf := fmt.Sprintf("memequal%d", int(size)*8)
-		fn = syslook(buf)
-	}
-
-	fn = substArgTypes(fn, t, t)
-	return fn, needsize
-}
-
-// memrun finds runs of struct fields for which memory-only algs are appropriate.
-// t is the parent struct type, and start is the field index at which to start the run.
-// size is the length in bytes of the memory included in the run.
-// next is the index just after the end of the memory run.
-func memrun(t *Type, start int) (size int64, next int) {
-	next = start
-	for {
-		next++
-		if next == t.NumFields() {
-			break
-		}
-		// Stop run after a padded field.
-		if ispaddedfield(t, next-1) {
-			break
-		}
-		// Also, stop before a blank or non-memory field.
-		if f := t.Field(next); isblanksym(f.Sym) || !f.Type.IsRegularMemory() {
-			break
-		}
-	}
-	return t.Field(next-1).End() - t.Field(start).Offset, next
-}
-
-// ispaddedfield reports whether the i'th field of struct type t is followed
-// by padding.
-func ispaddedfield(t *Type, i int) bool {
-	if !t.IsStruct() {
-		Fatalf("ispaddedfield called non-struct %v", t)
-	}
-	end := t.Width
-	if i+1 < t.NumFields() {
-		end = t.Field(i + 1).Offset
-	}
-	return t.Field(i).End() != end
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/align.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/align.go
deleted file mode 100644
index fbb9134..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/align.go
+++ /dev/null
@@ -1,384 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/align.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/align.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// machine size and rounding alignment are dictated by
-// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
-var defercalc int
-
-func Rnd(o int64, r int64) int64 {
-	if r < 1 || r > 8 || r&(r-1) != 0 {
-		Fatalf("rnd %d", r)
-	}
-	return (o + r - 1) &^ (r - 1)
-}
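
A simplified, self-contained copy of the rounding helper (without the range check) with a few worked values; r must be a power of two.

package main

import "fmt"

// rnd rounds o up to the next multiple of the power-of-two r.
func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

func main() {
	fmt.Println(rnd(10, 8)) // 16
	fmt.Println(rnd(16, 8)) // 16 (already aligned)
	fmt.Println(rnd(1, 4))  // 4
}
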
-
-func offmod(t *Type) {
-	o := int32(0)
-	for _, f := range t.Fields().Slice() {
-		f.Offset = int64(o)
-		o += int32(Widthptr)
-		if int64(o) >= Thearch.MAXWIDTH {
-			yyerror("interface too large")
-			o = int32(Widthptr)
-		}
-	}
-}
-
-func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
-	starto := o
-	maxalign := int32(flag)
-	if maxalign < 1 {
-		maxalign = 1
-	}
-	lastzero := int64(0)
-	var w int64
-	for _, f := range t.Fields().Slice() {
-		if f.Type == nil {
-			// broken field, just skip it so that other valid fields
-			// get a width.
-			continue
-		}
-
-		dowidth(f.Type)
-		if int32(f.Type.Align) > maxalign {
-			maxalign = int32(f.Type.Align)
-		}
-		if f.Type.Width < 0 {
-			Fatalf("invalid width %d", f.Type.Width)
-		}
-		w = f.Type.Width
-		if f.Type.Align > 0 {
-			o = Rnd(o, int64(f.Type.Align))
-		}
-		f.Offset = o
-		if f.Nname != nil {
-			// addrescapes has similar code to update these offsets.
-			// Usually addrescapes runs after widstruct,
-			// in which case we could drop this,
-			// but function closure functions are the exception.
-			// NOTE(rsc): This comment may be stale.
-			// It's possible the ordering has changed and this is
-			// now the common case. I'm not sure.
-			if f.Nname.Name.Param.Stackcopy != nil {
-				f.Nname.Name.Param.Stackcopy.Xoffset = o
-				f.Nname.Xoffset = 0
-			} else {
-				f.Nname.Xoffset = o
-			}
-		}
-
-		if w == 0 {
-			lastzero = o
-		}
-		o += w
-		if o >= Thearch.MAXWIDTH {
-			yyerror("type %L too large", errtype)
-			o = 8 // small but nonzero
-		}
-	}
-
-	// For nonzero-sized structs which end in a zero-sized thing, we add
-	// an extra byte of padding to the type. This padding ensures that
-	// taking the address of the zero-sized thing can't manufacture a
-	// pointer to the next object in the heap. See issue 9401.
-	if flag == 1 && o > starto && o == lastzero {
-		o++
-	}
-
-	// final width is rounded
-	if flag != 0 {
-		o = Rnd(o, int64(maxalign))
-	}
-	t.Align = uint8(maxalign)
-
-	// type width only includes back to first field's offset
-	t.Width = o - starto
-
-	return o
-}
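
The offsets and trailing padding widstruct computes are observable from ordinary Go via unsafe; the values in the comments assume a 64-bit target.

package main

import (
	"fmt"
	"unsafe"
)

type T struct {
	a int32
	b int64    // 8-byte alignment forces padding after a
	z struct{} // zero-sized tail triggers the extra-byte rule (issue 9401)
}

func main() {
	var t T
	fmt.Println(unsafe.Offsetof(t.a)) // 0
	fmt.Println(unsafe.Offsetof(t.b)) // 8
	fmt.Println(unsafe.Sizeof(t))     // 24: padded past the zero-sized tail, then rounded
}
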
-
-func dowidth(t *Type) {
-	if Widthptr == 0 {
-		Fatalf("dowidth without betypeinit")
-	}
-
-	if t == nil {
-		return
-	}
-
-	if t.Width > 0 {
-		if t.Align == 0 {
-			// See issue 11354
-			Fatalf("zero alignment with nonzero size %v", t)
-		}
-		return
-	}
-
-	if t.Width == -2 {
-		if !t.Broke {
-			t.Broke = true
-			yyerrorl(t.Lineno, "invalid recursive type %v", t)
-		}
-
-		t.Width = 0
-		return
-	}
-
-	// break infinite recursion if the broken recursive type
-	// is referenced again
-	if t.Broke && t.Width == 0 {
-		return
-	}
-
-	// defer checkwidth calls until after we're done
-	defercalc++
-
-	lno := lineno
-	lineno = t.Lineno
-	t.Width = -2
-	t.Align = 0
-
-	et := t.Etype
-	switch et {
-	case TFUNC, TCHAN, TMAP, TSTRING:
-		break
-
-	// simtype == 0 during bootstrap
-	default:
-		if simtype[t.Etype] != 0 {
-			et = simtype[t.Etype]
-		}
-	}
-
-	w := int64(0)
-	switch et {
-	default:
-		Fatalf("dowidth: unknown type: %v", t)
-
-	// compiler-specific stuff
-	case TINT8, TUINT8, TBOOL:
-		// bool is int8
-		w = 1
-
-	case TINT16, TUINT16:
-		w = 2
-
-	case TINT32, TUINT32, TFLOAT32:
-		w = 4
-
-	case TINT64, TUINT64, TFLOAT64:
-		w = 8
-		t.Align = uint8(Widthreg)
-
-	case TCOMPLEX64:
-		w = 8
-		t.Align = 4
-
-	case TCOMPLEX128:
-		w = 16
-		t.Align = uint8(Widthreg)
-
-	case TPTR32:
-		w = 4
-		checkwidth(t.Elem())
-
-	case TPTR64:
-		w = 8
-		checkwidth(t.Elem())
-
-	case TUNSAFEPTR:
-		w = int64(Widthptr)
-
-	case TINTER: // implemented as 2 pointers
-		w = 2 * int64(Widthptr)
-
-		t.Align = uint8(Widthptr)
-		offmod(t)
-
-	case TCHAN: // implemented as pointer
-		w = int64(Widthptr)
-
-		checkwidth(t.Elem())
-
-		// make fake type to check later to
-		// trigger channel argument check.
-		t1 := typChanArgs(t)
-		checkwidth(t1)
-
-	case TCHANARGS:
-		t1 := t.ChanArgs()
-		dowidth(t1) // just in case
-		if t1.Elem().Width >= 1<<16 {
-			yyerror("channel element type too large (>64kB)")
-		}
-		t.Width = 1
-
-	case TMAP: // implemented as pointer
-		w = int64(Widthptr)
-		checkwidth(t.Val())
-		checkwidth(t.Key())
-
-	case TFORW: // should have been filled in
-		if !t.Broke {
-			yyerror("invalid recursive type %v", t)
-		}
-		w = 1 // anything will do
-
-	case TANY:
-		// dummy type; should be replaced before use.
-		Fatalf("dowidth any")
-
-	case TSTRING:
-		if sizeof_String == 0 {
-			Fatalf("early dowidth string")
-		}
-		w = int64(sizeof_String)
-		t.Align = uint8(Widthptr)
-
-	case TARRAY:
-		if t.Elem() == nil {
-			break
-		}
-		if t.isDDDArray() {
-			if !t.Broke {
-				yyerror("use of [...] array outside of array literal")
-				t.Broke = true
-			}
-			break
-		}
-
-		dowidth(t.Elem())
-		if t.Elem().Width != 0 {
-			cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width)
-			if uint64(t.NumElem()) > cap {
-				yyerror("type %L larger than address space", t)
-			}
-		}
-		w = t.NumElem() * t.Elem().Width
-		t.Align = t.Elem().Align
-
-	case TSLICE:
-		if t.Elem() == nil {
-			break
-		}
-		w = int64(sizeof_Array)
-		checkwidth(t.Elem())
-		t.Align = uint8(Widthptr)
-
-	case TSTRUCT:
-		if t.IsFuncArgStruct() {
-			Fatalf("dowidth fn struct %v", t)
-		}
-		w = widstruct(t, t, 0, 1)
-
-	// make fake type to check later to
-	// trigger function argument computation.
-	case TFUNC:
-		t1 := typFuncArgs(t)
-		checkwidth(t1)
-		w = int64(Widthptr) // width of func type is pointer
-
-	// function args are 3 concatenated structures (receiver, params, results);
-	// compute their widths as side-effect.
-	case TFUNCARGS:
-		t1 := t.FuncArgs()
-		w = widstruct(t1, t1.Recvs(), 0, 0)
-		w = widstruct(t1, t1.Params(), w, Widthreg)
-		w = widstruct(t1, t1.Results(), w, Widthreg)
-		t1.Extra.(*FuncType).Argwid = w
-		if w%int64(Widthreg) != 0 {
-			Warn("bad type %v %d\n", t1, w)
-		}
-		t.Align = 1
-	}
-
-	if Widthptr == 4 && w != int64(int32(w)) {
-		yyerror("type %v too large", t)
-	}
-
-	t.Width = w
-	if t.Align == 0 {
-		if w > 8 || w&(w-1) != 0 {
-			Fatalf("invalid alignment for %v", t)
-		}
-		t.Align = uint8(w)
-	}
-
-	lineno = lno
-
-	if defercalc == 1 {
-		resumecheckwidth()
-	} else {
-		defercalc--
-	}
-}
-
-// when a type's width should be known, we call checkwidth
-// to compute it.  during a declaration like
-//
-//	type T *struct { next T }
-//
-// it is necessary to defer the calculation of the struct width
-// until after T has been initialized to be a pointer to that struct.
-// similarly, during import processing structs may be used
-// before their definition.  in those situations, calling
-// defercheckwidth() stops width calculations until
-// resumecheckwidth() is called, at which point all the
-// checkwidths that were deferred are executed.
-// dowidth should only be called when the type's size
-// is needed immediately.  checkwidth makes sure the
-// size is evaluated eventually.
-
-var deferredTypeStack []*Type
-
-func checkwidth(t *Type) {
-	if t == nil {
-		return
-	}
-
-	// function arg structs should not be checked
-	// outside of the enclosing function.
-	if t.IsFuncArgStruct() {
-		Fatalf("checkwidth %v", t)
-	}
-
-	if defercalc == 0 {
-		dowidth(t)
-		return
-	}
-
-	if t.Deferwidth {
-		return
-	}
-	t.Deferwidth = true
-
-	deferredTypeStack = append(deferredTypeStack, t)
-}
-
-func defercheckwidth() {
-	// we get out of sync on syntax errors, so don't be pedantic.
-	if defercalc != 0 && nerrors == 0 {
-		Fatalf("defercheckwidth")
-	}
-	defercalc = 1
-}
-
-func resumecheckwidth() {
-	if defercalc == 0 {
-		Fatalf("resumecheckwidth")
-	}
-	for len(deferredTypeStack) > 0 {
-		t := deferredTypeStack[len(deferredTypeStack)-1]
-		deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
-		t.Deferwidth = false
-		dowidth(t)
-	}
-
-	defercalc = 0
-}
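
The recursive declaration mentioned in the checkwidth comment above, shown as an ordinary program; it compiles precisely because the struct's width calculation is deferred until T is known to be a pointer type.

package main

// T points to a struct that refers back to T, so the struct's width cannot be
// computed until T itself has been resolved to a pointer type.
type T *struct{ next T }

func main() {
	var head T
	_ = head
}
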
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/asm_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/asm_test.go
deleted file mode 100644
index 43a9aad..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/asm_test.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/asm_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/asm_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"fmt"
-	"internal/testenv"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"strings"
-	"testing"
-)
-
-// TestAssembly checks to make sure the assembly generated for
-// functions contains certain expected instructions.
-func TestAssembly(t *testing.T) {
-	if testing.Short() {
-		t.Skip("slow test; skipping")
-	}
-	testenv.MustHaveGoBuild(t)
-	if runtime.GOOS == "windows" {
-		// TODO: remove if we can get "go tool compile -S" to work on windows.
-		t.Skipf("skipping test: recursive windows compile not working")
-	}
-	dir, err := ioutil.TempDir("", "TestAssembly")
-	if err != nil {
-		t.Fatalf("could not create directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	for _, test := range asmTests {
-		asm := compileToAsm(t, dir, test.arch, test.os, fmt.Sprintf(template, test.function))
-		// Get rid of code for "".init. Also gets rid of type algorithms & other junk.
-		if i := strings.Index(asm, "\n\"\".init "); i >= 0 {
-			asm = asm[:i+1]
-		}
-		for _, r := range test.regexps {
-			if b, err := regexp.MatchString(r, asm); !b || err != nil {
-				t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, test.function, asm)
-			}
-		}
-	}
-}
-
-// compileToAsm compiles the package pkg for GOARCH goarch and GOOS goos and
-// returns the generated assembly. dir is a scratch directory.
-func compileToAsm(t *testing.T, dir, goarch, goos, pkg string) string {
-	// Create source.
-	src := filepath.Join(dir, "test.go")
-	f, err := os.Create(src)
-	if err != nil {
-		panic(err)
-	}
-	f.Write([]byte(pkg))
-	f.Close()
-
-	// First, install any dependencies we need.  This builds the required export data
-	// for any packages that are imported.
-	// TODO: extract dependencies automatically?
-	var stdout, stderr bytes.Buffer
-	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", filepath.Join(dir, "encoding/binary.a"), "encoding/binary")
-	cmd.Env = mergeEnvLists([]string{"GOARCH=" + goarch, "GOOS=" + goos}, os.Environ())
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-	if err := cmd.Run(); err != nil {
-		panic(err)
-	}
-	if s := stdout.String(); s != "" {
-		panic(fmt.Errorf("Stdout = %s\nWant empty", s))
-	}
-	if s := stderr.String(); s != "" {
-		panic(fmt.Errorf("Stderr = %s\nWant empty", s))
-	}
-
-	// Now, compile the individual file for which we want to see the generated assembly.
-	cmd = exec.Command(testenv.GoToolPath(t), "tool", "compile", "-I", dir, "-S", "-o", filepath.Join(dir, "out.o"), src)
-	cmd.Env = mergeEnvLists([]string{"GOARCH=" + goarch, "GOOS=" + goos}, os.Environ())
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-	if err := cmd.Run(); err != nil {
-		panic(err)
-	}
-	if s := stderr.String(); s != "" {
-		panic(fmt.Errorf("Stderr = %s\nWant empty", s))
-	}
-	return stdout.String()
-}
-
-// template to convert a function to a full file
-const template = `
-package main
-%s
-`
-
-type asmTest struct {
-	// architecture to compile to
-	arch string
-	// os to compile to
-	os string
-	// function to compile
-	function string
-	// regexps that must match the generated assembly
-	regexps []string
-}
-
-var asmTests = [...]asmTest{
-	{"amd64", "linux", `
-func f(x int) int {
-	return x * 64
-}
-`,
-		[]string{"\tSHLQ\t\\$6,"},
-	},
-	{"amd64", "linux", `
-func f(x int) int {
-	return x * 96
-}`,
-		[]string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
-	},
-	// Load-combining tests.
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte) uint64 {
-	return binary.LittleEndian.Uint64(b)
-}
-`,
-		[]string{"\tMOVQ\t\\(.*\\),"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte, i int) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
-}
-`,
-		[]string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte) uint32 {
-	return binary.LittleEndian.Uint32(b)
-}
-`,
-		[]string{"\tMOVL\t\\(.*\\),"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte, i int) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
-}
-`,
-		[]string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte) uint64 {
-	return binary.BigEndian.Uint64(b)
-}
-`,
-		[]string{"\tBSWAPQ\t"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte, i int) uint64 {
-	return binary.BigEndian.Uint64(b[i:])
-}
-`,
-		[]string{"\tBSWAPQ\t"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte, v uint64) {
-	binary.BigEndian.PutUint64(b, v)
-}
-`,
-		[]string{"\tBSWAPQ\t"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte) uint32 {
-	return binary.BigEndian.Uint32(b)
-}
-`,
-		[]string{"\tBSWAPL\t"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte, i int) uint32 {
-	return binary.BigEndian.Uint32(b[i:])
-}
-`,
-		[]string{"\tBSWAPL\t"},
-	},
-	{"amd64", "linux", `
-import "encoding/binary"
-func f(b []byte, v uint32) {
-	binary.BigEndian.PutUint32(b, v)
-}
-`,
-		[]string{"\tBSWAPL\t"},
-	},
-	{"386", "linux", `
-import "encoding/binary"
-func f(b []byte) uint32 {
-	return binary.LittleEndian.Uint32(b)
-}
-`,
-		[]string{"\tMOVL\t\\(.*\\),"},
-	},
-	{"386", "linux", `
-import "encoding/binary"
-func f(b []byte, i int) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
-}
-`,
-		[]string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"},
-	},
-
-	// Structure zeroing.  See issue #18370.
-	{"amd64", "linux", `
-type T struct {
-	a, b, c int
-}
-func f(t *T) {
-	*t = T{}
-}
-`,
-		[]string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"},
-	},
-	// TODO: add a test for *t = T{3,4,5} when we fix that.
-}
-
-// mergeEnvLists merges the two environment lists such that
-// variables with the same name in "in" replace those in "out".
-// This always returns a newly allocated slice.
-func mergeEnvLists(in, out []string) []string {
-	out = append([]string(nil), out...)
-NextVar:
-	for _, inkv := range in {
-		k := strings.SplitAfterN(inkv, "=", 2)[0]
-		for i, outkv := range out {
-			if strings.HasPrefix(outkv, k) {
-				out[i] = inkv
-				continue NextVar
-			}
-		}
-		out = append(out, inkv)
-	}
-	return out
-}
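
A self-contained copy of mergeEnvLists with a small usage example, mirroring how compileToAsm forces GOARCH/GOOS over the inherited environment.

package main

import (
	"fmt"
	"strings"
)

// mergeEnv is a copy of mergeEnvLists: entries in "in" replace same-named
// variables in "out"; the result is a newly allocated slice.
func mergeEnv(in, out []string) []string {
	out = append([]string(nil), out...)
NextVar:
	for _, inkv := range in {
		k := strings.SplitAfterN(inkv, "=", 2)[0]
		for i, outkv := range out {
			if strings.HasPrefix(outkv, k) {
				out[i] = inkv
				continue NextVar
			}
		}
		out = append(out, inkv)
	}
	return out
}

func main() {
	env := []string{"GOARCH=arm64", "GOOS=linux", "PATH=/usr/bin"}
	fmt.Println(mergeEnv([]string{"GOARCH=amd64"}, env))
	// [GOARCH=amd64 GOOS=linux PATH=/usr/bin]
}
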
-
-// TestLineNumber checks to make sure the generated assembly has line numbers;
-// see issue #16214.
-func TestLineNumber(t *testing.T) {
-	testenv.MustHaveGoBuild(t)
-	dir, err := ioutil.TempDir("", "TestLineNumber")
-	if err != nil {
-		t.Fatalf("could not create directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	src := filepath.Join(dir, "x.go")
-	err = ioutil.WriteFile(src, []byte(issue16214src), 0644)
-	if err != nil {
-		t.Fatalf("could not write file: %v", err)
-	}
-
-	cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-S", "-o", filepath.Join(dir, "out.o"), src)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("fail to run go tool compile: %v", err)
-	}
-
-	if strings.Contains(string(out), "unknown line number") {
-		t.Errorf("line number missing in assembly:\n%s", out)
-	}
-}
-
-var issue16214src = `
-package main
-
-func Mod32(x uint32) uint32 {
-	return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has Lineno 0
-}
-`
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bexport.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bexport.go
deleted file mode 100644
index 9ccbf42..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bexport.go
+++ /dev/null
@@ -1,1922 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/bexport.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/bexport.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary package export.
-
-/*
-1) Export data encoding principles:
-
-The export data is a serialized description of the graph of exported
-"objects": constants, types, variables, and functions. Aliases may be
-directly reexported, and unaliased types may be indirectly reexported
-(as part of the type of a directly exported object). More generally,
-objects referred to from inlined function bodies can be reexported.
-We need to know which package declares these reexported objects, and
-therefore packages are also part of the export graph.
-
-The roots of the graph are two lists of objects. The 1st list (phase 1,
-see Export) contains all objects that are exported at the package level.
-These objects are the full representation of the package's API, and they
-are the only information a platform-independent tool (e.g., go/types)
-needs to know to type-check against a package.
-
-The 2nd list of objects contains all objects referred to from exported
-inlined function bodies. These objects are needed by the compiler to
-make sense of the function bodies; the exact list contents are compiler-
-specific.
-
-Finally, the export data contains a list of representations for inlined
-function bodies. The format of this representation is compiler specific.
-
-The graph is serialized in in-order fashion, starting with the roots.
-Each object in the graph is serialized by writing its fields sequentially.
-If the field is a pointer to another object, that object is serialized in
-place, recursively. Otherwise the field is written in place. Non-pointer
-fields are all encoded as integer or string values.
-
-Some objects (packages, types) may be referred to more than once. When
-reaching an object that was not serialized before, an integer _index_
-is assigned to it, starting at 0. In this case, the encoding starts
-with an integer _tag_ < 0. The tag value indicates the kind of object
-that follows and that this is the first time that we see this object.
-If the object was already serialized, the encoding is simply the object
-index >= 0. An importer can trivially determine if an object needs to
-be read in for the first time (tag < 0) and entered into the respective
-object table, or if the object was seen already (index >= 0), in which
-case the index is used to look up the object in the respective table.
-
-Before exporting or importing, the type tables are populated with the
-predeclared types (int, string, error, unsafe.Pointer, etc.). This way
-they are automatically encoded with a known and fixed type index.
-
-2) Encoding format:
-
-The export data starts with two newline-terminated strings: a version
-string and either an empty string, or "debug", when emitting the debug
-format. These strings are followed by version-specific encoding options.
-
-(The Go1.7 version starts with a couple of bytes specifying the format.
-That format encoding is no longer used but is supported to avoid spurious
-errors when importing old installed package files.)
-
-This header is followed by the package object for the exported package,
-two lists of objects, and the list of inlined function bodies.
-
-The encoding of objects is straight-forward: Constants, variables, and
-functions start with their name, type, and possibly a value. Named types
-record their name and package so that they can be canonicalized: If the
-same type was imported before via another import, the importer must use
-the previously imported type pointer so that we have exactly one version
-(i.e., one pointer) for each named type (and read but discard the current
-type encoding). Unnamed types simply encode their respective fields.
-Aliases are encoded starting with their name followed by the qualified
-identifier denoting the original (aliased) object, which was exported
-earlier.
-
-In the encoding, some lists start with the list length. Some lists are
-terminated with an end marker (usually for lists where we may not know
-the length a priori).
-
-Integers use variable-length encoding for compact representation.
-
-Strings are canonicalized similar to objects that may occur multiple times:
-If the string was exported already, it is represented by its index only.
-Otherwise, the export data starts with the negative string length (negative,
-so we can distinguish from string index), followed by the string bytes.
-The empty string is mapped to index 0. (The initial format string is an
-exception; it is encoded as the string bytes followed by a newline).
-
-The exporter and importer are completely symmetric in implementation: For
-each encoding routine there is a matching and symmetric decoding routine.
-This symmetry makes it very easy to change or extend the format: If a new
-field needs to be encoded, a symmetric change can be made to exporter and
-importer.
-
-3) Making changes to the encoding format:
-
-Any change to the encoding format requires a respective change in the
-exporter below and a corresponding symmetric change to the importer in
-bimport.go.
-
-Furthermore, it requires a corresponding change to go/internal/gcimporter
-and golang.org/x/tools/go/gcimporter15. Changes to the latter must preserve
-compatibility with both the last release of the compiler, and with the
-corresponding compiler at tip. That change is necessarily more involved,
-as it must switch based on the version number in the export data file.
-
-It is recommended to turn on debugFormat temporarily when working on format
-changes as it will help finding encoding/decoding inconsistencies quickly.
-*/
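
A tiny, self-contained sketch of the tag/index scheme described above; the writer type and its fields are illustrative rather than the compiler's own (only the packageTag value matches the constants defined later in this file). The first occurrence of an object is written as a negative tag followed by its data; every later reference is just its non-negative index:

// Hypothetical sketch of the tag/index canonicalization described above.
package main

import "fmt"

const packageTag = -1 // tags are < 0; indices are >= 0

type writer struct {
	pkgIndex map[string]int
	out      []int64 // stand-in for the varint stream
}

func (w *writer) pkg(path string) {
	if i, ok := w.pkgIndex[path]; ok {
		w.out = append(w.out, int64(i)) // seen before: emit index >= 0
		return
	}
	w.pkgIndex[path] = len(w.pkgIndex)
	w.out = append(w.out, packageTag) // first time: emit tag < 0, then the data
	// ... package name and path would follow here ...
}

func main() {
	w := &writer{pkgIndex: map[string]int{}}
	w.pkg("strings")
	w.pkg("strings") // the second reference collapses to its index
	fmt.Println(w.out) // [-1 0]
}
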
-
-package gc
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"bootstrap/math/big"
-	"sort"
-	"strings"
-)
-
-// If debugFormat is set, each integer and string value is preceded by a marker
-// and position information in the encoding. This mechanism permits an importer
-// to recognize immediately when it is out of sync. The importer recognizes this
-// mode automatically (i.e., it can import export data produced with debugging
-// support even if debugFormat is not set at the time of import). This mode will
-// lead to massively larger export data (by a factor of 2 to 3) and should only
-// be enabled during development and debugging.
-//
-// NOTE: This flag is the first flag to enable if importing dies because of
-// (suspected) format errors, and whenever a change is made to the format.
-const debugFormat = false // default: false
-
-// forceObjFileStability enforces additional constraints in export data
-// and other parts of the compiler to eliminate object file differences
-// only due to the choice of export format.
-// TODO(gri) disable and remove once there is only one export format again
-const forceObjFileStability = true
-
-// Current export format version. Increase with each format change.
-// 3: added aliasTag and export of aliases
-// 2: removed unused bool in ODCL export
-// 1: header format change (more regular), export package for _ struct fields
-// 0: Go1.7 encoding
-const exportVersion = 3
-
-// exportInlined enables the export of inlined function bodies and related
-// dependencies. The compiler should work w/o any loss of functionality with
-// the flag disabled, but the generated code will lose access to inlined
-// function bodies across packages, leading to performance bugs.
-// Leave for debugging.
-const exportInlined = true // default: true
-
-// trackAllTypes enables cycle tracking for all types, not just named
-// types. The existing compiler invariants assume that unnamed types
-// that are not completely set up are not used, or else there are spurious
-// errors.
-// If disabled, only named types are tracked, possibly leading to slightly
-// less efficient encoding in rare cases. It also prevents the export of
-// some corner-case type declarations (but those were not handled correctly
-// with the former textual export format either).
-// TODO(gri) enable and remove once issues caused by it are fixed
-const trackAllTypes = false
-
-type exporter struct {
-	out *bufio.Writer
-
-	// object -> index maps, indexed in order of serialization
-	strIndex map[string]int
-	pkgIndex map[*Pkg]int
-	typIndex map[*Type]int
-	funcList []*Func
-
-	// position encoding
-	posInfoFormat bool
-	prevFile      string
-	prevLine      int
-
-	// debugging support
-	written int // bytes written
-	indent  int // for p.trace
-	trace   bool
-
-	// work-around for issue #16369 only
-	nesting int // amount of "nesting" of interface types
-}
-
-// export writes the exportlist for localpkg to out and returns the number of bytes written.
-func export(out *bufio.Writer, trace bool) int {
-	p := exporter{
-		out:           out,
-		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
-		pkgIndex:      make(map[*Pkg]int),
-		typIndex:      make(map[*Type]int),
-		posInfoFormat: true,
-		trace:         trace,
-	}
-
-	// write version info
-	// The version string must start with "version %d" where %d is the version
-	// number. Additional debugging information may follow after a blank; that
-	// text is ignored by the importer.
-	p.rawStringln(fmt.Sprintf("version %d", exportVersion))
-	var debug string
-	if debugFormat {
-		debug = "debug"
-	}
-	p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
-	p.bool(trackAllTypes)
-	p.bool(p.posInfoFormat)
-
-	// --- generic export data ---
-
-	// populate type map with predeclared "known" types
-	predecl := predeclared()
-	for index, typ := range predecl {
-		p.typIndex[typ] = index
-	}
-	if len(p.typIndex) != len(predecl) {
-		Fatalf("exporter: duplicate entries in type map?")
-	}
-
-	// write package data
-	if localpkg.Path != "" {
-		Fatalf("exporter: local package path not empty: %q", localpkg.Path)
-	}
-	p.pkg(localpkg)
-	if p.trace {
-		p.tracef("\n")
-	}
-
-	// export objects
-	//
-	// First, export all exported (package-level) objects; i.e., all objects
-	// in the current exportlist. These objects represent all information
-	// required to import this package and type-check against it; i.e., this
-	// is the platform-independent export data. The format is generic in the
-	// sense that different compilers can use the same representation.
-	//
-	// During this first phase, more objects may be added to the exportlist
-	// (due to inlined function bodies and their dependencies). Export those
-	// objects in a second phase. That data is platform-specific as it depends
-	// on the inlining decisions of the compiler and the representation of the
-	// inlined function bodies.
-
-	// remember initial exportlist length
-	var numglobals = len(exportlist)
-
-	// Phase 1: Export objects in _current_ exportlist; exported objects at
-	//          package level.
-	// Use range since we want to ignore objects added to exportlist during
-	// this phase.
-	objcount := 0
-	for _, n := range exportlist {
-		sym := n.Sym
-
-		if sym.Flags&SymExported != 0 {
-			continue
-		}
-		sym.Flags |= SymExported
-
-		// TODO(gri) Closures have dots in their names;
-		// e.g., TestFloatZeroValue.func1 in math/big tests.
-		if strings.Contains(sym.Name, ".") {
-			Fatalf("exporter: unexpected symbol: %v", sym)
-		}
-
-		// TODO(gri) Should we do this check?
-		// if sym.Flags&SymExport == 0 {
-		// 	continue
-		// }
-
-		if sym.Def == nil {
-			Fatalf("exporter: unknown export symbol: %v", sym)
-		}
-
-		// TODO(gri) Optimization: Probably worthwhile collecting
-		// long runs of constants and export them "in bulk" (saving
-		// tags and types, and making import faster).
-
-		if p.trace {
-			p.tracef("\n")
-		}
-		p.obj(sym)
-		objcount++
-	}
-
-	// indicate end of list
-	if p.trace {
-		p.tracef("\n")
-	}
-	p.tag(endTag)
-
-	// for self-verification only (redundant)
-	p.int(objcount)
-
-	// --- compiler-specific export data ---
-
-	if p.trace {
-		p.tracef("\n--- compiler-specific export data ---\n[ ")
-		if p.indent != 0 {
-			Fatalf("exporter: incorrect indentation")
-		}
-	}
-
-	// write compiler-specific flags
-	if p.trace {
-		p.tracef("\n")
-	}
-
-	// Phase 2: Export objects added to exportlist during phase 1.
-	// Don't use range since exportlist may grow during this phase
-	// and we want to export all remaining objects.
-	objcount = 0
-	for i := numglobals; exportInlined && i < len(exportlist); i++ {
-		n := exportlist[i]
-		sym := n.Sym
-
-		// TODO(gri) The rest of this loop body is identical with
-		// the loop body above. Leave alone for now since there
-		// are different optimization opportunities, but factor
-		// eventually.
-
-		if sym.Flags&SymExported != 0 {
-			continue
-		}
-		sym.Flags |= SymExported
-
-		// TODO(gri) Closures have dots in their names;
-		// e.g., TestFloatZeroValue.func1 in math/big tests.
-		if strings.Contains(sym.Name, ".") {
-			Fatalf("exporter: unexpected symbol: %v", sym)
-		}
-
-		// TODO(gri) Should we do this check?
-		// if sym.Flags&SymExport == 0 {
-		// 	continue
-		// }
-
-		if sym.Def == nil {
-			Fatalf("exporter: unknown export symbol: %v", sym)
-		}
-
-		// TODO(gri) Optimization: Probably worthwhile collecting
-		// long runs of constants and export them "in bulk" (saving
-		// tags and types, and making import faster).
-
-		if p.trace {
-			p.tracef("\n")
-		}
-
-		if sym.Flags&SymAlias != 0 {
-			Fatalf("exporter: unexpected alias %v in inlined function body", sym)
-		}
-
-		p.obj(sym)
-		objcount++
-	}
-
-	// indicate end of list
-	if p.trace {
-		p.tracef("\n")
-	}
-	p.tag(endTag)
-
-	// for self-verification only (redundant)
-	p.int(objcount)
-
-	// --- inlined function bodies ---
-
-	if p.trace {
-		p.tracef("\n--- inlined function bodies ---\n")
-		if p.indent != 0 {
-			Fatalf("exporter: incorrect indentation")
-		}
-	}
-
-	// write inlineable function bodies
-	objcount = 0
-	for i, f := range p.funcList {
-		if f != nil {
-			// function has inlineable body:
-			// write index and body
-			if p.trace {
-				p.tracef("\n----\nfunc { %#v }\n", f.Inl)
-			}
-			p.int(i)
-			p.stmtList(f.Inl)
-			if p.trace {
-				p.tracef("\n")
-			}
-			objcount++
-		}
-	}
-
-	// indicate end of list
-	if p.trace {
-		p.tracef("\n")
-	}
-	p.int(-1) // invalid index terminates list
-
-	// for self-verification only (redundant)
-	p.int(objcount)
-
-	if p.trace {
-		p.tracef("\n--- end ---\n")
-	}
-
-	// --- end of export data ---
-
-	return p.written
-}
-
-func (p *exporter) pkg(pkg *Pkg) {
-	if pkg == nil {
-		Fatalf("exporter: unexpected nil pkg")
-	}
-
-	// if we saw the package before, write its index (>= 0)
-	if i, ok := p.pkgIndex[pkg]; ok {
-		p.index('P', i)
-		return
-	}
-
-	// otherwise, remember the package, write the package tag (< 0) and package data
-	if p.trace {
-		p.tracef("P%d = { ", len(p.pkgIndex))
-		defer p.tracef("} ")
-	}
-	p.pkgIndex[pkg] = len(p.pkgIndex)
-
-	p.tag(packageTag)
-	p.string(pkg.Name)
-	p.string(pkg.Path)
-}
-
-func unidealType(typ *Type, val Val) *Type {
-	// Untyped (ideal) constants get their own type. This decouples
-	// the constant type from the encoding of the constant value.
-	if typ == nil || typ.IsUntyped() {
-		typ = untype(val.Ctype())
-	}
-	return typ
-}
-
-func (p *exporter) obj(sym *Sym) {
-	if sym.Flags&SymAlias != 0 {
-		p.tag(aliasTag)
-		p.pos(nil) // TODO(gri) fix position information
-		// Aliases can only be exported from the package that
-		// declares them (aliases to aliases are resolved to the
-		// original object, and so are uses of aliases in inlined
-		// exported function bodies). Thus, we only need the alias
-		// name without package qualification.
-		if sym.Pkg != localpkg {
-			Fatalf("exporter: export of non-local alias: %v", sym)
-		}
-		p.string(sym.Name)
-		orig := sym.Def.Sym
-		if orig.Flags&SymAlias != 0 {
-			Fatalf("exporter: original object %v marked as alias", sym)
-		}
-		p.qualifiedName(orig)
-		return
-	}
-
-	if sym != sym.Def.Sym {
-		Fatalf("exporter: exported object %v is not original %v", sym, sym.Def.Sym)
-	}
-
-	// Exported objects may be from different packages because they
-	// may be re-exported via an exported alias or as dependencies in
-	// exported inlined function bodies. Thus, exported object names
-	// must be fully qualified.
-	//
-	// (This can only happen for aliased objects or during phase 2
-	// (exportInlined enabled) of object export. Unaliased objects
-	// exported in phase 1 (compiler-independent objects) are by
-	// definition only the objects from the current package and not
-	// pulled in via inlined function bodies. In that case the package
-	// qualifier is not needed. Possible space optimization.)
-
-	n := sym.Def
-	switch n.Op {
-	case OLITERAL:
-		// constant
-		// TODO(gri) determine if we need the typecheck call here
-		n = typecheck(n, Erv)
-		if n == nil || n.Op != OLITERAL {
-			Fatalf("exporter: dumpexportconst: oconst nil: %v", sym)
-		}
-
-		p.tag(constTag)
-		p.pos(n)
-		// TODO(gri) In inlined functions, constants are used directly
-		// so they should never occur as re-exported objects. We may
-		// not need the qualified name here. See also comment above.
-		// Possible space optimization.
-		p.qualifiedName(sym)
-		p.typ(unidealType(n.Type, n.Val()))
-		p.value(n.Val())
-
-	case OTYPE:
-		// named type
-		t := n.Type
-		if t.Etype == TFORW {
-			Fatalf("exporter: export of incomplete type %v", sym)
-		}
-
-		p.tag(typeTag)
-		p.typ(t)
-
-	case ONAME:
-		// variable or function
-		n = typecheck(n, Erv|Ecall)
-		if n == nil || n.Type == nil {
-			Fatalf("exporter: variable/function exported but not defined: %v", sym)
-		}
-
-		if n.Type.Etype == TFUNC && n.Class == PFUNC {
-			// function
-			p.tag(funcTag)
-			p.pos(n)
-			p.qualifiedName(sym)
-
-			sig := sym.Def.Type
-			inlineable := isInlineable(sym.Def)
-
-			p.paramList(sig.Params(), inlineable)
-			p.paramList(sig.Results(), inlineable)
-
-			var f *Func
-			if inlineable {
-				f = sym.Def.Func
-				// TODO(gri) re-examine reexportdeplist:
-				// Because we can trivially export types
-				// in-place, we don't need to collect types
-				// inside function bodies in the exportlist.
-				// With an adjusted reexportdeplist used only
-				// by the binary exporter, we can also avoid
-				// the global exportlist.
-				reexportdeplist(f.Inl)
-			}
-			p.funcList = append(p.funcList, f)
-		} else {
-			// variable
-			p.tag(varTag)
-			p.pos(n)
-			p.qualifiedName(sym)
-			p.typ(sym.Def.Type)
-		}
-
-	default:
-		Fatalf("exporter: unexpected export symbol: %v %v", n.Op, sym)
-	}
-}
-
-func (p *exporter) pos(n *Node) {
-	if !p.posInfoFormat {
-		return
-	}
-
-	file, line := fileLine(n)
-	if file == p.prevFile {
-		// common case: write line delta
-		// delta == 0 means different file or no line change
-		delta := line - p.prevLine
-		p.int(delta)
-		if delta == 0 {
-			p.int(-1) // -1 means no file change
-		}
-	} else {
-		// different file
-		p.int(0)
-		// Encode filename as length of common prefix with previous
-		// filename, followed by (possibly empty) suffix. Filenames
-		// frequently share path prefixes, so this can save a lot
-		// of space and make export data size less dependent on file
-		// path length. The suffix is unlikely to be empty because
-		// file names tend to end in ".go".
-		n := commonPrefixLen(p.prevFile, file)
-		p.int(n)           // n >= 0
-		p.string(file[n:]) // write suffix only
-		p.prevFile = file
-		p.int(line)
-	}
-	p.prevLine = line
-}
-
-func fileLine(n *Node) (file string, line int) {
-	if n != nil {
-		file, line = Ctxt.LineHist.AbsFileLine(int(n.Lineno))
-	}
-	return
-}
-
-func commonPrefixLen(a, b string) int {
-	if len(a) > len(b) {
-		a, b = b, a
-	}
-	// len(a) <= len(b)
-	i := 0
-	for i < len(a) && a[i] == b[i] {
-		i++
-	}
-	return i
-}
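
A worked example of the file-change case described in pos: only the length of the shared prefix and the differing suffix of the new filename are written (the paths and expected output below are illustrative):

// Tiny demonstration of the shared-prefix filename encoding sketched above.
package main

import "fmt"

func commonPrefixLen(a, b string) int {
	if len(a) > len(b) {
		a, b = b, a
	}
	i := 0
	for i < len(a) && a[i] == b[i] {
		i++
	}
	return i
}

func main() {
	prev := "src/cmd/compile/internal/gc/bexport.go"
	next := "src/cmd/compile/internal/gc/bimport.go"
	n := commonPrefixLen(prev, next)
	fmt.Println(n, next[n:]) // prints: 29 import.go (only the differing suffix is written)
}
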
-
-func isInlineable(n *Node) bool {
-	if exportInlined && n != nil && n.Func != nil && n.Func.Inl.Len() != 0 {
-		// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
-		// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
-		if Debug['l'] < 2 {
-			typecheckinl(n)
-		}
-		return true
-	}
-	return false
-}
-
-var errorInterface *Type // lazily initialized
-
-func (p *exporter) typ(t *Type) {
-	if t == nil {
-		Fatalf("exporter: nil type")
-	}
-
-	// Possible optimization: Anonymous pointer types *T where
-	// T is a named type are common. We could canonicalize all
-	// such types *T to a single type PT = *T. This would lead
-	// to at most one *T entry in typIndex, and all future *T's
-	// would be encoded as the respective index directly. Would
-	// save 1 byte (pointerTag) per *T and reduce the typIndex
-	// size (at the cost of a canonicalization map). We can do
-	// this later, without encoding format change.
-
-	// if we saw the type before, write its index (>= 0)
-	if i, ok := p.typIndex[t]; ok {
-		p.index('T', i)
-		return
-	}
-
-	// otherwise, remember the type, write the type tag (< 0) and type data
-	if trackAllTypes {
-		if p.trace {
-			p.tracef("T%d = {>\n", len(p.typIndex))
-			defer p.tracef("<\n} ")
-		}
-		p.typIndex[t] = len(p.typIndex)
-	}
-
-	// pick off named types
-	if tsym := t.Sym; tsym != nil {
-		if !trackAllTypes {
-			// if we don't track all types, track named types now
-			p.typIndex[t] = len(p.typIndex)
-		}
-
-		// Predeclared types should have been found in the type map.
-		if t.Orig == t {
-			Fatalf("exporter: predeclared type missing from type map?")
-		}
-
-		n := typenod(t)
-		if n.Type != t {
-			Fatalf("exporter: named type definition incorrectly set up")
-		}
-
-		p.tag(namedTag)
-		p.pos(n)
-		p.qualifiedName(tsym)
-
-		// write underlying type
-		orig := t.Orig
-		if orig == errortype {
-			// The error type is the only predeclared type which has
-			// a composite underlying type. When we encode that type,
-			// make sure to encode the underlying interface rather than
-			// the named type again. See also the comment in universe.go
-			// regarding the errortype and issue #15920.
-			if errorInterface == nil {
-				errorInterface = makeErrorInterface()
-			}
-			orig = errorInterface
-		}
-		p.typ(orig)
-
-		// interfaces don't have associated methods
-		if t.Orig.IsInterface() {
-			return
-		}
-
-		// sort methods for reproducible export format
-		// TODO(gri) Determine if they are already sorted
-		// in which case we can drop this step.
-		var methods []*Field
-		for _, m := range t.Methods().Slice() {
-			methods = append(methods, m)
-		}
-		sort.Sort(methodbyname(methods))
-		p.int(len(methods))
-
-		if p.trace && len(methods) > 0 {
-			p.tracef("associated methods {>")
-		}
-
-		for _, m := range methods {
-			if p.trace {
-				p.tracef("\n")
-			}
-			if strings.Contains(m.Sym.Name, ".") {
-				Fatalf("invalid symbol name: %s (%v)", m.Sym.Name, m.Sym)
-			}
-
-			p.pos(m.Nname)
-			p.fieldSym(m.Sym, false)
-
-			sig := m.Type
-			mfn := sig.Nname()
-			inlineable := isInlineable(mfn)
-
-			p.paramList(sig.Recvs(), inlineable)
-			p.paramList(sig.Params(), inlineable)
-			p.paramList(sig.Results(), inlineable)
-			p.bool(m.Nointerface) // record go:nointerface pragma value (see also #16243)
-
-			var f *Func
-			if inlineable {
-				f = mfn.Func
-				reexportdeplist(mfn.Func.Inl)
-			}
-			p.funcList = append(p.funcList, f)
-		}
-
-		if p.trace && len(methods) > 0 {
-			p.tracef("<\n} ")
-		}
-
-		return
-	}
-
-	// otherwise we have a type literal
-	switch t.Etype {
-	case TARRAY:
-		if t.isDDDArray() {
-			Fatalf("array bounds should be known at export time: %v", t)
-		}
-		p.tag(arrayTag)
-		p.int64(t.NumElem())
-		p.typ(t.Elem())
-
-	case TSLICE:
-		p.tag(sliceTag)
-		p.typ(t.Elem())
-
-	case TDDDFIELD:
-		// see p.param use of TDDDFIELD
-		p.tag(dddTag)
-		p.typ(t.DDDField())
-
-	case TSTRUCT:
-		p.tag(structTag)
-		p.fieldList(t)
-
-	case TPTR32, TPTR64: // could use Tptr but these are constants
-		p.tag(pointerTag)
-		p.typ(t.Elem())
-
-	case TFUNC:
-		p.tag(signatureTag)
-		p.paramList(t.Params(), false)
-		p.paramList(t.Results(), false)
-
-	case TINTER:
-		p.tag(interfaceTag)
-		// gc doesn't separate between embedded interfaces
-		// and methods declared explicitly with an interface
-		p.int(0) // no embedded interfaces
-
-		// Because the compiler flattens interfaces containing
-		// embedded interfaces, it is possible to create interface
-		// types that recur through an unnamed type.
-		// If trackAllTypes is disabled, such recursion is not
-		// detected, leading to a stack overflow during export
-		// (issue #16369).
-		// As a crude work-around we terminate deep recursion
-		// through interface types with an empty interface and
-		// report an error.
-		// This will catch endless recursion, but is unlikely
-		// to trigger for valid, deeply nested types given the
-		// high threshold.
-		// It would be ok to continue without reporting an error
-		// since the export format is valid. But a subsequent
-		// import would import an incorrect type. The textual
-		// exporter does not report an error but importing the
-		// resulting package will lead to a syntax error during
-		// import.
-		// TODO(gri) remove this once we have a permanent fix
-		// for the issue.
-		if p.nesting > 100 {
-			p.int(0) // 0 methods to indicate empty interface
-			yyerrorl(t.Lineno, "cannot export unnamed recursive interface")
-			break
-		}
-
-		p.nesting++
-		p.methodList(t)
-		p.nesting--
-
-	case TMAP:
-		p.tag(mapTag)
-		p.typ(t.Key())
-		p.typ(t.Val())
-
-	case TCHAN:
-		p.tag(chanTag)
-		p.int(int(t.ChanDir()))
-		p.typ(t.Elem())
-
-	default:
-		Fatalf("exporter: unexpected type: %v (Etype = %d)", t, t.Etype)
-	}
-}
-
-func (p *exporter) qualifiedName(sym *Sym) {
-	p.string(sym.Name)
-	p.pkg(sym.Pkg)
-}
-
-func (p *exporter) fieldList(t *Type) {
-	if p.trace && t.NumFields() > 0 {
-		p.tracef("fields {>")
-		defer p.tracef("<\n} ")
-	}
-
-	p.int(t.NumFields())
-	for _, f := range t.Fields().Slice() {
-		if p.trace {
-			p.tracef("\n")
-		}
-		p.field(f)
-	}
-}
-
-func (p *exporter) field(f *Field) {
-	p.pos(f.Nname)
-	p.fieldName(f)
-	p.typ(f.Type)
-	p.string(f.Note)
-}
-
-func (p *exporter) methodList(t *Type) {
-	if p.trace && t.NumFields() > 0 {
-		p.tracef("methods {>")
-		defer p.tracef("<\n} ")
-	}
-
-	p.int(t.NumFields())
-	for _, m := range t.Fields().Slice() {
-		if p.trace {
-			p.tracef("\n")
-		}
-		p.method(m)
-	}
-}
-
-func (p *exporter) method(m *Field) {
-	p.pos(m.Nname)
-	p.fieldName(m)
-	p.paramList(m.Type.Params(), false)
-	p.paramList(m.Type.Results(), false)
-}
-
-// fieldName is like qualifiedName but it doesn't record the package for exported names.
-func (p *exporter) fieldName(t *Field) {
-	name := t.Sym.Name
-	if t.Embedded != 0 {
-		name = "" // anonymous field
-		if bname := basetypeName(t.Type); bname != "" && !exportname(bname) {
-			// anonymous field with unexported base type name
-			name = "?" // unexported name to force export of package
-		}
-	}
-	p.string(name)
-	if name != "" && !exportname(name) {
-		p.pkg(t.Sym.Pkg)
-	}
-}
-
-func basetypeName(t *Type) string {
-	s := t.Sym
-	if s == nil && t.IsPtr() {
-		s = t.Elem().Sym // deref
-	}
-	// s should exist, but be conservative
-	if s != nil {
-		return s.Name
-	}
-	return ""
-}
-
-func (p *exporter) paramList(params *Type, numbered bool) {
-	if !params.IsFuncArgStruct() {
-		Fatalf("exporter: parameter list expected")
-	}
-
-	// use negative length to indicate unnamed parameters
-	// (look at the first parameter only since either all
-	// names are present or all are absent)
-	//
-	// TODO(gri) If we don't have an exported function
-	// body, the parameter names are irrelevant for the
-	// compiler (though they may be of use for other tools).
-	// Possible space optimization.
-	n := params.NumFields()
-	if n > 0 && parName(params.Field(0), numbered) == "" {
-		n = -n
-	}
-	p.int(n)
-	for _, q := range params.Fields().Slice() {
-		p.param(q, n, numbered)
-	}
-}
-
-func (p *exporter) param(q *Field, n int, numbered bool) {
-	t := q.Type
-	if q.Isddd {
-		// create a fake type to encode ... just for the p.typ call
-		t = typDDDField(t.Elem())
-	}
-	p.typ(t)
-	if n > 0 {
-		name := parName(q, numbered)
-		if name == "" {
-			// Sometimes we see an empty name even for n > 0.
-			// This appears to happen for interface methods
-			// with _ (blank) parameter names. Make sure we
-			// have a proper name and package so we don't crash
-			// during import (see also issue #15470).
-			// (parName uses "" instead of "?" as in fmt.go)
-			// TODO(gri) review parameter name encoding
-			name = "_"
-		}
-		p.string(name)
-		if name != "_" {
-			// Because of (re-)exported inlined functions
-			// the importpkg may not be the package to which this
-			// function (and thus its parameter) belongs. We need to
-			// supply the parameter package here. We need the package
-			// when the function is inlined so we can properly resolve
-			// the name. The _ (blank) parameter cannot be accessed, so
-			// we don't need to export a package.
-			//
-			// TODO(gri) This is compiler-specific. Try using importpkg
-			// here and then update the symbols if we find an inlined
-			// body only. Otherwise, the parameter name is ignored and
-			// the package doesn't matter. This would remove an int
-			// (likely 1 byte) for each named parameter.
-			p.pkg(q.Sym.Pkg)
-		}
-	}
-	// TODO(gri) This is compiler-specific (escape info).
-	// Move into compiler-specific section eventually?
-	// (Not having escape info causes tests to fail, e.g. runtime GCInfoTest)
-	p.string(q.Note)
-}
-
-func parName(f *Field, numbered bool) string {
-	s := f.Sym
-	if s == nil {
-		return ""
-	}
-
-	// Take the name from the original, in case we substituted it with ~r%d or ~b%d.
-	// ~r%d is a (formerly) unnamed result.
-	if f.Nname != nil {
-		if f.Nname.Orig != nil {
-			s = f.Nname.Orig.Sym
-			if s != nil && s.Name[0] == '~' {
-				if s.Name[1] == 'r' { // originally an unnamed result
-					return "" // s = nil
-				} else if s.Name[1] == 'b' { // originally the blank identifier _
-					return "_" // belongs to localpkg
-				}
-			}
-		} else {
-			return "" // s = nil
-		}
-	}
-
-	if s == nil {
-		return ""
-	}
-
-	// print symbol with Vargen number or not as desired
-	name := s.Name
-	if strings.Contains(name, ".") {
-		Fatalf("invalid symbol name: %s", name)
-	}
-
-	// Functions that can be inlined use numbered parameters so we can distinguish them
-	// from other names in their context after inlining (i.e., the parameter numbering
-	// is a form of parameter rewriting). See issue 4326 for an example and test case.
-	if forceObjFileStability || numbered {
-		if !strings.Contains(name, "·") && f.Nname != nil && f.Nname.Name != nil && f.Nname.Name.Vargen > 0 {
-			name = fmt.Sprintf("%s·%d", name, f.Nname.Name.Vargen) // append Vargen
-		}
-	} else {
-		if i := strings.Index(name, "·"); i > 0 {
-			name = name[:i] // cut off Vargen
-		}
-	}
-	return name
-}
-
-func (p *exporter) value(x Val) {
-	if p.trace {
-		p.tracef("= ")
-	}
-
-	switch x := x.U.(type) {
-	case bool:
-		tag := falseTag
-		if x {
-			tag = trueTag
-		}
-		p.tag(tag)
-
-	case *Mpint:
-		if minintval[TINT64].Cmp(x) <= 0 && x.Cmp(maxintval[TINT64]) <= 0 {
-			// common case: x fits into an int64 - use compact encoding
-			p.tag(int64Tag)
-			p.int64(x.Int64())
-			return
-		}
-		// uncommon case: large x - use float encoding
-		// (powers of 2 will be encoded efficiently with exponent)
-		f := newMpflt()
-		f.SetInt(x)
-		p.tag(floatTag)
-		p.float(f)
-
-	case *Mpflt:
-		p.tag(floatTag)
-		p.float(x)
-
-	case *Mpcplx:
-		p.tag(complexTag)
-		p.float(&x.Real)
-		p.float(&x.Imag)
-
-	case string:
-		p.tag(stringTag)
-		p.string(x)
-
-	case *NilVal:
-		// not a constant but used in exported function bodies
-		p.tag(nilTag)
-
-	default:
-		Fatalf("exporter: unexpected value %v (%T)", x, x)
-	}
-}
-
-func (p *exporter) float(x *Mpflt) {
-	// extract sign (there is no -0)
-	f := &x.Val
-	sign := f.Sign()
-	if sign == 0 {
-		// x == 0
-		p.int(0)
-		return
-	}
-	// x != 0
-
-	// extract exponent such that 0.5 <= m < 1.0
-	var m big.Float
-	exp := f.MantExp(&m)
-
-	// extract mantissa as *big.Int
-	// - set exponent large enough so mant satisfies mant.IsInt()
-	// - get *big.Int from mant
-	m.SetMantExp(&m, int(m.MinPrec()))
-	mant, acc := m.Int(nil)
-	if acc != big.Exact {
-		Fatalf("exporter: internal error")
-	}
-
-	p.int(sign)
-	p.int(exp)
-	p.string(string(mant.Bytes()))
-}
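
A worked example of the sign/exponent/mantissa split performed by float, using only math/big; for 6.25 the emitted pieces are sign 1, exponent 3, and the mantissa bytes of the integer 25:

// Worked example of the encoding steps above (a sketch, not the exporter itself).
package main

import (
	"fmt"
	"math/big"
)

func main() {
	f := big.NewFloat(6.25) // 6.25 == 0.78125 * 2^3

	sign := f.Sign() // 1

	var m big.Float
	exp := f.MantExp(&m) // exp == 3, m == 0.78125

	// Shift the mantissa left until it is an exact integer.
	m.SetMantExp(&m, int(m.MinPrec())) // 0.78125 * 2^5 == 25
	mant, acc := m.Int(nil)

	fmt.Println(sign, exp, mant, acc, mant.Bytes()) // 1 3 25 Exact [25]
}
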
-
-// ----------------------------------------------------------------------------
-// Inlined function bodies
-
-// Approach: More or less closely follow what fmt.go is doing for FExp mode
-// but instead of emitting the information textually, emit the node tree in
-// binary form.
-
-// TODO(gri) Improve tracing output. The current format is difficult to read.
-
-// stmtList may emit more (or fewer) than len(list) nodes.
-func (p *exporter) stmtList(list Nodes) {
-	if p.trace {
-		if list.Len() == 0 {
-			p.tracef("{}")
-		} else {
-			p.tracef("{>")
-			defer p.tracef("<\n}")
-		}
-	}
-
-	for _, n := range list.Slice() {
-		if p.trace {
-			p.tracef("\n")
-		}
-		// TODO inlining produces expressions with ninits. we can't export these yet.
-		// (from fmt.go:1461ff)
-		if opprec[n.Op] < 0 {
-			p.stmt(n)
-		} else {
-			p.expr(n)
-		}
-	}
-
-	p.op(OEND)
-}
-
-func (p *exporter) exprList(list Nodes) {
-	if p.trace {
-		if list.Len() == 0 {
-			p.tracef("{}")
-		} else {
-			p.tracef("{>")
-			defer p.tracef("<\n}")
-		}
-	}
-
-	for _, n := range list.Slice() {
-		if p.trace {
-			p.tracef("\n")
-		}
-		p.expr(n)
-	}
-
-	p.op(OEND)
-}
-
-func (p *exporter) elemList(list Nodes) {
-	if p.trace {
-		p.tracef("[ ")
-	}
-	p.int(list.Len())
-	if p.trace {
-		if list.Len() == 0 {
-			p.tracef("] {}")
-		} else {
-			p.tracef("] {>")
-			defer p.tracef("<\n}")
-		}
-	}
-
-	for _, n := range list.Slice() {
-		if p.trace {
-			p.tracef("\n")
-		}
-		p.fieldSym(n.Sym, false)
-		p.expr(n.Left)
-	}
-}
-
-func (p *exporter) expr(n *Node) {
-	if p.trace {
-		p.tracef("( ")
-		defer p.tracef(") ")
-	}
-
-	// from nodefmt (fmt.go)
-	//
-	// nodefmt reverts nodes back to their original - we don't need to do
-	// it because we are not bound to produce valid Go syntax when exporting
-	//
-	// if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
-	// 	n = n.Orig
-	// }
-
-	// from exprfmt (fmt.go)
-	for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) {
-		n = n.Left
-	}
-
-	switch op := n.Op; op {
-	// expressions
-	// (somewhat closely following the structure of exprfmt in fmt.go)
-	case OPAREN:
-		p.expr(n.Left) // unparen
-
-	// case ODDDARG:
-	//	unimplemented - handled by default case
-
-	case OLITERAL:
-		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
-			p.expr(n.Orig)
-			break
-		}
-		p.op(OLITERAL)
-		p.typ(unidealType(n.Type, n.Val()))
-		p.value(n.Val())
-
-	case ONAME:
-		// Special case: name used as local variable in export.
-		// _ becomes ~b%d internally; print as _ for export
-		if n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
-			p.op(ONAME)
-			p.string("_") // inlined and customized version of p.sym(n)
-			break
-		}
-
-		if n.Sym != nil && !isblank(n) && n.Name.Vargen > 0 {
-			p.op(ONAME)
-			p.sym(n)
-			break
-		}
-
-		// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
-		// but for export, this should be rendered as (*pkg.T).meth.
-		// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
-		if n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
-			p.op(OXDOT)
-			p.expr(n.Left) // n.Left.Op == OTYPE
-			p.fieldSym(n.Right.Sym, true)
-			break
-		}
-
-		p.op(ONAME)
-		p.sym(n)
-
-	// case OPACK, ONONAME:
-	// 	should have been resolved by typechecking - handled by default case
-
-	case OTYPE:
-		p.op(OTYPE)
-		if p.bool(n.Type == nil) {
-			p.sym(n)
-		} else {
-			p.typ(n.Type)
-		}
-
-	// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
-	// 	should have been resolved by typechecking - handled by default case
-
-	// case OCLOSURE:
-	//	unimplemented - handled by default case
-
-	// case OCOMPLIT:
-	// 	should have been resolved by typechecking - handled by default case
-
-	case OPTRLIT:
-		p.op(OPTRLIT)
-		p.expr(n.Left)
-		p.bool(n.Implicit)
-
-	case OSTRUCTLIT:
-		p.op(OSTRUCTLIT)
-		p.typ(n.Type)
-		p.elemList(n.List) // special handling of field names
-
-	case OARRAYLIT, OSLICELIT, OMAPLIT:
-		p.op(OCOMPLIT)
-		p.typ(n.Type)
-		p.exprList(n.List)
-
-	case OKEY:
-		p.op(OKEY)
-		p.exprsOrNil(n.Left, n.Right)
-
-	// case OSTRUCTKEY:
-	//	unreachable - handled in case OSTRUCTLIT by elemList
-
-	// case OCALLPART:
-	//	unimplemented - handled by default case
-
-	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
-		p.op(OXDOT)
-		p.expr(n.Left)
-		p.fieldSym(n.Sym, true)
-
-	case ODOTTYPE, ODOTTYPE2:
-		p.op(ODOTTYPE)
-		p.expr(n.Left)
-		if p.bool(n.Right != nil) {
-			p.expr(n.Right)
-		} else {
-			p.typ(n.Type)
-		}
-
-	case OINDEX, OINDEXMAP:
-		p.op(OINDEX)
-		p.expr(n.Left)
-		p.expr(n.Right)
-
-	case OSLICE, OSLICESTR, OSLICEARR:
-		p.op(OSLICE)
-		p.expr(n.Left)
-		low, high, _ := n.SliceBounds()
-		p.exprsOrNil(low, high)
-
-	case OSLICE3, OSLICE3ARR:
-		p.op(OSLICE3)
-		p.expr(n.Left)
-		low, high, max := n.SliceBounds()
-		p.exprsOrNil(low, high)
-		p.expr(max)
-
-	case OCOPY, OCOMPLEX:
-		// treated like other builtin calls (see e.g., OREAL)
-		p.op(op)
-		p.expr(n.Left)
-		p.expr(n.Right)
-		p.op(OEND)
-
-	case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR:
-		p.op(OCONV)
-		p.typ(n.Type)
-		if n.Left != nil {
-			p.expr(n.Left)
-			p.op(OEND)
-		} else {
-			p.exprList(n.List) // emits terminating OEND
-		}
-
-	case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
-		p.op(op)
-		if n.Left != nil {
-			p.expr(n.Left)
-			p.op(OEND)
-		} else {
-			p.exprList(n.List) // emits terminating OEND
-		}
-		// only append() calls may contain '...' arguments
-		if op == OAPPEND {
-			p.bool(n.Isddd)
-		} else if n.Isddd {
-			Fatalf("exporter: unexpected '...' with %s call", opnames[op])
-		}
-
-	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
-		p.op(OCALL)
-		p.expr(n.Left)
-		p.exprList(n.List)
-		p.bool(n.Isddd)
-
-	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
-		p.op(op) // must keep separate from OMAKE for importer
-		p.typ(n.Type)
-		switch {
-		default:
-			// empty list
-			p.op(OEND)
-		case n.List.Len() != 0: // pre-typecheck
-			p.exprList(n.List) // emits terminating OEND
-		case n.Right != nil:
-			p.expr(n.Left)
-			p.expr(n.Right)
-			p.op(OEND)
-		case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()):
-			p.expr(n.Left)
-			p.op(OEND)
-		}
-
-	// unary expressions
-	case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV:
-		p.op(op)
-		p.expr(n.Left)
-
-	// binary expressions
-	case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
-		OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
-		p.op(op)
-		p.expr(n.Left)
-		p.expr(n.Right)
-
-	case OADDSTR:
-		p.op(OADDSTR)
-		p.exprList(n.List)
-
-	case OCMPSTR, OCMPIFACE:
-		p.op(Op(n.Etype))
-		p.expr(n.Left)
-		p.expr(n.Right)
-
-	case ODCLCONST:
-		// if exporting, DCLCONST should just be removed as its usage
-		// has already been replaced with literals
-		// TODO(gri) these should not be exported in the first place
-		// TODO(gri) why is this considered an expression in fmt.go?
-		p.op(ODCLCONST)
-
-	default:
-		Fatalf("cannot export %v (%d) node\n"+
-			"==> please file an issue and assign to gri@\n", n.Op, int(n.Op))
-	}
-}
-
-// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
-// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
-func (p *exporter) stmt(n *Node) {
-	if p.trace {
-		p.tracef("( ")
-		defer p.tracef(") ")
-	}
-
-	if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
-		if p.trace {
-			p.tracef("( /* Ninits */ ")
-		}
-
-		// can't use stmtList here since we don't want the final OEND
-		for _, n := range n.Ninit.Slice() {
-			p.stmt(n)
-		}
-
-		if p.trace {
-			p.tracef(") ")
-		}
-	}
-
-	switch op := n.Op; op {
-	case ODCL:
-		p.op(ODCL)
-		p.sym(n.Left)
-		p.typ(n.Left.Type)
-
-	// case ODCLFIELD:
-	//	unimplemented - handled by default case
-
-	case OAS, OASWB:
-		// Don't export "v = <N>" initializing statements, hope they're always
-		// preceded by the DCL which will be re-parsed and typecheck to reproduce
-		// the "v = <N>" again.
-		if n.Right != nil {
-			p.op(OAS)
-			p.expr(n.Left)
-			p.expr(n.Right)
-		}
-
-	case OASOP:
-		p.op(OASOP)
-		p.int(int(n.Etype))
-		p.expr(n.Left)
-		if p.bool(!n.Implicit) {
-			p.expr(n.Right)
-		}
-
-	case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-		p.op(OAS2)
-		p.exprList(n.List)
-		p.exprList(n.Rlist)
-
-	case ORETURN:
-		p.op(ORETURN)
-		p.exprList(n.List)
-
-	// case ORETJMP:
-	// 	unreachable - generated by compiler for trampoline routines
-
-	case OPROC, ODEFER:
-		p.op(op)
-		p.expr(n.Left)
-
-	case OIF:
-		p.op(OIF)
-		p.stmtList(n.Ninit)
-		p.expr(n.Left)
-		p.stmtList(n.Nbody)
-		p.stmtList(n.Rlist)
-
-	case OFOR:
-		p.op(OFOR)
-		p.stmtList(n.Ninit)
-		p.exprsOrNil(n.Left, n.Right)
-		p.stmtList(n.Nbody)
-
-	case ORANGE:
-		p.op(ORANGE)
-		p.stmtList(n.List)
-		p.expr(n.Right)
-		p.stmtList(n.Nbody)
-
-	case OSELECT, OSWITCH:
-		p.op(op)
-		p.stmtList(n.Ninit)
-		p.exprsOrNil(n.Left, nil)
-		p.stmtList(n.List)
-
-	case OCASE, OXCASE:
-		p.op(OXCASE)
-		p.stmtList(n.List)
-		p.stmtList(n.Nbody)
-
-	case OFALL, OXFALL:
-		p.op(OXFALL)
-
-	case OBREAK, OCONTINUE:
-		p.op(op)
-		p.exprsOrNil(n.Left, nil)
-
-	case OEMPTY:
-		// nothing to emit
-
-	case OGOTO, OLABEL:
-		p.op(op)
-		p.expr(n.Left)
-
-	default:
-		Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
-	}
-}
-
-func (p *exporter) exprsOrNil(a, b *Node) {
-	ab := 0
-	if a != nil {
-		ab |= 1
-	}
-	if b != nil {
-		ab |= 2
-	}
-	p.int(ab)
-	if ab&1 != 0 {
-		p.expr(a)
-	}
-	if ab&2 != 0 {
-		p.expr(b)
-	}
-}
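
The two-bit presence mask written by exprsOrNil: bit 0 says the first expression follows, bit 1 the second. For example, the OSLICE case above passes (low, high), so b[i:] writes 1, b[:j] writes 2, and b[i:j] writes 3. A minimal restatement:

// Tiny illustration of the presence mask used by exprsOrNil.
package main

import "fmt"

func mask(a, b bool) int {
	ab := 0
	if a {
		ab |= 1
	}
	if b {
		ab |= 2
	}
	return ab
}

func main() {
	fmt.Println(mask(true, false), mask(false, true), mask(true, true)) // 1 2 3
}
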
-
-func (p *exporter) fieldSym(s *Sym, short bool) {
-	name := s.Name
-
-	// remove leading "type." in method names ("(T).m" -> "m")
-	if short {
-		if i := strings.LastIndex(name, "."); i >= 0 {
-			name = name[i+1:]
-		}
-	}
-
-	// we should never see a _ (blank) here - these are accessible ("read") fields
-	// TODO(gri) can we assert this with an explicit check?
-	p.string(name)
-	if !exportname(name) {
-		p.pkg(s.Pkg)
-	}
-}
-
-// sym must encode the _ (blank) identifier as a single string "_" since
-// encoding for some nodes is based on this assumption (e.g. ONAME nodes).
-func (p *exporter) sym(n *Node) {
-	s := n.Sym
-	if s.Pkg != nil {
-		if len(s.Name) > 0 && s.Name[0] == '.' {
-			Fatalf("exporter: exporting synthetic symbol %s", s.Name)
-		}
-	}
-
-	if p.trace {
-		p.tracef("{ SYM ")
-		defer p.tracef("} ")
-	}
-
-	name := s.Name
-
-	// remove leading "type." in method names ("(T).m" -> "m")
-	if i := strings.LastIndex(name, "."); i >= 0 {
-		name = name[i+1:]
-	}
-
-	if strings.Contains(name, "·") && n.Name.Vargen > 0 {
-		Fatalf("exporter: unexpected · in symbol name")
-	}
-
-	if i := n.Name.Vargen; i > 0 {
-		name = fmt.Sprintf("%s·%d", name, i)
-	}
-
-	p.string(name)
-	if name != "_" {
-		p.pkg(s.Pkg)
-	}
-}
-
-func (p *exporter) bool(b bool) bool {
-	if p.trace {
-		p.tracef("[")
-		defer p.tracef("= %v] ", b)
-	}
-
-	x := 0
-	if b {
-		x = 1
-	}
-	p.int(x)
-	return b
-}
-
-func (p *exporter) op(op Op) {
-	if p.trace {
-		p.tracef("[")
-		defer p.tracef("= %v] ", op)
-	}
-
-	p.int(int(op))
-}
-
-// ----------------------------------------------------------------------------
-// Low-level encoders
-
-func (p *exporter) index(marker byte, index int) {
-	if index < 0 {
-		Fatalf("exporter: invalid index < 0")
-	}
-	if debugFormat {
-		p.marker('t')
-	}
-	if p.trace {
-		p.tracef("%c%d ", marker, index)
-	}
-	p.rawInt64(int64(index))
-}
-
-func (p *exporter) tag(tag int) {
-	if tag >= 0 {
-		Fatalf("exporter: invalid tag >= 0")
-	}
-	if debugFormat {
-		p.marker('t')
-	}
-	if p.trace {
-		p.tracef("%s ", tagString[-tag])
-	}
-	p.rawInt64(int64(tag))
-}
-
-func (p *exporter) int(x int) {
-	p.int64(int64(x))
-}
-
-func (p *exporter) int64(x int64) {
-	if debugFormat {
-		p.marker('i')
-	}
-	if p.trace {
-		p.tracef("%d ", x)
-	}
-	p.rawInt64(x)
-}
-
-func (p *exporter) string(s string) {
-	if debugFormat {
-		p.marker('s')
-	}
-	if p.trace {
-		p.tracef("%q ", s)
-	}
-	// if we saw the string before, write its index (>= 0)
-	// (the empty string is mapped to 0)
-	if i, ok := p.strIndex[s]; ok {
-		p.rawInt64(int64(i))
-		return
-	}
-	// otherwise, remember string and write its negative length and bytes
-	p.strIndex[s] = len(p.strIndex)
-	p.rawInt64(-int64(len(s)))
-	for i := 0; i < len(s); i++ {
-		p.rawByte(s[i])
-	}
-}
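
A hedged sketch of the string-table behavior above; strWriter is illustrative, and plain values stand in for the varint stream. The first occurrence of a string writes its negative length and bytes, later occurrences collapse to the index assigned on first use, and the empty string is pre-assigned index 0:

// Hypothetical sketch of string canonicalization as described above.
package main

import "fmt"

type strWriter struct {
	strIndex map[string]int
	out      []interface{} // int64 for index/length, string for the raw bytes
}

func (w *strWriter) string(s string) {
	if i, ok := w.strIndex[s]; ok {
		w.out = append(w.out, int64(i)) // seen before: index >= 0
		return
	}
	w.strIndex[s] = len(w.strIndex)
	w.out = append(w.out, -int64(len(s)), s) // first time: -len, then bytes
}

func main() {
	w := &strWriter{strIndex: map[string]int{"": 0}} // "" is index 0
	w.string("fmt")
	w.string("fmt")
	w.string("")
	fmt.Println(w.out) // [-3 fmt 1 0]
}
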
-
-// marker emits a marker byte and position information which makes
-// it easy for a reader to detect if it is "out of sync". Used only
-// if debugFormat is set.
-func (p *exporter) marker(m byte) {
-	p.rawByte(m)
-	// Uncomment this for help tracking down the location
-	// of an incorrect marker when running in debugFormat.
-	// if p.trace {
-	// 	p.tracef("#%d ", p.written)
-	// }
-	p.rawInt64(int64(p.written))
-}
-
-// rawInt64 should only be used by low-level encoders.
-func (p *exporter) rawInt64(x int64) {
-	var tmp [binary.MaxVarintLen64]byte
-	n := binary.PutVarint(tmp[:], x)
-	for i := 0; i < n; i++ {
-		p.rawByte(tmp[i])
-	}
-}
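
rawInt64 delegates to encoding/binary's signed (zig-zag) varint; a quick look at the byte cost for a few typical values such as tags and small indices:

// Small demonstration of the varint encoding used by rawInt64.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	for _, x := range []int64{0, 1, -1, -3, 300} {
		var tmp [binary.MaxVarintLen64]byte
		n := binary.PutVarint(tmp[:], x)
		fmt.Printf("%4d -> % x\n", x, tmp[:n])
	}
	// Small magnitudes (tags, indices, short lengths) fit in a single byte.
}
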
-
-// rawStringln should only be used to emit the initial version string.
-func (p *exporter) rawStringln(s string) {
-	for i := 0; i < len(s); i++ {
-		p.rawByte(s[i])
-	}
-	p.rawByte('\n')
-}
-
-// rawByte is the bottleneck interface to write to p.out.
-// rawByte escapes b as follows (any encoding that
-// hides '$' would do):
-//
-//	'$'  => '|' 'S'
-//	'|'  => '|' '|'
-//
-// Necessary so other tools can find the end of the
-// export data by searching for "$$".
-// rawByte should only be used by low-level encoders.
-func (p *exporter) rawByte(b byte) {
-	switch b {
-	case '$':
-		// write '$' as '|' 'S'
-		b = 'S'
-		fallthrough
-	case '|':
-		// write '|' as '|' '|'
-		p.out.WriteByte('|')
-		p.written++
-	}
-	p.out.WriteByte(b)
-	p.written++
-}
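
A standalone sketch of the '$'/'|' escaping described above (escape is a hypothetical helper, not the compiler's code); after escaping, no bare '$' remains in the payload, so the "$$" terminator can be found by a simple search:

// Sketch of the escaping rules documented above.
package main

import (
	"bytes"
	"fmt"
)

func escape(in []byte) []byte {
	var out bytes.Buffer
	for _, b := range in {
		switch b {
		case '$':
			out.WriteString("|S")
		case '|':
			out.WriteString("||")
		default:
			out.WriteByte(b)
		}
	}
	return out.Bytes()
}

func main() {
	enc := escape([]byte("a$b|c"))
	fmt.Printf("%q\n", enc) // "a|Sb||c": no bare '$' remains in the payload
}
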
-
-// tracef is like fmt.Printf but it rewrites the format string
-// to take care of indentation.
-func (p *exporter) tracef(format string, args ...interface{}) {
-	if strings.ContainsAny(format, "<>\n") {
-		var buf bytes.Buffer
-		for i := 0; i < len(format); i++ {
-			// no need to deal with runes
-			ch := format[i]
-			switch ch {
-			case '>':
-				p.indent++
-				continue
-			case '<':
-				p.indent--
-				continue
-			}
-			buf.WriteByte(ch)
-			if ch == '\n' {
-				for j := p.indent; j > 0; j-- {
-					buf.WriteString(".  ")
-				}
-			}
-		}
-		format = buf.String()
-	}
-	fmt.Printf(format, args...)
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
-	// Objects
-	packageTag = -(iota + 1)
-	constTag
-	typeTag
-	varTag
-	funcTag
-	endTag
-
-	// Types
-	namedTag
-	arrayTag
-	sliceTag
-	dddTag
-	structTag
-	pointerTag
-	signatureTag
-	interfaceTag
-	mapTag
-	chanTag
-
-	// Values
-	falseTag
-	trueTag
-	int64Tag
-	floatTag
-	fractionTag // not used by gc
-	complexTag
-	stringTag
-	nilTag
-	unknownTag // not used by gc (only appears in packages with errors)
-
-	// Aliases
-	aliasTag
-)
-
-// Debugging support.
-// (tagString is only used when tracing is enabled)
-var tagString = [...]string{
-	// Objects
-	-packageTag: "package",
-	-constTag:   "const",
-	-typeTag:    "type",
-	-varTag:     "var",
-	-funcTag:    "func",
-	-endTag:     "end",
-
-	// Types
-	-namedTag:     "named type",
-	-arrayTag:     "array",
-	-sliceTag:     "slice",
-	-dddTag:       "ddd",
-	-structTag:    "struct",
-	-pointerTag:   "pointer",
-	-signatureTag: "signature",
-	-interfaceTag: "interface",
-	-mapTag:       "map",
-	-chanTag:      "chan",
-
-	// Values
-	-falseTag:    "false",
-	-trueTag:     "true",
-	-int64Tag:    "int64",
-	-floatTag:    "float",
-	-fractionTag: "fraction",
-	-complexTag:  "complex",
-	-stringTag:   "string",
-	-nilTag:      "nil",
-	-unknownTag:  "unknown",
-
-	// Aliases
-	-aliasTag: "alias",
-}
-
-// untype returns the "pseudo" untyped type for a Ctype (import/export use only).
-// (we can't use a pre-initialized array because we must be sure all types are
-// set up)
-func untype(ctype Ctype) *Type {
-	switch ctype {
-	case CTINT:
-		return idealint
-	case CTRUNE:
-		return idealrune
-	case CTFLT:
-		return idealfloat
-	case CTCPLX:
-		return idealcomplex
-	case CTSTR:
-		return idealstring
-	case CTBOOL:
-		return idealbool
-	case CTNIL:
-		return Types[TNIL]
-	}
-	Fatalf("exporter: unknown Ctype")
-	return nil
-}
-
-var predecl []*Type // initialized lazily
-
-func predeclared() []*Type {
-	if predecl == nil {
-		// initialize lazily to be sure that all
-		// elements have been initialized before
-		predecl = []*Type{
-			// basic types
-			Types[TBOOL],
-			Types[TINT],
-			Types[TINT8],
-			Types[TINT16],
-			Types[TINT32],
-			Types[TINT64],
-			Types[TUINT],
-			Types[TUINT8],
-			Types[TUINT16],
-			Types[TUINT32],
-			Types[TUINT64],
-			Types[TUINTPTR],
-			Types[TFLOAT32],
-			Types[TFLOAT64],
-			Types[TCOMPLEX64],
-			Types[TCOMPLEX128],
-			Types[TSTRING],
-
-			// aliases
-			bytetype,
-			runetype,
-
-			// error
-			errortype,
-
-			// untyped types
-			untype(CTBOOL),
-			untype(CTINT),
-			untype(CTRUNE),
-			untype(CTFLT),
-			untype(CTCPLX),
-			untype(CTSTR),
-			untype(CTNIL),
-
-			// package unsafe
-			Types[TUNSAFEPTR],
-
-			// invalid type (package contains errors)
-			Types[Txxx],
-
-			// any type, for builtin export data
-			Types[TANY],
-		}
-	}
-	return predecl
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bimport.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bimport.go
deleted file mode 100644
index a4cf074..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bimport.go
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/bimport.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/bimport.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary package import.
-// See bexport.go for the export data format and how
-// to make a format change.
-
-package gc
-
-import (
-	"bufio"
-	"encoding/binary"
-	"fmt"
-	"bootstrap/math/big"
-	"strconv"
-	"strings"
-)
-
-// The overall structure of Import is symmetric to Export: For each
-// export method in bexport.go there is a matching and symmetric method
-// in bimport.go. Changing the export format requires making symmetric
-// changes to bimport.go and bexport.go.
-
-type importer struct {
-	in      *bufio.Reader
-	buf     []byte // reused for reading strings
-	version int    // export format version
-
-	// object lists, in order of deserialization
-	strList       []string
-	pkgList       []*Pkg
-	typList       []*Type
-	funcList      []*Node // nil entry means already declared
-	trackAllTypes bool
-
-	// for delayed type verification
-	cmpList []struct{ pt, t *Type }
-
-	// position encoding
-	posInfoFormat bool
-	prevFile      string
-	prevLine      int
-
-	// debugging support
-	debugFormat bool
-	read        int // bytes read
-}
-
-// Import populates importpkg from the serialized package data.
-func Import(in *bufio.Reader) {
-	p := importer{
-		in:      in,
-		version: -1,           // unknown version
-		strList: []string{""}, // empty string is mapped to 0
-	}
-
-	// read version info
-	var versionstr string
-	if b := p.rawByte(); b == 'c' || b == 'd' {
-		// Go1.7 encoding; first byte encodes low-level
-		// encoding format (compact vs debug).
-		// For backward-compatibility only (avoid problems with
-		// old installed packages). Newly compiled packages use
-		// the extensible format string.
-		// TODO(gri) Remove this support eventually; after Go1.8.
-		if b == 'd' {
-			p.debugFormat = true
-		}
-		p.trackAllTypes = p.rawByte() == 'a'
-		p.posInfoFormat = p.bool()
-		versionstr = p.string()
-		if versionstr == "v1" {
-			p.version = 0
-		}
-	} else {
-		// Go1.8 extensible encoding
-		// read version string and extract version number (ignore anything after the version number)
-		versionstr = p.rawStringln(b)
-		if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
-			if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
-				p.version = v
-			}
-		}
-	}
-
-	// read version specific flags - extend as necessary
-	switch p.version {
-	// case 4:
-	// 	...
-	//	fallthrough
-	case 3, 2, 1:
-		p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
-		p.trackAllTypes = p.bool()
-		p.posInfoFormat = p.bool()
-	case 0:
-		// Go1.7 encoding format - nothing to do here
-	default:
-		formatErrorf("unknown export format version %d (%q)", p.version, versionstr)
-	}
-
-	// --- generic export data ---
-
-	// populate typList with predeclared "known" types
-	p.typList = append(p.typList, predeclared()...)
-
-	// read package data
-	p.pkg()
-
-	// defer some type-checking until all types are read in completely
-	tcok := typecheckok
-	typecheckok = true
-	defercheckwidth()
-
-	// read objects
-
-	// phase 1
-	objcount := 0
-	for {
-		tag := p.tagOrIndex()
-		if tag == endTag {
-			break
-		}
-		p.obj(tag)
-		objcount++
-	}
-
-	// self-verification
-	if count := p.int(); count != objcount {
-		formatErrorf("got %d objects; want %d", objcount, count)
-	}
-
-	// --- compiler-specific export data ---
-
-	// read compiler-specific flags
-
-	// phase 2
-	objcount = 0
-	for {
-		tag := p.tagOrIndex()
-		if tag == endTag {
-			break
-		}
-		p.obj(tag)
-		objcount++
-	}
-
-	// self-verification
-	if count := p.int(); count != objcount {
-		formatErrorf("got %d objects; want %d", objcount, count)
-	}
-
-	// read inlineable function bodies
-	if dclcontext != PEXTERN {
-		formatErrorf("unexpected context %d", dclcontext)
-	}
-
-	objcount = 0
-	for i0 := -1; ; {
-		i := p.int() // index of function with inlineable body
-		if i < 0 {
-			break
-		}
-
-		// don't process the same function twice
-		if i <= i0 {
-			formatErrorf("index not increasing: %d <= %d", i, i0)
-		}
-		i0 = i
-
-		if funcdepth != 0 {
-			formatErrorf("unexpected Funcdepth %d", funcdepth)
-		}
-
-		// Note: In the original code, funchdr and funcbody are called for
-		// all functions (that were not yet imported). Now, we are calling
-		// them only for functions with inlineable bodies. funchdr does
-		// parameter renaming which doesn't matter if we don't have a body.
-
-		if f := p.funcList[i]; f != nil {
-			// function not yet imported - read body and set it
-			funchdr(f)
-			body := p.stmtList()
-			if body == nil {
-				// Make sure empty body is not interpreted as
-				// no inlineable body (see also parser.fnbody)
-				// (not doing so can cause significant performance
-				// degradation due to unnecessary calls to empty
-				// functions).
-				body = []*Node{nod(OEMPTY, nil, nil)}
-			}
-			f.Func.Inl.Set(body)
-			funcbody(f)
-		} else {
-			// function already imported - read body but discard declarations
-			dclcontext = PDISCARD // throw away any declarations
-			p.stmtList()
-			dclcontext = PEXTERN
-		}
-
-		objcount++
-	}
-
-	// self-verification
-	if count := p.int(); count != objcount {
-		formatErrorf("got %d functions; want %d", objcount, count)
-	}
-
-	if dclcontext != PEXTERN {
-		formatErrorf("unexpected context %d", dclcontext)
-	}
-
-	p.verifyTypes()
-
-	// --- end of export data ---
-
-	typecheckok = tcok
-	resumecheckwidth()
-
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-}
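
The Go1.8-style header parsed above is just a "version N" line, optionally followed by extra text that is ignored; parseVersion below is a hypothetical standalone restatement of that parse:

// Sketch of the extensible version-line parse used by Import above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseVersion(line string) int {
	version := -1 // unknown
	if s := strings.SplitN(line, " ", 3); len(s) >= 2 && s[0] == "version" {
		if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
			version = v
		}
	}
	return version
}

func main() {
	fmt.Println(parseVersion("version 3"))              // 3
	fmt.Println(parseVersion("version 4 experimental")) // 4
	fmt.Println(parseVersion("v1"))                     // -1 (old Go1.7-style header)
}
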
-
-func formatErrorf(format string, args ...interface{}) {
-	if debugFormat {
-		Fatalf(format, args...)
-	}
-
-	yyerror("cannot import %q due to version skew - reinstall package (%s)",
-		importpkg.Path, fmt.Sprintf(format, args...))
-	errorexit()
-}
-
-func (p *importer) verifyTypes() {
-	for _, pair := range p.cmpList {
-		pt := pair.pt
-		t := pair.t
-		if !eqtype(pt.Orig, t) {
-			formatErrorf("inconsistent definition for type %v during import\n\t%L (in %q)\n\t%L (in %q)", pt.Sym, pt, pt.Sym.Importdef.Path, t, importpkg.Path)
-		}
-	}
-}
-
-// numImport tracks how often a package with a given name is imported.
-// It is used to provide a better error message (by using the package
-// path to disambiguate) if a package that appears multiple times with
-// the same name appears in an error message.
-var numImport = make(map[string]int)
-
-func (p *importer) pkg() *Pkg {
-	// if the package was seen before, i is its index (>= 0)
-	i := p.tagOrIndex()
-	if i >= 0 {
-		return p.pkgList[i]
-	}
-
-	// otherwise, i is the package tag (< 0)
-	if i != packageTag {
-		formatErrorf("expected package tag, found tag = %d", i)
-	}
-
-	// read package data
-	name := p.string()
-	path := p.string()
-
-	// we should never see an empty package name
-	if name == "" {
-		formatErrorf("empty package name for path %q", path)
-	}
-
-	// we should never see a bad import path
-	if isbadimport(path) {
-		formatErrorf("bad package path %q for package %s", path, name)
-	}
-
-	// an empty path denotes the package we are currently importing;
-	// it must be the first package we see
-	if (path == "") != (len(p.pkgList) == 0) {
-		formatErrorf("package path %q for pkg index %d", path, len(p.pkgList))
-	}
-
-	// add package to pkgList
-	pkg := importpkg
-	if path != "" {
-		pkg = mkpkg(path)
-	}
-	if pkg.Name == "" {
-		pkg.Name = name
-		numImport[name]++
-	} else if pkg.Name != name {
-		yyerror("conflicting package names %s and %s for path %q", pkg.Name, name, path)
-	}
-	if myimportpath != "" && path == myimportpath {
-		yyerror("import %q: package depends on %q (import cycle)", importpkg.Path, path)
-		errorexit()
-	}
-	p.pkgList = append(p.pkgList, pkg)
-
-	return pkg
-}
-
-func idealType(typ *Type) *Type {
-	if typ.IsUntyped() {
-		// canonicalize ideal types
-		typ = Types[TIDEAL]
-	}
-	return typ
-}
-
-func (p *importer) obj(tag int) {
-	switch tag {
-	case constTag:
-		p.pos()
-		sym := p.qualifiedName()
-		typ := p.typ()
-		val := p.value(typ)
-		importconst(sym, idealType(typ), nodlit(val))
-
-	case typeTag:
-		p.typ()
-
-	case varTag:
-		p.pos()
-		sym := p.qualifiedName()
-		typ := p.typ()
-		importvar(sym, typ)
-
-	case funcTag:
-		p.pos()
-		sym := p.qualifiedName()
-		params := p.paramList()
-		result := p.paramList()
-
-		sig := functypefield(nil, params, result)
-		importsym(sym, ONAME)
-		if sym.Def != nil && sym.Def.Op == ONAME {
-			// function was imported before (via another import)
-			if !eqtype(sig, sym.Def.Type) {
-				formatErrorf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, sig)
-			}
-			p.funcList = append(p.funcList, nil)
-			break
-		}
-
-		n := newfuncname(sym)
-		n.Type = sig
-		declare(n, PFUNC)
-		p.funcList = append(p.funcList, n)
-		importlist = append(importlist, n)
-
-		if Debug['E'] > 0 {
-			fmt.Printf("import [%q] func %v \n", importpkg.Path, n)
-			if Debug['m'] > 2 && n.Func.Inl.Len() != 0 {
-				fmt.Printf("inl body: %v\n", n.Func.Inl)
-			}
-		}
-
-	case aliasTag:
-		p.pos()
-		alias := importpkg.Lookup(p.string())
-		orig := p.qualifiedName()
-
-		// Although the protocol allows the alias to precede the original,
-		// this never happens in files produced by gc.
-		alias.Flags |= SymAlias
-		alias.Def = orig.Def
-		importsym(alias, orig.Def.Op)
-
-	default:
-		formatErrorf("unexpected object (tag = %d)", tag)
-	}
-}
-
-func (p *importer) pos() {
-	if !p.posInfoFormat {
-		return
-	}
-
-	file := p.prevFile
-	line := p.prevLine
-	if delta := p.int(); delta != 0 {
-		// line changed
-		line += delta
-	} else if n := p.int(); n >= 0 {
-		// file changed
-		file = p.prevFile[:n] + p.string()
-		p.prevFile = file
-		line = p.int()
-	}
-	p.prevLine = line
-
-	// TODO(gri) register new position
-}
-
-func (p *importer) newtyp(etype EType) *Type {
-	t := typ(etype)
-	if p.trackAllTypes {
-		p.typList = append(p.typList, t)
-	}
-	return t
-}
-
-// importtype declares that pt, an imported named type, has underlying type t.
-func (p *importer) importtype(pt, t *Type) {
-	if pt.Etype == TFORW {
-		n := pt.nod
-		copytype(pt.nod, t)
-		pt.nod = n // unzero nod
-		pt.Sym.Importdef = importpkg
-		pt.Sym.Lastlineno = lineno
-		declare(n, PEXTERN)
-		checkwidth(pt)
-	} else {
-		// pt.Orig and t must be identical.
-		if p.trackAllTypes {
-			// If we track all types, t may not be fully set up yet.
-			// Collect the types and verify identity later.
-			p.cmpList = append(p.cmpList, struct{ pt, t *Type }{pt, t})
-		} else if !eqtype(pt.Orig, t) {
-			yyerror("inconsistent definition for type %v during import\n\t%L (in %q)\n\t%L (in %q)", pt.Sym, pt, pt.Sym.Importdef.Path, t, importpkg.Path)
-		}
-	}
-
-	if Debug['E'] != 0 {
-		fmt.Printf("import type %v %L\n", pt, t)
-	}
-}
-
-func (p *importer) typ() *Type {
-	// if the type was seen before, i is its index (>= 0)
-	i := p.tagOrIndex()
-	if i >= 0 {
-		return p.typList[i]
-	}
-
-	// otherwise, i is the type tag (< 0)
-	var t *Type
-	switch i {
-	case namedTag:
-		p.pos()
-		tsym := p.qualifiedName()
-
-		t = pkgtype(tsym)
-		p.typList = append(p.typList, t)
-
-		// read underlying type
-		t0 := p.typ()
-		p.importtype(t, t0)
-
-		// interfaces don't have associated methods
-		if t0.IsInterface() {
-			break
-		}
-
-		// set correct import context (since p.typ() may be called
-		// while importing the body of an inlined function)
-		savedContext := dclcontext
-		dclcontext = PEXTERN
-
-		// read associated methods
-		for i := p.int(); i > 0; i-- {
-			p.pos()
-			sym := p.fieldSym()
-
-			// during import unexported method names should be in the type's package
-			if !exportname(sym.Name) && sym.Pkg != tsym.Pkg {
-				Fatalf("imported method name %+v in wrong package %s\n", sym, tsym.Pkg.Name)
-			}
-
-			recv := p.paramList() // TODO(gri) do we need a full param list for the receiver?
-			params := p.paramList()
-			result := p.paramList()
-			nointerface := p.bool()
-
-			base := recv[0].Type
-			star := false
-			if base.IsPtr() {
-				base = base.Elem()
-				star = true
-			}
-
-			n := methodname0(sym, star, base.Sym)
-			n.Type = functypefield(recv[0], params, result)
-			checkwidth(n.Type)
-			addmethod(sym, n.Type, false, nointerface)
-			p.funcList = append(p.funcList, n)
-			importlist = append(importlist, n)
-
-			// (comment from parser.go)
-			// inl.C's inlnode on a dotmeth node expects to find the inlineable body as
-			// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
-			// out by typecheck's lookdot as this $$.ttype. So by providing
-			// this back link here we avoid special casing there.
-			n.Type.SetNname(n)
-
-			if Debug['E'] > 0 {
-				fmt.Printf("import [%q] meth %v \n", importpkg.Path, n)
-				if Debug['m'] > 2 && n.Func.Inl.Len() != 0 {
-					fmt.Printf("inl body: %v\n", n.Func.Inl)
-				}
-			}
-		}
-
-		dclcontext = savedContext
-
-	case arrayTag:
-		t = p.newtyp(TARRAY)
-		bound := p.int64()
-		elem := p.typ()
-		t.Extra = &ArrayType{Elem: elem, Bound: bound}
-
-	case sliceTag:
-		t = p.newtyp(TSLICE)
-		elem := p.typ()
-		t.Extra = SliceType{Elem: elem}
-
-	case dddTag:
-		t = p.newtyp(TDDDFIELD)
-		t.Extra = DDDFieldType{T: p.typ()}
-
-	case structTag:
-		t = p.newtyp(TSTRUCT)
-		t.SetFields(p.fieldList())
-		checkwidth(t)
-
-	case pointerTag:
-		t = p.newtyp(Tptr)
-		t.Extra = PtrType{Elem: p.typ()}
-
-	case signatureTag:
-		t = p.newtyp(TFUNC)
-		params := p.paramList()
-		result := p.paramList()
-		functypefield0(t, nil, params, result)
-
-	case interfaceTag:
-		t = p.newtyp(TINTER)
-		if p.int() != 0 {
-			formatErrorf("unexpected embedded interface")
-		}
-		t.SetFields(p.methodList())
-		checkwidth(t)
-
-	case mapTag:
-		t = p.newtyp(TMAP)
-		mt := t.MapType()
-		mt.Key = p.typ()
-		mt.Val = p.typ()
-
-	case chanTag:
-		t = p.newtyp(TCHAN)
-		ct := t.ChanType()
-		ct.Dir = ChanDir(p.int())
-		ct.Elem = p.typ()
-
-	default:
-		formatErrorf("unexpected type (tag = %d)", i)
-	}
-
-	if t == nil {
-		formatErrorf("nil type (type tag = %d)", i)
-	}
-
-	return t
-}
-
-func (p *importer) qualifiedName() *Sym {
-	name := p.string()
-	pkg := p.pkg()
-	return pkg.Lookup(name)
-}
-
-func (p *importer) fieldList() (fields []*Field) {
-	if n := p.int(); n > 0 {
-		fields = make([]*Field, n)
-		for i := range fields {
-			fields[i] = p.field()
-		}
-	}
-	return
-}
-
-func (p *importer) field() *Field {
-	p.pos()
-	sym := p.fieldName()
-	typ := p.typ()
-	note := p.string()
-
-	f := newField()
-	if sym.Name == "" {
-		// anonymous field - typ must be T or *T and T must be a type name
-		s := typ.Sym
-		if s == nil && typ.IsPtr() {
-			s = typ.Elem().Sym // deref
-		}
-		sym = sym.Pkg.Lookup(s.Name)
-		f.Embedded = 1
-	}
-
-	f.Sym = sym
-	f.Nname = newname(sym)
-	f.Type = typ
-	f.Note = note
-
-	return f
-}
-
-func (p *importer) methodList() (methods []*Field) {
-	if n := p.int(); n > 0 {
-		methods = make([]*Field, n)
-		for i := range methods {
-			methods[i] = p.method()
-		}
-	}
-	return
-}
-
-func (p *importer) method() *Field {
-	p.pos()
-	sym := p.fieldName()
-	params := p.paramList()
-	result := p.paramList()
-
-	f := newField()
-	f.Sym = sym
-	f.Nname = newname(sym)
-	f.Type = functypefield(fakethisfield(), params, result)
-	return f
-}
-
-func (p *importer) fieldName() *Sym {
-	name := p.string()
-	if p.version == 0 && name == "_" {
-		// version 0 didn't export a package for _ fields
-		// but used the builtin package instead
-		return builtinpkg.Lookup(name)
-	}
-	pkg := localpkg
-	if name != "" && !exportname(name) {
-		if name == "?" {
-			name = ""
-		}
-		pkg = p.pkg()
-	}
-	return pkg.Lookup(name)
-}
-
-func (p *importer) paramList() []*Field {
-	i := p.int()
-	if i == 0 {
-		return nil
-	}
-	// negative length indicates unnamed parameters
-	named := true
-	if i < 0 {
-		i = -i
-		named = false
-	}
-	// i > 0
-	fs := make([]*Field, i)
-	for i := range fs {
-		fs[i] = p.param(named)
-	}
-	return fs
-}
-
-func (p *importer) param(named bool) *Field {
-	f := newField()
-	f.Type = p.typ()
-	if f.Type.Etype == TDDDFIELD {
-		// TDDDFIELD indicates wrapped ... slice type
-		f.Type = typSlice(f.Type.DDDField())
-		f.Isddd = true
-	}
-
-	if named {
-		name := p.string()
-		if name == "" {
-			formatErrorf("expected named parameter")
-		}
-		// TODO(gri) Supply function/method package rather than
-		// encoding the package for each parameter repeatedly.
-		pkg := localpkg
-		if name != "_" {
-			pkg = p.pkg()
-		}
-		f.Sym = pkg.Lookup(name)
-		f.Nname = newname(f.Sym)
-	}
-
-	// TODO(gri) This is compiler-specific (escape info).
-	// Move into compiler-specific section eventually?
-	f.Note = p.string()
-
-	return f
-}
-
-func (p *importer) value(typ *Type) (x Val) {
-	switch tag := p.tagOrIndex(); tag {
-	case falseTag:
-		x.U = false
-
-	case trueTag:
-		x.U = true
-
-	case int64Tag:
-		u := new(Mpint)
-		u.SetInt64(p.int64())
-		u.Rune = typ == idealrune
-		x.U = u
-
-	case floatTag:
-		f := newMpflt()
-		p.float(f)
-		if typ == idealint || typ.IsInteger() {
-			// uncommon case: large int encoded as float
-			u := new(Mpint)
-			u.SetFloat(f)
-			x.U = u
-			break
-		}
-		x.U = f
-
-	case complexTag:
-		u := new(Mpcplx)
-		p.float(&u.Real)
-		p.float(&u.Imag)
-		x.U = u
-
-	case stringTag:
-		x.U = p.string()
-
-	case unknownTag:
-		formatErrorf("unknown constant (importing package with errors)")
-
-	case nilTag:
-		x.U = new(NilVal)
-
-	default:
-		formatErrorf("unexpected value tag %d", tag)
-	}
-
-	// verify ideal type
-	if typ.IsUntyped() && untype(x.Ctype()) != typ {
-		formatErrorf("value %v and type %v don't match", x, typ)
-	}
-
-	return
-}
-
-func (p *importer) float(x *Mpflt) {
-	sign := p.int()
-	if sign == 0 {
-		x.SetFloat64(0)
-		return
-	}
-
-	exp := p.int()
-	mant := new(big.Int).SetBytes([]byte(p.string()))
-
-	m := x.Val.SetInt(mant)
-	m.SetMantExp(m, exp-mant.BitLen())
-	if sign < 0 {
-		m.Neg(m)
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Inlined function bodies
-
-// Approach: Read nodes and use them to create/declare the same data structures
-// as done originally by the (hidden) parser by closely following the parser's
-// original code. In other words, "parsing" the import data (which happens to
-// be encoded in binary rather than textual form) is the best way at the moment to
-// re-establish the syntax tree's invariants. At some future point we might be
-// able to avoid this round-about way and create the rewritten nodes directly,
-// possibly avoiding a lot of duplicate work (name resolution, type checking).
-//
-// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their
-// unrefined nodes (since this is what the importer uses). The respective case
-// entries are unreachable in the importer.
-
-func (p *importer) stmtList() []*Node {
-	var list []*Node
-	for {
-		n := p.node()
-		if n == nil {
-			break
-		}
-		// OBLOCK nodes may be created when importing ODCL nodes - unpack them
-		if n.Op == OBLOCK {
-			list = append(list, n.List.Slice()...)
-		} else {
-			list = append(list, n)
-		}
-	}
-	return list
-}
-
-func (p *importer) exprList() []*Node {
-	var list []*Node
-	for {
-		n := p.expr()
-		if n == nil {
-			break
-		}
-		list = append(list, n)
-	}
-	return list
-}
-
-func (p *importer) elemList() []*Node {
-	c := p.int()
-	list := make([]*Node, c)
-	for i := range list {
-		s := p.fieldSym()
-		list[i] = nodSym(OSTRUCTKEY, p.expr(), s)
-	}
-	return list
-}
-
-func (p *importer) expr() *Node {
-	n := p.node()
-	if n != nil && n.Op == OBLOCK {
-		Fatalf("unexpected block node: %v", n)
-	}
-	return n
-}
-
-// TODO(gri) split into expr and stmt
-func (p *importer) node() *Node {
-	switch op := p.op(); op {
-	// expressions
-	// case OPAREN:
-	// 	unreachable - unpacked by exporter
-
-	// case ODDDARG:
-	//	unimplemented
-
-	case OLITERAL:
-		typ := p.typ()
-		n := nodlit(p.value(typ))
-		if !typ.IsUntyped() {
-			// Type-checking simplifies unsafe.Pointer(uintptr(c))
-			// to unsafe.Pointer(c) which then cannot be type-checked
-			// again. Re-introduce explicit uintptr(c) conversion.
-			// (issue 16317).
-			if typ.IsUnsafePtr() {
-				conv := nod(OCALL, typenod(Types[TUINTPTR]), nil)
-				conv.List.Set1(n)
-				n = conv
-			}
-			conv := nod(OCALL, typenod(typ), nil)
-			conv.List.Set1(n)
-			n = conv
-		}
-		return n
-
-	case ONAME:
-		return mkname(p.sym())
-
-	// case OPACK, ONONAME:
-	// 	unreachable - should have been resolved by typechecking
-
-	case OTYPE:
-		if p.bool() {
-			return mkname(p.sym())
-		}
-		return typenod(p.typ())
-
-	// case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
-	//      unreachable - should have been resolved by typechecking
-
-	// case OCLOSURE:
-	//	unimplemented
-
-	case OPTRLIT:
-		n := p.expr()
-		if !p.bool() /* !implicit, i.e. '&' operator */ {
-			if n.Op == OCOMPLIT {
-				// Special case for &T{...}: turn into (*T){...}.
-				n.Right = nod(OIND, n.Right, nil)
-				n.Right.Implicit = true
-			} else {
-				n = nod(OADDR, n, nil)
-			}
-		}
-		return n
-
-	case OSTRUCTLIT:
-		n := nod(OCOMPLIT, nil, typenod(p.typ()))
-		n.List.Set(p.elemList()) // special handling of field names
-		return n
-
-	// case OARRAYLIT, OSLICELIT, OMAPLIT:
-	// 	unreachable - mapped to case OCOMPLIT below by exporter
-
-	case OCOMPLIT:
-		n := nod(OCOMPLIT, nil, typenod(p.typ()))
-		n.List.Set(p.exprList())
-		return n
-
-	case OKEY:
-		left, right := p.exprsOrNil()
-		return nod(OKEY, left, right)
-
-	// case OSTRUCTKEY:
-	//	unreachable - handled in case OSTRUCTLIT by elemList
-
-	// case OCALLPART:
-	//	unimplemented
-
-	// case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
-	// 	unreachable - mapped to case OXDOT below by exporter
-
-	case OXDOT:
-		// see parser.new_dotname
-		return nodSym(OXDOT, p.expr(), p.fieldSym())
-
-	// case ODOTTYPE, ODOTTYPE2:
-	// 	unreachable - mapped to case ODOTTYPE below by exporter
-
-	case ODOTTYPE:
-		n := nod(ODOTTYPE, p.expr(), nil)
-		if p.bool() {
-			n.Right = p.expr()
-		} else {
-			n.Right = typenod(p.typ())
-		}
-		return n
-
-	// case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
-	// 	unreachable - mapped to cases below by exporter
-
-	case OINDEX:
-		return nod(op, p.expr(), p.expr())
-
-	case OSLICE, OSLICE3:
-		n := nod(op, p.expr(), nil)
-		low, high := p.exprsOrNil()
-		var max *Node
-		if n.Op.IsSlice3() {
-			max = p.expr()
-		}
-		n.SetSliceBounds(low, high, max)
-		return n
-
-	// case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR:
-	// 	unreachable - mapped to OCONV case below by exporter
-
-	case OCONV:
-		n := nod(OCALL, typenod(p.typ()), nil)
-		n.List.Set(p.exprList())
-		return n
-
-	case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
-		n := builtinCall(op)
-		n.List.Set(p.exprList())
-		if op == OAPPEND {
-			n.Isddd = p.bool()
-		}
-		return n
-
-	// case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
-	// 	unreachable - mapped to OCALL case below by exporter
-
-	case OCALL:
-		n := nod(OCALL, p.expr(), nil)
-		n.List.Set(p.exprList())
-		n.Isddd = p.bool()
-		return n
-
-	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
-		n := builtinCall(OMAKE)
-		n.List.Append(typenod(p.typ()))
-		n.List.Append(p.exprList()...)
-		return n
-
-	// unary expressions
-	case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV:
-		return nod(op, p.expr(), nil)
-
-	// binary expressions
-	case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
-		OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
-		return nod(op, p.expr(), p.expr())
-
-	case OADDSTR:
-		list := p.exprList()
-		x := list[0]
-		for _, y := range list[1:] {
-			x = nod(OADD, x, y)
-		}
-		return x
-
-	// case OCMPSTR, OCMPIFACE:
-	// 	unreachable - mapped to std comparison operators by exporter
-
-	case ODCLCONST:
-		// TODO(gri) these should not be exported in the first place
-		return nod(OEMPTY, nil, nil)
-
-	// --------------------------------------------------------------------
-	// statements
-	case ODCL:
-		if p.version < 2 {
-			// versions 0 and 1 exported a bool here but it
-			// was always false - simply ignore in this case
-			p.bool()
-		}
-		lhs := dclname(p.sym())
-		typ := typenod(p.typ())
-		return liststmt(variter([]*Node{lhs}, typ, nil)) // TODO(gri) avoid list creation
-
-	// case ODCLFIELD:
-	//	unimplemented
-
-	// case OAS, OASWB:
-	// 	unreachable - mapped to OAS case below by exporter
-
-	case OAS:
-		return nod(OAS, p.expr(), p.expr())
-
-	case OASOP:
-		n := nod(OASOP, nil, nil)
-		n.Etype = EType(p.int())
-		n.Left = p.expr()
-		if !p.bool() {
-			n.Right = nodintconst(1)
-			n.Implicit = true
-		} else {
-			n.Right = p.expr()
-		}
-		return n
-
-	// case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-	// 	unreachable - mapped to OAS2 case below by exporter
-
-	case OAS2:
-		n := nod(OAS2, nil, nil)
-		n.List.Set(p.exprList())
-		n.Rlist.Set(p.exprList())
-		return n
-
-	case ORETURN:
-		n := nod(ORETURN, nil, nil)
-		n.List.Set(p.exprList())
-		return n
-
-	// case ORETJMP:
-	// 	unreachable - generated by compiler for trampoline routines (not exported)
-
-	case OPROC, ODEFER:
-		return nod(op, p.expr(), nil)
-
-	case OIF:
-		markdcl()
-		n := nod(OIF, nil, nil)
-		n.Ninit.Set(p.stmtList())
-		n.Left = p.expr()
-		n.Nbody.Set(p.stmtList())
-		n.Rlist.Set(p.stmtList())
-		popdcl()
-		return n
-
-	case OFOR:
-		markdcl()
-		n := nod(OFOR, nil, nil)
-		n.Ninit.Set(p.stmtList())
-		n.Left, n.Right = p.exprsOrNil()
-		n.Nbody.Set(p.stmtList())
-		popdcl()
-		return n
-
-	case ORANGE:
-		markdcl()
-		n := nod(ORANGE, nil, nil)
-		n.List.Set(p.stmtList())
-		n.Right = p.expr()
-		n.Nbody.Set(p.stmtList())
-		popdcl()
-		return n
-
-	case OSELECT, OSWITCH:
-		markdcl()
-		n := nod(op, nil, nil)
-		n.Ninit.Set(p.stmtList())
-		n.Left, _ = p.exprsOrNil()
-		n.List.Set(p.stmtList())
-		popdcl()
-		return n
-
-	// case OCASE, OXCASE:
-	// 	unreachable - mapped to OXCASE case below by exporter
-
-	case OXCASE:
-		markdcl()
-		n := nod(OXCASE, nil, nil)
-		n.Xoffset = int64(block)
-		n.List.Set(p.exprList())
-		// TODO(gri) eventually we must declare variables for type switch
-		// statements (type switch statements are not yet exported)
-		n.Nbody.Set(p.stmtList())
-		popdcl()
-		return n
-
-	// case OFALL:
-	// 	unreachable - mapped to OXFALL case below by exporter
-
-	case OXFALL:
-		n := nod(OXFALL, nil, nil)
-		n.Xoffset = int64(block)
-		return n
-
-	case OBREAK, OCONTINUE:
-		left, _ := p.exprsOrNil()
-		if left != nil {
-			left = newname(left.Sym)
-		}
-		return nod(op, left, nil)
-
-	// case OEMPTY:
-	// 	unreachable - not emitted by exporter
-
-	case OGOTO, OLABEL:
-		n := nod(op, newname(p.expr().Sym), nil)
-		n.Sym = dclstack // context, for goto restrictions
-		return n
-
-	case OEND:
-		return nil
-
-	default:
-		Fatalf("cannot import %v (%d) node\n"+
-			"==> please file an issue and assign to gri@\n", op, int(op))
-		panic("unreachable") // satisfy compiler
-	}
-}
-
-func builtinCall(op Op) *Node {
-	return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
-}
-
-func (p *importer) exprsOrNil() (a, b *Node) {
-	ab := p.int()
-	if ab&1 != 0 {
-		a = p.expr()
-	}
-	if ab&2 != 0 {
-		b = p.expr()
-	}
-	return
-}
-
-func (p *importer) fieldSym() *Sym {
-	name := p.string()
-	pkg := localpkg
-	if !exportname(name) {
-		pkg = p.pkg()
-	}
-	return pkg.Lookup(name)
-}
-
-func (p *importer) sym() *Sym {
-	name := p.string()
-	pkg := localpkg
-	if name != "_" {
-		pkg = p.pkg()
-	}
-	return pkg.Lookup(name)
-}
-
-func (p *importer) bool() bool {
-	return p.int() != 0
-}
-
-func (p *importer) op() Op {
-	return Op(p.int())
-}
-
-// ----------------------------------------------------------------------------
-// Low-level decoders
-
-func (p *importer) tagOrIndex() int {
-	if p.debugFormat {
-		p.marker('t')
-	}
-
-	return int(p.rawInt64())
-}
-
-func (p *importer) int() int {
-	x := p.int64()
-	if int64(int(x)) != x {
-		formatErrorf("exported integer too large")
-	}
-	return int(x)
-}
-
-func (p *importer) int64() int64 {
-	if p.debugFormat {
-		p.marker('i')
-	}
-
-	return p.rawInt64()
-}
-
-func (p *importer) string() string {
-	if p.debugFormat {
-		p.marker('s')
-	}
-	// if the string was seen before, i is its index (>= 0)
-	// (the empty string is at index 0)
-	i := p.rawInt64()
-	if i >= 0 {
-		return p.strList[i]
-	}
-	// otherwise, i is the negative string length (< 0)
-	if n := int(-i); n <= cap(p.buf) {
-		p.buf = p.buf[:n]
-	} else {
-		p.buf = make([]byte, n)
-	}
-	for i := range p.buf {
-		p.buf[i] = p.rawByte()
-	}
-	s := string(p.buf)
-	p.strList = append(p.strList, s)
-	return s
-}
-
-func (p *importer) marker(want byte) {
-	if got := p.rawByte(); got != want {
-		formatErrorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
-	}
-
-	pos := p.read
-	if n := int(p.rawInt64()); n != pos {
-		formatErrorf("incorrect position: got %d; want %d", n, pos)
-	}
-}
-
-// rawInt64 should only be used by low-level decoders.
-func (p *importer) rawInt64() int64 {
-	i, err := binary.ReadVarint(p)
-	if err != nil {
-		formatErrorf("read error: %v", err)
-	}
-	return i
-}
-
-// rawStringln should only be used to read the initial version string.
-func (p *importer) rawStringln(b byte) string {
-	p.buf = p.buf[:0]
-	for b != '\n' {
-		p.buf = append(p.buf, b)
-		b = p.rawByte()
-	}
-	return string(p.buf)
-}
-
-// needed for binary.ReadVarint in rawInt64
-func (p *importer) ReadByte() (byte, error) {
-	return p.rawByte(), nil
-}
-
-// rawByte is the bottleneck interface for reading from p.in.
-// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
-// rawByte should only be used by low-level decoders.
-func (p *importer) rawByte() byte {
-	c, err := p.in.ReadByte()
-	p.read++
-	if err != nil {
-		formatErrorf("read error: %v", err)
-	}
-	if c == '|' {
-		c, err = p.in.ReadByte()
-		p.read++
-		if err != nil {
-			formatErrorf("read error: %v", err)
-		}
-		switch c {
-		case 'S':
-			c = '$'
-		case '|':
-			// nothing to do
-		default:
-			formatErrorf("unexpected escape sequence in export data")
-		}
-	}
-	return c
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/builtin.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/builtin.go
deleted file mode 100644
index ba96818..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/builtin.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/builtin.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/builtin.go:1
-// AUTO-GENERATED by mkbuiltin.go; DO NOT EDIT
-
-package gc
-
-var runtimeDecls = [...]struct {
-	name string
-	tag  int
-	typ  int
-}{
-	{"newobject", funcTag, 4},
-	{"panicindex", funcTag, 5},
-	{"panicslice", funcTag, 5},
-	{"panicdivide", funcTag, 5},
-	{"throwinit", funcTag, 5},
-	{"panicwrap", funcTag, 7},
-	{"gopanic", funcTag, 9},
-	{"gorecover", funcTag, 12},
-	{"goschedguarded", funcTag, 5},
-	{"printbool", funcTag, 14},
-	{"printfloat", funcTag, 16},
-	{"printint", funcTag, 18},
-	{"printhex", funcTag, 20},
-	{"printuint", funcTag, 20},
-	{"printcomplex", funcTag, 22},
-	{"printstring", funcTag, 23},
-	{"printpointer", funcTag, 24},
-	{"printiface", funcTag, 24},
-	{"printeface", funcTag, 24},
-	{"printslice", funcTag, 24},
-	{"printnl", funcTag, 5},
-	{"printsp", funcTag, 5},
-	{"printlock", funcTag, 5},
-	{"printunlock", funcTag, 5},
-	{"concatstring2", funcTag, 27},
-	{"concatstring3", funcTag, 28},
-	{"concatstring4", funcTag, 29},
-	{"concatstring5", funcTag, 30},
-	{"concatstrings", funcTag, 32},
-	{"cmpstring", funcTag, 34},
-	{"eqstring", funcTag, 35},
-	{"intstring", funcTag, 38},
-	{"slicebytetostring", funcTag, 40},
-	{"slicebytetostringtmp", funcTag, 41},
-	{"slicerunetostring", funcTag, 44},
-	{"stringtoslicebyte", funcTag, 45},
-	{"stringtoslicerune", funcTag, 48},
-	{"decoderune", funcTag, 49},
-	{"slicecopy", funcTag, 51},
-	{"slicestringcopy", funcTag, 52},
-	{"convI2I", funcTag, 53},
-	{"convT2E", funcTag, 54},
-	{"convT2I", funcTag, 54},
-	{"assertE2I", funcTag, 53},
-	{"assertE2I2", funcTag, 55},
-	{"assertI2I", funcTag, 53},
-	{"assertI2I2", funcTag, 55},
-	{"panicdottype", funcTag, 56},
-	{"panicnildottype", funcTag, 57},
-	{"ifaceeq", funcTag, 58},
-	{"efaceeq", funcTag, 58},
-	{"makemap", funcTag, 60},
-	{"mapaccess1", funcTag, 61},
-	{"mapaccess1_fast32", funcTag, 62},
-	{"mapaccess1_fast64", funcTag, 62},
-	{"mapaccess1_faststr", funcTag, 62},
-	{"mapaccess1_fat", funcTag, 63},
-	{"mapaccess2", funcTag, 64},
-	{"mapaccess2_fast32", funcTag, 65},
-	{"mapaccess2_fast64", funcTag, 65},
-	{"mapaccess2_faststr", funcTag, 65},
-	{"mapaccess2_fat", funcTag, 66},
-	{"mapassign", funcTag, 61},
-	{"mapiterinit", funcTag, 67},
-	{"mapdelete", funcTag, 67},
-	{"mapiternext", funcTag, 68},
-	{"makechan", funcTag, 70},
-	{"chanrecv1", funcTag, 72},
-	{"chanrecv2", funcTag, 73},
-	{"chansend1", funcTag, 75},
-	{"closechan", funcTag, 24},
-	{"writeBarrier", varTag, 76},
-	{"writebarrierptr", funcTag, 77},
-	{"typedmemmove", funcTag, 78},
-	{"typedmemclr", funcTag, 79},
-	{"typedslicecopy", funcTag, 80},
-	{"selectnbsend", funcTag, 81},
-	{"selectnbrecv", funcTag, 82},
-	{"selectnbrecv2", funcTag, 84},
-	{"newselect", funcTag, 85},
-	{"selectsend", funcTag, 81},
-	{"selectrecv", funcTag, 73},
-	{"selectrecv2", funcTag, 86},
-	{"selectdefault", funcTag, 87},
-	{"selectgo", funcTag, 57},
-	{"block", funcTag, 5},
-	{"makeslice", funcTag, 89},
-	{"makeslice64", funcTag, 90},
-	{"growslice", funcTag, 91},
-	{"memmove", funcTag, 92},
-	{"memclrNoHeapPointers", funcTag, 93},
-	{"memclrHasPointers", funcTag, 93},
-	{"memequal", funcTag, 94},
-	{"memequal8", funcTag, 95},
-	{"memequal16", funcTag, 95},
-	{"memequal32", funcTag, 95},
-	{"memequal64", funcTag, 95},
-	{"memequal128", funcTag, 95},
-	{"int64div", funcTag, 96},
-	{"uint64div", funcTag, 97},
-	{"int64mod", funcTag, 96},
-	{"uint64mod", funcTag, 97},
-	{"float64toint64", funcTag, 98},
-	{"float64touint64", funcTag, 99},
-	{"float64touint32", funcTag, 101},
-	{"int64tofloat64", funcTag, 102},
-	{"uint64tofloat64", funcTag, 103},
-	{"uint32tofloat64", funcTag, 104},
-	{"complex128div", funcTag, 105},
-	{"racefuncenter", funcTag, 106},
-	{"racefuncexit", funcTag, 5},
-	{"raceread", funcTag, 106},
-	{"racewrite", funcTag, 106},
-	{"racereadrange", funcTag, 107},
-	{"racewriterange", funcTag, 107},
-	{"msanread", funcTag, 107},
-	{"msanwrite", funcTag, 107},
-}
-
-func runtimeTypes() []*Type {
-	var typs [108]*Type
-	typs[0] = bytetype
-	typs[1] = typPtr(typs[0])
-	typs[2] = Types[TANY]
-	typs[3] = typPtr(typs[2])
-	typs[4] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[3])})
-	typs[5] = functype(nil, nil, nil)
-	typs[6] = Types[TSTRING]
-	typs[7] = functype(nil, []*Node{anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6])}, nil)
-	typs[8] = Types[TINTER]
-	typs[9] = functype(nil, []*Node{anonfield(typs[8])}, nil)
-	typs[10] = Types[TINT32]
-	typs[11] = typPtr(typs[10])
-	typs[12] = functype(nil, []*Node{anonfield(typs[11])}, []*Node{anonfield(typs[8])})
-	typs[13] = Types[TBOOL]
-	typs[14] = functype(nil, []*Node{anonfield(typs[13])}, nil)
-	typs[15] = Types[TFLOAT64]
-	typs[16] = functype(nil, []*Node{anonfield(typs[15])}, nil)
-	typs[17] = Types[TINT64]
-	typs[18] = functype(nil, []*Node{anonfield(typs[17])}, nil)
-	typs[19] = Types[TUINT64]
-	typs[20] = functype(nil, []*Node{anonfield(typs[19])}, nil)
-	typs[21] = Types[TCOMPLEX128]
-	typs[22] = functype(nil, []*Node{anonfield(typs[21])}, nil)
-	typs[23] = functype(nil, []*Node{anonfield(typs[6])}, nil)
-	typs[24] = functype(nil, []*Node{anonfield(typs[2])}, nil)
-	typs[25] = typArray(typs[0], 32)
-	typs[26] = typPtr(typs[25])
-	typs[27] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[6]), anonfield(typs[6])}, []*Node{anonfield(typs[6])})
-	typs[28] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6])}, []*Node{anonfield(typs[6])})
-	typs[29] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6])}, []*Node{anonfield(typs[6])})
-	typs[30] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6]), anonfield(typs[6])}, []*Node{anonfield(typs[6])})
-	typs[31] = typSlice(typs[6])
-	typs[32] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[31])}, []*Node{anonfield(typs[6])})
-	typs[33] = Types[TINT]
-	typs[34] = functype(nil, []*Node{anonfield(typs[6]), anonfield(typs[6])}, []*Node{anonfield(typs[33])})
-	typs[35] = functype(nil, []*Node{anonfield(typs[6]), anonfield(typs[6])}, []*Node{anonfield(typs[13])})
-	typs[36] = typArray(typs[0], 4)
-	typs[37] = typPtr(typs[36])
-	typs[38] = functype(nil, []*Node{anonfield(typs[37]), anonfield(typs[17])}, []*Node{anonfield(typs[6])})
-	typs[39] = typSlice(typs[0])
-	typs[40] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[39])}, []*Node{anonfield(typs[6])})
-	typs[41] = functype(nil, []*Node{anonfield(typs[39])}, []*Node{anonfield(typs[6])})
-	typs[42] = runetype
-	typs[43] = typSlice(typs[42])
-	typs[44] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[43])}, []*Node{anonfield(typs[6])})
-	typs[45] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[6])}, []*Node{anonfield(typs[39])})
-	typs[46] = typArray(typs[42], 32)
-	typs[47] = typPtr(typs[46])
-	typs[48] = functype(nil, []*Node{anonfield(typs[47]), anonfield(typs[6])}, []*Node{anonfield(typs[43])})
-	typs[49] = functype(nil, []*Node{anonfield(typs[6]), anonfield(typs[33])}, []*Node{anonfield(typs[42]), anonfield(typs[33])})
-	typs[50] = Types[TUINTPTR]
-	typs[51] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2]), anonfield(typs[50])}, []*Node{anonfield(typs[33])})
-	typs[52] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[33])})
-	typs[53] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
-	typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
-	typs[55] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[13])})
-	typs[56] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
-	typs[57] = functype(nil, []*Node{anonfield(typs[1])}, nil)
-	typs[58] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[13])})
-	typs[59] = typMap(typs[2], typs[2])
-	typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[59])})
-	typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
-	typs[62] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
-	typs[63] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
-	typs[64] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[13])})
-	typs[65] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[13])})
-	typs[66] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[13])})
-	typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[59]), anonfield(typs[3])}, nil)
-	typs[68] = functype(nil, []*Node{anonfield(typs[3])}, nil)
-	typs[69] = typChan(typs[2], Cboth)
-	typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17])}, []*Node{anonfield(typs[69])})
-	typs[71] = typChan(typs[2], Crecv)
-	typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[71]), anonfield(typs[3])}, nil)
-	typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[71]), anonfield(typs[3])}, []*Node{anonfield(typs[13])})
-	typs[74] = typChan(typs[2], Csend)
-	typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[74]), anonfield(typs[3])}, nil)
-	typs[76] = tostruct([]*Node{namedfield("enabled", typs[13]), namedfield("needed", typs[13]), namedfield("cgo", typs[13])})
-	typs[77] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil)
-	typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
-	typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
-	typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[33])})
-	typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[74]), anonfield(typs[3])}, []*Node{anonfield(typs[13])})
-	typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[71])}, []*Node{anonfield(typs[13])})
-	typs[83] = typPtr(typs[13])
-	typs[84] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[83]), anonfield(typs[71])}, []*Node{anonfield(typs[13])})
-	typs[85] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[10])}, nil)
-	typs[86] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[71]), anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[13])})
-	typs[87] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[13])})
-	typs[88] = typSlice(typs[2])
-	typs[89] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[33]), anonfield(typs[33])}, []*Node{anonfield(typs[88])})
-	typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[88])})
-	typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[88]), anonfield(typs[33])}, []*Node{anonfield(typs[88])})
-	typs[92] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, nil)
-	typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[50])}, nil)
-	typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, []*Node{anonfield(typs[13])})
-	typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[13])})
-	typs[96] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])})
-	typs[97] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
-	typs[98] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[17])})
-	typs[99] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[19])})
-	typs[100] = Types[TUINT32]
-	typs[101] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[100])})
-	typs[102] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[15])})
-	typs[103] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[15])})
-	typs[104] = functype(nil, []*Node{anonfield(typs[100])}, []*Node{anonfield(typs[15])})
-	typs[105] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
-	typs[106] = functype(nil, []*Node{anonfield(typs[50])}, nil)
-	typs[107] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil)
-	return typs[:]
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/builtin_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/builtin_test.go
deleted file mode 100644
index e7170bd..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/builtin_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/builtin_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/builtin_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc_test
-
-import (
-	"bytes"
-	"internal/testenv"
-	"io/ioutil"
-	"os/exec"
-	"testing"
-)
-
-func TestBuiltin(t *testing.T) {
-	testenv.MustHaveGoRun(t)
-
-	old, err := ioutil.ReadFile("builtin.go")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	new, err := exec.Command(testenv.GoToolPath(t), "run", "mkbuiltin.go", "-stdout").Output()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if !bytes.Equal(old, new) {
-		t.Fatal("builtin.go out of date; run mkbuiltin.go")
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bv.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bv.go
deleted file mode 100644
index d468353..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/bv.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/bv.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/bv.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-const (
-	WORDBITS  = 32
-	WORDMASK  = WORDBITS - 1
-	WORDSHIFT = 5
-)
-
-// A bvec is a bit vector.
-type bvec struct {
-	n int32    // number of bits in vector
-	b []uint32 // words holding bits
-}
-
-func bvalloc(n int32) bvec {
-	nword := (n + WORDBITS - 1) / WORDBITS
-	return bvec{n, make([]uint32, nword)}
-}
-
-type bulkBvec struct {
-	words []uint32
-	nbit  int32
-	nword int32
-}
-
-func bvbulkalloc(nbit int32, count int32) bulkBvec {
-	nword := (nbit + WORDBITS - 1) / WORDBITS
-	return bulkBvec{
-		words: make([]uint32, nword*count),
-		nbit:  nbit,
-		nword: nword,
-	}
-}
-
-func (b *bulkBvec) next() bvec {
-	out := bvec{b.nbit, b.words[:b.nword]}
-	b.words = b.words[b.nword:]
-	return out
-}
-
-func (bv1 bvec) Eq(bv2 bvec) bool {
-	if bv1.n != bv2.n {
-		Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
-	}
-	for i, x := range bv1.b {
-		if x != bv2.b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func (dst bvec) Copy(src bvec) {
-	for i, x := range src.b {
-		dst.b[i] = x
-	}
-}
-
-func (bv bvec) Get(i int32) bool {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%WORDBITS))
-	return bv.b[i>>WORDSHIFT]&mask != 0
-}
-
-func (bv bvec) Set(i int32) {
-	if i < 0 || i >= bv.n {
-		Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
-	}
-	mask := uint32(1 << uint(i%WORDBITS))
-	bv.b[i/WORDBITS] |= mask
-}
-
-// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
-// If there is no such index, bvnext returns -1.
-func (bv bvec) Next(i int32) int32 {
-	if i >= bv.n {
-		return -1
-	}
-
-	// Jump i ahead to next word with bits.
-	if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 {
-		i &^= WORDMASK
-		i += WORDBITS
-		for i < bv.n && bv.b[i>>WORDSHIFT] == 0 {
-			i += WORDBITS
-		}
-	}
-
-	if i >= bv.n {
-		return -1
-	}
-
-	// Find 1 bit.
-	w := bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
-
-	for w&1 == 0 {
-		w >>= 1
-		i++
-	}
-
-	return i
-}
-
-func (bv bvec) IsEmpty() bool {
-	for i := int32(0); i < bv.n; i += WORDBITS {
-		if bv.b[i>>WORDSHIFT] != 0 {
-			return false
-		}
-	}
-	return true
-}
-
-func (bv bvec) Not() {
-	i := int32(0)
-	w := int32(0)
-	for ; i < bv.n; i, w = i+WORDBITS, w+1 {
-		bv.b[w] = ^bv.b[w]
-	}
-}
-
-// union
-func (dst bvec) Or(src1, src2 bvec) {
-	for i, x := range src1.b {
-		dst.b[i] = x | src2.b[i]
-	}
-}
-
-// intersection
-func (dst bvec) And(src1, src2 bvec) {
-	for i, x := range src1.b {
-		dst.b[i] = x & src2.b[i]
-	}
-}
-
-// difference
-func (dst bvec) AndNot(src1, src2 bvec) {
-	for i, x := range src1.b {
-		dst.b[i] = x &^ src2.b[i]
-	}
-}
-
-func (bv bvec) String() string {
-	s := make([]byte, 2+bv.n)
-	copy(s, "#*")
-	for i := int32(0); i < bv.n; i++ {
-		ch := byte('0')
-		if bv.Get(i) {
-			ch = '1'
-		}
-		s[2+i] = ch
-	}
-	return string(s)
-}
-
-func (bv bvec) Clear() {
-	for i := range bv.b {
-		bv.b[i] = 0
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/closure.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/closure.go
deleted file mode 100644
index b4f56f9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/closure.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/closure.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/closure.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-)
-
-// function literals aka closures
-func closurehdr(ntype *Node) {
-	n := nod(OCLOSURE, nil, nil)
-	n.Func.Ntype = ntype
-	n.Func.Depth = funcdepth
-	n.Func.Outerfunc = Curfn
-
-	funchdr(n)
-
-	// steal ntype's argument names and
-	// leave a fresh copy in their place.
-	// references to these variables need to
-	// refer to the variables in the external
-	// function declared below; see walkclosure.
-	n.List.Set(ntype.List.Slice())
-
-	n.Rlist.Set(ntype.Rlist.Slice())
-	ntype.List.Set(nil)
-	ntype.Rlist.Set(nil)
-	for _, n1 := range n.List.Slice() {
-		name := n1.Left
-		if name != nil {
-			name = newname(name.Sym)
-		}
-		a := nod(ODCLFIELD, name, n1.Right)
-		a.Isddd = n1.Isddd
-		if name != nil {
-			name.Isddd = a.Isddd
-		}
-		ntype.List.Append(a)
-	}
-	for _, n2 := range n.Rlist.Slice() {
-		name := n2.Left
-		if name != nil {
-			name = newname(name.Sym)
-		}
-		ntype.Rlist.Append(nod(ODCLFIELD, name, n2.Right))
-	}
-}
-
-func closurebody(body []*Node) *Node {
-	if len(body) == 0 {
-		body = []*Node{nod(OEMPTY, nil, nil)}
-	}
-
-	func_ := Curfn
-	func_.Nbody.Set(body)
-	func_.Func.Endlineno = lineno
-	funcbody(func_)
-
-	// closure-specific variables are hanging off the
-	// ordinary ones in the symbol table; see oldname.
-	// unhook them.
-	// make the list of pointers for the closure call.
-	for _, v := range func_.Func.Cvars.Slice() {
-		// Unlink from v1; see comment in syntax.go type Param for these fields.
-		v1 := v.Name.Defn
-		v1.Name.Param.Innermost = v.Name.Param.Outer
-
-		// If the closure usage of v is not dense,
-		// we need to make it dense; now that we're out
-		// of the function in which v appeared,
-		// look up v.Sym in the enclosing function
-		// and keep it around for use in the compiled code.
-		//
-		// That is, suppose we just finished parsing the innermost
-		// closure f4 in this code:
-		//
-		//	func f() {
-		//		v := 1
-		//		func() { // f2
-		//			use(v)
-		//			func() { // f3
-		//				func() { // f4
-		//					use(v)
-		//				}()
-		//			}()
-		//		}()
-		//	}
-		//
-		// At this point v.Outer is f2's v; there is no f3's v.
-		// To construct the closure f4 from within f3,
-		// we need to use f3's v and in this case we need to create f3's v.
-		// We are now in the context of f3, so calling oldname(v.Sym)
-		// obtains f3's v, creating it if necessary (as it is in the example).
-		//
-		// capturevars will decide whether to use v directly or &v.
-		v.Name.Param.Outer = oldname(v.Sym)
-	}
-
-	return func_
-}
-
-func typecheckclosure(func_ *Node, top int) {
-	for _, ln := range func_.Func.Cvars.Slice() {
-		n := ln.Name.Defn
-		if !n.Name.Captured {
-			n.Name.Captured = true
-			if n.Name.Decldepth == 0 {
-				Fatalf("typecheckclosure: var %S does not have decldepth assigned", n)
-			}
-
-			// Ignore assignments to the variable in straightline code
-			// preceding the first capturing by a closure.
-			if n.Name.Decldepth == decldepth {
-				n.Assigned = false
-			}
-		}
-	}
-
-	for _, ln := range func_.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class == PPARAM || ln.Class == PPARAMOUT) {
-			ln.Name.Decldepth = 1
-		}
-	}
-
-	oldfn := Curfn
-	func_.Func.Ntype = typecheck(func_.Func.Ntype, Etype)
-	func_.Type = func_.Func.Ntype.Type
-	func_.Func.Top = top
-
-	// Type check the body now, but only if we're inside a function.
-	// At top level (in a variable initialization: curfn==nil) we're not
-	// ready to type check code yet; we'll check it later, because the
-	// underlying closure function we create is added to xtop.
-	if Curfn != nil && func_.Type != nil {
-		Curfn = func_
-		olddd := decldepth
-		decldepth = 1
-		typecheckslice(func_.Nbody.Slice(), Etop)
-		decldepth = olddd
-		Curfn = oldfn
-	}
-
-	// Create top-level function
-	xtop = append(xtop, makeclosure(func_))
-}
-
-// closurename returns name for OCLOSURE n.
-// It is not as simple as it ought to be, because we typecheck nested closures
-// starting from the innermost one. So when we check the inner closure,
-// we don't yet have a name for the outer closure. This function uses recursion
-// to generate names all the way up if necessary.
-
-var closurename_closgen int
-
-func closurename(n *Node) *Sym {
-	if n.Sym != nil {
-		return n.Sym
-	}
-	gen := 0
-	outer := ""
-	prefix := ""
-	switch {
-	case n.Func.Outerfunc == nil:
-		// Global closure.
-		outer = "glob."
-
-		prefix = "func"
-		closurename_closgen++
-		gen = closurename_closgen
-	case n.Func.Outerfunc.Op == ODCLFUNC:
-		// The outermost closure inside of a named function.
-		outer = n.Func.Outerfunc.Func.Nname.Sym.Name
-
-		prefix = "func"
-
-		// Yes, functions can be named _.
-		// Can't use function closgen in such case,
-		// because it would lead to name clashes.
-		if !isblank(n.Func.Outerfunc.Func.Nname) {
-			n.Func.Outerfunc.Func.Closgen++
-			gen = n.Func.Outerfunc.Func.Closgen
-		} else {
-			closurename_closgen++
-			gen = closurename_closgen
-		}
-	case n.Func.Outerfunc.Op == OCLOSURE:
-		// Nested closure, recurse.
-		outer = closurename(n.Func.Outerfunc).Name
-
-		prefix = ""
-		n.Func.Outerfunc.Func.Closgen++
-		gen = n.Func.Outerfunc.Func.Closgen
-	default:
-		Fatalf("closurename called for %S", n)
-	}
-	n.Sym = lookupf("%s.%s%d", outer, prefix, gen)
-	return n.Sym
-}
-
-func makeclosure(func_ *Node) *Node {
-	// wrap body in external function
-	// that begins by reading closure parameters.
-	xtype := nod(OTFUNC, nil, nil)
-
-	xtype.List.Set(func_.List.Slice())
-	xtype.Rlist.Set(func_.Rlist.Slice())
-
-	// create the function
-	xfunc := nod(ODCLFUNC, nil, nil)
-
-	xfunc.Func.Nname = newfuncname(closurename(func_))
-	xfunc.Func.Nname.Sym.Flags |= SymExported // disable export
-	xfunc.Func.Nname.Name.Param.Ntype = xtype
-	xfunc.Func.Nname.Name.Defn = xfunc
-	declare(xfunc.Func.Nname, PFUNC)
-	xfunc.Func.Nname.Name.Funcdepth = func_.Func.Depth
-	xfunc.Func.Depth = func_.Func.Depth
-	xfunc.Func.Endlineno = func_.Func.Endlineno
-	if Ctxt.Flag_dynlink {
-		makefuncsym(xfunc.Func.Nname.Sym)
-	}
-
-	xfunc.Nbody.Set(func_.Nbody.Slice())
-	xfunc.Func.Dcl = append(func_.Func.Dcl, xfunc.Func.Dcl...)
-	func_.Func.Dcl = nil
-	if xfunc.Nbody.Len() == 0 {
-		Fatalf("empty body - won't generate any code")
-	}
-	xfunc = typecheck(xfunc, Etop)
-
-	xfunc.Func.Closure = func_
-	func_.Func.Closure = xfunc
-
-	func_.Nbody.Set(nil)
-	func_.List.Set(nil)
-	func_.Rlist.Set(nil)
-
-	return xfunc
-}
-
-// capturevars is called in a separate phase after all typechecking is done.
-// It decides whether each variable captured by a closure should be captured
-// by value or by reference.
-// We use value capturing for values <= 128 bytes that are never reassigned
-// after capturing (effectively constant).
-func capturevars(xfunc *Node) {
-	lno := lineno
-	lineno = xfunc.Lineno
-
-	func_ := xfunc.Func.Closure
-	func_.Func.Enter.Set(nil)
-	for _, v := range func_.Func.Cvars.Slice() {
-		if v.Type == nil {
-			// If v.Type is nil, it means v looked like it was
-			// going to be used in the closure but wasn't.
-			// This happens because when parsing a, b, c := f()
-			// the a, b, c get parsed as references to older
-			// a, b, c before the parser figures out this is a
-			// declaration.
-			v.Op = OXXX
-
-			continue
-		}
-
-		// type check the & of closed variables outside the closure,
-		// so that the outer frame also grabs them and knows they escape.
-		dowidth(v.Type)
-
-		outer := v.Name.Param.Outer
-		outermost := v.Name.Defn
-
-		// out parameters will be assigned to implicitly upon return.
-		if outer.Class != PPARAMOUT && !outermost.Addrtaken && !outermost.Assigned && v.Type.Width <= 128 {
-			v.Name.Byval = true
-		} else {
-			outermost.Addrtaken = true
-			outer = nod(OADDR, outer, nil)
-		}
-
-		if Debug['m'] > 1 {
-			var name *Sym
-			if v.Name.Curfn != nil && v.Name.Curfn.Func.Nname != nil {
-				name = v.Name.Curfn.Func.Nname.Sym
-			}
-			how := "ref"
-			if v.Name.Byval {
-				how = "value"
-			}
-			Warnl(v.Lineno, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, outermost.Addrtaken, outermost.Assigned, int32(v.Type.Width))
-		}
-
-		outer = typecheck(outer, Erv)
-		func_.Func.Enter.Append(outer)
-	}
-
-	lineno = lno
-}
-
-// transformclosure is called in a separate phase after escape analysis.
-// It transforms closure bodies to properly reference captured variables.
-func transformclosure(xfunc *Node) {
-	lno := lineno
-	lineno = xfunc.Lineno
-	func_ := xfunc.Func.Closure
-
-	if func_.Func.Top&Ecall != 0 {
-		// If the closure is directly called, we transform it to a plain function call
-		// with variables passed as args. This avoids allocation of a closure object.
-		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
-		// will complete the transformation later.
-		// For illustration, the following closure:
-		//	func(a int) {
-		//		println(byval)
-		//		byref++
-		//	}(42)
-		// becomes:
-		//	func(a int, byval int, &byref *int) {
-		//		println(byval)
-		//		(*&byref)++
-		//	}(byval, &byref, 42)
-
-		// f is ONAME of the actual function.
-		f := xfunc.Func.Nname
-
-		// We are going to insert captured variables before input args.
-		var params []*Field
-		var decls []*Node
-		for _, v := range func_.Func.Cvars.Slice() {
-			if v.Op == OXXX {
-				continue
-			}
-			fld := newField()
-			fld.Funarg = FunargParams
-			if v.Name.Byval {
-				// If v is captured by value, we merely downgrade it to PPARAM.
-				v.Class = PPARAM
-
-				v.Ullman = 1
-				fld.Nname = v
-			} else {
-				// If v of type T is captured by reference,
-				// we introduce function param &v *T
-				// and v remains PAUTOHEAP with &v heapaddr
-				// (accesses will implicitly deref &v).
-				addr := newname(lookupf("&%s", v.Sym.Name))
-				addr.Type = ptrto(v.Type)
-				addr.Class = PPARAM
-				v.Name.Heapaddr = addr
-				fld.Nname = addr
-			}
-
-			fld.Type = fld.Nname.Type
-			fld.Sym = fld.Nname.Sym
-
-			params = append(params, fld)
-			decls = append(decls, fld.Nname)
-		}
-
-		if len(params) > 0 {
-			// Prepend params and decls.
-			f.Type.Params().SetFields(append(params, f.Type.Params().FieldSlice()...))
-			xfunc.Func.Dcl = append(decls, xfunc.Func.Dcl...)
-		}
-
-		// Recalculate param offsets.
-		if f.Type.Width > 0 {
-			Fatalf("transformclosure: width is already calculated")
-		}
-		dowidth(f.Type)
-		xfunc.Type = f.Type // update type of ODCLFUNC
-	} else {
-		// The closure is not called, so it is going to stay as a closure.
-		var body []*Node
-		offset := int64(Widthptr)
-		for _, v := range func_.Func.Cvars.Slice() {
-			if v.Op == OXXX {
-				continue
-			}
-
-			// cv refers to the field inside of closure OSTRUCTLIT.
-			cv := nod(OCLOSUREVAR, nil, nil)
-
-			cv.Type = v.Type
-			if !v.Name.Byval {
-				cv.Type = ptrto(v.Type)
-			}
-			offset = Rnd(offset, int64(cv.Type.Align))
-			cv.Xoffset = offset
-			offset += cv.Type.Width
-
-			if v.Name.Byval && v.Type.Width <= int64(2*Widthptr) {
-				// If it is a small variable captured by value, downgrade it to PAUTO.
-				v.Class = PAUTO
-				v.Ullman = 1
-				xfunc.Func.Dcl = append(xfunc.Func.Dcl, v)
-				body = append(body, nod(OAS, v, cv))
-			} else {
-				// Declare variable holding addresses taken from closure
-				// and initialize in entry prologue.
-				addr := newname(lookupf("&%s", v.Sym.Name))
-				addr.Name.Param.Ntype = nod(OIND, typenod(v.Type), nil)
-				addr.Class = PAUTO
-				addr.Used = true
-				addr.Name.Curfn = xfunc
-				xfunc.Func.Dcl = append(xfunc.Func.Dcl, addr)
-				v.Name.Heapaddr = addr
-				if v.Name.Byval {
-					cv = nod(OADDR, cv, nil)
-				}
-				body = append(body, nod(OAS, addr, cv))
-			}
-		}
-
-		if len(body) > 0 {
-			typecheckslice(body, Etop)
-			walkstmtlist(body)
-			xfunc.Func.Enter.Set(body)
-			xfunc.Func.Needctxt = true
-		}
-	}
-
-	lineno = lno
-}
-
-// hasemptycvars returns true iff closure func_ has an
-// empty list of captured vars. OXXX nodes don't count.
-func hasemptycvars(func_ *Node) bool {
-	for _, v := range func_.Func.Cvars.Slice() {
-		if v.Op == OXXX {
-			continue
-		}
-		return false
-	}
-	return true
-}
-
-// closuredebugruntimecheck applies boilerplate checks for debug flags
-// and compiling runtime
-func closuredebugruntimecheck(r *Node) {
-	if Debug_closure > 0 {
-		if r.Esc == EscHeap {
-			Warnl(r.Lineno, "heap closure, captured vars = %v", r.Func.Cvars)
-		} else {
-			Warnl(r.Lineno, "stack closure, captured vars = %v", r.Func.Cvars)
-		}
-	}
-	if compiling_runtime && r.Esc == EscHeap {
-		yyerrorl(r.Lineno, "heap-allocated closure, not allowed in runtime.")
-	}
-}
-
-func walkclosure(func_ *Node, init *Nodes) *Node {
-	// If no closure vars, don't bother wrapping.
-	if hasemptycvars(func_) {
-		if Debug_closure > 0 {
-			Warnl(func_.Lineno, "closure converted to global")
-		}
-		return func_.Func.Closure.Func.Nname
-	} else {
-		closuredebugruntimecheck(func_)
-	}
-
-	// Create closure in the form of a composite literal.
-	// supposing the closure captures an int i and a string s
-	// and has one float64 argument and no results,
-	// the generated code looks like:
-	//
-	//	clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
-	//
-	// The use of the struct provides type information to the garbage
-	// collector so that it can walk the closure. We could use (in this case)
-	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
-	// The information appears in the binary in the form of type descriptors;
-	// the struct is unnamed so that closures in multiple packages with the
-	// same struct type can share the descriptor.
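-	//
-	// Illustrative sketch (added for clarity, not part of the original
-	// comment): a closure of that shape could come from source like
-	//
-	//	i, s := 0, "hi"
-	//	f := func(x float64) { i++; s += "!"; _ = x }
-	//
-	// where i and s are captured by reference (hence the *int and *string
-	// fields) and x is the closure's float64 parameter.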
-
-	typ := nod(OTSTRUCT, nil, nil)
-
-	typ.List.Set1(nod(ODCLFIELD, newname(lookup(".F")), typenod(Types[TUINTPTR])))
-	for _, v := range func_.Func.Cvars.Slice() {
-		if v.Op == OXXX {
-			continue
-		}
-		typ1 := typenod(v.Type)
-		if !v.Name.Byval {
-			typ1 = nod(OIND, typ1, nil)
-		}
-		typ.List.Append(nod(ODCLFIELD, newname(v.Sym), typ1))
-	}
-
-	clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil))
-	clos.Esc = func_.Esc
-	clos.Right.Implicit = true
-	clos.List.Set(append([]*Node{nod(OCFUNC, func_.Func.Closure.Func.Nname, nil)}, func_.Func.Enter.Slice()...))
-
-	// Force type conversion from *struct to the func type.
-	clos = nod(OCONVNOP, clos, nil)
-
-	clos.Type = func_.Type
-
-	clos = typecheck(clos, Erv)
-
-	// typecheck will insert a PTRLIT node under the CONVNOP;
-	// tag it with the escape analysis result.
-	clos.Left.Esc = func_.Esc
-
-	// non-escaping temp to use, if any.
-	// orderexpr did not compute the type; fill it in now.
-	if x := prealloc[func_]; x != nil {
-		x.Type = clos.Left.Left.Type
-		x.Orig.Type = x.Type
-		clos.Left.Right = x
-		delete(prealloc, func_)
-	}
-
-	return walkexpr(clos, init)
-}
-
-func typecheckpartialcall(fn *Node, sym *Sym) {
-	switch fn.Op {
-	case ODOTINTER, ODOTMETH:
-		break
-
-	default:
-		Fatalf("invalid typecheckpartialcall")
-	}
-
-	// Create top-level function.
-	xfunc := makepartialcall(fn, fn.Type, sym)
-	fn.Func = xfunc.Func
-	fn.Right = newname(sym)
-	fn.Op = OCALLPART
-	fn.Type = xfunc.Type
-}
-
-var makepartialcall_gopkg *Pkg
-
-func makepartialcall(fn *Node, t0 *Type, meth *Sym) *Node {
-	var p string
-
-	rcvrtype := fn.Left.Type
-	if exportname(meth.Name) {
-		p = fmt.Sprintf("(%-S).%s-fm", rcvrtype, meth.Name)
-	} else {
-		p = fmt.Sprintf("(%-S).(%-v)-fm", rcvrtype, meth)
-	}
-	basetype := rcvrtype
-	if rcvrtype.IsPtr() {
-		basetype = basetype.Elem()
-	}
-	if !basetype.IsInterface() && basetype.Sym == nil {
-		Fatalf("missing base type for %v", rcvrtype)
-	}
-
-	var spkg *Pkg
-	if basetype.Sym != nil {
-		spkg = basetype.Sym.Pkg
-	}
-	if spkg == nil {
-		if makepartialcall_gopkg == nil {
-			makepartialcall_gopkg = mkpkg("go")
-		}
-		spkg = makepartialcall_gopkg
-	}
-
-	sym := Pkglookup(p, spkg)
-
-	if sym.Flags&SymUniq != 0 {
-		return sym.Def
-	}
-	sym.Flags |= SymUniq
-
-	savecurfn := Curfn
-	Curfn = nil
-
-	xtype := nod(OTFUNC, nil, nil)
-	var l []*Node
-	var callargs []*Node
-	ddd := false
-	xfunc := nod(ODCLFUNC, nil, nil)
-	Curfn = xfunc
-	for i, t := range t0.Params().Fields().Slice() {
-		n := newname(lookupN("a", i))
-		n.Class = PPARAM
-		xfunc.Func.Dcl = append(xfunc.Func.Dcl, n)
-		callargs = append(callargs, n)
-		fld := nod(ODCLFIELD, n, typenod(t.Type))
-		if t.Isddd {
-			fld.Isddd = true
-			ddd = true
-		}
-
-		l = append(l, fld)
-	}
-
-	xtype.List.Set(l)
-	l = nil
-	var retargs []*Node
-	for i, t := range t0.Results().Fields().Slice() {
-		n := newname(lookupN("r", i))
-		n.Class = PPARAMOUT
-		xfunc.Func.Dcl = append(xfunc.Func.Dcl, n)
-		retargs = append(retargs, n)
-		l = append(l, nod(ODCLFIELD, n, typenod(t.Type)))
-	}
-
-	xtype.Rlist.Set(l)
-
-	xfunc.Func.Dupok = true
-	xfunc.Func.Nname = newfuncname(sym)
-	xfunc.Func.Nname.Sym.Flags |= SymExported // disable export
-	xfunc.Func.Nname.Name.Param.Ntype = xtype
-	xfunc.Func.Nname.Name.Defn = xfunc
-	declare(xfunc.Func.Nname, PFUNC)
-
-	// Declare and initialize variable holding receiver.
-
-	xfunc.Func.Needctxt = true
-	cv := nod(OCLOSUREVAR, nil, nil)
-	cv.Xoffset = int64(Widthptr)
-	cv.Type = rcvrtype
-	if int(cv.Type.Align) > Widthptr {
-		cv.Xoffset = int64(cv.Type.Align)
-	}
-	ptr := nod(ONAME, nil, nil)
-	ptr.Sym = lookup("rcvr")
-	ptr.Class = PAUTO
-	ptr.Addable = true
-	ptr.Ullman = 1
-	ptr.Used = true
-	ptr.Name.Curfn = xfunc
-	ptr.Xoffset = 0
-	xfunc.Func.Dcl = append(xfunc.Func.Dcl, ptr)
-	var body []*Node
-	if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
-		ptr.Name.Param.Ntype = typenod(rcvrtype)
-		body = append(body, nod(OAS, ptr, cv))
-	} else {
-		ptr.Name.Param.Ntype = typenod(ptrto(rcvrtype))
-		body = append(body, nod(OAS, ptr, nod(OADDR, cv, nil)))
-	}
-
-	call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil)
-	call.List.Set(callargs)
-	call.Isddd = ddd
-	if t0.Results().NumFields() == 0 {
-		body = append(body, call)
-	} else {
-		n := nod(OAS2, nil, nil)
-		n.List.Set(retargs)
-		n.Rlist.Set1(call)
-		body = append(body, n)
-		n = nod(ORETURN, nil, nil)
-		body = append(body, n)
-	}
-
-	xfunc.Nbody.Set(body)
-
-	xfunc = typecheck(xfunc, Etop)
-	sym.Def = xfunc
-	xtop = append(xtop, xfunc)
-	Curfn = savecurfn
-
-	return xfunc
-}
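-
-// Illustrative sketch (an assumption added for clarity, not generated
-// output): for a method value x.M where M has signature func(int) string,
-// the wrapper built above looks conceptually like
-//
-//	func (T).M-fm(a0 int) (r0 string) {
-//		rcvr := <receiver loaded from the closure via OCLOSUREVAR>
-//		r0 = rcvr.M(a0)
-//		return
-//	}
-//
-// with parameters named a0, a1, ... and results named r0, r1, ...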
-
-func walkpartialcall(n *Node, init *Nodes) *Node {
-	// Create closure in the form of a composite literal.
-	// For x.M with receiver (x) type T, the generated code looks like:
-	//
-	//	clos = &struct{F uintptr; R T}{M.T·f, x}
-	//
-	// Like walkclosure above.
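-	//
-	// Illustrative sketch (not part of the original comment): for
-	//
-	//	var x T
-	//	f := x.M
-	//
-	// the receiver x is evaluated and stored in the R field at the point
-	// where the method value is created; F holds the wrapper built by
-	// makepartialcall.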
-
-	if n.Left.Type.IsInterface() {
-		// Trigger panic for method on nil interface now.
-		// Otherwise it happens in the wrapper and is confusing.
-		n.Left = cheapexpr(n.Left, init)
-
-		checknil(n.Left, init)
-	}
-
-	typ := nod(OTSTRUCT, nil, nil)
-	typ.List.Set1(nod(ODCLFIELD, newname(lookup("F")), typenod(Types[TUINTPTR])))
-	typ.List.Append(nod(ODCLFIELD, newname(lookup("R")), typenod(n.Left.Type)))
-
-	clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil))
-	clos.Esc = n.Esc
-	clos.Right.Implicit = true
-	clos.List.Set1(nod(OCFUNC, n.Func.Nname, nil))
-	clos.List.Append(n.Left)
-
-	// Force type conversion from *struct to the func type.
-	clos = nod(OCONVNOP, clos, nil)
-
-	clos.Type = n.Type
-
-	clos = typecheck(clos, Erv)
-
-	// typecheck will insert a PTRLIT node under the CONVNOP;
-	// tag it with the escape analysis result.
-	clos.Left.Esc = n.Esc
-
-	// non-escaping temp to use, if any.
-	// orderexpr did not compute the type; fill it in now.
-	if x := prealloc[n]; x != nil {
-		x.Type = clos.Left.Left.Type
-		x.Orig.Type = x.Type
-		clos.Left.Right = x
-		delete(prealloc, n)
-	}
-
-	return walkexpr(clos, init)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/const.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/const.go
deleted file mode 100644
index 1f0ffcc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/const.go
+++ /dev/null
@@ -1,1714 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/const.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/const.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "strings"
-
-// Ctype describes the constant kind of an "ideal" (untyped) constant.
-type Ctype int8
-
-const (
-	CTxxx Ctype = iota
-
-	CTINT
-	CTRUNE
-	CTFLT
-	CTCPLX
-	CTSTR
-	CTBOOL
-	CTNIL
-)
-
-type Val struct {
-	// U contains one of:
-	// bool     bool when n.ValCtype() == CTBOOL
-	// *Mpint   int when n.ValCtype() == CTINT, rune when n.ValCtype() == CTRUNE
-	// *Mpflt   float when n.ValCtype() == CTFLT
-	// *Mpcplx  pair of floats when n.ValCtype() == CTCPLX
-	// string   string when n.ValCtype() == CTSTR
-	// *Nilval  when n.ValCtype() == CTNIL
-	U interface{}
-}
-
-func (v Val) Ctype() Ctype {
-	switch x := v.U.(type) {
-	default:
-		Fatalf("unexpected Ctype for %T", v.U)
-		panic("not reached")
-	case nil:
-		return 0
-	case *NilVal:
-		return CTNIL
-	case bool:
-		return CTBOOL
-	case *Mpint:
-		if x.Rune {
-			return CTRUNE
-		}
-		return CTINT
-	case *Mpflt:
-		return CTFLT
-	case *Mpcplx:
-		return CTCPLX
-	case string:
-		return CTSTR
-	}
-}
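-
-// Illustrative sketch (added for clarity, not in the original source):
-// the dynamic type of Val.U determines the constant kind, for example
-//
-//	var v Val
-//	v.U = new(Mpint)         // untyped integer constant
-//	_ = v.Ctype() == CTINT   // true
-//	v.U.(*Mpint).Rune = true // same representation, now an untyped rune
-//	_ = v.Ctype() == CTRUNE  // true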
-
-func eqval(a, b Val) bool {
-	if a.Ctype() != b.Ctype() {
-		return false
-	}
-	switch x := a.U.(type) {
-	default:
-		Fatalf("unexpected Ctype for %T", a.U)
-		panic("not reached")
-	case *NilVal:
-		return true
-	case bool:
-		y := b.U.(bool)
-		return x == y
-	case *Mpint:
-		y := b.U.(*Mpint)
-		return x.Cmp(y) == 0
-	case *Mpflt:
-		y := b.U.(*Mpflt)
-		return x.Cmp(y) == 0
-	case *Mpcplx:
-		y := b.U.(*Mpcplx)
-		return x.Real.Cmp(&y.Real) == 0 && x.Imag.Cmp(&y.Imag) == 0
-	case string:
-		y := b.U.(string)
-		return x == y
-	}
-}
-
-// Interface returns the constant value stored in v as an interface{}.
-// It returns int64s for ints and runes, float64s for floats,
-// complex128s for complex values, and nil for constant nils.
-func (v Val) Interface() interface{} {
-	switch x := v.U.(type) {
-	default:
-		Fatalf("unexpected Interface for %T", v.U)
-		panic("not reached")
-	case *NilVal:
-		return nil
-	case bool, string:
-		return x
-	case *Mpint:
-		return x.Int64()
-	case *Mpflt:
-		return x.Float64()
-	case *Mpcplx:
-		return complex(x.Real.Float64(), x.Imag.Float64())
-	}
-}
-
-type NilVal struct{}
-
-// Int64 returns n as an int64.
-// n must be an integer or rune constant.
-func (n *Node) Int64() int64 {
-	if !Isconst(n, CTINT) {
-		Fatalf("Int(%v)", n)
-	}
-	return n.Val().U.(*Mpint).Int64()
-}
-
-// truncate float literal fv to 32-bit or 64-bit precision
-// according to type; return truncated value.
-func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
-	if t == nil {
-		return oldv
-	}
-
-	var v Val
-	v.U = oldv
-	overflow(v, t)
-
-	fv := newMpflt()
-	fv.Set(oldv)
-
-	// convert large precision literal floating
-	// into limited precision (float64 or float32)
-	switch t.Etype {
-	case TFLOAT64:
-		d := fv.Float64()
-		fv.SetFloat64(d)
-
-	case TFLOAT32:
-		d := fv.Float32()
-		fv.SetFloat64(d)
-	}
-
-	return fv
-}
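-
-// A minimal usage sketch (assumption added for clarity, not in the
-// original source):
-//
-//	f := newMpflt()
-//	f.SetFloat64(1.0 / 3.0)
-//	f32 := truncfltlit(f, Types[TFLOAT32]) // rounded to float32 precision
-//
-// The original literal is left untouched; a truncated copy is returned.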
-
-// canReuseNode indicates whether it is known to be safe
-// to reuse a Node.
-type canReuseNode bool
-
-const (
-	noReuse canReuseNode = false // not necessarily safe to reuse
-	reuseOK canReuseNode = true  // safe to reuse
-)
-
-// convert n, if literal, to type t.
-// implicit conversion.
-// The result of convlit MUST be assigned back to n, e.g.
-// 	n.Left = convlit(n.Left, t)
-func convlit(n *Node, t *Type) *Node {
-	return convlit1(n, t, false, noReuse)
-}
-
-// convlit1 converts n, if literal, to type t.
-// It returns a new node if necessary.
-// The result of convlit1 MUST be assigned back to n, e.g.
-// 	n.Left = convlit1(n.Left, t, explicit, reuse)
-func convlit1(n *Node, t *Type, explicit bool, reuse canReuseNode) *Node {
-	if n == nil || t == nil || n.Type == nil || t.IsUntyped() || n.Type == t {
-		return n
-	}
-	if !explicit && !n.Type.IsUntyped() {
-		return n
-	}
-
-	if n.Op == OLITERAL && !reuse {
-		// Can't always set n.Type directly on OLITERAL nodes.
-		// See discussion on CL 20813.
-		nn := *n
-		n = &nn
-		reuse = true
-	}
-
-	switch n.Op {
-	default:
-		if n.Type == idealbool {
-			if t.IsBoolean() {
-				n.Type = t
-			} else {
-				n.Type = Types[TBOOL]
-			}
-		}
-
-		if n.Type.Etype == TIDEAL {
-			n.Left = convlit(n.Left, t)
-			n.Right = convlit(n.Right, t)
-			n.Type = t
-		}
-
-		return n
-
-		// target is an invalid type for a constant? leave it alone.
-	case OLITERAL:
-		if !okforconst[t.Etype] && n.Type.Etype != TNIL {
-			return defaultlitreuse(n, nil, reuse)
-		}
-
-	case OLSH, ORSH:
-		n.Left = convlit1(n.Left, t, explicit && n.Left.Type.IsUntyped(), noReuse)
-		t = n.Left.Type
-		if t != nil && t.Etype == TIDEAL && n.Val().Ctype() != CTINT {
-			n.SetVal(toint(n.Val()))
-		}
-		if t != nil && !t.IsInteger() {
-			yyerror("invalid operation: %v (shift of type %v)", n, t)
-			t = nil
-		}
-
-		n.Type = t
-		return n
-
-	case OCOMPLEX:
-		if n.Type.Etype == TIDEAL {
-			switch t.Etype {
-			default:
-				// If trying to convert to non-complex type,
-				// leave as complex128 and let typechecker complain.
-				t = Types[TCOMPLEX128]
-				fallthrough
-			case TCOMPLEX128:
-				n.Type = t
-				n.Left = convlit(n.Left, Types[TFLOAT64])
-				n.Right = convlit(n.Right, Types[TFLOAT64])
-
-			case TCOMPLEX64:
-				n.Type = t
-				n.Left = convlit(n.Left, Types[TFLOAT32])
-				n.Right = convlit(n.Right, Types[TFLOAT32])
-			}
-		}
-
-		return n
-	}
-
-	// avoid repeated calculations, errors
-	if eqtype(n.Type, t) {
-		return n
-	}
-
-	ct := consttype(n)
-	var et EType
-	if ct < 0 {
-		goto bad
-	}
-
-	et = t.Etype
-	if et == TINTER {
-		if ct == CTNIL && n.Type == Types[TNIL] {
-			n.Type = t
-			return n
-		}
-		return defaultlitreuse(n, nil, reuse)
-	}
-
-	switch ct {
-	default:
-		goto bad
-
-	case CTNIL:
-		switch et {
-		default:
-			n.Type = nil
-			goto bad
-
-			// let normal conversion code handle it
-		case TSTRING:
-			return n
-
-		case TARRAY:
-			goto bad
-
-		case TPTR32,
-			TPTR64,
-			TINTER,
-			TMAP,
-			TCHAN,
-			TFUNC,
-			TSLICE,
-			TUNSAFEPTR:
-			break
-
-		// A nil literal may be converted to uintptr
-		// if it is an unsafe.Pointer
-		case TUINTPTR:
-			if n.Type.Etype == TUNSAFEPTR {
-				n.SetVal(Val{new(Mpint)})
-				n.Val().U.(*Mpint).SetInt64(0)
-			} else {
-				goto bad
-			}
-		}
-
-	case CTSTR, CTBOOL:
-		if et != n.Type.Etype {
-			goto bad
-		}
-
-	case CTINT, CTRUNE, CTFLT, CTCPLX:
-		if n.Type.Etype == TUNSAFEPTR && t.Etype != TUINTPTR {
-			goto bad
-		}
-		ct := n.Val().Ctype()
-		if isInt[et] {
-			switch ct {
-			default:
-				goto bad
-
-			case CTCPLX, CTFLT, CTRUNE:
-				n.SetVal(toint(n.Val()))
-				fallthrough
-
-			case CTINT:
-				overflow(n.Val(), t)
-			}
-		} else if isFloat[et] {
-			switch ct {
-			default:
-				goto bad
-
-			case CTCPLX, CTINT, CTRUNE:
-				n.SetVal(toflt(n.Val()))
-				fallthrough
-
-			case CTFLT:
-				n.SetVal(Val{truncfltlit(n.Val().U.(*Mpflt), t)})
-			}
-		} else if isComplex[et] {
-			switch ct {
-			default:
-				goto bad
-
-			case CTFLT, CTINT, CTRUNE:
-				n.SetVal(tocplx(n.Val()))
-				fallthrough
-
-			case CTCPLX:
-				overflow(n.Val(), t)
-			}
-		} else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
-			n.SetVal(tostr(n.Val()))
-		} else {
-			goto bad
-		}
-	}
-
-	n.Type = t
-	return n
-
-bad:
-	if !n.Diag {
-		if !t.Broke {
-			yyerror("cannot convert %v to type %v", n, t)
-		}
-		n.Diag = true
-	}
-
-	if n.Type.IsUntyped() {
-		n = defaultlitreuse(n, nil, reuse)
-	}
-	return n
-}
-
-func copyval(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		i := new(Mpint)
-		i.Set(u)
-		i.Rune = u.Rune
-		v.U = i
-
-	case *Mpflt:
-		f := newMpflt()
-		f.Set(u)
-		v.U = f
-
-	case *Mpcplx:
-		c := new(Mpcplx)
-		c.Real.Set(&u.Real)
-		c.Imag.Set(&u.Imag)
-		v.U = c
-	}
-
-	return v
-}
-
-func tocplx(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		c := new(Mpcplx)
-		c.Real.SetInt(u)
-		c.Imag.SetFloat64(0.0)
-		v.U = c
-
-	case *Mpflt:
-		c := new(Mpcplx)
-		c.Real.Set(u)
-		c.Imag.SetFloat64(0.0)
-		v.U = c
-	}
-
-	return v
-}
-
-func toflt(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		f := newMpflt()
-		f.SetInt(u)
-		v.U = f
-
-	case *Mpcplx:
-		f := newMpflt()
-		f.Set(&u.Real)
-		if u.Imag.CmpFloat64(0) != 0 {
-			yyerror("constant %v%vi truncated to real", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
-		}
-		v.U = f
-	}
-
-	return v
-}
-
-func toint(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if u.Rune {
-			i := new(Mpint)
-			i.Set(u)
-			v.U = i
-		}
-
-	case *Mpflt:
-		i := new(Mpint)
-		if i.SetFloat(u) < 0 {
-			msg := "constant %v truncated to integer"
-			// provide better error message if SetFloat failed because f was too large
-			if u.Val.IsInt() {
-				msg = "constant %v overflows integer"
-			}
-			yyerror(msg, fconv(u, FmtSharp))
-		}
-		v.U = i
-
-	case *Mpcplx:
-		i := new(Mpint)
-		if i.SetFloat(&u.Real) < 0 {
-			yyerror("constant %v%vi truncated to integer", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
-		}
-		if u.Imag.CmpFloat64(0) != 0 {
-			yyerror("constant %v%vi truncated to real", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
-		}
-		v.U = i
-	}
-
-	return v
-}
-
-func doesoverflow(v Val, t *Type) bool {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if !t.IsInteger() {
-			Fatalf("overflow: %v integer constant", t)
-		}
-		return u.Cmp(minintval[t.Etype]) < 0 || u.Cmp(maxintval[t.Etype]) > 0
-
-	case *Mpflt:
-		if !t.IsFloat() {
-			Fatalf("overflow: %v floating-point constant", t)
-		}
-		return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0
-
-	case *Mpcplx:
-		if !t.IsComplex() {
-			Fatalf("overflow: %v complex constant", t)
-		}
-		return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 ||
-			u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0
-	}
-
-	return false
-}
-
-func overflow(v Val, t *Type) {
-	// v has already been converted
-	// to appropriate form for t.
-	if t == nil || t.Etype == TIDEAL {
-		return
-	}
-
-	// Only uintptrs may be converted to unsafe.Pointer, which cannot overflow.
-	if t.Etype == TUNSAFEPTR {
-		return
-	}
-
-	if doesoverflow(v, t) {
-		yyerror("constant %v overflows %v", v, t)
-	}
-}
-
-func tostr(v Val) Val {
-	switch u := v.U.(type) {
-	case *Mpint:
-		var i int64 = 0xFFFD
-		if u.Cmp(minintval[TUINT32]) >= 0 && u.Cmp(maxintval[TUINT32]) <= 0 {
-			i = u.Int64()
-		}
-		v.U = string(i)
-
-	case *NilVal:
-		// Can happen because of string([]byte(nil)).
-		v.U = ""
-	}
-
-	return v
-}
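-
-// For example (illustrative, not in the original source), tostr on the
-// integer constant 65 yields the string constant "A", while a value
-// outside the uint32 range falls back to "\uFFFD" (the replacement
-// character 0xFFFD used above).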
-
-func consttype(n *Node) Ctype {
-	if n == nil || n.Op != OLITERAL {
-		return -1
-	}
-	return n.Val().Ctype()
-}
-
-func Isconst(n *Node, ct Ctype) bool {
-	t := consttype(n)
-
-	// If the caller is asking for CTINT, allow CTRUNE too.
-	// Makes life easier for back ends.
-	return t == ct || (ct == CTINT && t == CTRUNE)
-}
-
-func saveorig(n *Node) *Node {
-	if n == n.Orig {
-		// duplicate node for n.Orig.
-		n1 := nod(OLITERAL, nil, nil)
-
-		n.Orig = n1
-		*n1 = *n
-	}
-
-	return n.Orig
-}
-
-// if n is constant, rewrite as OLITERAL node.
-func evconst(n *Node) {
-	// pick off just the opcodes that can be
-	// constant evaluated.
-	switch n.Op {
-	default:
-		return
-
-	case OADD,
-		OAND,
-		OANDAND,
-		OANDNOT,
-		OARRAYBYTESTR,
-		OCOM,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLSH,
-		OLT,
-		OMINUS,
-		OMOD,
-		OMUL,
-		ONE,
-		ONOT,
-		OOR,
-		OOROR,
-		OPLUS,
-		ORSH,
-		OSUB,
-		OXOR:
-		break
-
-	case OCONV:
-		if n.Type == nil {
-			return
-		}
-		if !okforconst[n.Type.Etype] && n.Type.Etype != TNIL {
-			return
-		}
-
-		// merge adjacent constants in the argument list.
-	case OADDSTR:
-		s := n.List.Slice()
-		for i1 := 0; i1 < len(s); i1++ {
-			if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
-				// merge from i1 up to but not including i2
-				var strs []string
-				i2 := i1
-				for i2 < len(s) && Isconst(s[i2], CTSTR) {
-					strs = append(strs, s[i2].Val().U.(string))
-					i2++
-				}
-
-				nl := *s[i1]
-				nl.Orig = &nl
-				nl.SetVal(Val{strings.Join(strs, "")})
-				s[i1] = &nl
-				s = append(s[:i1+1], s[i2:]...)
-			}
-		}
-
-		if len(s) == 1 && Isconst(s[0], CTSTR) {
-			n.Op = OLITERAL
-			n.SetVal(s[0].Val())
-		} else {
-			n.List.Set(s)
-		}
-
-		return
-	}
-
-	nl := n.Left
-	if nl == nil || nl.Type == nil {
-		return
-	}
-	if consttype(nl) < 0 {
-		return
-	}
-	wl := nl.Type.Etype
-	if isInt[wl] || isFloat[wl] || isComplex[wl] {
-		wl = TIDEAL
-	}
-
-	// avoid constant conversions in switches below
-	const (
-		CTINT_         = uint32(CTINT)
-		CTRUNE_        = uint32(CTRUNE)
-		CTFLT_         = uint32(CTFLT)
-		CTCPLX_        = uint32(CTCPLX)
-		CTSTR_         = uint32(CTSTR)
-		CTBOOL_        = uint32(CTBOOL)
-		CTNIL_         = uint32(CTNIL)
-		OCONV_         = uint32(OCONV) << 16
-		OARRAYBYTESTR_ = uint32(OARRAYBYTESTR) << 16
-		OPLUS_         = uint32(OPLUS) << 16
-		OMINUS_        = uint32(OMINUS) << 16
-		OCOM_          = uint32(OCOM) << 16
-		ONOT_          = uint32(ONOT) << 16
-		OLSH_          = uint32(OLSH) << 16
-		ORSH_          = uint32(ORSH) << 16
-		OADD_          = uint32(OADD) << 16
-		OSUB_          = uint32(OSUB) << 16
-		OMUL_          = uint32(OMUL) << 16
-		ODIV_          = uint32(ODIV) << 16
-		OMOD_          = uint32(OMOD) << 16
-		OOR_           = uint32(OOR) << 16
-		OAND_          = uint32(OAND) << 16
-		OANDNOT_       = uint32(OANDNOT) << 16
-		OXOR_          = uint32(OXOR) << 16
-		OEQ_           = uint32(OEQ) << 16
-		ONE_           = uint32(ONE) << 16
-		OLT_           = uint32(OLT) << 16
-		OLE_           = uint32(OLE) << 16
-		OGE_           = uint32(OGE) << 16
-		OGT_           = uint32(OGT) << 16
-		OOROR_         = uint32(OOROR) << 16
-		OANDAND_       = uint32(OANDAND) << 16
-	)
-
-	nr := n.Right
-	var rv Val
-	var lno int32
-	var wr EType
-	var v Val
-	var norig *Node
-	var nn *Node
-	if nr == nil {
-		// copy numeric value to avoid modifying
-		// nl, in case someone still refers to it (e.g. iota).
-		v = nl.Val()
-
-		if wl == TIDEAL {
-			v = copyval(v)
-		}
-
-		switch uint32(n.Op)<<16 | uint32(v.Ctype()) {
-		default:
-			if !n.Diag {
-				yyerror("illegal constant expression %v %v", n.Op, nl.Type)
-				n.Diag = true
-			}
-			return
-
-		case OCONV_ | CTNIL_,
-			OARRAYBYTESTR_ | CTNIL_:
-			if n.Type.IsString() {
-				v = tostr(v)
-				nl.Type = n.Type
-				break
-			}
-			fallthrough
-		case OCONV_ | CTINT_,
-			OCONV_ | CTRUNE_,
-			OCONV_ | CTFLT_,
-			OCONV_ | CTSTR_,
-			OCONV_ | CTBOOL_:
-			nl = convlit1(nl, n.Type, true, false)
-			v = nl.Val()
-
-		case OPLUS_ | CTINT_,
-			OPLUS_ | CTRUNE_:
-			break
-
-		case OMINUS_ | CTINT_,
-			OMINUS_ | CTRUNE_:
-			v.U.(*Mpint).Neg()
-
-		case OCOM_ | CTINT_,
-			OCOM_ | CTRUNE_:
-			var et EType = Txxx
-			if nl.Type != nil {
-				et = nl.Type.Etype
-			}
-
-			// calculate the mask in b
-			// result will be (a ^ mask)
-			var b Mpint
-			switch et {
-			// signed guys change sign
-			default:
-				b.SetInt64(-1)
-
-				// unsigned guys invert their bits
-			case TUINT8,
-				TUINT16,
-				TUINT32,
-				TUINT64,
-				TUINT,
-				TUINTPTR:
-				b.Set(maxintval[et])
-			}
-
-			v.U.(*Mpint).Xor(&b)
-
-		case OPLUS_ | CTFLT_:
-			break
-
-		case OMINUS_ | CTFLT_:
-			v.U.(*Mpflt).Neg()
-
-		case OPLUS_ | CTCPLX_:
-			break
-
-		case OMINUS_ | CTCPLX_:
-			v.U.(*Mpcplx).Real.Neg()
-			v.U.(*Mpcplx).Imag.Neg()
-
-		case ONOT_ | CTBOOL_:
-			if !v.U.(bool) {
-				goto settrue
-			}
-			goto setfalse
-		}
-		goto ret
-	}
-	if nr.Type == nil {
-		return
-	}
-	if consttype(nr) < 0 {
-		return
-	}
-	wr = nr.Type.Etype
-	if isInt[wr] || isFloat[wr] || isComplex[wr] {
-		wr = TIDEAL
-	}
-
-	// check for compatible general types (numeric, string, etc)
-	if wl != wr {
-		if wl == TINTER || wr == TINTER {
-			goto setfalse
-		}
-		goto illegal
-	}
-
-	// check for compatible types.
-	switch n.Op {
-	// ideal const mixes with anything but otherwise must match.
-	default:
-		if nl.Type.Etype != TIDEAL {
-			nr = defaultlit(nr, nl.Type)
-			n.Right = nr
-		}
-
-		if nr.Type.Etype != TIDEAL {
-			nl = defaultlit(nl, nr.Type)
-			n.Left = nl
-		}
-
-		if nl.Type.Etype != nr.Type.Etype {
-			goto illegal
-		}
-
-	// right must be unsigned.
-	// left can be ideal.
-	case OLSH, ORSH:
-		nr = defaultlit(nr, Types[TUINT])
-
-		n.Right = nr
-		if nr.Type != nil && (nr.Type.IsSigned() || !nr.Type.IsInteger()) {
-			goto illegal
-		}
-		if nl.Val().Ctype() != CTRUNE {
-			nl.SetVal(toint(nl.Val()))
-		}
-		nr.SetVal(toint(nr.Val()))
-	}
-
-	// copy numeric value to avoid modifying
-	// n.Left, in case someone still refers to it (e.g. iota).
-	v = nl.Val()
-
-	if wl == TIDEAL {
-		v = copyval(v)
-	}
-
-	rv = nr.Val()
-
-	// convert to common ideal
-	if v.Ctype() == CTCPLX || rv.Ctype() == CTCPLX {
-		v = tocplx(v)
-		rv = tocplx(rv)
-	}
-
-	if v.Ctype() == CTFLT || rv.Ctype() == CTFLT {
-		v = toflt(v)
-		rv = toflt(rv)
-	}
-
-	// Rune and int turn into rune.
-	if v.Ctype() == CTRUNE && rv.Ctype() == CTINT {
-		i := new(Mpint)
-		i.Set(rv.U.(*Mpint))
-		i.Rune = true
-		rv.U = i
-	}
-	if v.Ctype() == CTINT && rv.Ctype() == CTRUNE {
-		if n.Op == OLSH || n.Op == ORSH {
-			i := new(Mpint)
-			i.Set(rv.U.(*Mpint))
-			rv.U = i
-		} else {
-			i := new(Mpint)
-			i.Set(v.U.(*Mpint))
-			i.Rune = true
-			v.U = i
-		}
-	}
-
-	if v.Ctype() != rv.Ctype() {
-		// Use of undefined name as constant?
-		if (v.Ctype() == 0 || rv.Ctype() == 0) && nerrors > 0 {
-			return
-		}
-		Fatalf("constant type mismatch %v(%d) %v(%d)", nl.Type, v.Ctype(), nr.Type, rv.Ctype())
-	}
-
-	// run op
-	switch uint32(n.Op)<<16 | uint32(v.Ctype()) {
-	default:
-		goto illegal
-
-	case OADD_ | CTINT_,
-		OADD_ | CTRUNE_:
-		v.U.(*Mpint).Add(rv.U.(*Mpint))
-
-	case OSUB_ | CTINT_,
-		OSUB_ | CTRUNE_:
-		v.U.(*Mpint).Sub(rv.U.(*Mpint))
-
-	case OMUL_ | CTINT_,
-		OMUL_ | CTRUNE_:
-		v.U.(*Mpint).Mul(rv.U.(*Mpint))
-
-	case ODIV_ | CTINT_,
-		ODIV_ | CTRUNE_:
-		if rv.U.(*Mpint).CmpInt64(0) == 0 {
-			yyerror("division by zero")
-			v.U.(*Mpint).SetOverflow()
-			break
-		}
-
-		v.U.(*Mpint).Quo(rv.U.(*Mpint))
-
-	case OMOD_ | CTINT_,
-		OMOD_ | CTRUNE_:
-		if rv.U.(*Mpint).CmpInt64(0) == 0 {
-			yyerror("division by zero")
-			v.U.(*Mpint).SetOverflow()
-			break
-		}
-
-		v.U.(*Mpint).Rem(rv.U.(*Mpint))
-
-	case OLSH_ | CTINT_,
-		OLSH_ | CTRUNE_:
-		v.U.(*Mpint).Lsh(rv.U.(*Mpint))
-
-	case ORSH_ | CTINT_,
-		ORSH_ | CTRUNE_:
-		v.U.(*Mpint).Rsh(rv.U.(*Mpint))
-
-	case OOR_ | CTINT_,
-		OOR_ | CTRUNE_:
-		v.U.(*Mpint).Or(rv.U.(*Mpint))
-
-	case OAND_ | CTINT_,
-		OAND_ | CTRUNE_:
-		v.U.(*Mpint).And(rv.U.(*Mpint))
-
-	case OANDNOT_ | CTINT_,
-		OANDNOT_ | CTRUNE_:
-		v.U.(*Mpint).AndNot(rv.U.(*Mpint))
-
-	case OXOR_ | CTINT_,
-		OXOR_ | CTRUNE_:
-		v.U.(*Mpint).Xor(rv.U.(*Mpint))
-
-	case OADD_ | CTFLT_:
-		v.U.(*Mpflt).Add(rv.U.(*Mpflt))
-
-	case OSUB_ | CTFLT_:
-		v.U.(*Mpflt).Sub(rv.U.(*Mpflt))
-
-	case OMUL_ | CTFLT_:
-		v.U.(*Mpflt).Mul(rv.U.(*Mpflt))
-
-	case ODIV_ | CTFLT_:
-		if rv.U.(*Mpflt).CmpFloat64(0) == 0 {
-			yyerror("division by zero")
-			v.U.(*Mpflt).SetFloat64(1.0)
-			break
-		}
-
-		v.U.(*Mpflt).Quo(rv.U.(*Mpflt))
-
-	// The default case above would print 'ideal % ideal',
-	// which is not quite an ideal error.
-	case OMOD_ | CTFLT_:
-		if !n.Diag {
-			yyerror("illegal constant expression: floating-point %% operation")
-			n.Diag = true
-		}
-
-		return
-
-	case OADD_ | CTCPLX_:
-		v.U.(*Mpcplx).Real.Add(&rv.U.(*Mpcplx).Real)
-		v.U.(*Mpcplx).Imag.Add(&rv.U.(*Mpcplx).Imag)
-
-	case OSUB_ | CTCPLX_:
-		v.U.(*Mpcplx).Real.Sub(&rv.U.(*Mpcplx).Real)
-		v.U.(*Mpcplx).Imag.Sub(&rv.U.(*Mpcplx).Imag)
-
-	case OMUL_ | CTCPLX_:
-		cmplxmpy(v.U.(*Mpcplx), rv.U.(*Mpcplx))
-
-	case ODIV_ | CTCPLX_:
-		if rv.U.(*Mpcplx).Real.CmpFloat64(0) == 0 && rv.U.(*Mpcplx).Imag.CmpFloat64(0) == 0 {
-			yyerror("complex division by zero")
-			rv.U.(*Mpcplx).Real.SetFloat64(1.0)
-			rv.U.(*Mpcplx).Imag.SetFloat64(0.0)
-			break
-		}
-
-		cmplxdiv(v.U.(*Mpcplx), rv.U.(*Mpcplx))
-
-	case OEQ_ | CTNIL_:
-		goto settrue
-
-	case ONE_ | CTNIL_:
-		goto setfalse
-
-	case OEQ_ | CTINT_,
-		OEQ_ | CTRUNE_:
-		if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) == 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case ONE_ | CTINT_,
-		ONE_ | CTRUNE_:
-		if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) != 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OLT_ | CTINT_,
-		OLT_ | CTRUNE_:
-		if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) < 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OLE_ | CTINT_,
-		OLE_ | CTRUNE_:
-		if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) <= 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OGE_ | CTINT_,
-		OGE_ | CTRUNE_:
-		if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) >= 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OGT_ | CTINT_,
-		OGT_ | CTRUNE_:
-		if v.U.(*Mpint).Cmp(rv.U.(*Mpint)) > 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OEQ_ | CTFLT_:
-		if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) == 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case ONE_ | CTFLT_:
-		if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) != 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OLT_ | CTFLT_:
-		if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) < 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OLE_ | CTFLT_:
-		if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) <= 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OGE_ | CTFLT_:
-		if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) >= 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OGT_ | CTFLT_:
-		if v.U.(*Mpflt).Cmp(rv.U.(*Mpflt)) > 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OEQ_ | CTCPLX_:
-		if v.U.(*Mpcplx).Real.Cmp(&rv.U.(*Mpcplx).Real) == 0 && v.U.(*Mpcplx).Imag.Cmp(&rv.U.(*Mpcplx).Imag) == 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case ONE_ | CTCPLX_:
-		if v.U.(*Mpcplx).Real.Cmp(&rv.U.(*Mpcplx).Real) != 0 || v.U.(*Mpcplx).Imag.Cmp(&rv.U.(*Mpcplx).Imag) != 0 {
-			goto settrue
-		}
-		goto setfalse
-
-	case OEQ_ | CTSTR_:
-		if strlit(nl) == strlit(nr) {
-			goto settrue
-		}
-		goto setfalse
-
-	case ONE_ | CTSTR_:
-		if strlit(nl) != strlit(nr) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OLT_ | CTSTR_:
-		if strlit(nl) < strlit(nr) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OLE_ | CTSTR_:
-		if strlit(nl) <= strlit(nr) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OGE_ | CTSTR_:
-		if strlit(nl) >= strlit(nr) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OGT_ | CTSTR_:
-		if strlit(nl) > strlit(nr) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OOROR_ | CTBOOL_:
-		if v.U.(bool) || rv.U.(bool) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OANDAND_ | CTBOOL_:
-		if v.U.(bool) && rv.U.(bool) {
-			goto settrue
-		}
-		goto setfalse
-
-	case OEQ_ | CTBOOL_:
-		if v.U.(bool) == rv.U.(bool) {
-			goto settrue
-		}
-		goto setfalse
-
-	case ONE_ | CTBOOL_:
-		if v.U.(bool) != rv.U.(bool) {
-			goto settrue
-		}
-		goto setfalse
-	}
-
-	goto ret
-
-ret:
-	norig = saveorig(n)
-	*n = *nl
-
-	// restore value of n.Orig.
-	n.Orig = norig
-
-	n.SetVal(v)
-
-	// check range.
-	lno = setlineno(n)
-	overflow(v, n.Type)
-	lineno = lno
-
-	// truncate precision for non-ideal float.
-	if v.Ctype() == CTFLT && n.Type.Etype != TIDEAL {
-		n.SetVal(Val{truncfltlit(v.U.(*Mpflt), n.Type)})
-	}
-	return
-
-settrue:
-	nn = nodbool(true)
-	nn.Orig = saveorig(n)
-	if !iscmp[n.Op] {
-		nn.Type = nl.Type
-	}
-	*n = *nn
-	return
-
-setfalse:
-	nn = nodbool(false)
-	nn.Orig = saveorig(n)
-	if !iscmp[n.Op] {
-		nn.Type = nl.Type
-	}
-	*n = *nn
-	return
-
-illegal:
-	if !n.Diag {
-		yyerror("illegal constant expression: %v %v %v", nl.Type, n.Op, nr.Type)
-		n.Diag = true
-	}
-}
-
-func nodlit(v Val) *Node {
-	n := nod(OLITERAL, nil, nil)
-	n.SetVal(v)
-	switch v.Ctype() {
-	default:
-		Fatalf("nodlit ctype %d", v.Ctype())
-
-	case CTSTR:
-		n.Type = idealstring
-
-	case CTBOOL:
-		n.Type = idealbool
-
-	case CTINT, CTRUNE, CTFLT, CTCPLX:
-		n.Type = Types[TIDEAL]
-
-	case CTNIL:
-		n.Type = Types[TNIL]
-	}
-
-	return n
-}
-
-func nodcplxlit(r Val, i Val) *Node {
-	r = toflt(r)
-	i = toflt(i)
-
-	c := new(Mpcplx)
-	n := nod(OLITERAL, nil, nil)
-	n.Type = Types[TIDEAL]
-	n.SetVal(Val{c})
-
-	if r.Ctype() != CTFLT || i.Ctype() != CTFLT {
-		Fatalf("nodcplxlit ctype %d/%d", r.Ctype(), i.Ctype())
-	}
-
-	c.Real.Set(r.U.(*Mpflt))
-	c.Imag.Set(i.U.(*Mpflt))
-	return n
-}
-
-// idealkind returns a constant kind like consttype
-// but for an arbitrary "ideal" (untyped constant) expression.
-func idealkind(n *Node) Ctype {
-	if n == nil || !n.Type.IsUntyped() {
-		return CTxxx
-	}
-
-	switch n.Op {
-	default:
-		return CTxxx
-
-	case OLITERAL:
-		return n.Val().Ctype()
-
-		// numeric kinds.
-	case OADD,
-		OAND,
-		OANDNOT,
-		OCOM,
-		ODIV,
-		OMINUS,
-		OMOD,
-		OMUL,
-		OSUB,
-		OXOR,
-		OOR,
-		OPLUS:
-		k1 := idealkind(n.Left)
-
-		k2 := idealkind(n.Right)
-		if k1 > k2 {
-			return k1
-		} else {
-			return k2
-		}
-
-	case OREAL, OIMAG:
-		return CTFLT
-
-	case OCOMPLEX:
-		return CTCPLX
-
-	case OADDSTR:
-		return CTSTR
-
-	case OANDAND,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLT,
-		ONE,
-		ONOT,
-		OOROR,
-		OCMPSTR,
-		OCMPIFACE:
-		return CTBOOL
-
-		// shifts (beware!).
-	case OLSH, ORSH:
-		return idealkind(n.Left)
-	}
-}
-
-// The result of defaultlit MUST be assigned back to n, e.g.
-// 	n.Left = defaultlit(n.Left, t)
-func defaultlit(n *Node, t *Type) *Node {
-	return defaultlitreuse(n, t, noReuse)
-}
-
-// The result of defaultlitreuse MUST be assigned back to n, e.g.
-// 	n.Left = defaultlitreuse(n.Left, t, reuse)
-func defaultlitreuse(n *Node, t *Type, reuse canReuseNode) *Node {
-	if n == nil || !n.Type.IsUntyped() {
-		return n
-	}
-
-	if n.Op == OLITERAL && !reuse {
-		nn := *n
-		n = &nn
-		reuse = true
-	}
-
-	lno := setlineno(n)
-	ctype := idealkind(n)
-	var t1 *Type
-	switch ctype {
-	default:
-		if t != nil {
-			return convlit(n, t)
-		}
-
-		if n.Val().Ctype() == CTNIL {
-			lineno = lno
-			if !n.Diag {
-				yyerror("use of untyped nil")
-				n.Diag = true
-			}
-
-			n.Type = nil
-			break
-		}
-
-		if n.Val().Ctype() == CTSTR {
-			t1 := Types[TSTRING]
-			n = convlit1(n, t1, false, reuse)
-			break
-		}
-
-		yyerror("defaultlit: unknown literal: %v", n)
-
-	case CTxxx:
-		Fatalf("defaultlit: idealkind is CTxxx: %+v", n)
-
-	case CTBOOL:
-		t1 := Types[TBOOL]
-		if t != nil && t.IsBoolean() {
-			t1 = t
-		}
-		n = convlit1(n, t1, false, reuse)
-
-	case CTINT:
-		t1 = Types[TINT]
-		goto num
-
-	case CTRUNE:
-		t1 = runetype
-		goto num
-
-	case CTFLT:
-		t1 = Types[TFLOAT64]
-		goto num
-
-	case CTCPLX:
-		t1 = Types[TCOMPLEX128]
-		goto num
-	}
-
-	lineno = lno
-	return n
-
-num:
-	// Note: n.Val().Ctype() can be CTxxx (not a constant) here
-	// in the case of an untyped non-constant value, like 1<<i.
-	v1 := n.Val()
-	if t != nil {
-		if t.IsInteger() {
-			t1 = t
-			v1 = toint(n.Val())
-		} else if t.IsFloat() {
-			t1 = t
-			v1 = toflt(n.Val())
-		} else if t.IsComplex() {
-			t1 = t
-			v1 = tocplx(n.Val())
-		}
-		if n.Val().Ctype() != CTxxx {
-			n.SetVal(v1)
-		}
-	}
-
-	if n.Val().Ctype() != CTxxx {
-		overflow(n.Val(), t1)
-	}
-	n = convlit1(n, t1, false, reuse)
-	lineno = lno
-	return n
-}
-
-// defaultlit on both nodes simultaneously;
-// if they're both ideal going in, they had better
-// get the same type going out.
-// force means a concrete (non-ideal) type must be assigned.
-// The results of defaultlit2 MUST be assigned back to l and r, e.g.
-// 	n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
-func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
-	if l.Type == nil || r.Type == nil {
-		return l, r
-	}
-	if !l.Type.IsUntyped() {
-		r = convlit(r, l.Type)
-		return l, r
-	}
-
-	if !r.Type.IsUntyped() {
-		l = convlit(l, r.Type)
-		return l, r
-	}
-
-	if !force {
-		return l, r
-	}
-
-	if l.Type.IsBoolean() {
-		l = convlit(l, Types[TBOOL])
-		r = convlit(r, Types[TBOOL])
-	}
-
-	lkind := idealkind(l)
-	rkind := idealkind(r)
-	if lkind == CTCPLX || rkind == CTCPLX {
-		l = convlit(l, Types[TCOMPLEX128])
-		r = convlit(r, Types[TCOMPLEX128])
-		return l, r
-	}
-
-	if lkind == CTFLT || rkind == CTFLT {
-		l = convlit(l, Types[TFLOAT64])
-		r = convlit(r, Types[TFLOAT64])
-		return l, r
-	}
-
-	if lkind == CTRUNE || rkind == CTRUNE {
-		l = convlit(l, runetype)
-		r = convlit(r, runetype)
-		return l, r
-	}
-
-	l = convlit(l, Types[TINT])
-	r = convlit(r, Types[TINT])
-
-	return l, r
-}
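-
-// For example (illustrative, not in the original source), when force is
-// set: an untyped int paired with an untyped float makes both float64,
-// an untyped int paired with an untyped rune makes both rune, and two
-// untyped ints default to int.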
-
-// strlit returns the value of a literal string Node as a string.
-func strlit(n *Node) string {
-	return n.Val().U.(string)
-}
-
-func smallintconst(n *Node) bool {
-	if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
-		switch simtype[n.Type.Etype] {
-		case TINT8,
-			TUINT8,
-			TINT16,
-			TUINT16,
-			TINT32,
-			TUINT32,
-			TBOOL,
-			TPTR32:
-			return true
-
-		case TIDEAL, TINT64, TUINT64, TPTR64:
-			v, ok := n.Val().U.(*Mpint)
-			if ok && v.Cmp(minintval[TINT32]) > 0 && v.Cmp(maxintval[TINT32]) < 0 {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// nonnegintconst checks if Node n contains a constant expression
-// representable as a non-negative small integer, and returns its
-// (integer) value if that's the case. Otherwise, it returns -1.
-func nonnegintconst(n *Node) int64 {
-	if n.Op != OLITERAL {
-		return -1
-	}
-
-	// toint will leave n.Val unchanged if it's not castable to an
-	// Mpint, so we still have to guard the conversion.
-	v := toint(n.Val())
-	vi, ok := v.U.(*Mpint)
-	if !ok || vi.Val.Sign() < 0 || vi.Cmp(maxintval[TINT32]) > 0 {
-		return -1
-	}
-
-	return vi.Int64()
-}
-
-// complex multiply v *= rv
-//	(a, b) * (c, d) = (a*c - b*d, b*c + a*d)
-func cmplxmpy(v *Mpcplx, rv *Mpcplx) {
-	var ac Mpflt
-	var bd Mpflt
-	var bc Mpflt
-	var ad Mpflt
-
-	ac.Set(&v.Real)
-	ac.Mul(&rv.Real) // ac
-
-	bd.Set(&v.Imag)
-	bd.Mul(&rv.Imag) // bd
-
-	bc.Set(&v.Imag)
-	bc.Mul(&rv.Real) // bc
-
-	ad.Set(&v.Real)
-	ad.Mul(&rv.Imag) // ad
-
-	v.Real.Set(&ac)
-	v.Real.Sub(&bd) // ac-bd
-
-	v.Imag.Set(&bc)
-	v.Imag.Add(&ad) // bc+ad
-}
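-
-// Worked example (added for clarity): with v = 1+2i and rv = 3+4i,
-// ac = 3, bd = 8, bc = 6 and ad = 4, so the product is (3-8) + (6+4)i = -5+10i.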
-
-// complex divide v /= rv
-//	(a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
-func cmplxdiv(v *Mpcplx, rv *Mpcplx) {
-	var ac Mpflt
-	var bd Mpflt
-	var bc Mpflt
-	var ad Mpflt
-	var cc_plus_dd Mpflt
-
-	cc_plus_dd.Set(&rv.Real)
-	cc_plus_dd.Mul(&rv.Real) // cc
-
-	ac.Set(&rv.Imag)
-	ac.Mul(&rv.Imag) // dd
-
-	cc_plus_dd.Add(&ac) // cc+dd
-
-	ac.Set(&v.Real)
-	ac.Mul(&rv.Real) // ac
-
-	bd.Set(&v.Imag)
-	bd.Mul(&rv.Imag) // bd
-
-	bc.Set(&v.Imag)
-	bc.Mul(&rv.Real) // bc
-
-	ad.Set(&v.Real)
-	ad.Mul(&rv.Imag) // ad
-
-	v.Real.Set(&ac)
-	v.Real.Add(&bd)         // ac+bd
-	v.Real.Quo(&cc_plus_dd) // (ac+bd)/(cc+dd)
-
-	v.Imag.Set(&bc)
-	v.Imag.Sub(&ad)         // bc-ad
-	v.Imag.Quo(&cc_plus_dd) // (bc-ad)/(cc+dd)
-}
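-
-// Worked example (added for clarity): with v = 1+2i and rv = 3+4i,
-// cc+dd = 25, ac+bd = 11 and bc-ad = 2, so the quotient is
-// (11 + 2i)/25 = 0.44 + 0.08i.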
-
-// Is n a Go language constant (as opposed to a compile-time constant)?
-// Expressions derived from nil, like string([]byte(nil)), while they
-// may be known at compile time, are not Go language constants.
-// Only called for expressions known to evaluate to compile-time
-// constants.
-func isgoconst(n *Node) bool {
-	if n.Orig != nil {
-		n = n.Orig
-	}
-
-	switch n.Op {
-	case OADD,
-		OADDSTR,
-		OAND,
-		OANDAND,
-		OANDNOT,
-		OCOM,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLSH,
-		OLT,
-		OMINUS,
-		OMOD,
-		OMUL,
-		ONE,
-		ONOT,
-		OOR,
-		OOROR,
-		OPLUS,
-		ORSH,
-		OSUB,
-		OXOR,
-		OIOTA,
-		OCOMPLEX,
-		OREAL,
-		OIMAG:
-		if isgoconst(n.Left) && (n.Right == nil || isgoconst(n.Right)) {
-			return true
-		}
-
-	case OCONV:
-		if okforconst[n.Type.Etype] && isgoconst(n.Left) {
-			return true
-		}
-
-	case OLEN, OCAP:
-		l := n.Left
-		if isgoconst(l) {
-			return true
-		}
-
-		// Special case: len/cap is constant when applied to an array or
-		// a pointer to an array, provided the expression does not contain
-		// function calls or channel receive operations.
-		t := l.Type
-
-		if t != nil && t.IsPtr() {
-			t = t.Elem()
-		}
-		if t != nil && t.IsArray() && !hascallchan(l) {
-			return true
-		}
-
-	case OLITERAL:
-		if n.Val().Ctype() != CTNIL {
-			return true
-		}
-
-	case ONAME:
-		l := n.Sym.Def
-		if l != nil && l.Op == OLITERAL && n.Val().Ctype() != CTNIL {
-			return true
-		}
-
-	case ONONAME:
-		if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
-			return true
-		}
-
-	case OALIGNOF, OOFFSETOF, OSIZEOF:
-		return true
-	}
-
-	// dump("nonconst", n)
-	return false
-}
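-
-// For example (illustrative, not in the original source), len(a) for a
-// variable a of type [4]int is a Go language constant, while
-// string([]byte(nil)) is known at compile time but is not.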
-
-func hascallchan(n *Node) bool {
-	if n == nil {
-		return false
-	}
-	switch n.Op {
-	case OAPPEND,
-		OCALL,
-		OCALLFUNC,
-		OCALLINTER,
-		OCALLMETH,
-		OCAP,
-		OCLOSE,
-		OCOMPLEX,
-		OCOPY,
-		ODELETE,
-		OIMAG,
-		OLEN,
-		OMAKE,
-		ONEW,
-		OPANIC,
-		OPRINT,
-		OPRINTN,
-		OREAL,
-		ORECOVER,
-		ORECV:
-		return true
-	}
-
-	if hascallchan(n.Left) || hascallchan(n.Right) {
-		return true
-	}
-	for _, n1 := range n.List.Slice() {
-		if hascallchan(n1) {
-			return true
-		}
-	}
-	for _, n2 := range n.Rlist.Slice() {
-		if hascallchan(n2) {
-			return true
-		}
-	}
-
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/constFold_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/constFold_test.go
deleted file mode 100644
index f45cc13..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/constFold_test.go
+++ /dev/null
@@ -1,18111 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/constFold_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/constFold_test.go:1
-package gc
-
-import "testing"
-
-func TestConstFolduint64add(t *testing.T) {
-	var x, y, r uint64
-	x = 0
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 4294967296 {
-		t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
-	}
-	y = 18446744073709551615
-	r = x + y
-	if r != 18446744073709551615 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 18446744073709551615", "+", r)
-	}
-	x = 1
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 4294967297 {
-		t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
-	}
-	y = 18446744073709551615
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "+", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x + y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 4294967297 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
-	}
-	y = 18446744073709551615
-	r = x + y
-	if r != 4294967295 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967295", "+", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x + y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 0", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 4294967295 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "+", r)
-	}
-	y = 18446744073709551615
-	r = x + y
-	if r != 18446744073709551614 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 18446744073709551614", "+", r)
-	}
-}
-func TestConstFolduint64sub(t *testing.T) {
-	var x, y, r uint64
-	x = 0
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 18446744073709551615 {
-		t.Errorf("0 %s 1 = %d, want 18446744073709551615", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 18446744069414584320 {
-		t.Errorf("0 %s 4294967296 = %d, want 18446744069414584320", "-", r)
-	}
-	y = 18446744073709551615
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 1", "-", r)
-	}
-	x = 1
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 18446744069414584321 {
-		t.Errorf("1 %s 4294967296 = %d, want 18446744069414584321", "-", r)
-	}
-	y = 18446744073709551615
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 2", "-", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x - y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 4294967295 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
-	}
-	y = 18446744073709551615
-	r = x - y
-	if r != 4294967297 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967297", "-", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x - y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 18446744073709551614 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 18446744069414584319 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584319", "-", r)
-	}
-	y = 18446744073709551615
-	r = x - y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "-", r)
-	}
-}
-func TestConstFolduint64div(t *testing.T) {
-	var x, y, r uint64
-	x = 0
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
-	}
-	y = 18446744073709551615
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
-	}
-	y = 18446744073709551615
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "/", r)
-	}
-	x = 4294967296
-	y = 1
-	r = x / y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 1 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
-	}
-	y = 18446744073709551615
-	r = x / y
-	if r != 0 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "/", r)
-	}
-	x = 18446744073709551615
-	y = 1
-	r = x / y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 4294967295 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "/", r)
-	}
-	y = 18446744073709551615
-	r = x / y
-	if r != 1 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "/", r)
-	}
-}
-func TestConstFolduint64mul(t *testing.T) {
-	var x, y, r uint64
-	x = 0
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
-	}
-	y = 18446744073709551615
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
-	}
-	y = 18446744073709551615
-	r = x * y
-	if r != 18446744073709551615 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 18446744073709551615", "*", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
-	}
-	y = 18446744073709551615
-	r = x * y
-	if r != 18446744069414584320 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 18446744069414584320", "*", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 18446744069414584320 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584320", "*", r)
-	}
-	y = 18446744073709551615
-	r = x * y
-	if r != 1 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "*", r)
-	}
-}
-func TestConstFolduint64mod(t *testing.T) {
-	var x, y, r uint64
-	x = 0
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
-	}
-	y = 18446744073709551615
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
-	}
-	y = 18446744073709551615
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 1", "%", r)
-	}
-	x = 4294967296
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
-	}
-	y = 18446744073709551615
-	r = x % y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967296", "%", r)
-	}
-	x = 18446744073709551615
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 4294967295 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "%", r)
-	}
-	y = 18446744073709551615
-	r = x % y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "%", r)
-	}
-}
-func TestConstFoldint64add(t *testing.T) {
-	var x, y, r int64
-	x = -9223372036854775808
-	y = -9223372036854775808
-	r = x + y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != 1 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != 9223372032559808512 {
-		t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 9223372032559808512", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 9223372036854775807 {
-		t.Errorf("-9223372036854775808 %s -1 = %d, want 9223372036854775807", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775807", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != -9223372032559808512 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -9223372032559808512", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != -2 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "+", r)
-	}
-	x = -9223372036854775807
-	y = -9223372036854775808
-	r = x + y
-	if r != 1 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != 2 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 2", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != 9223372032559808513 {
-		t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 9223372032559808513", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775808", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -9223372036854775806 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775806", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != -9223372032559808511 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -9223372032559808511", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "+", r)
-	}
-	x = -4294967296
-	y = -9223372036854775808
-	r = x + y
-	if r != 9223372032559808512 {
-		t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != 9223372032559808513 {
-		t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808513", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != -8589934592 {
-		t.Errorf("-4294967296 %s -4294967296 = %d, want -8589934592", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -4294967297 {
-		t.Errorf("-4294967296 %s -1 = %d, want -4294967297", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -4294967295 {
-		t.Errorf("-4294967296 %s 1 = %d, want -4294967295", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != 9223372032559808510 {
-		t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808510", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != 9223372032559808511 {
-		t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808511", "+", r)
-	}
-	x = -1
-	y = -9223372036854775808
-	r = x + y
-	if r != 9223372036854775807 {
-		t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != -9223372036854775808 {
-		t.Errorf("-1 %s -9223372036854775807 = %d, want -9223372036854775808", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != -4294967297 {
-		t.Errorf("-1 %s -4294967296 = %d, want -4294967297", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -2 {
-		t.Errorf("-1 %s -1 = %d, want -2", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 4294967295 {
-		t.Errorf("-1 %s 4294967296 = %d, want 4294967295", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != 9223372036854775805 {
-		t.Errorf("-1 %s 9223372036854775806 = %d, want 9223372036854775805", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != 9223372036854775806 {
-		t.Errorf("-1 %s 9223372036854775807 = %d, want 9223372036854775806", "+", r)
-	}
-	x = 0
-	y = -9223372036854775808
-	r = x + y
-	if r != -9223372036854775808 {
-		t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != -9223372036854775807 {
-		t.Errorf("0 %s -9223372036854775807 = %d, want -9223372036854775807", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != -4294967296 {
-		t.Errorf("0 %s -4294967296 = %d, want -4294967296", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -1 {
-		t.Errorf("0 %s -1 = %d, want -1", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 4294967296 {
-		t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != 9223372036854775806 {
-		t.Errorf("0 %s 9223372036854775806 = %d, want 9223372036854775806", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != 9223372036854775807 {
-		t.Errorf("0 %s 9223372036854775807 = %d, want 9223372036854775807", "+", r)
-	}
-	x = 1
-	y = -9223372036854775808
-	r = x + y
-	if r != -9223372036854775807 {
-		t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != -9223372036854775806 {
-		t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775806", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != -4294967295 {
-		t.Errorf("1 %s -4294967296 = %d, want -4294967295", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 4294967297 {
-		t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != 9223372036854775807 {
-		t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775807", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != -9223372036854775808 {
-		t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775808", "+", r)
-	}
-	x = 4294967296
-	y = -9223372036854775808
-	r = x + y
-	if r != -9223372032559808512 {
-		t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != -9223372032559808511 {
-		t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808511", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != 0 {
-		t.Errorf("4294967296 %s -4294967296 = %d, want 0", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 4294967295 {
-		t.Errorf("4294967296 %s -1 = %d, want 4294967295", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 4294967297 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != -9223372032559808514 {
-		t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808514", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != -9223372032559808513 {
-		t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808513", "+", r)
-	}
-	x = 9223372036854775806
-	y = -9223372036854775808
-	r = x + y
-	if r != -2 {
-		t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != -1 {
-		t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -1", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != 9223372032559808510 {
-		t.Errorf("9223372036854775806 %s -4294967296 = %d, want 9223372032559808510", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 9223372036854775805 {
-		t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775805", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775807", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != -9223372032559808514 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want -9223372032559808514", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != -4 {
-		t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want -4", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != -3 {
-		t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -3", "+", r)
-	}
-	x = 9223372036854775807
-	y = -9223372036854775808
-	r = x + y
-	if r != -1 {
-		t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "+", r)
-	}
-	y = -9223372036854775807
-	r = x + y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "+", r)
-	}
-	y = -4294967296
-	r = x + y
-	if r != 9223372032559808511 {
-		t.Errorf("9223372036854775807 %s -4294967296 = %d, want 9223372032559808511", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775807 %s -1 = %d, want 9223372036854775806", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -9223372036854775808 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want -9223372036854775808", "+", r)
-	}
-	y = 4294967296
-	r = x + y
-	if r != -9223372032559808513 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want -9223372032559808513", "+", r)
-	}
-	y = 9223372036854775806
-	r = x + y
-	if r != -3 {
-		t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -3", "+", r)
-	}
-	y = 9223372036854775807
-	r = x + y
-	if r != -2 {
-		t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want -2", "+", r)
-	}
-}
-func TestConstFoldint64sub(t *testing.T) {
-	var x, y, r int64
-	x = -9223372036854775808
-	y = -9223372036854775808
-	r = x - y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != -9223372032559808512 {
-		t.Errorf("-9223372036854775808 %s -4294967296 = %d, want -9223372032559808512", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775807", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 9223372036854775807 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want 9223372036854775807", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 9223372032559808512 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 9223372032559808512", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != 2 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 2", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != 1 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want 1", "-", r)
-	}
-	x = -9223372036854775807
-	y = -9223372036854775808
-	r = x - y
-	if r != 1 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != -9223372032559808511 {
-		t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -9223372032559808511", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -9223372036854775806 {
-		t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775806", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775808", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 9223372032559808513 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 9223372032559808513", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != 3 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 3", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != 2 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 2", "-", r)
-	}
-	x = -4294967296
-	y = -9223372036854775808
-	r = x - y
-	if r != 9223372032559808512 {
-		t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != 9223372032559808511 {
-		t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808511", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -4294967295 {
-		t.Errorf("-4294967296 %s -1 = %d, want -4294967295", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -4294967297 {
-		t.Errorf("-4294967296 %s 1 = %d, want -4294967297", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != -8589934592 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want -8589934592", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != 9223372032559808514 {
-		t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808514", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != 9223372032559808513 {
-		t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808513", "-", r)
-	}
-	x = -1
-	y = -9223372036854775808
-	r = x - y
-	if r != 9223372036854775807 {
-		t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != 9223372036854775806 {
-		t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775806", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != 4294967295 {
-		t.Errorf("-1 %s -4294967296 = %d, want 4294967295", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != -4294967297 {
-		t.Errorf("-1 %s 4294967296 = %d, want -4294967297", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != -9223372036854775807 {
-		t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775807", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != -9223372036854775808 {
-		t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775808", "-", r)
-	}
-	x = 0
-	y = -9223372036854775808
-	r = x - y
-	if r != -9223372036854775808 {
-		t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != 9223372036854775807 {
-		t.Errorf("0 %s -9223372036854775807 = %d, want 9223372036854775807", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != 4294967296 {
-		t.Errorf("0 %s -4294967296 = %d, want 4294967296", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s -1 = %d, want 1", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -1 {
-		t.Errorf("0 %s 1 = %d, want -1", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != -4294967296 {
-		t.Errorf("0 %s 4294967296 = %d, want -4294967296", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != -9223372036854775806 {
-		t.Errorf("0 %s 9223372036854775806 = %d, want -9223372036854775806", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != -9223372036854775807 {
-		t.Errorf("0 %s 9223372036854775807 = %d, want -9223372036854775807", "-", r)
-	}
-	x = 1
-	y = -9223372036854775808
-	r = x - y
-	if r != -9223372036854775807 {
-		t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != -9223372036854775808 {
-		t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775808", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != 4294967297 {
-		t.Errorf("1 %s -4294967296 = %d, want 4294967297", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s -1 = %d, want 2", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != -4294967295 {
-		t.Errorf("1 %s 4294967296 = %d, want -4294967295", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != -9223372036854775805 {
-		t.Errorf("1 %s 9223372036854775806 = %d, want -9223372036854775805", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != -9223372036854775806 {
-		t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775806", "-", r)
-	}
-	x = 4294967296
-	y = -9223372036854775808
-	r = x - y
-	if r != -9223372032559808512 {
-		t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != -9223372032559808513 {
-		t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808513", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s -4294967296 = %d, want 8589934592", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 4294967297 {
-		t.Errorf("4294967296 %s -1 = %d, want 4294967297", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 4294967295 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != -9223372032559808510 {
-		t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808510", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != -9223372032559808511 {
-		t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808511", "-", r)
-	}
-	x = 9223372036854775806
-	y = -9223372036854775808
-	r = x - y
-	if r != -2 {
-		t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != -3 {
-		t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -3", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != -9223372032559808514 {
-		t.Errorf("9223372036854775806 %s -4294967296 = %d, want -9223372032559808514", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775807", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 9223372036854775805 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775805", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 9223372032559808510 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want 9223372032559808510", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != -1 {
-		t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -1", "-", r)
-	}
-	x = 9223372036854775807
-	y = -9223372036854775808
-	r = x - y
-	if r != -1 {
-		t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "-", r)
-	}
-	y = -9223372036854775807
-	r = x - y
-	if r != -2 {
-		t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -2", "-", r)
-	}
-	y = -4294967296
-	r = x - y
-	if r != -9223372032559808513 {
-		t.Errorf("9223372036854775807 %s -4294967296 = %d, want -9223372032559808513", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -9223372036854775808 {
-		t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775808", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775806", "-", r)
-	}
-	y = 4294967296
-	r = x - y
-	if r != 9223372032559808511 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want 9223372032559808511", "-", r)
-	}
-	y = 9223372036854775806
-	r = x - y
-	if r != 1 {
-		t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "-", r)
-	}
-	y = 9223372036854775807
-	r = x - y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "-", r)
-	}
-}
-func TestConstFoldint64div(t *testing.T) {
-	var x, y, r int64
-	x = -9223372036854775808
-	y = -9223372036854775808
-	r = x / y
-	if r != 1 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 1", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 1 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != 2147483648 {
-		t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 2147483648", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != -2147483648 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -2147483648", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -1", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "/", r)
-	}
-	x = -9223372036854775807
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 1 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != 2147483647 {
-		t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 2147483647", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != -2147483647 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -2147483647", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "/", r)
-	}
-	x = -4294967296
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != 1 {
-		t.Errorf("-4294967296 %s -4294967296 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 4294967296 {
-		t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != -1 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want -1", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
-	}
-	x = -1
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -9223372036854775807 = %d, want 0", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -4294967296 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967296 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 9223372036854775806 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 9223372036854775807 = %d, want 0", "/", r)
-	}
-	x = 0
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -9223372036854775807 = %d, want 0", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -4294967296 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 9223372036854775806 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 9223372036854775807 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -9223372036854775807 = %d, want 0", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -4294967296 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 9223372036854775806 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 9223372036854775807 = %d, want 0", "/", r)
-	}
-	x = 4294967296
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != -1 {
-		t.Errorf("4294967296 %s -4294967296 = %d, want -1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -4294967296 {
-		t.Errorf("4294967296 %s -1 = %d, want -4294967296", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 1 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 0 {
-		t.Errorf("4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
-	}
-	x = 9223372036854775806
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 0", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != -2147483647 {
-		t.Errorf("9223372036854775806 %s -4294967296 = %d, want -2147483647", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -9223372036854775806 {
-		t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 2147483647 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want 2147483647", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 1 {
-		t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 1", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 0", "/", r)
-	}
-	x = 9223372036854775807
-	y = -9223372036854775808
-	r = x / y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
-	}
-	y = -9223372036854775807
-	r = x / y
-	if r != -1 {
-		t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "/", r)
-	}
-	y = -4294967296
-	r = x / y
-	if r != -2147483647 {
-		t.Errorf("9223372036854775807 %s -4294967296 = %d, want -2147483647", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -9223372036854775807 {
-		t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "/", r)
-	}
-	y = 4294967296
-	r = x / y
-	if r != 2147483647 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want 2147483647", "/", r)
-	}
-	y = 9223372036854775806
-	r = x / y
-	if r != 1 {
-		t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "/", r)
-	}
-	y = 9223372036854775807
-	r = x / y
-	if r != 1 {
-		t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "/", r)
-	}
-}
-func TestConstFoldint64mul(t *testing.T) {
-	var x, y, r int64
-	x = -9223372036854775808
-	y = -9223372036854775808
-	r = x * y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -9223372036854775808", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 0", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -9223372036854775808", "*", r)
-	}
-	x = -9223372036854775807
-	y = -9223372036854775808
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != 1 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967296", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 4294967296", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != 9223372036854775806 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "*", r)
-	}
-	x = -4294967296
-	y = -9223372036854775808
-	r = x * y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != 8589934592 {
-		t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 8589934592", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 4294967296", "*", r)
-	}
-	x = -1
-	y = -9223372036854775808
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("-1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != 9223372036854775807 {
-		t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775807", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("-1 %s -4294967296 = %d, want 4294967296", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("-1 %s 4294967296 = %d, want -4294967296", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != -9223372036854775806 {
-		t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != -9223372036854775807 {
-		t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775807", "*", r)
-	}
-	x = 0
-	y = -9223372036854775808
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -9223372036854775808 = %d, want 0", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -9223372036854775807 = %d, want 0", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -4294967296 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 9223372036854775806 = %d, want 0", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 9223372036854775807 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = -9223372036854775808
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != -9223372036854775807 {
-		t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775807", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("1 %s -4294967296 = %d, want -4294967296", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != 9223372036854775806 {
-		t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != 9223372036854775807 {
-		t.Errorf("1 %s 9223372036854775807 = %d, want 9223372036854775807", "*", r)
-	}
-	x = 4294967296
-	y = -9223372036854775808
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967296 %s -4294967296 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("4294967296 %s -1 = %d, want -4294967296", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != -8589934592 {
-		t.Errorf("4294967296 %s 9223372036854775806 = %d, want -8589934592", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("4294967296 %s 9223372036854775807 = %d, want -4294967296", "*", r)
-	}
-	x = 9223372036854775806
-	y = -9223372036854775808
-	r = x * y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 8589934592 {
-		t.Errorf("9223372036854775806 %s -4294967296 = %d, want 8589934592", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -9223372036854775806 {
-		t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != -8589934592 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want -8589934592", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != 4 {
-		t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 4", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != -9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -9223372036854775806", "*", r)
-	}
-	x = 9223372036854775807
-	y = -9223372036854775808
-	r = x * y
-	if r != -9223372036854775808 {
-		t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
-	}
-	y = -9223372036854775807
-	r = x * y
-	if r != -1 {
-		t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "*", r)
-	}
-	y = -4294967296
-	r = x * y
-	if r != 4294967296 {
-		t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967296", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -9223372036854775807 {
-		t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "*", r)
-	}
-	y = 4294967296
-	r = x * y
-	if r != -4294967296 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want -4294967296", "*", r)
-	}
-	y = 9223372036854775806
-	r = x * y
-	if r != -9223372036854775806 {
-		t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
-	}
-	y = 9223372036854775807
-	r = x * y
-	if r != 1 {
-		t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "*", r)
-	}
-}
-func TestConstFoldint64mod(t *testing.T) {
-	var x, y, r int64
-	x = -9223372036854775808
-	y = -9223372036854775808
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != -2 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "%", r)
-	}
-	x = -9223372036854775807
-	y = -9223372036854775808
-	r = x % y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775807", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != -4294967295 {
-		t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967295", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != -4294967295 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -4294967295", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
-	}
-	x = -4294967296
-	y = -9223372036854775808
-	r = x % y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s -9223372036854775808 = %d, want -4294967296", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-4294967296 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 9223372036854775806 = %d, want -4294967296", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 9223372036854775807 = %d, want -4294967296", "%", r)
-	}
-	x = -1
-	y = -9223372036854775808
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -9223372036854775808 = %d, want -1", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -9223372036854775807 = %d, want -1", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -4294967296 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967296 = %d, want -1", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 9223372036854775806 = %d, want -1", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 9223372036854775807 = %d, want -1", "%", r)
-	}
-	x = 0
-	y = -9223372036854775808
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -9223372036854775808 = %d, want 0", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -9223372036854775807 = %d, want 0", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -4294967296 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 9223372036854775806 = %d, want 0", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 9223372036854775807 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = -9223372036854775808
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -9223372036854775808 = %d, want 1", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -9223372036854775807 = %d, want 1", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -4294967296 = %d, want 1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 9223372036854775806 = %d, want 1", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 9223372036854775807 = %d, want 1", "%", r)
-	}
-	x = 4294967296
-	y = -9223372036854775808
-	r = x % y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s -9223372036854775808 = %d, want 4294967296", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967296 %s -4294967296 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967296 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 9223372036854775806 = %d, want 4294967296", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 9223372036854775807 = %d, want 4294967296", "%", r)
-	}
-	x = 9223372036854775806
-	y = -9223372036854775808
-	r = x % y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 9223372036854775806", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 4294967294 {
-		t.Errorf("9223372036854775806 %s -4294967296 = %d, want 4294967294", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 4294967294 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want 4294967294", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 9223372036854775806", "%", r)
-	}
-	x = 9223372036854775807
-	y = -9223372036854775808
-	r = x % y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 9223372036854775807", "%", r)
-	}
-	y = -9223372036854775807
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
-	}
-	y = -4294967296
-	r = x % y
-	if r != 4294967295 {
-		t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967295", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967296
-	r = x % y
-	if r != 4294967295 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want 4294967295", "%", r)
-	}
-	y = 9223372036854775806
-	r = x % y
-	if r != 1 {
-		t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "%", r)
-	}
-	y = 9223372036854775807
-	r = x % y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
-	}
-}
-func TestConstFolduint32add(t *testing.T) {
-	var x, y, r uint32
-	x = 0
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 4294967295
-	r = x + y
-	if r != 4294967295 {
-		t.Errorf("0 %s 4294967295 = %d, want 4294967295", "+", r)
-	}
-	x = 1
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 4294967295
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "+", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x + y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("4294967295 %s 1 = %d, want 0", "+", r)
-	}
-	y = 4294967295
-	r = x + y
-	if r != 4294967294 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 4294967294", "+", r)
-	}
-}
-func TestConstFolduint32sub(t *testing.T) {
-	var x, y, r uint32
-	x = 0
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 4294967295 {
-		t.Errorf("0 %s 1 = %d, want 4294967295", "-", r)
-	}
-	y = 4294967295
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s 4294967295 = %d, want 1", "-", r)
-	}
-	x = 1
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 4294967295
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s 4294967295 = %d, want 2", "-", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x - y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 4294967294 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967294", "-", r)
-	}
-	y = 4294967295
-	r = x - y
-	if r != 0 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 0", "-", r)
-	}
-}
-func TestConstFolduint32div(t *testing.T) {
-	var x, y, r uint32
-	x = 0
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 4294967295
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 4294967295
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "/", r)
-	}
-	x = 4294967295
-	y = 1
-	r = x / y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967295", "/", r)
-	}
-	y = 4294967295
-	r = x / y
-	if r != 1 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 1", "/", r)
-	}
-}
-func TestConstFolduint32mul(t *testing.T) {
-	var x, y, r uint32
-	x = 0
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 4294967295
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 4294967295
-	r = x * y
-	if r != 4294967295 {
-		t.Errorf("1 %s 4294967295 = %d, want 4294967295", "*", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("4294967295 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967295", "*", r)
-	}
-	y = 4294967295
-	r = x * y
-	if r != 1 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 1", "*", r)
-	}
-}
-func TestConstFolduint32mod(t *testing.T) {
-	var x, y, r uint32
-	x = 0
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967295
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967295
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 4294967295 = %d, want 1", "%", r)
-	}
-	x = 4294967295
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967295 %s 1 = %d, want 0", "%", r)
-	}
-	y = 4294967295
-	r = x % y
-	if r != 0 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 0", "%", r)
-	}
-}
-func TestConstFoldint32add(t *testing.T) {
-	var x, y, r int32
-	x = -2147483648
-	y = -2147483648
-	r = x + y
-	if r != 0 {
-		t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "+", r)
-	}
-	y = -2147483647
-	r = x + y
-	if r != 1 {
-		t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 2147483647 {
-		t.Errorf("-2147483648 %s -1 = %d, want 2147483647", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -2147483647 {
-		t.Errorf("-2147483648 %s 1 = %d, want -2147483647", "+", r)
-	}
-	y = 2147483647
-	r = x + y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "+", r)
-	}
-	x = -2147483647
-	y = -2147483648
-	r = x + y
-	if r != 1 {
-		t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "+", r)
-	}
-	y = -2147483647
-	r = x + y
-	if r != 2 {
-		t.Errorf("-2147483647 %s -2147483647 = %d, want 2", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -2147483648 {
-		t.Errorf("-2147483647 %s -1 = %d, want -2147483648", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -2147483646 {
-		t.Errorf("-2147483647 %s 1 = %d, want -2147483646", "+", r)
-	}
-	y = 2147483647
-	r = x + y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "+", r)
-	}
-	x = -1
-	y = -2147483648
-	r = x + y
-	if r != 2147483647 {
-		t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "+", r)
-	}
-	y = -2147483647
-	r = x + y
-	if r != -2147483648 {
-		t.Errorf("-1 %s -2147483647 = %d, want -2147483648", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -2 {
-		t.Errorf("-1 %s -1 = %d, want -2", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "+", r)
-	}
-	y = 2147483647
-	r = x + y
-	if r != 2147483646 {
-		t.Errorf("-1 %s 2147483647 = %d, want 2147483646", "+", r)
-	}
-	x = 0
-	y = -2147483648
-	r = x + y
-	if r != -2147483648 {
-		t.Errorf("0 %s -2147483648 = %d, want -2147483648", "+", r)
-	}
-	y = -2147483647
-	r = x + y
-	if r != -2147483647 {
-		t.Errorf("0 %s -2147483647 = %d, want -2147483647", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -1 {
-		t.Errorf("0 %s -1 = %d, want -1", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 2147483647
-	r = x + y
-	if r != 2147483647 {
-		t.Errorf("0 %s 2147483647 = %d, want 2147483647", "+", r)
-	}
-	x = 1
-	y = -2147483648
-	r = x + y
-	if r != -2147483647 {
-		t.Errorf("1 %s -2147483648 = %d, want -2147483647", "+", r)
-	}
-	y = -2147483647
-	r = x + y
-	if r != -2147483646 {
-		t.Errorf("1 %s -2147483647 = %d, want -2147483646", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 2147483647
-	r = x + y
-	if r != -2147483648 {
-		t.Errorf("1 %s 2147483647 = %d, want -2147483648", "+", r)
-	}
-	x = 2147483647
-	y = -2147483648
-	r = x + y
-	if r != -1 {
-		t.Errorf("2147483647 %s -2147483648 = %d, want -1", "+", r)
-	}
-	y = -2147483647
-	r = x + y
-	if r != 0 {
-		t.Errorf("2147483647 %s -2147483647 = %d, want 0", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 2147483646 {
-		t.Errorf("2147483647 %s -1 = %d, want 2147483646", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -2147483648 {
-		t.Errorf("2147483647 %s 1 = %d, want -2147483648", "+", r)
-	}
-	y = 2147483647
-	r = x + y
-	if r != -2 {
-		t.Errorf("2147483647 %s 2147483647 = %d, want -2", "+", r)
-	}
-}
-func TestConstFoldint32sub(t *testing.T) {
-	var x, y, r int32
-	x = -2147483648
-	y = -2147483648
-	r = x - y
-	if r != 0 {
-		t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "-", r)
-	}
-	y = -2147483647
-	r = x - y
-	if r != -1 {
-		t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -2147483647 {
-		t.Errorf("-2147483648 %s -1 = %d, want -2147483647", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 2147483647 {
-		t.Errorf("-2147483648 %s 1 = %d, want 2147483647", "-", r)
-	}
-	y = 2147483647
-	r = x - y
-	if r != 1 {
-		t.Errorf("-2147483648 %s 2147483647 = %d, want 1", "-", r)
-	}
-	x = -2147483647
-	y = -2147483648
-	r = x - y
-	if r != 1 {
-		t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "-", r)
-	}
-	y = -2147483647
-	r = x - y
-	if r != 0 {
-		t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -2147483646 {
-		t.Errorf("-2147483647 %s -1 = %d, want -2147483646", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -2147483648 {
-		t.Errorf("-2147483647 %s 1 = %d, want -2147483648", "-", r)
-	}
-	y = 2147483647
-	r = x - y
-	if r != 2 {
-		t.Errorf("-2147483647 %s 2147483647 = %d, want 2", "-", r)
-	}
-	x = -1
-	y = -2147483648
-	r = x - y
-	if r != 2147483647 {
-		t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "-", r)
-	}
-	y = -2147483647
-	r = x - y
-	if r != 2147483646 {
-		t.Errorf("-1 %s -2147483647 = %d, want 2147483646", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "-", r)
-	}
-	y = 2147483647
-	r = x - y
-	if r != -2147483648 {
-		t.Errorf("-1 %s 2147483647 = %d, want -2147483648", "-", r)
-	}
-	x = 0
-	y = -2147483648
-	r = x - y
-	if r != -2147483648 {
-		t.Errorf("0 %s -2147483648 = %d, want -2147483648", "-", r)
-	}
-	y = -2147483647
-	r = x - y
-	if r != 2147483647 {
-		t.Errorf("0 %s -2147483647 = %d, want 2147483647", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s -1 = %d, want 1", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -1 {
-		t.Errorf("0 %s 1 = %d, want -1", "-", r)
-	}
-	y = 2147483647
-	r = x - y
-	if r != -2147483647 {
-		t.Errorf("0 %s 2147483647 = %d, want -2147483647", "-", r)
-	}
-	x = 1
-	y = -2147483648
-	r = x - y
-	if r != -2147483647 {
-		t.Errorf("1 %s -2147483648 = %d, want -2147483647", "-", r)
-	}
-	y = -2147483647
-	r = x - y
-	if r != -2147483648 {
-		t.Errorf("1 %s -2147483647 = %d, want -2147483648", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s -1 = %d, want 2", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 2147483647
-	r = x - y
-	if r != -2147483646 {
-		t.Errorf("1 %s 2147483647 = %d, want -2147483646", "-", r)
-	}
-	x = 2147483647
-	y = -2147483648
-	r = x - y
-	if r != -1 {
-		t.Errorf("2147483647 %s -2147483648 = %d, want -1", "-", r)
-	}
-	y = -2147483647
-	r = x - y
-	if r != -2 {
-		t.Errorf("2147483647 %s -2147483647 = %d, want -2", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -2147483648 {
-		t.Errorf("2147483647 %s -1 = %d, want -2147483648", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 2147483646 {
-		t.Errorf("2147483647 %s 1 = %d, want 2147483646", "-", r)
-	}
-	y = 2147483647
-	r = x - y
-	if r != 0 {
-		t.Errorf("2147483647 %s 2147483647 = %d, want 0", "-", r)
-	}
-}
-func TestConstFoldint32div(t *testing.T) {
-	var x, y, r int32
-	x = -2147483648
-	y = -2147483648
-	r = x / y
-	if r != 1 {
-		t.Errorf("-2147483648 %s -2147483648 = %d, want 1", "/", r)
-	}
-	y = -2147483647
-	r = x / y
-	if r != 1 {
-		t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "/", r)
-	}
-	y = 2147483647
-	r = x / y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "/", r)
-	}
-	x = -2147483647
-	y = -2147483648
-	r = x / y
-	if r != 0 {
-		t.Errorf("-2147483647 %s -2147483648 = %d, want 0", "/", r)
-	}
-	y = -2147483647
-	r = x / y
-	if r != 1 {
-		t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 2147483647 {
-		t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "/", r)
-	}
-	y = 2147483647
-	r = x / y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "/", r)
-	}
-	x = -1
-	y = -2147483648
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -2147483648 = %d, want 0", "/", r)
-	}
-	y = -2147483647
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -2147483647 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "/", r)
-	}
-	y = 2147483647
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 2147483647 = %d, want 0", "/", r)
-	}
-	x = 0
-	y = -2147483648
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -2147483648 = %d, want 0", "/", r)
-	}
-	y = -2147483647
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -2147483647 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 2147483647
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 2147483647 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = -2147483648
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -2147483648 = %d, want 0", "/", r)
-	}
-	y = -2147483647
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -2147483647 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 2147483647
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 2147483647 = %d, want 0", "/", r)
-	}
-	x = 2147483647
-	y = -2147483648
-	r = x / y
-	if r != 0 {
-		t.Errorf("2147483647 %s -2147483648 = %d, want 0", "/", r)
-	}
-	y = -2147483647
-	r = x / y
-	if r != -1 {
-		t.Errorf("2147483647 %s -2147483647 = %d, want -1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -2147483647 {
-		t.Errorf("2147483647 %s -1 = %d, want -2147483647", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 1 = %d, want 2147483647", "/", r)
-	}
-	y = 2147483647
-	r = x / y
-	if r != 1 {
-		t.Errorf("2147483647 %s 2147483647 = %d, want 1", "/", r)
-	}
-}
-func TestConstFoldint32mul(t *testing.T) {
-	var x, y, r int32
-	x = -2147483648
-	y = -2147483648
-	r = x * y
-	if r != 0 {
-		t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "*", r)
-	}
-	y = -2147483647
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s -2147483647 = %d, want -2147483648", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "*", r)
-	}
-	y = 2147483647
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 2147483647 = %d, want -2147483648", "*", r)
-	}
-	x = -2147483647
-	y = -2147483648
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
-	}
-	y = -2147483647
-	r = x * y
-	if r != 1 {
-		t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 2147483647 {
-		t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "*", r)
-	}
-	y = 2147483647
-	r = x * y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "*", r)
-	}
-	x = -1
-	y = -2147483648
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("-1 %s -2147483648 = %d, want -2147483648", "*", r)
-	}
-	y = -2147483647
-	r = x * y
-	if r != 2147483647 {
-		t.Errorf("-1 %s -2147483647 = %d, want 2147483647", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "*", r)
-	}
-	y = 2147483647
-	r = x * y
-	if r != -2147483647 {
-		t.Errorf("-1 %s 2147483647 = %d, want -2147483647", "*", r)
-	}
-	x = 0
-	y = -2147483648
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -2147483648 = %d, want 0", "*", r)
-	}
-	y = -2147483647
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -2147483647 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 2147483647
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 2147483647 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = -2147483648
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("1 %s -2147483648 = %d, want -2147483648", "*", r)
-	}
-	y = -2147483647
-	r = x * y
-	if r != -2147483647 {
-		t.Errorf("1 %s -2147483647 = %d, want -2147483647", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 2147483647
-	r = x * y
-	if r != 2147483647 {
-		t.Errorf("1 %s 2147483647 = %d, want 2147483647", "*", r)
-	}
-	x = 2147483647
-	y = -2147483648
-	r = x * y
-	if r != -2147483648 {
-		t.Errorf("2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
-	}
-	y = -2147483647
-	r = x * y
-	if r != -1 {
-		t.Errorf("2147483647 %s -2147483647 = %d, want -1", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -2147483647 {
-		t.Errorf("2147483647 %s -1 = %d, want -2147483647", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("2147483647 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 1 = %d, want 2147483647", "*", r)
-	}
-	y = 2147483647
-	r = x * y
-	if r != 1 {
-		t.Errorf("2147483647 %s 2147483647 = %d, want 1", "*", r)
-	}
-}
-func TestConstFoldint32mod(t *testing.T) {
-	var x, y, r int32
-	x = -2147483648
-	y = -2147483648
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "%", r)
-	}
-	y = -2147483647
-	r = x % y
-	if r != -1 {
-		t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483648 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 1 = %d, want 0", "%", r)
-	}
-	y = 2147483647
-	r = x % y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "%", r)
-	}
-	x = -2147483647
-	y = -2147483648
-	r = x % y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483647", "%", r)
-	}
-	y = -2147483647
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483647 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 1 = %d, want 0", "%", r)
-	}
-	y = 2147483647
-	r = x % y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "%", r)
-	}
-	x = -1
-	y = -2147483648
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -2147483648 = %d, want -1", "%", r)
-	}
-	y = -2147483647
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -2147483647 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 2147483647
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 2147483647 = %d, want -1", "%", r)
-	}
-	x = 0
-	y = -2147483648
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -2147483648 = %d, want 0", "%", r)
-	}
-	y = -2147483647
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -2147483647 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 2147483647
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 2147483647 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = -2147483648
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -2147483648 = %d, want 1", "%", r)
-	}
-	y = -2147483647
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -2147483647 = %d, want 1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 2147483647
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 2147483647 = %d, want 1", "%", r)
-	}
-	x = 2147483647
-	y = -2147483648
-	r = x % y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s -2147483648 = %d, want 2147483647", "%", r)
-	}
-	y = -2147483647
-	r = x % y
-	if r != 0 {
-		t.Errorf("2147483647 %s -2147483647 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("2147483647 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("2147483647 %s 1 = %d, want 0", "%", r)
-	}
-	y = 2147483647
-	r = x % y
-	if r != 0 {
-		t.Errorf("2147483647 %s 2147483647 = %d, want 0", "%", r)
-	}
-}
-func TestConstFolduint16add(t *testing.T) {
-	var x, y, r uint16
-	x = 0
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 65535
-	r = x + y
-	if r != 65535 {
-		t.Errorf("0 %s 65535 = %d, want 65535", "+", r)
-	}
-	x = 1
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 65535
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "+", r)
-	}
-	x = 65535
-	y = 0
-	r = x + y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("65535 %s 1 = %d, want 0", "+", r)
-	}
-	y = 65535
-	r = x + y
-	if r != 65534 {
-		t.Errorf("65535 %s 65535 = %d, want 65534", "+", r)
-	}
-}
-func TestConstFolduint16sub(t *testing.T) {
-	var x, y, r uint16
-	x = 0
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 65535 {
-		t.Errorf("0 %s 1 = %d, want 65535", "-", r)
-	}
-	y = 65535
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s 65535 = %d, want 1", "-", r)
-	}
-	x = 1
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 65535
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s 65535 = %d, want 2", "-", r)
-	}
-	x = 65535
-	y = 0
-	r = x - y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 65534 {
-		t.Errorf("65535 %s 1 = %d, want 65534", "-", r)
-	}
-	y = 65535
-	r = x - y
-	if r != 0 {
-		t.Errorf("65535 %s 65535 = %d, want 0", "-", r)
-	}
-}
-func TestConstFolduint16div(t *testing.T) {
-	var x, y, r uint16
-	x = 0
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 65535
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 65535
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "/", r)
-	}
-	x = 65535
-	y = 1
-	r = x / y
-	if r != 65535 {
-		t.Errorf("65535 %s 1 = %d, want 65535", "/", r)
-	}
-	y = 65535
-	r = x / y
-	if r != 1 {
-		t.Errorf("65535 %s 65535 = %d, want 1", "/", r)
-	}
-}
-func TestConstFolduint16mul(t *testing.T) {
-	var x, y, r uint16
-	x = 0
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 65535
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 65535
-	r = x * y
-	if r != 65535 {
-		t.Errorf("1 %s 65535 = %d, want 65535", "*", r)
-	}
-	x = 65535
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("65535 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 65535 {
-		t.Errorf("65535 %s 1 = %d, want 65535", "*", r)
-	}
-	y = 65535
-	r = x * y
-	if r != 1 {
-		t.Errorf("65535 %s 65535 = %d, want 1", "*", r)
-	}
-}
-func TestConstFolduint16mod(t *testing.T) {
-	var x, y, r uint16
-	x = 0
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 65535
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 65535
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 65535 = %d, want 1", "%", r)
-	}
-	x = 65535
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("65535 %s 1 = %d, want 0", "%", r)
-	}
-	y = 65535
-	r = x % y
-	if r != 0 {
-		t.Errorf("65535 %s 65535 = %d, want 0", "%", r)
-	}
-}
-func TestConstFoldint16add(t *testing.T) {
-	var x, y, r int16
-	x = -32768
-	y = -32768
-	r = x + y
-	if r != 0 {
-		t.Errorf("-32768 %s -32768 = %d, want 0", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != 1 {
-		t.Errorf("-32768 %s -32767 = %d, want 1", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 32767 {
-		t.Errorf("-32768 %s -1 = %d, want 32767", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -32767 {
-		t.Errorf("-32768 %s 1 = %d, want -32767", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != -2 {
-		t.Errorf("-32768 %s 32766 = %d, want -2", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != -1 {
-		t.Errorf("-32768 %s 32767 = %d, want -1", "+", r)
-	}
-	x = -32767
-	y = -32768
-	r = x + y
-	if r != 1 {
-		t.Errorf("-32767 %s -32768 = %d, want 1", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != 2 {
-		t.Errorf("-32767 %s -32767 = %d, want 2", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -32768 {
-		t.Errorf("-32767 %s -1 = %d, want -32768", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -32766 {
-		t.Errorf("-32767 %s 1 = %d, want -32766", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != -1 {
-		t.Errorf("-32767 %s 32766 = %d, want -1", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != 0 {
-		t.Errorf("-32767 %s 32767 = %d, want 0", "+", r)
-	}
-	x = -1
-	y = -32768
-	r = x + y
-	if r != 32767 {
-		t.Errorf("-1 %s -32768 = %d, want 32767", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != -32768 {
-		t.Errorf("-1 %s -32767 = %d, want -32768", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -2 {
-		t.Errorf("-1 %s -1 = %d, want -2", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != 32765 {
-		t.Errorf("-1 %s 32766 = %d, want 32765", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != 32766 {
-		t.Errorf("-1 %s 32767 = %d, want 32766", "+", r)
-	}
-	x = 0
-	y = -32768
-	r = x + y
-	if r != -32768 {
-		t.Errorf("0 %s -32768 = %d, want -32768", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != -32767 {
-		t.Errorf("0 %s -32767 = %d, want -32767", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -1 {
-		t.Errorf("0 %s -1 = %d, want -1", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != 32766 {
-		t.Errorf("0 %s 32766 = %d, want 32766", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != 32767 {
-		t.Errorf("0 %s 32767 = %d, want 32767", "+", r)
-	}
-	x = 1
-	y = -32768
-	r = x + y
-	if r != -32767 {
-		t.Errorf("1 %s -32768 = %d, want -32767", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != -32766 {
-		t.Errorf("1 %s -32767 = %d, want -32766", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != 32767 {
-		t.Errorf("1 %s 32766 = %d, want 32767", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != -32768 {
-		t.Errorf("1 %s 32767 = %d, want -32768", "+", r)
-	}
-	x = 32766
-	y = -32768
-	r = x + y
-	if r != -2 {
-		t.Errorf("32766 %s -32768 = %d, want -2", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != -1 {
-		t.Errorf("32766 %s -32767 = %d, want -1", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 32765 {
-		t.Errorf("32766 %s -1 = %d, want 32765", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 32767 {
-		t.Errorf("32766 %s 1 = %d, want 32767", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != -4 {
-		t.Errorf("32766 %s 32766 = %d, want -4", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != -3 {
-		t.Errorf("32766 %s 32767 = %d, want -3", "+", r)
-	}
-	x = 32767
-	y = -32768
-	r = x + y
-	if r != -1 {
-		t.Errorf("32767 %s -32768 = %d, want -1", "+", r)
-	}
-	y = -32767
-	r = x + y
-	if r != 0 {
-		t.Errorf("32767 %s -32767 = %d, want 0", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 32766 {
-		t.Errorf("32767 %s -1 = %d, want 32766", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -32768 {
-		t.Errorf("32767 %s 1 = %d, want -32768", "+", r)
-	}
-	y = 32766
-	r = x + y
-	if r != -3 {
-		t.Errorf("32767 %s 32766 = %d, want -3", "+", r)
-	}
-	y = 32767
-	r = x + y
-	if r != -2 {
-		t.Errorf("32767 %s 32767 = %d, want -2", "+", r)
-	}
-}
-func TestConstFoldint16sub(t *testing.T) {
-	var x, y, r int16
-	x = -32768
-	y = -32768
-	r = x - y
-	if r != 0 {
-		t.Errorf("-32768 %s -32768 = %d, want 0", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != -1 {
-		t.Errorf("-32768 %s -32767 = %d, want -1", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -32767 {
-		t.Errorf("-32768 %s -1 = %d, want -32767", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 32767 {
-		t.Errorf("-32768 %s 1 = %d, want 32767", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != 2 {
-		t.Errorf("-32768 %s 32766 = %d, want 2", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != 1 {
-		t.Errorf("-32768 %s 32767 = %d, want 1", "-", r)
-	}
-	x = -32767
-	y = -32768
-	r = x - y
-	if r != 1 {
-		t.Errorf("-32767 %s -32768 = %d, want 1", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != 0 {
-		t.Errorf("-32767 %s -32767 = %d, want 0", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -32766 {
-		t.Errorf("-32767 %s -1 = %d, want -32766", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -32768 {
-		t.Errorf("-32767 %s 1 = %d, want -32768", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != 3 {
-		t.Errorf("-32767 %s 32766 = %d, want 3", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != 2 {
-		t.Errorf("-32767 %s 32767 = %d, want 2", "-", r)
-	}
-	x = -1
-	y = -32768
-	r = x - y
-	if r != 32767 {
-		t.Errorf("-1 %s -32768 = %d, want 32767", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != 32766 {
-		t.Errorf("-1 %s -32767 = %d, want 32766", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != -32767 {
-		t.Errorf("-1 %s 32766 = %d, want -32767", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != -32768 {
-		t.Errorf("-1 %s 32767 = %d, want -32768", "-", r)
-	}
-	x = 0
-	y = -32768
-	r = x - y
-	if r != -32768 {
-		t.Errorf("0 %s -32768 = %d, want -32768", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != 32767 {
-		t.Errorf("0 %s -32767 = %d, want 32767", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s -1 = %d, want 1", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -1 {
-		t.Errorf("0 %s 1 = %d, want -1", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != -32766 {
-		t.Errorf("0 %s 32766 = %d, want -32766", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != -32767 {
-		t.Errorf("0 %s 32767 = %d, want -32767", "-", r)
-	}
-	x = 1
-	y = -32768
-	r = x - y
-	if r != -32767 {
-		t.Errorf("1 %s -32768 = %d, want -32767", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != -32768 {
-		t.Errorf("1 %s -32767 = %d, want -32768", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s -1 = %d, want 2", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != -32765 {
-		t.Errorf("1 %s 32766 = %d, want -32765", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != -32766 {
-		t.Errorf("1 %s 32767 = %d, want -32766", "-", r)
-	}
-	x = 32766
-	y = -32768
-	r = x - y
-	if r != -2 {
-		t.Errorf("32766 %s -32768 = %d, want -2", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != -3 {
-		t.Errorf("32766 %s -32767 = %d, want -3", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 32767 {
-		t.Errorf("32766 %s -1 = %d, want 32767", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 32765 {
-		t.Errorf("32766 %s 1 = %d, want 32765", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != 0 {
-		t.Errorf("32766 %s 32766 = %d, want 0", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != -1 {
-		t.Errorf("32766 %s 32767 = %d, want -1", "-", r)
-	}
-	x = 32767
-	y = -32768
-	r = x - y
-	if r != -1 {
-		t.Errorf("32767 %s -32768 = %d, want -1", "-", r)
-	}
-	y = -32767
-	r = x - y
-	if r != -2 {
-		t.Errorf("32767 %s -32767 = %d, want -2", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -32768 {
-		t.Errorf("32767 %s -1 = %d, want -32768", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 32766 {
-		t.Errorf("32767 %s 1 = %d, want 32766", "-", r)
-	}
-	y = 32766
-	r = x - y
-	if r != 1 {
-		t.Errorf("32767 %s 32766 = %d, want 1", "-", r)
-	}
-	y = 32767
-	r = x - y
-	if r != 0 {
-		t.Errorf("32767 %s 32767 = %d, want 0", "-", r)
-	}
-}
-func TestConstFoldint16div(t *testing.T) {
-	var x, y, r int16
-	x = -32768
-	y = -32768
-	r = x / y
-	if r != 1 {
-		t.Errorf("-32768 %s -32768 = %d, want 1", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != 1 {
-		t.Errorf("-32768 %s -32767 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -32768 {
-		t.Errorf("-32768 %s -1 = %d, want -32768", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -32768 {
-		t.Errorf("-32768 %s 1 = %d, want -32768", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != -1 {
-		t.Errorf("-32768 %s 32766 = %d, want -1", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != -1 {
-		t.Errorf("-32768 %s 32767 = %d, want -1", "/", r)
-	}
-	x = -32767
-	y = -32768
-	r = x / y
-	if r != 0 {
-		t.Errorf("-32767 %s -32768 = %d, want 0", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != 1 {
-		t.Errorf("-32767 %s -32767 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 32767 {
-		t.Errorf("-32767 %s -1 = %d, want 32767", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -32767 {
-		t.Errorf("-32767 %s 1 = %d, want -32767", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != -1 {
-		t.Errorf("-32767 %s 32766 = %d, want -1", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != -1 {
-		t.Errorf("-32767 %s 32767 = %d, want -1", "/", r)
-	}
-	x = -1
-	y = -32768
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -32768 = %d, want 0", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -32767 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 32766 = %d, want 0", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 32767 = %d, want 0", "/", r)
-	}
-	x = 0
-	y = -32768
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -32768 = %d, want 0", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -32767 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 32766 = %d, want 0", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 32767 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = -32768
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -32768 = %d, want 0", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -32767 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 32766 = %d, want 0", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 32767 = %d, want 0", "/", r)
-	}
-	x = 32766
-	y = -32768
-	r = x / y
-	if r != 0 {
-		t.Errorf("32766 %s -32768 = %d, want 0", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("32766 %s -32767 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -32766 {
-		t.Errorf("32766 %s -1 = %d, want -32766", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 32766 {
-		t.Errorf("32766 %s 1 = %d, want 32766", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != 1 {
-		t.Errorf("32766 %s 32766 = %d, want 1", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != 0 {
-		t.Errorf("32766 %s 32767 = %d, want 0", "/", r)
-	}
-	x = 32767
-	y = -32768
-	r = x / y
-	if r != 0 {
-		t.Errorf("32767 %s -32768 = %d, want 0", "/", r)
-	}
-	y = -32767
-	r = x / y
-	if r != -1 {
-		t.Errorf("32767 %s -32767 = %d, want -1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -32767 {
-		t.Errorf("32767 %s -1 = %d, want -32767", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 32767 {
-		t.Errorf("32767 %s 1 = %d, want 32767", "/", r)
-	}
-	y = 32766
-	r = x / y
-	if r != 1 {
-		t.Errorf("32767 %s 32766 = %d, want 1", "/", r)
-	}
-	y = 32767
-	r = x / y
-	if r != 1 {
-		t.Errorf("32767 %s 32767 = %d, want 1", "/", r)
-	}
-}
-func TestConstFoldint16mul(t *testing.T) {
-	var x, y, r int16
-	x = -32768
-	y = -32768
-	r = x * y
-	if r != 0 {
-		t.Errorf("-32768 %s -32768 = %d, want 0", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != -32768 {
-		t.Errorf("-32768 %s -32767 = %d, want -32768", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -32768 {
-		t.Errorf("-32768 %s -1 = %d, want -32768", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-32768 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -32768 {
-		t.Errorf("-32768 %s 1 = %d, want -32768", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != 0 {
-		t.Errorf("-32768 %s 32766 = %d, want 0", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != -32768 {
-		t.Errorf("-32768 %s 32767 = %d, want -32768", "*", r)
-	}
-	x = -32767
-	y = -32768
-	r = x * y
-	if r != -32768 {
-		t.Errorf("-32767 %s -32768 = %d, want -32768", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != 1 {
-		t.Errorf("-32767 %s -32767 = %d, want 1", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 32767 {
-		t.Errorf("-32767 %s -1 = %d, want 32767", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-32767 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -32767 {
-		t.Errorf("-32767 %s 1 = %d, want -32767", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != 32766 {
-		t.Errorf("-32767 %s 32766 = %d, want 32766", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != -1 {
-		t.Errorf("-32767 %s 32767 = %d, want -1", "*", r)
-	}
-	x = -1
-	y = -32768
-	r = x * y
-	if r != -32768 {
-		t.Errorf("-1 %s -32768 = %d, want -32768", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != 32767 {
-		t.Errorf("-1 %s -32767 = %d, want 32767", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != -32766 {
-		t.Errorf("-1 %s 32766 = %d, want -32766", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != -32767 {
-		t.Errorf("-1 %s 32767 = %d, want -32767", "*", r)
-	}
-	x = 0
-	y = -32768
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -32768 = %d, want 0", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -32767 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 32766 = %d, want 0", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 32767 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = -32768
-	r = x * y
-	if r != -32768 {
-		t.Errorf("1 %s -32768 = %d, want -32768", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != -32767 {
-		t.Errorf("1 %s -32767 = %d, want -32767", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != 32766 {
-		t.Errorf("1 %s 32766 = %d, want 32766", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != 32767 {
-		t.Errorf("1 %s 32767 = %d, want 32767", "*", r)
-	}
-	x = 32766
-	y = -32768
-	r = x * y
-	if r != 0 {
-		t.Errorf("32766 %s -32768 = %d, want 0", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != 32766 {
-		t.Errorf("32766 %s -32767 = %d, want 32766", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -32766 {
-		t.Errorf("32766 %s -1 = %d, want -32766", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("32766 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 32766 {
-		t.Errorf("32766 %s 1 = %d, want 32766", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != 4 {
-		t.Errorf("32766 %s 32766 = %d, want 4", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != -32766 {
-		t.Errorf("32766 %s 32767 = %d, want -32766", "*", r)
-	}
-	x = 32767
-	y = -32768
-	r = x * y
-	if r != -32768 {
-		t.Errorf("32767 %s -32768 = %d, want -32768", "*", r)
-	}
-	y = -32767
-	r = x * y
-	if r != -1 {
-		t.Errorf("32767 %s -32767 = %d, want -1", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -32767 {
-		t.Errorf("32767 %s -1 = %d, want -32767", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("32767 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 32767 {
-		t.Errorf("32767 %s 1 = %d, want 32767", "*", r)
-	}
-	y = 32766
-	r = x * y
-	if r != -32766 {
-		t.Errorf("32767 %s 32766 = %d, want -32766", "*", r)
-	}
-	y = 32767
-	r = x * y
-	if r != 1 {
-		t.Errorf("32767 %s 32767 = %d, want 1", "*", r)
-	}
-}
-func TestConstFoldint16mod(t *testing.T) {
-	var x, y, r int16
-	x = -32768
-	y = -32768
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32768 %s -32768 = %d, want 0", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != -1 {
-		t.Errorf("-32768 %s -32767 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32768 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32768 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != -2 {
-		t.Errorf("-32768 %s 32766 = %d, want -2", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != -1 {
-		t.Errorf("-32768 %s 32767 = %d, want -1", "%", r)
-	}
-	x = -32767
-	y = -32768
-	r = x % y
-	if r != -32767 {
-		t.Errorf("-32767 %s -32768 = %d, want -32767", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32767 %s -32767 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32767 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32767 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != -1 {
-		t.Errorf("-32767 %s 32766 = %d, want -1", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != 0 {
-		t.Errorf("-32767 %s 32767 = %d, want 0", "%", r)
-	}
-	x = -1
-	y = -32768
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -32768 = %d, want -1", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -32767 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 32766 = %d, want -1", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 32767 = %d, want -1", "%", r)
-	}
-	x = 0
-	y = -32768
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -32768 = %d, want 0", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -32767 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 32766 = %d, want 0", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 32767 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = -32768
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -32768 = %d, want 1", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -32767 = %d, want 1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 32766 = %d, want 1", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 32767 = %d, want 1", "%", r)
-	}
-	x = 32766
-	y = -32768
-	r = x % y
-	if r != 32766 {
-		t.Errorf("32766 %s -32768 = %d, want 32766", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != 32766 {
-		t.Errorf("32766 %s -32767 = %d, want 32766", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("32766 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("32766 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != 0 {
-		t.Errorf("32766 %s 32766 = %d, want 0", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != 32766 {
-		t.Errorf("32766 %s 32767 = %d, want 32766", "%", r)
-	}
-	x = 32767
-	y = -32768
-	r = x % y
-	if r != 32767 {
-		t.Errorf("32767 %s -32768 = %d, want 32767", "%", r)
-	}
-	y = -32767
-	r = x % y
-	if r != 0 {
-		t.Errorf("32767 %s -32767 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("32767 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("32767 %s 1 = %d, want 0", "%", r)
-	}
-	y = 32766
-	r = x % y
-	if r != 1 {
-		t.Errorf("32767 %s 32766 = %d, want 1", "%", r)
-	}
-	y = 32767
-	r = x % y
-	if r != 0 {
-		t.Errorf("32767 %s 32767 = %d, want 0", "%", r)
-	}
-}
-func TestConstFolduint8add(t *testing.T) {
-	var x, y, r uint8
-	x = 0
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 255
-	r = x + y
-	if r != 255 {
-		t.Errorf("0 %s 255 = %d, want 255", "+", r)
-	}
-	x = 1
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 255
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "+", r)
-	}
-	x = 255
-	y = 0
-	r = x + y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("255 %s 1 = %d, want 0", "+", r)
-	}
-	y = 255
-	r = x + y
-	if r != 254 {
-		t.Errorf("255 %s 255 = %d, want 254", "+", r)
-	}
-}
-func TestConstFolduint8sub(t *testing.T) {
-	var x, y, r uint8
-	x = 0
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 255 {
-		t.Errorf("0 %s 1 = %d, want 255", "-", r)
-	}
-	y = 255
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s 255 = %d, want 1", "-", r)
-	}
-	x = 1
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 255
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s 255 = %d, want 2", "-", r)
-	}
-	x = 255
-	y = 0
-	r = x - y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 254 {
-		t.Errorf("255 %s 1 = %d, want 254", "-", r)
-	}
-	y = 255
-	r = x - y
-	if r != 0 {
-		t.Errorf("255 %s 255 = %d, want 0", "-", r)
-	}
-}
-func TestConstFolduint8div(t *testing.T) {
-	var x, y, r uint8
-	x = 0
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 255
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 255
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "/", r)
-	}
-	x = 255
-	y = 1
-	r = x / y
-	if r != 255 {
-		t.Errorf("255 %s 1 = %d, want 255", "/", r)
-	}
-	y = 255
-	r = x / y
-	if r != 1 {
-		t.Errorf("255 %s 255 = %d, want 1", "/", r)
-	}
-}
-func TestConstFolduint8mul(t *testing.T) {
-	var x, y, r uint8
-	x = 0
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 255
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 255
-	r = x * y
-	if r != 255 {
-		t.Errorf("1 %s 255 = %d, want 255", "*", r)
-	}
-	x = 255
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("255 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 255 {
-		t.Errorf("255 %s 1 = %d, want 255", "*", r)
-	}
-	y = 255
-	r = x * y
-	if r != 1 {
-		t.Errorf("255 %s 255 = %d, want 1", "*", r)
-	}
-}
-func TestConstFolduint8mod(t *testing.T) {
-	var x, y, r uint8
-	x = 0
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 255
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 255
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 255 = %d, want 1", "%", r)
-	}
-	x = 255
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("255 %s 1 = %d, want 0", "%", r)
-	}
-	y = 255
-	r = x % y
-	if r != 0 {
-		t.Errorf("255 %s 255 = %d, want 0", "%", r)
-	}
-}
-func TestConstFoldint8add(t *testing.T) {
-	var x, y, r int8
-	x = -128
-	y = -128
-	r = x + y
-	if r != 0 {
-		t.Errorf("-128 %s -128 = %d, want 0", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != 1 {
-		t.Errorf("-128 %s -127 = %d, want 1", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 127 {
-		t.Errorf("-128 %s -1 = %d, want 127", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -127 {
-		t.Errorf("-128 %s 1 = %d, want -127", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != -2 {
-		t.Errorf("-128 %s 126 = %d, want -2", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != -1 {
-		t.Errorf("-128 %s 127 = %d, want -1", "+", r)
-	}
-	x = -127
-	y = -128
-	r = x + y
-	if r != 1 {
-		t.Errorf("-127 %s -128 = %d, want 1", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != 2 {
-		t.Errorf("-127 %s -127 = %d, want 2", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -128 {
-		t.Errorf("-127 %s -1 = %d, want -128", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -126 {
-		t.Errorf("-127 %s 1 = %d, want -126", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != -1 {
-		t.Errorf("-127 %s 126 = %d, want -1", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != 0 {
-		t.Errorf("-127 %s 127 = %d, want 0", "+", r)
-	}
-	x = -1
-	y = -128
-	r = x + y
-	if r != 127 {
-		t.Errorf("-1 %s -128 = %d, want 127", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != -128 {
-		t.Errorf("-1 %s -127 = %d, want -128", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -2 {
-		t.Errorf("-1 %s -1 = %d, want -2", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != 125 {
-		t.Errorf("-1 %s 126 = %d, want 125", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != 126 {
-		t.Errorf("-1 %s 127 = %d, want 126", "+", r)
-	}
-	x = 0
-	y = -128
-	r = x + y
-	if r != -128 {
-		t.Errorf("0 %s -128 = %d, want -128", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != -127 {
-		t.Errorf("0 %s -127 = %d, want -127", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != -1 {
-		t.Errorf("0 %s -1 = %d, want -1", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 1 {
-		t.Errorf("0 %s 1 = %d, want 1", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != 126 {
-		t.Errorf("0 %s 126 = %d, want 126", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != 127 {
-		t.Errorf("0 %s 127 = %d, want 127", "+", r)
-	}
-	x = 1
-	y = -128
-	r = x + y
-	if r != -127 {
-		t.Errorf("1 %s -128 = %d, want -127", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != -126 {
-		t.Errorf("1 %s -127 = %d, want -126", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != 127 {
-		t.Errorf("1 %s 126 = %d, want 127", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != -128 {
-		t.Errorf("1 %s 127 = %d, want -128", "+", r)
-	}
-	x = 126
-	y = -128
-	r = x + y
-	if r != -2 {
-		t.Errorf("126 %s -128 = %d, want -2", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != -1 {
-		t.Errorf("126 %s -127 = %d, want -1", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 125 {
-		t.Errorf("126 %s -1 = %d, want 125", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != 127 {
-		t.Errorf("126 %s 1 = %d, want 127", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != -4 {
-		t.Errorf("126 %s 126 = %d, want -4", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != -3 {
-		t.Errorf("126 %s 127 = %d, want -3", "+", r)
-	}
-	x = 127
-	y = -128
-	r = x + y
-	if r != -1 {
-		t.Errorf("127 %s -128 = %d, want -1", "+", r)
-	}
-	y = -127
-	r = x + y
-	if r != 0 {
-		t.Errorf("127 %s -127 = %d, want 0", "+", r)
-	}
-	y = -1
-	r = x + y
-	if r != 126 {
-		t.Errorf("127 %s -1 = %d, want 126", "+", r)
-	}
-	y = 0
-	r = x + y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", "+", r)
-	}
-	y = 1
-	r = x + y
-	if r != -128 {
-		t.Errorf("127 %s 1 = %d, want -128", "+", r)
-	}
-	y = 126
-	r = x + y
-	if r != -3 {
-		t.Errorf("127 %s 126 = %d, want -3", "+", r)
-	}
-	y = 127
-	r = x + y
-	if r != -2 {
-		t.Errorf("127 %s 127 = %d, want -2", "+", r)
-	}
-}
-func TestConstFoldint8sub(t *testing.T) {
-	var x, y, r int8
-	x = -128
-	y = -128
-	r = x - y
-	if r != 0 {
-		t.Errorf("-128 %s -128 = %d, want 0", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != -1 {
-		t.Errorf("-128 %s -127 = %d, want -1", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -127 {
-		t.Errorf("-128 %s -1 = %d, want -127", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 127 {
-		t.Errorf("-128 %s 1 = %d, want 127", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != 2 {
-		t.Errorf("-128 %s 126 = %d, want 2", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != 1 {
-		t.Errorf("-128 %s 127 = %d, want 1", "-", r)
-	}
-	x = -127
-	y = -128
-	r = x - y
-	if r != 1 {
-		t.Errorf("-127 %s -128 = %d, want 1", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != 0 {
-		t.Errorf("-127 %s -127 = %d, want 0", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -126 {
-		t.Errorf("-127 %s -1 = %d, want -126", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -128 {
-		t.Errorf("-127 %s 1 = %d, want -128", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != 3 {
-		t.Errorf("-127 %s 126 = %d, want 3", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != 2 {
-		t.Errorf("-127 %s 127 = %d, want 2", "-", r)
-	}
-	x = -1
-	y = -128
-	r = x - y
-	if r != 127 {
-		t.Errorf("-1 %s -128 = %d, want 127", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != 126 {
-		t.Errorf("-1 %s -127 = %d, want 126", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != -127 {
-		t.Errorf("-1 %s 126 = %d, want -127", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != -128 {
-		t.Errorf("-1 %s 127 = %d, want -128", "-", r)
-	}
-	x = 0
-	y = -128
-	r = x - y
-	if r != -128 {
-		t.Errorf("0 %s -128 = %d, want -128", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != 127 {
-		t.Errorf("0 %s -127 = %d, want 127", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 1 {
-		t.Errorf("0 %s -1 = %d, want 1", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != -1 {
-		t.Errorf("0 %s 1 = %d, want -1", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != -126 {
-		t.Errorf("0 %s 126 = %d, want -126", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != -127 {
-		t.Errorf("0 %s 127 = %d, want -127", "-", r)
-	}
-	x = 1
-	y = -128
-	r = x - y
-	if r != -127 {
-		t.Errorf("1 %s -128 = %d, want -127", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != -128 {
-		t.Errorf("1 %s -127 = %d, want -128", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 2 {
-		t.Errorf("1 %s -1 = %d, want 2", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != -125 {
-		t.Errorf("1 %s 126 = %d, want -125", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != -126 {
-		t.Errorf("1 %s 127 = %d, want -126", "-", r)
-	}
-	x = 126
-	y = -128
-	r = x - y
-	if r != -2 {
-		t.Errorf("126 %s -128 = %d, want -2", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != -3 {
-		t.Errorf("126 %s -127 = %d, want -3", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != 127 {
-		t.Errorf("126 %s -1 = %d, want 127", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 125 {
-		t.Errorf("126 %s 1 = %d, want 125", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != 0 {
-		t.Errorf("126 %s 126 = %d, want 0", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != -1 {
-		t.Errorf("126 %s 127 = %d, want -1", "-", r)
-	}
-	x = 127
-	y = -128
-	r = x - y
-	if r != -1 {
-		t.Errorf("127 %s -128 = %d, want -1", "-", r)
-	}
-	y = -127
-	r = x - y
-	if r != -2 {
-		t.Errorf("127 %s -127 = %d, want -2", "-", r)
-	}
-	y = -1
-	r = x - y
-	if r != -128 {
-		t.Errorf("127 %s -1 = %d, want -128", "-", r)
-	}
-	y = 0
-	r = x - y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", "-", r)
-	}
-	y = 1
-	r = x - y
-	if r != 126 {
-		t.Errorf("127 %s 1 = %d, want 126", "-", r)
-	}
-	y = 126
-	r = x - y
-	if r != 1 {
-		t.Errorf("127 %s 126 = %d, want 1", "-", r)
-	}
-	y = 127
-	r = x - y
-	if r != 0 {
-		t.Errorf("127 %s 127 = %d, want 0", "-", r)
-	}
-}
-func TestConstFoldint8div(t *testing.T) {
-	var x, y, r int8
-	x = -128
-	y = -128
-	r = x / y
-	if r != 1 {
-		t.Errorf("-128 %s -128 = %d, want 1", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != 1 {
-		t.Errorf("-128 %s -127 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -128 {
-		t.Errorf("-128 %s -1 = %d, want -128", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -128 {
-		t.Errorf("-128 %s 1 = %d, want -128", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != -1 {
-		t.Errorf("-128 %s 126 = %d, want -1", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != -1 {
-		t.Errorf("-128 %s 127 = %d, want -1", "/", r)
-	}
-	x = -127
-	y = -128
-	r = x / y
-	if r != 0 {
-		t.Errorf("-127 %s -128 = %d, want 0", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != 1 {
-		t.Errorf("-127 %s -127 = %d, want 1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 127 {
-		t.Errorf("-127 %s -1 = %d, want 127", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -127 {
-		t.Errorf("-127 %s 1 = %d, want -127", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != -1 {
-		t.Errorf("-127 %s 126 = %d, want -1", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != -1 {
-		t.Errorf("-127 %s 127 = %d, want -1", "/", r)
-	}
-	x = -1
-	y = -128
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -128 = %d, want 0", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s -127 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 126 = %d, want 0", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != 0 {
-		t.Errorf("-1 %s 127 = %d, want 0", "/", r)
-	}
-	x = 0
-	y = -128
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -128 = %d, want 0", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -127 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 126 = %d, want 0", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != 0 {
-		t.Errorf("0 %s 127 = %d, want 0", "/", r)
-	}
-	x = 1
-	y = -128
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -128 = %d, want 0", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s -127 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 126 = %d, want 0", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != 0 {
-		t.Errorf("1 %s 127 = %d, want 0", "/", r)
-	}
-	x = 126
-	y = -128
-	r = x / y
-	if r != 0 {
-		t.Errorf("126 %s -128 = %d, want 0", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != 0 {
-		t.Errorf("126 %s -127 = %d, want 0", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -126 {
-		t.Errorf("126 %s -1 = %d, want -126", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 126 {
-		t.Errorf("126 %s 1 = %d, want 126", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != 1 {
-		t.Errorf("126 %s 126 = %d, want 1", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != 0 {
-		t.Errorf("126 %s 127 = %d, want 0", "/", r)
-	}
-	x = 127
-	y = -128
-	r = x / y
-	if r != 0 {
-		t.Errorf("127 %s -128 = %d, want 0", "/", r)
-	}
-	y = -127
-	r = x / y
-	if r != -1 {
-		t.Errorf("127 %s -127 = %d, want -1", "/", r)
-	}
-	y = -1
-	r = x / y
-	if r != -127 {
-		t.Errorf("127 %s -1 = %d, want -127", "/", r)
-	}
-	y = 1
-	r = x / y
-	if r != 127 {
-		t.Errorf("127 %s 1 = %d, want 127", "/", r)
-	}
-	y = 126
-	r = x / y
-	if r != 1 {
-		t.Errorf("127 %s 126 = %d, want 1", "/", r)
-	}
-	y = 127
-	r = x / y
-	if r != 1 {
-		t.Errorf("127 %s 127 = %d, want 1", "/", r)
-	}
-}
-func TestConstFoldint8mul(t *testing.T) {
-	var x, y, r int8
-	x = -128
-	y = -128
-	r = x * y
-	if r != 0 {
-		t.Errorf("-128 %s -128 = %d, want 0", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != -128 {
-		t.Errorf("-128 %s -127 = %d, want -128", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -128 {
-		t.Errorf("-128 %s -1 = %d, want -128", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-128 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -128 {
-		t.Errorf("-128 %s 1 = %d, want -128", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != 0 {
-		t.Errorf("-128 %s 126 = %d, want 0", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != -128 {
-		t.Errorf("-128 %s 127 = %d, want -128", "*", r)
-	}
-	x = -127
-	y = -128
-	r = x * y
-	if r != -128 {
-		t.Errorf("-127 %s -128 = %d, want -128", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != 1 {
-		t.Errorf("-127 %s -127 = %d, want 1", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 127 {
-		t.Errorf("-127 %s -1 = %d, want 127", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-127 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -127 {
-		t.Errorf("-127 %s 1 = %d, want -127", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != 126 {
-		t.Errorf("-127 %s 126 = %d, want 126", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != -1 {
-		t.Errorf("-127 %s 127 = %d, want -1", "*", r)
-	}
-	x = -1
-	y = -128
-	r = x * y
-	if r != -128 {
-		t.Errorf("-1 %s -128 = %d, want -128", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != 127 {
-		t.Errorf("-1 %s -127 = %d, want 127", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 1 {
-		t.Errorf("-1 %s -1 = %d, want 1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("-1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != -126 {
-		t.Errorf("-1 %s 126 = %d, want -126", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != -127 {
-		t.Errorf("-1 %s 127 = %d, want -127", "*", r)
-	}
-	x = 0
-	y = -128
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -128 = %d, want 0", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -127 = %d, want 0", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 126 = %d, want 0", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != 0 {
-		t.Errorf("0 %s 127 = %d, want 0", "*", r)
-	}
-	x = 1
-	y = -128
-	r = x * y
-	if r != -128 {
-		t.Errorf("1 %s -128 = %d, want -128", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != -127 {
-		t.Errorf("1 %s -127 = %d, want -127", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -1 {
-		t.Errorf("1 %s -1 = %d, want -1", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("1 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 1 {
-		t.Errorf("1 %s 1 = %d, want 1", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != 126 {
-		t.Errorf("1 %s 126 = %d, want 126", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != 127 {
-		t.Errorf("1 %s 127 = %d, want 127", "*", r)
-	}
-	x = 126
-	y = -128
-	r = x * y
-	if r != 0 {
-		t.Errorf("126 %s -128 = %d, want 0", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != 126 {
-		t.Errorf("126 %s -127 = %d, want 126", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -126 {
-		t.Errorf("126 %s -1 = %d, want -126", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("126 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 126 {
-		t.Errorf("126 %s 1 = %d, want 126", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != 4 {
-		t.Errorf("126 %s 126 = %d, want 4", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != -126 {
-		t.Errorf("126 %s 127 = %d, want -126", "*", r)
-	}
-	x = 127
-	y = -128
-	r = x * y
-	if r != -128 {
-		t.Errorf("127 %s -128 = %d, want -128", "*", r)
-	}
-	y = -127
-	r = x * y
-	if r != -1 {
-		t.Errorf("127 %s -127 = %d, want -1", "*", r)
-	}
-	y = -1
-	r = x * y
-	if r != -127 {
-		t.Errorf("127 %s -1 = %d, want -127", "*", r)
-	}
-	y = 0
-	r = x * y
-	if r != 0 {
-		t.Errorf("127 %s 0 = %d, want 0", "*", r)
-	}
-	y = 1
-	r = x * y
-	if r != 127 {
-		t.Errorf("127 %s 1 = %d, want 127", "*", r)
-	}
-	y = 126
-	r = x * y
-	if r != -126 {
-		t.Errorf("127 %s 126 = %d, want -126", "*", r)
-	}
-	y = 127
-	r = x * y
-	if r != 1 {
-		t.Errorf("127 %s 127 = %d, want 1", "*", r)
-	}
-}
-func TestConstFoldint8mod(t *testing.T) {
-	var x, y, r int8
-	x = -128
-	y = -128
-	r = x % y
-	if r != 0 {
-		t.Errorf("-128 %s -128 = %d, want 0", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != -1 {
-		t.Errorf("-128 %s -127 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-128 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-128 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != -2 {
-		t.Errorf("-128 %s 126 = %d, want -2", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != -1 {
-		t.Errorf("-128 %s 127 = %d, want -1", "%", r)
-	}
-	x = -127
-	y = -128
-	r = x % y
-	if r != -127 {
-		t.Errorf("-127 %s -128 = %d, want -127", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != 0 {
-		t.Errorf("-127 %s -127 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-127 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-127 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != -1 {
-		t.Errorf("-127 %s 126 = %d, want -1", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != 0 {
-		t.Errorf("-127 %s 127 = %d, want 0", "%", r)
-	}
-	x = -1
-	y = -128
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -128 = %d, want -1", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s -127 = %d, want -1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("-1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 126 = %d, want -1", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != -1 {
-		t.Errorf("-1 %s 127 = %d, want -1", "%", r)
-	}
-	x = 0
-	y = -128
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -128 = %d, want 0", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -127 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 126 = %d, want 0", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != 0 {
-		t.Errorf("0 %s 127 = %d, want 0", "%", r)
-	}
-	x = 1
-	y = -128
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -128 = %d, want 1", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s -127 = %d, want 1", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 126 = %d, want 1", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != 1 {
-		t.Errorf("1 %s 127 = %d, want 1", "%", r)
-	}
-	x = 126
-	y = -128
-	r = x % y
-	if r != 126 {
-		t.Errorf("126 %s -128 = %d, want 126", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != 126 {
-		t.Errorf("126 %s -127 = %d, want 126", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("126 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("126 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != 0 {
-		t.Errorf("126 %s 126 = %d, want 0", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != 126 {
-		t.Errorf("126 %s 127 = %d, want 126", "%", r)
-	}
-	x = 127
-	y = -128
-	r = x % y
-	if r != 127 {
-		t.Errorf("127 %s -128 = %d, want 127", "%", r)
-	}
-	y = -127
-	r = x % y
-	if r != 0 {
-		t.Errorf("127 %s -127 = %d, want 0", "%", r)
-	}
-	y = -1
-	r = x % y
-	if r != 0 {
-		t.Errorf("127 %s -1 = %d, want 0", "%", r)
-	}
-	y = 1
-	r = x % y
-	if r != 0 {
-		t.Errorf("127 %s 1 = %d, want 0", "%", r)
-	}
-	y = 126
-	r = x % y
-	if r != 1 {
-		t.Errorf("127 %s 126 = %d, want 1", "%", r)
-	}
-	y = 127
-	r = x % y
-	if r != 0 {
-		t.Errorf("127 %s 127 = %d, want 0", "%", r)
-	}
-}
-func TestConstFolduint64uint64lsh(t *testing.T) {
-	var x, r uint64
-	var y uint64
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x << y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 18446744073709551614 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint64uint64rsh(t *testing.T) {
-	var x, r uint64
-	var y uint64
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x >> y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint64uint32lsh(t *testing.T) {
-	var x, r uint64
-	var y uint32
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x << y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 18446744073709551614 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint64uint32rsh(t *testing.T) {
-	var x, r uint64
-	var y uint32
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x >> y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint64uint16lsh(t *testing.T) {
-	var x, r uint64
-	var y uint16
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x << y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 18446744073709551614 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint64uint16rsh(t *testing.T) {
-	var x, r uint64
-	var y uint16
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x >> y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint64uint8lsh(t *testing.T) {
-	var x, r uint64
-	var y uint8
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x << y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 18446744073709551614 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint64uint8rsh(t *testing.T) {
-	var x, r uint64
-	var y uint8
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 18446744073709551615
-	y = 0
-	r = x >> y
-	if r != 18446744073709551615 {
-		t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("18446744073709551615 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint64uint64lsh(t *testing.T) {
-	var x, r int64
-	var y uint64
-	x = -9223372036854775808
-	y = 0
-	r = x << y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x << y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x << y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -8589934592 {
-		t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x << y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x << y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint64uint64rsh(t *testing.T) {
-	var x, r int64
-	var y uint64
-	x = -9223372036854775808
-	y = 0
-	r = x >> y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x >> y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x >> y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-4294967296 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-4294967296 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x >> y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint64uint32lsh(t *testing.T) {
-	var x, r int64
-	var y uint32
-	x = -9223372036854775808
-	y = 0
-	r = x << y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x << y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x << y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -8589934592 {
-		t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x << y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x << y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint64uint32rsh(t *testing.T) {
-	var x, r int64
-	var y uint32
-	x = -9223372036854775808
-	y = 0
-	r = x >> y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x >> y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x >> y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-4294967296 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x >> y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint64uint16lsh(t *testing.T) {
-	var x, r int64
-	var y uint16
-	x = -9223372036854775808
-	y = 0
-	r = x << y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x << y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x << y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -8589934592 {
-		t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x << y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x << y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint64uint16rsh(t *testing.T) {
-	var x, r int64
-	var y uint16
-	x = -9223372036854775808
-	y = 0
-	r = x >> y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x >> y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x >> y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-4294967296 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x >> y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint64uint8lsh(t *testing.T) {
-	var x, r int64
-	var y uint8
-	x = -9223372036854775808
-	y = 0
-	r = x << y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775808 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x << y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-9223372036854775807 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x << y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -8589934592 {
-		t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-4294967296 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x << y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 8589934592 {
-		t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x << y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x << y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint64uint8rsh(t *testing.T) {
-	var x, r int64
-	var y uint8
-	x = -9223372036854775808
-	y = 0
-	r = x >> y
-	if r != -9223372036854775808 {
-		t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775808 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -9223372036854775807
-	y = 0
-	r = x >> y
-	if r != -9223372036854775807 {
-		t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -4611686018427387904 {
-		t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-9223372036854775807 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -4294967296
-	y = 0
-	r = x >> y
-	if r != -4294967296 {
-		t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-4294967296 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 4294967296
-	y = 0
-	r = x >> y
-	if r != 4294967296 {
-		t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483648 {
-		t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775806
-	y = 0
-	r = x >> y
-	if r != 9223372036854775806 {
-		t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775806 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 9223372036854775807
-	y = 0
-	r = x >> y
-	if r != 9223372036854775807 {
-		t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 4611686018427387903 {
-		t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("9223372036854775807 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint32uint64lsh(t *testing.T) {
-	var x, r uint32
-	var y uint64
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x << y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 4294967294 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967295 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint32uint64rsh(t *testing.T) {
-	var x, r uint32
-	var y uint64
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x >> y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967295 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint32uint32lsh(t *testing.T) {
-	var x, r uint32
-	var y uint32
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x << y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 4294967294 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint32uint32rsh(t *testing.T) {
-	var x, r uint32
-	var y uint32
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x >> y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967295 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint32uint16lsh(t *testing.T) {
-	var x, r uint32
-	var y uint16
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x << y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 4294967294 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967295 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint32uint16rsh(t *testing.T) {
-	var x, r uint32
-	var y uint16
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x >> y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967295 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint32uint8lsh(t *testing.T) {
-	var x, r uint32
-	var y uint8
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x << y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 4294967294 {
-		t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("4294967295 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint32uint8rsh(t *testing.T) {
-	var x, r uint32
-	var y uint8
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 4294967295
-	y = 0
-	r = x >> y
-	if r != 4294967295 {
-		t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("4294967295 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint32uint64lsh(t *testing.T) {
-	var x, r int32
-	var y uint64
-	x = -2147483648
-	y = 0
-	r = x << y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x << y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x << y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("2147483647 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint32uint64rsh(t *testing.T) {
-	var x, r int32
-	var y uint64
-	x = -2147483648
-	y = 0
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x >> y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 1073741823 {
-		t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("2147483647 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint32uint32lsh(t *testing.T) {
-	var x, r int32
-	var y uint32
-	x = -2147483648
-	y = 0
-	r = x << y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x << y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x << y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("2147483647 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint32uint32rsh(t *testing.T) {
-	var x, r int32
-	var y uint32
-	x = -2147483648
-	y = 0
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x >> y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 1073741823 {
-		t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("2147483647 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint32uint16lsh(t *testing.T) {
-	var x, r int32
-	var y uint16
-	x = -2147483648
-	y = 0
-	r = x << y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x << y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x << y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("2147483647 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint32uint16rsh(t *testing.T) {
-	var x, r int32
-	var y uint16
-	x = -2147483648
-	y = 0
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x >> y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 1073741823 {
-		t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("2147483647 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint32uint8lsh(t *testing.T) {
-	var x, r int32
-	var y uint8
-	x = -2147483648
-	y = 0
-	r = x << y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483648 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x << y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-2147483647 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x << y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("2147483647 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint32uint8rsh(t *testing.T) {
-	var x, r int32
-	var y uint8
-	x = -2147483648
-	y = 0
-	r = x >> y
-	if r != -2147483648 {
-		t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483648 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -2147483647
-	y = 0
-	r = x >> y
-	if r != -2147483647 {
-		t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1073741824 {
-		t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-2147483647 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 2147483647
-	y = 0
-	r = x >> y
-	if r != 2147483647 {
-		t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 1073741823 {
-		t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("2147483647 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint16uint64lsh(t *testing.T) {
-	var x, r uint16
-	var y uint64
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 65535
-	y = 0
-	r = x << y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 65534 {
-		t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("65535 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("65535 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint16uint64rsh(t *testing.T) {
-	var x, r uint16
-	var y uint64
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 65535
-	y = 0
-	r = x >> y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("65535 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("65535 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint16uint32lsh(t *testing.T) {
-	var x, r uint16
-	var y uint32
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 65535
-	y = 0
-	r = x << y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 65534 {
-		t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("65535 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint16uint32rsh(t *testing.T) {
-	var x, r uint16
-	var y uint32
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 65535
-	y = 0
-	r = x >> y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("65535 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint16uint16lsh(t *testing.T) {
-	var x, r uint16
-	var y uint16
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 65535
-	y = 0
-	r = x << y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 65534 {
-		t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("65535 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint16uint16rsh(t *testing.T) {
-	var x, r uint16
-	var y uint16
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 65535
-	y = 0
-	r = x >> y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("65535 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint16uint8lsh(t *testing.T) {
-	var x, r uint16
-	var y uint8
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 65535
-	y = 0
-	r = x << y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 65534 {
-		t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("65535 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint16uint8rsh(t *testing.T) {
-	var x, r uint16
-	var y uint8
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 65535
-	y = 0
-	r = x >> y
-	if r != 65535 {
-		t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("65535 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint16uint64lsh(t *testing.T) {
-	var x, r int16
-	var y uint64
-	x = -32768
-	y = 0
-	r = x << y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -32767
-	y = 0
-	r = x << y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32767 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32767 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 32766
-	y = 0
-	r = x << y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("32766 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("32766 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 32767
-	y = 0
-	r = x << y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("32767 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("32767 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint16uint64rsh(t *testing.T) {
-	var x, r int16
-	var y uint64
-	x = -32768
-	y = 0
-	r = x >> y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32768 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32768 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -32767
-	y = 0
-	r = x >> y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32767 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32767 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 32766
-	y = 0
-	r = x >> y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32766 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32766 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 32767
-	y = 0
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32767 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32767 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint16uint32lsh(t *testing.T) {
-	var x, r int16
-	var y uint32
-	x = -32768
-	y = 0
-	r = x << y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -32767
-	y = 0
-	r = x << y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32767 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 32766
-	y = 0
-	r = x << y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("32766 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 32767
-	y = 0
-	r = x << y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("32767 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint16uint32rsh(t *testing.T) {
-	var x, r int16
-	var y uint32
-	x = -32768
-	y = 0
-	r = x >> y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32768 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -32767
-	y = 0
-	r = x >> y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32767 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 32766
-	y = 0
-	r = x >> y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32766 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 32767
-	y = 0
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32767 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint16uint16lsh(t *testing.T) {
-	var x, r int16
-	var y uint16
-	x = -32768
-	y = 0
-	r = x << y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -32767
-	y = 0
-	r = x << y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32767 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 32766
-	y = 0
-	r = x << y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("32766 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 32767
-	y = 0
-	r = x << y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("32767 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint16uint16rsh(t *testing.T) {
-	var x, r int16
-	var y uint16
-	x = -32768
-	y = 0
-	r = x >> y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32768 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -32767
-	y = 0
-	r = x >> y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32767 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 32766
-	y = 0
-	r = x >> y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32766 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 32767
-	y = 0
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32767 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint16uint8lsh(t *testing.T) {
-	var x, r int16
-	var y uint8
-	x = -32768
-	y = 0
-	r = x << y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32768 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -32767
-	y = 0
-	r = x << y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-32767 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 32766
-	y = 0
-	r = x << y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("32766 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 32767
-	y = 0
-	r = x << y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("32767 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint16uint8rsh(t *testing.T) {
-	var x, r int16
-	var y uint8
-	x = -32768
-	y = 0
-	r = x >> y
-	if r != -32768 {
-		t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32768 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -32767
-	y = 0
-	r = x >> y
-	if r != -32767 {
-		t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -16384 {
-		t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-32767 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 32766
-	y = 0
-	r = x >> y
-	if r != 32766 {
-		t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32766 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 32767
-	y = 0
-	r = x >> y
-	if r != 32767 {
-		t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 16383 {
-		t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("32767 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint8uint64lsh(t *testing.T) {
-	var x, r uint8
-	var y uint64
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 255
-	y = 0
-	r = x << y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 254 {
-		t.Errorf("255 %s 1 = %d, want 254", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("255 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("255 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint8uint64rsh(t *testing.T) {
-	var x, r uint8
-	var y uint64
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 255
-	y = 0
-	r = x >> y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 127 {
-		t.Errorf("255 %s 1 = %d, want 127", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("255 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("255 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint8uint32lsh(t *testing.T) {
-	var x, r uint8
-	var y uint32
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 255
-	y = 0
-	r = x << y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 254 {
-		t.Errorf("255 %s 1 = %d, want 254", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("255 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint8uint32rsh(t *testing.T) {
-	var x, r uint8
-	var y uint32
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 255
-	y = 0
-	r = x >> y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 127 {
-		t.Errorf("255 %s 1 = %d, want 127", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("255 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint8uint16lsh(t *testing.T) {
-	var x, r uint8
-	var y uint16
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 255
-	y = 0
-	r = x << y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 254 {
-		t.Errorf("255 %s 1 = %d, want 254", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("255 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint8uint16rsh(t *testing.T) {
-	var x, r uint8
-	var y uint16
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 255
-	y = 0
-	r = x >> y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 127 {
-		t.Errorf("255 %s 1 = %d, want 127", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("255 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFolduint8uint8lsh(t *testing.T) {
-	var x, r uint8
-	var y uint8
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 255
-	y = 0
-	r = x << y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 254 {
-		t.Errorf("255 %s 1 = %d, want 254", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("255 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFolduint8uint8rsh(t *testing.T) {
-	var x, r uint8
-	var y uint8
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 255
-	y = 0
-	r = x >> y
-	if r != 255 {
-		t.Errorf("255 %s 0 = %d, want 255", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 127 {
-		t.Errorf("255 %s 1 = %d, want 127", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("255 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint8uint64lsh(t *testing.T) {
-	var x, r int8
-	var y uint64
-	x = -128
-	y = 0
-	r = x << y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -127
-	y = 0
-	r = x << y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-127 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-127 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 126
-	y = 0
-	r = x << y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("126 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("126 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("126 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-	x = 127
-	y = 0
-	r = x << y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("127 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967296
-	r = x << y
-	if r != 0 {
-		t.Errorf("127 %s 4294967296 = %d, want 0", "<<", r)
-	}
-	y = 18446744073709551615
-	r = x << y
-	if r != 0 {
-		t.Errorf("127 %s 18446744073709551615 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint8uint64rsh(t *testing.T) {
-	var x, r int8
-	var y uint64
-	x = -128
-	y = 0
-	r = x >> y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-128 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-128 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -127
-	y = 0
-	r = x >> y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-127 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-127 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 126
-	y = 0
-	r = x >> y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("126 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("126 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("126 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-	x = 127
-	y = 0
-	r = x >> y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("127 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 4294967296
-	r = x >> y
-	if r != 0 {
-		t.Errorf("127 %s 4294967296 = %d, want 0", ">>", r)
-	}
-	y = 18446744073709551615
-	r = x >> y
-	if r != 0 {
-		t.Errorf("127 %s 18446744073709551615 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint8uint32lsh(t *testing.T) {
-	var x, r int8
-	var y uint32
-	x = -128
-	y = 0
-	r = x << y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -127
-	y = 0
-	r = x << y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-127 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 126
-	y = 0
-	r = x << y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("126 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("126 %s 4294967295 = %d, want 0", "<<", r)
-	}
-	x = 127
-	y = 0
-	r = x << y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("127 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 4294967295
-	r = x << y
-	if r != 0 {
-		t.Errorf("127 %s 4294967295 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint8uint32rsh(t *testing.T) {
-	var x, r int8
-	var y uint32
-	x = -128
-	y = 0
-	r = x >> y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-128 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -127
-	y = 0
-	r = x >> y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-127 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 126
-	y = 0
-	r = x >> y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("126 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("126 %s 4294967295 = %d, want 0", ">>", r)
-	}
-	x = 127
-	y = 0
-	r = x >> y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("127 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 4294967295
-	r = x >> y
-	if r != 0 {
-		t.Errorf("127 %s 4294967295 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint8uint16lsh(t *testing.T) {
-	var x, r int8
-	var y uint16
-	x = -128
-	y = 0
-	r = x << y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -127
-	y = 0
-	r = x << y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-127 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 126
-	y = 0
-	r = x << y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("126 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("126 %s 65535 = %d, want 0", "<<", r)
-	}
-	x = 127
-	y = 0
-	r = x << y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("127 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 65535
-	r = x << y
-	if r != 0 {
-		t.Errorf("127 %s 65535 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint8uint16rsh(t *testing.T) {
-	var x, r int8
-	var y uint16
-	x = -128
-	y = 0
-	r = x >> y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-128 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -127
-	y = 0
-	r = x >> y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-127 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 126
-	y = 0
-	r = x >> y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("126 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("126 %s 65535 = %d, want 0", ">>", r)
-	}
-	x = 127
-	y = 0
-	r = x >> y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("127 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 65535
-	r = x >> y
-	if r != 0 {
-		t.Errorf("127 %s 65535 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldint8uint8lsh(t *testing.T) {
-	var x, r int8
-	var y uint8
-	x = -128
-	y = 0
-	r = x << y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-128 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -127
-	y = 0
-	r = x << y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-127 %s 255 = %d, want 0", "<<", r)
-	}
-	x = -1
-	y = 0
-	r = x << y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 0
-	y = 0
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 1
-	y = 0
-	r = x << y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != 2 {
-		t.Errorf("1 %s 1 = %d, want 2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 126
-	y = 0
-	r = x << y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -4 {
-		t.Errorf("126 %s 1 = %d, want -4", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("126 %s 255 = %d, want 0", "<<", r)
-	}
-	x = 127
-	y = 0
-	r = x << y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", "<<", r)
-	}
-	y = 1
-	r = x << y
-	if r != -2 {
-		t.Errorf("127 %s 1 = %d, want -2", "<<", r)
-	}
-	y = 255
-	r = x << y
-	if r != 0 {
-		t.Errorf("127 %s 255 = %d, want 0", "<<", r)
-	}
-}
-func TestConstFoldint8uint8rsh(t *testing.T) {
-	var x, r int8
-	var y uint8
-	x = -128
-	y = 0
-	r = x >> y
-	if r != -128 {
-		t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-128 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -127
-	y = 0
-	r = x >> y
-	if r != -127 {
-		t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -64 {
-		t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-127 %s 255 = %d, want -1", ">>", r)
-	}
-	x = -1
-	y = 0
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != -1 {
-		t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
-	}
-	x = 0
-	y = 0
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 0 = %d, want 0", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("0 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 1
-	y = 0
-	r = x >> y
-	if r != 1 {
-		t.Errorf("1 %s 0 = %d, want 1", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 1 = %d, want 0", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("1 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 126
-	y = 0
-	r = x >> y
-	if r != 126 {
-		t.Errorf("126 %s 0 = %d, want 126", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("126 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("126 %s 255 = %d, want 0", ">>", r)
-	}
-	x = 127
-	y = 0
-	r = x >> y
-	if r != 127 {
-		t.Errorf("127 %s 0 = %d, want 127", ">>", r)
-	}
-	y = 1
-	r = x >> y
-	if r != 63 {
-		t.Errorf("127 %s 1 = %d, want 63", ">>", r)
-	}
-	y = 255
-	r = x >> y
-	if r != 0 {
-		t.Errorf("127 %s 255 = %d, want 0", ">>", r)
-	}
-}
-func TestConstFoldCompareuint64(t *testing.T) {
-	{
-		var x uint64 = 0
-		var y uint64 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 0
-		var y uint64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint64 = 0
-		var y uint64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint64 = 0
-		var y uint64 = 18446744073709551615
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint64 = 1
-		var y uint64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 1
-		var y uint64 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 1
-		var y uint64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint64 = 1
-		var y uint64 = 18446744073709551615
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint64 = 4294967296
-		var y uint64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 4294967296
-		var y uint64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 4294967296
-		var y uint64 = 4294967296
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 4294967296
-		var y uint64 = 18446744073709551615
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint64 = 18446744073709551615
-		var y uint64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 18446744073709551615
-		var y uint64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 18446744073709551615
-		var y uint64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint64 = 18446744073709551615
-		var y uint64 = 18446744073709551615
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareint64(t *testing.T) {
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = -9223372036854775808
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775808
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = -9223372036854775807
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -9223372036854775807
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = -4294967296
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -4294967296
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = -1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = -1
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 0
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 1
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = 4294967296
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 4294967296
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = 9223372036854775806
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775806
-		var y int64 = 9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = -9223372036854775808
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = -9223372036854775807
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = -4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = 4294967296
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = 9223372036854775806
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int64 = 9223372036854775807
-		var y int64 = 9223372036854775807
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareuint32(t *testing.T) {
-	{
-		var x uint32 = 0
-		var y uint32 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint32 = 0
-		var y uint32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint32 = 0
-		var y uint32 = 4294967295
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint32 = 1
-		var y uint32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint32 = 1
-		var y uint32 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint32 = 1
-		var y uint32 = 4294967295
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint32 = 4294967295
-		var y uint32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint32 = 4294967295
-		var y uint32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint32 = 4294967295
-		var y uint32 = 4294967295
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareint32(t *testing.T) {
-	{
-		var x int32 = -2147483648
-		var y int32 = -2147483648
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483648
-		var y int32 = -2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483648
-		var y int32 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483648
-		var y int32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483648
-		var y int32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483648
-		var y int32 = 2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483647
-		var y int32 = -2147483648
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483647
-		var y int32 = -2147483647
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483647
-		var y int32 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483647
-		var y int32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483647
-		var y int32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -2147483647
-		var y int32 = 2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -1
-		var y int32 = -2147483648
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = -1
-		var y int32 = -2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = -1
-		var y int32 = -1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = -1
-		var y int32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -1
-		var y int32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = -1
-		var y int32 = 2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = 0
-		var y int32 = -2147483648
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 0
-		var y int32 = -2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 0
-		var y int32 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 0
-		var y int32 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 0
-		var y int32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = 0
-		var y int32 = 2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = 1
-		var y int32 = -2147483648
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 1
-		var y int32 = -2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 1
-		var y int32 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 1
-		var y int32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 1
-		var y int32 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 1
-		var y int32 = 2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int32 = 2147483647
-		var y int32 = -2147483648
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 2147483647
-		var y int32 = -2147483647
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 2147483647
-		var y int32 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 2147483647
-		var y int32 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 2147483647
-		var y int32 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int32 = 2147483647
-		var y int32 = 2147483647
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareuint16(t *testing.T) {
-	{
-		var x uint16 = 0
-		var y uint16 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint16 = 0
-		var y uint16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint16 = 0
-		var y uint16 = 65535
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint16 = 1
-		var y uint16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint16 = 1
-		var y uint16 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint16 = 1
-		var y uint16 = 65535
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint16 = 65535
-		var y uint16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint16 = 65535
-		var y uint16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint16 = 65535
-		var y uint16 = 65535
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareint16(t *testing.T) {
-	{
-		var x int16 = -32768
-		var y int16 = -32768
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = -32768
-		var y int16 = -32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32768
-		var y int16 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32768
-		var y int16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32768
-		var y int16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32768
-		var y int16 = 32766
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32768
-		var y int16 = 32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = -32768
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = -32767
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = 32766
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -32767
-		var y int16 = 32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = -32768
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = -32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = -1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = 32766
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = -1
-		var y int16 = 32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = -32768
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = -32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = 32766
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 0
-		var y int16 = 32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = -32768
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = -32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = 32766
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 1
-		var y int16 = 32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = -32768
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = -32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = 32766
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32766
-		var y int16 = 32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = -32768
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = -32767
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = 32766
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int16 = 32767
-		var y int16 = 32767
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareuint8(t *testing.T) {
-	{
-		var x uint8 = 0
-		var y uint8 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint8 = 0
-		var y uint8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint8 = 0
-		var y uint8 = 255
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint8 = 1
-		var y uint8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint8 = 1
-		var y uint8 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint8 = 1
-		var y uint8 = 255
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x uint8 = 255
-		var y uint8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint8 = 255
-		var y uint8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x uint8 = 255
-		var y uint8 = 255
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
-func TestConstFoldCompareint8(t *testing.T) {
-	{
-		var x int8 = -128
-		var y int8 = -128
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = -128
-		var y int8 = -127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -128
-		var y int8 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -128
-		var y int8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -128
-		var y int8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -128
-		var y int8 = 126
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -128
-		var y int8 = 127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = -128
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = -127
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = 126
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -127
-		var y int8 = 127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = -128
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = -127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = -1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = 126
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = -1
-		var y int8 = 127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = -128
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = -127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = 0
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = 126
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 0
-		var y int8 = 127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = -128
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = -127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = 1
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = 126
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 1
-		var y int8 = 127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = -128
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = -127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = 126
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 126
-		var y int8 = 127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if !(x < y) {
-			t.Errorf("!(%d < %d)", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if x >= y {
-			t.Errorf("%d >= %d", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = -128
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = -127
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = -1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = 0
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = 1
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = 126
-		if x == y {
-			t.Errorf("%d == %d", x, y)
-		}
-		if !(x != y) {
-			t.Errorf("!(%d != %d)", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if !(x > y) {
-			t.Errorf("!(%d > %d)", x, y)
-		}
-		if x <= y {
-			t.Errorf("%d <= %d", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-	{
-		var x int8 = 127
-		var y int8 = 127
-		if !(x == y) {
-			t.Errorf("!(%d == %d)", x, y)
-		}
-		if x != y {
-			t.Errorf("%d != %d", x, y)
-		}
-		if x < y {
-			t.Errorf("%d < %d", x, y)
-		}
-		if x > y {
-			t.Errorf("%d > %d", x, y)
-		}
-		if !(x <= y) {
-			t.Errorf("!(%d <= %d)", x, y)
-		}
-		if !(x >= y) {
-			t.Errorf("!(%d >= %d)", x, y)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/dcl.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/dcl.go
deleted file mode 100644
index c26e3dc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/dcl.go
+++ /dev/null
@@ -1,1454 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/dcl.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/dcl.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// Declaration stack & operations
-
-var externdcl []*Node
-
-var blockgen int32 // max block number
-
-var block int32 // current block number
-
-// dclstack maintains a stack of shadowed symbol declarations so that
-// popdcl can restore their declarations when a block scope ends.
-// The stack is maintained as a linked list, using Sym's Link field.
-//
-// In practice, the "stack" actually ends up forming a tree: goto and label
-// statements record the current state of dclstack so that checkgoto can
-// validate that a goto statement does not jump over any declarations or
-// into a new block scope.
-//
-// Finally, the Syms in this list are not "real" Syms as they don't actually
-// represent object names. Sym is just a convenient type for saving shadowed
-// Sym definitions, and only a subset of its fields are actually used.
-var dclstack *Sym
-
-func dcopy(a, b *Sym) {
-	a.Pkg = b.Pkg
-	a.Name = b.Name
-	a.Def = b.Def
-	a.Block = b.Block
-	a.Lastlineno = b.Lastlineno
-}
-
-func push() *Sym {
-	d := new(Sym)
-	d.Lastlineno = lineno
-	d.Link = dclstack
-	dclstack = d
-	return d
-}
-
-// pushdcl pushes the current declaration for symbol s (if any) so that
-// it can be shadowed by a new declaration within a nested block scope.
-func pushdcl(s *Sym) *Sym {
-	d := push()
-	dcopy(d, s)
-	return d
-}
-
-// popdcl pops the innermost block scope and restores all symbol declarations
-// to their previous state.
-func popdcl() {
-	d := dclstack
-	for ; d != nil && d.Name != ""; d = d.Link {
-		s := Pkglookup(d.Name, d.Pkg)
-		lno := s.Lastlineno
-		dcopy(s, d)
-		d.Lastlineno = lno
-	}
-
-	if d == nil {
-		Fatalf("popdcl: no mark")
-	}
-
-	dclstack = d.Link // pop mark
-	block = d.Block
-}
-
-// markdcl records the start of a new block scope for declarations.
-func markdcl() {
-	d := push()
-	d.Name = "" // used as a mark in fifo
-	d.Block = block
-
-	blockgen++
-	block = blockgen
-}
-
-// keep around for debugging
-func dumpdclstack() {
-	i := 0
-	for d := dclstack; d != nil; d = d.Link {
-		fmt.Printf("%6d  %p", i, d)
-		if d.Name != "" {
-			fmt.Printf("  '%s'  %v\n", d.Name, Pkglookup(d.Name, d.Pkg))
-		} else {
-			fmt.Printf("  ---\n")
-		}
-		i++
-	}
-}
-
-func testdclstack() {
-	for d := dclstack; d != nil; d = d.Link {
-		if d.Name == "" {
-			if nerrors != 0 {
-				errorexit()
-			}
-			yyerror("mark left on the stack")
-		}
-	}
-}
-
-// redeclare emits a diagnostic about symbol s being redeclared somewhere.
-func redeclare(s *Sym, where string) {
-	if s.Lastlineno == 0 {
-		var tmp string
-		if s.Origpkg != nil {
-			tmp = s.Origpkg.Path
-		} else {
-			tmp = s.Pkg.Path
-		}
-		pkgstr := tmp
-		yyerror("%v redeclared %s\n"+
-			"\tprevious declaration during import %q", s, where, pkgstr)
-	} else {
-		line1 := lineno
-		line2 := s.Lastlineno
-
-		// When an import and a declaration collide in separate files,
-		// present the import as the "redeclared", because the declaration
-		// is visible where the import is, but not vice versa.
-		// See issue 4510.
-		if s.Def == nil {
-			line2 = line1
-			line1 = s.Lastlineno
-		}
-
-		yyerrorl(line1, "%v redeclared %s\n"+
-			"\tprevious declaration at %v", s, where, linestr(line2))
-	}
-}
-
-var vargen int
-
-// declare individual names - var, typ, const
-
-var declare_typegen int
-
-// declare records that Node n declares symbol n.Sym in the specified
-// declaration context.
-func declare(n *Node, ctxt Class) {
-	if ctxt == PDISCARD {
-		return
-	}
-
-	if isblank(n) {
-		return
-	}
-
-	if n.Name == nil {
-		// named OLITERAL needs Name; most OLITERALs don't.
-		n.Name = new(Name)
-	}
-	n.Lineno = lineno
-	s := n.Sym
-
-	// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
-	if importpkg == nil && !typecheckok && s.Pkg != localpkg {
-		yyerror("cannot declare name %v", s)
-	}
-
-	if ctxt == PEXTERN && s.Name == "init" {
-		yyerror("cannot declare init - must be func")
-	}
-
-	gen := 0
-	if ctxt == PEXTERN {
-		externdcl = append(externdcl, n)
-	} else {
-		if Curfn == nil && ctxt == PAUTO {
-			Fatalf("automatic outside function")
-		}
-		if Curfn != nil {
-			Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-		}
-		if n.Op == OTYPE {
-			declare_typegen++
-			gen = declare_typegen
-		} else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
-			vargen++
-			gen = vargen
-		}
-		pushdcl(s)
-		n.Name.Curfn = Curfn
-	}
-
-	if ctxt == PAUTO {
-		n.Xoffset = 0
-	}
-
-	if s.Block == block {
-		// functype will print errors about duplicate function arguments.
-		// Don't repeat the error here.
-		if ctxt != PPARAM && ctxt != PPARAMOUT {
-			redeclare(s, "in this block")
-		}
-	}
-
-	s.Block = block
-	s.Lastlineno = lineno
-	s.Def = n
-	n.Name.Vargen = int32(gen)
-	n.Name.Funcdepth = funcdepth
-	n.Class = ctxt
-
-	autoexport(n, ctxt)
-}
-
-func addvar(n *Node, t *Type, ctxt Class) {
-	if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
-		Fatalf("addvar: n=%v t=%v nil", n, t)
-	}
-
-	n.Op = ONAME
-	declare(n, ctxt)
-	n.Type = t
-}
-
-// declare variables from grammar
-// new_name_list (type | [type] = expr_list)
-func variter(vl []*Node, t *Node, el []*Node) []*Node {
-	var init []*Node
-	doexpr := len(el) > 0
-
-	if len(el) == 1 && len(vl) > 1 {
-		e := el[0]
-		as2 := nod(OAS2, nil, nil)
-		as2.List.Set(vl)
-		as2.Rlist.Set1(e)
-		for _, v := range vl {
-			v.Op = ONAME
-			declare(v, dclcontext)
-			v.Name.Param.Ntype = t
-			v.Name.Defn = as2
-			if funcdepth > 0 {
-				init = append(init, nod(ODCL, v, nil))
-			}
-		}
-
-		return append(init, as2)
-	}
-
-	for _, v := range vl {
-		var e *Node
-		if doexpr {
-			if len(el) == 0 {
-				yyerror("missing expression in var declaration")
-				break
-			}
-			e = el[0]
-			el = el[1:]
-		}
-
-		v.Op = ONAME
-		declare(v, dclcontext)
-		v.Name.Param.Ntype = t
-
-		if e != nil || funcdepth > 0 || isblank(v) {
-			if funcdepth > 0 {
-				init = append(init, nod(ODCL, v, nil))
-			}
-			e = nod(OAS, v, e)
-			init = append(init, e)
-			if e.Right != nil {
-				v.Name.Defn = e
-			}
-		}
-	}
-
-	if len(el) != 0 {
-		yyerror("extra expression in var declaration")
-	}
-	return init
-}
-
-// declare constants from grammar
-// new_name_list [[type] = expr_list]
-func constiter(vl []*Node, t *Node, cl []*Node) []*Node {
-	lno := int32(0) // default is to leave line number alone in listtreecopy
-	if len(cl) == 0 {
-		if t != nil {
-			yyerror("const declaration cannot have type without expression")
-		}
-		cl = lastconst
-		t = lasttype
-		lno = vl[0].Lineno
-	} else {
-		lastconst = cl
-		lasttype = t
-	}
-	clcopy := listtreecopy(cl, lno)
-
-	var vv []*Node
-	for _, v := range vl {
-		if len(clcopy) == 0 {
-			yyerror("missing value in const declaration")
-			break
-		}
-
-		c := clcopy[0]
-		clcopy = clcopy[1:]
-
-		v.Op = OLITERAL
-		declare(v, dclcontext)
-
-		v.Name.Param.Ntype = t
-		v.Name.Defn = c
-
-		vv = append(vv, nod(ODCLCONST, v, nil))
-	}
-
-	if len(clcopy) != 0 {
-		yyerror("extra expression in const declaration")
-	}
-	iota_ += 1
-	return vv
-}
-
-// newname returns a new ONAME Node associated with symbol s.
-func newname(s *Sym) *Node {
-	if s == nil {
-		Fatalf("newname nil")
-	}
-	n := nod(ONAME, nil, nil)
-	n.Sym = s
-	n.Addable = true
-	n.Ullman = 1
-	n.Xoffset = 0
-	return n
-}
-
-// newnoname returns a new ONONAME Node associated with symbol s.
-func newnoname(s *Sym) *Node {
-	if s == nil {
-		Fatalf("newnoname nil")
-	}
-	n := nod(ONONAME, nil, nil)
-	n.Sym = s
-	n.Addable = true
-	n.Ullman = 1
-	n.Xoffset = 0
-	return n
-}
-
-// newfuncname generates a new name node for a function or method.
-// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360.
-func newfuncname(s *Sym) *Node {
-	n := newname(s)
-	n.Func = new(Func)
-	n.Func.IsHiddenClosure = Curfn != nil
-	return n
-}
-
-// this generates a new name node for a name
-// being declared.
-func dclname(s *Sym) *Node {
-	n := newname(s)
-	n.Op = ONONAME // caller will correct it
-	return n
-}
-
-func typenod(t *Type) *Node {
-	// if we copied another type with *t = *u
-	// then t->nod might be out of date, so
-	// check t->nod->type too
-	if t.nod == nil || t.nod.Type != t {
-		t.nod = nod(OTYPE, nil, nil)
-		t.nod.Type = t
-		t.nod.Sym = t.Sym
-	}
-
-	return t.nod
-}
-
-func anonfield(typ *Type) *Node {
-	return nod(ODCLFIELD, nil, typenod(typ))
-}
-
-func namedfield(s string, typ *Type) *Node {
-	return nod(ODCLFIELD, newname(lookup(s)), typenod(typ))
-}
-
-// oldname returns the Node that declares symbol s in the current scope.
-// If no such Node currently exists, an ONONAME Node is returned instead.
-func oldname(s *Sym) *Node {
-	n := s.Def
-	if n == nil {
-		// Maybe a top-level declaration will come along later to
-		// define s. resolve will check s.Def again once all input
-		// source has been processed.
-		n = newnoname(s)
-		n.SetIota(iota_) // save current iota value in const declarations
-		return n
-	}
-
-	if Curfn != nil && n.Op == ONAME && n.Name.Funcdepth > 0 && n.Name.Funcdepth != funcdepth {
-		// Inner func is referring to var in outer func.
-		//
-		// TODO(rsc): If there is an outer variable x and we
-		// are parsing x := 5 inside the closure, until we get to
-		// the := it looks like a reference to the outer x so we'll
-		// make x a closure variable unnecessarily.
-		c := n.Name.Param.Innermost
-		if c == nil || c.Name.Funcdepth != funcdepth {
-			// Do not have a closure var for the active closure yet; make one.
-			c = nod(ONAME, nil, nil)
-			c.Sym = s
-			c.Class = PAUTOHEAP
-			c.setIsClosureVar(true)
-			c.Isddd = n.Isddd
-			c.Name.Defn = n
-			c.Addable = false
-			c.Ullman = 2
-			c.Name.Funcdepth = funcdepth
-
-			// Link into list of active closure variables.
-			// Popped from list in func closurebody.
-			c.Name.Param.Outer = n.Name.Param.Innermost
-			n.Name.Param.Innermost = c
-
-			c.Xoffset = 0
-			Curfn.Func.Cvars.Append(c)
-		}
-
-		// return ref to closure var, not original
-		return c
-	}
-
-	return n
-}
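// For reference, the source-level situation the closure-variable handling in
// oldname deals with -- an inner function referring to a variable of its
// enclosing function -- looks like this (standalone sketch, not part of this file):
package main

import "fmt"

func counter() func() int {
	n := 0 // n is captured by the closure below and becomes a closure variable
	return func() int {
		n++
		return n
	}
}

func main() {
	next := counter()
	fmt.Println(next(), next(), next()) // 1 2 3
}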
-
-// := declarations
-func colasname(n *Node) bool {
-	switch n.Op {
-	case ONAME,
-		ONONAME,
-		OPACK,
-		OTYPE,
-		OLITERAL:
-		return n.Sym != nil
-	}
-
-	return false
-}
-
-func colasdefn(left []*Node, defn *Node) {
-	for _, n := range left {
-		if n.Sym != nil {
-			n.Sym.Flags |= SymUniq
-		}
-	}
-
-	var nnew, nerr int
-	for i, n := range left {
-		if isblank(n) {
-			continue
-		}
-		if !colasname(n) {
-			yyerrorl(defn.Lineno, "non-name %v on left side of :=", n)
-			nerr++
-			continue
-		}
-
-		if n.Sym.Flags&SymUniq == 0 {
-			yyerrorl(defn.Lineno, "%v repeated on left side of :=", n.Sym)
-			n.Diag = true
-			nerr++
-			continue
-		}
-
-		n.Sym.Flags &^= SymUniq
-		if n.Sym.Block == block {
-			continue
-		}
-
-		nnew++
-		n = newname(n.Sym)
-		declare(n, dclcontext)
-		n.Name.Defn = defn
-		defn.Ninit.Append(nod(ODCL, n, nil))
-		left[i] = n
-	}
-
-	if nnew == 0 && nerr == 0 {
-		yyerrorl(defn.Lineno, "no new variables on left side of :=")
-	}
-}
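// colasdefn enforces the usual short-variable-declaration rules: every operand
// on the left must be a plain name, names may not repeat, and at least one of
// them must be new in the current block. A standalone sketch of those rules
// (not part of this file):
package main

import "fmt"

func main() {
	x := 1
	x, y := 2, 3 // legal: y is new on the left side; x is only reassigned
	// x := 4    // rejected: "no new variables on left side of :="
	fmt.Println(x, y)
}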
-
-// declare the arguments in an
-// interface field declaration.
-func ifacedcl(n *Node) {
-	if n.Op != ODCLFIELD || n.Right == nil {
-		Fatalf("ifacedcl")
-	}
-
-	if isblank(n.Left) {
-		yyerror("methods must have a unique non-blank name")
-	}
-}
-
-// declare the function proper
-// and declare the arguments.
-// called in extern-declaration context
-// returns in auto-declaration context.
-func funchdr(n *Node) {
-	// change the declaration context from extern to auto
-	if funcdepth == 0 && dclcontext != PEXTERN {
-		Fatalf("funchdr: dclcontext = %d", dclcontext)
-	}
-
-	if Ctxt.Flag_dynlink && importpkg == nil && n.Func.Nname != nil {
-		makefuncsym(n.Func.Nname.Sym)
-	}
-
-	dclcontext = PAUTO
-	funcstart(n)
-
-	if n.Func.Nname != nil {
-		funcargs(n.Func.Nname.Name.Param.Ntype)
-	} else if n.Func.Ntype != nil {
-		funcargs(n.Func.Ntype)
-	} else {
-		funcargs2(n.Type)
-	}
-}
-
-func funcargs(nt *Node) {
-	if nt.Op != OTFUNC {
-		Fatalf("funcargs %v", nt.Op)
-	}
-
-	// re-start the variable generation number
-	// we want to use small numbers for the return variables,
-	// so let them have the chunk starting at 1.
-	vargen = nt.Rlist.Len()
-
-	// declare the receiver and in arguments.
-	// no n->defn because type checking of func header
-	// will not fill in the types until later
-	if nt.Left != nil {
-		n := nt.Left
-		if n.Op != ODCLFIELD {
-			Fatalf("funcargs receiver %v", n.Op)
-		}
-		if n.Left != nil {
-			n.Left.Op = ONAME
-			n.Left.Name.Param.Ntype = n.Right
-			declare(n.Left, PPARAM)
-			if dclcontext == PAUTO {
-				vargen++
-				n.Left.Name.Vargen = int32(vargen)
-			}
-		}
-	}
-
-	for _, n := range nt.List.Slice() {
-		if n.Op != ODCLFIELD {
-			Fatalf("funcargs in %v", n.Op)
-		}
-		if n.Left != nil {
-			n.Left.Op = ONAME
-			n.Left.Name.Param.Ntype = n.Right
-			declare(n.Left, PPARAM)
-			if dclcontext == PAUTO {
-				vargen++
-				n.Left.Name.Vargen = int32(vargen)
-			}
-		}
-	}
-
-	// declare the out arguments.
-	gen := nt.List.Len()
-	var i int = 0
-	for _, n := range nt.Rlist.Slice() {
-		if n.Op != ODCLFIELD {
-			Fatalf("funcargs out %v", n.Op)
-		}
-
-		if n.Left == nil {
-			// Name so that escape analysis can track it. ~r stands for 'result'.
-			n.Left = newname(lookupN("~r", gen))
-			gen++
-		}
-
-		// TODO: n->left->missing = 1;
-		n.Left.Op = ONAME
-
-		if isblank(n.Left) {
-			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
-			// The name must be different from ~r above because if you have
-			//	func f() (_ int)
-			//	func g() int
-			// f is allowed to use a plain 'return' with no arguments, while g is not.
-			// So the two cases must be distinguished.
-			// We do not record a pointer to the original node (n->orig).
-			// Having multiple names causes too much confusion in later passes.
-			nn := *n.Left
-			nn.Orig = &nn
-			nn.Sym = lookupN("~b", gen)
-			gen++
-			n.Left = &nn
-		}
-
-		n.Left.Name.Param.Ntype = n.Right
-		declare(n.Left, PPARAMOUT)
-		if dclcontext == PAUTO {
-			i++
-			n.Left.Name.Vargen = int32(i)
-		}
-	}
-}
-
-// Same as funcargs, except run over an already constructed TFUNC.
-// This happens during import, where the hidden_fndcl rule has
-// used functype directly to parse the function's type.
-func funcargs2(t *Type) {
-	if t.Etype != TFUNC {
-		Fatalf("funcargs2 %v", t)
-	}
-
-	for _, ft := range t.Recvs().Fields().Slice() {
-		if ft.Nname == nil || ft.Nname.Sym == nil {
-			continue
-		}
-		n := ft.Nname // no need for newname(ft->nname->sym)
-		n.Type = ft.Type
-		declare(n, PPARAM)
-	}
-
-	for _, ft := range t.Params().Fields().Slice() {
-		if ft.Nname == nil || ft.Nname.Sym == nil {
-			continue
-		}
-		n := ft.Nname
-		n.Type = ft.Type
-		declare(n, PPARAM)
-	}
-
-	for _, ft := range t.Results().Fields().Slice() {
-		if ft.Nname == nil || ft.Nname.Sym == nil {
-			continue
-		}
-		n := ft.Nname
-		n.Type = ft.Type
-		declare(n, PPARAMOUT)
-	}
-}
-
-var funcstack []*Node // stack of previous values of Curfn
-var funcdepth int32   // len(funcstack) during parsing, but then forced to be the same later during compilation
-
-// start the function.
-// called before funcargs; undone at end of funcbody.
-func funcstart(n *Node) {
-	markdcl()
-	funcstack = append(funcstack, Curfn)
-	funcdepth++
-	Curfn = n
-}
-
-// finish the body.
-// called in auto-declaration context.
-// returns in extern-declaration context.
-func funcbody(n *Node) {
-	// change the declaration context from auto to extern
-	if dclcontext != PAUTO {
-		Fatalf("funcbody: unexpected dclcontext %d", dclcontext)
-	}
-	popdcl()
-	funcstack, Curfn = funcstack[:len(funcstack)-1], funcstack[len(funcstack)-1]
-	funcdepth--
-	if funcdepth == 0 {
-		dclcontext = PEXTERN
-	}
-}
-
-// new type being defined with name s.
-func typedcl0(s *Sym) *Node {
-	n := newname(s)
-	n.Op = OTYPE
-	declare(n, dclcontext)
-	return n
-}
-
-// node n, which was returned by typedcl0
-// is being declared to have uncompiled type t.
-// return the ODCLTYPE node to use.
-func typedcl1(n *Node, t *Node, local bool) *Node {
-	n.Name.Param.Ntype = t
-	n.Local = local
-	return nod(ODCLTYPE, n, nil)
-}
-
-// structs, functions, and methods.
-// they don't belong here, but where do they belong?
-func checkembeddedtype(t *Type) {
-	if t == nil {
-		return
-	}
-
-	if t.Sym == nil && t.IsPtr() {
-		t = t.Elem()
-		if t.IsInterface() {
-			yyerror("embedded type cannot be a pointer to interface")
-		}
-	}
-
-	if t.IsPtr() || t.IsUnsafePtr() {
-		yyerror("embedded type cannot be a pointer")
-	} else if t.Etype == TFORW && t.ForwardType().Embedlineno == 0 {
-		t.ForwardType().Embedlineno = lineno
-	}
-}
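// checkembeddedtype corresponds to the spec rule that an embedded field must be
// a type name T or a pointer to a non-interface type name *T. A standalone
// sketch of what it accepts and rejects (not part of this file; the rejected
// lines are kept as comments so the sketch still compiles):
package main

type I interface{ M() }
type T struct{}
type PT *T

type S struct {
	*T // OK: pointer to a non-interface type name
	// *I // rejected: "embedded type cannot be a pointer to interface"
	// PT // rejected: "embedded type cannot be a pointer"
}

func main() { _ = S{} }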
-
-func structfield(n *Node) *Field {
-	lno := lineno
-	lineno = n.Lineno
-
-	if n.Op != ODCLFIELD {
-		Fatalf("structfield: oops %v\n", n)
-	}
-
-	f := newField()
-	f.Isddd = n.Isddd
-
-	if n.Right != nil {
-		n.Right = typecheck(n.Right, Etype)
-		n.Type = n.Right.Type
-		if n.Left != nil {
-			n.Left.Type = n.Type
-		}
-		if n.Embedded != 0 {
-			checkembeddedtype(n.Type)
-		}
-	}
-
-	n.Right = nil
-
-	f.Type = n.Type
-	if f.Type == nil {
-		f.Broke = true
-	}
-
-	switch u := n.Val().U.(type) {
-	case string:
-		f.Note = u
-	default:
-		yyerror("field annotation must be string")
-	case nil:
-		// noop
-	}
-
-	if n.Left != nil && n.Left.Op == ONAME {
-		f.Nname = n.Left
-		f.Embedded = n.Embedded
-		f.Sym = f.Nname.Sym
-	}
-
-	lineno = lno
-	return f
-}
-
-// checkdupfields emits errors for duplicately named fields or methods in
-// a list of struct or interface types.
-func checkdupfields(what string, ts ...*Type) {
-	lno := lineno
-
-	seen := make(map[*Sym]bool)
-	for _, t := range ts {
-		for _, f := range t.Fields().Slice() {
-			if f.Sym == nil || f.Nname == nil || isblank(f.Nname) {
-				continue
-			}
-			if seen[f.Sym] {
-				lineno = f.Nname.Lineno
-				yyerror("duplicate %s %s", what, f.Sym.Name)
-				continue
-			}
-			seen[f.Sym] = true
-		}
-	}
-
-	lineno = lno
-}
-
-// convert a parsed id/type list into
-// a type for struct/interface/arglist
-func tostruct(l []*Node) *Type {
-	t := typ(TSTRUCT)
-	tostruct0(t, l)
-	return t
-}
-
-func tostruct0(t *Type, l []*Node) {
-	if t == nil || !t.IsStruct() {
-		Fatalf("struct expected")
-	}
-
-	fields := make([]*Field, len(l))
-	for i, n := range l {
-		f := structfield(n)
-		if f.Broke {
-			t.Broke = true
-		}
-		fields[i] = f
-	}
-	t.SetFields(fields)
-
-	checkdupfields("field", t)
-
-	if !t.Broke {
-		checkwidth(t)
-	}
-}
-
-func tofunargs(l []*Node, funarg Funarg) *Type {
-	t := typ(TSTRUCT)
-	t.StructType().Funarg = funarg
-
-	fields := make([]*Field, len(l))
-	for i, n := range l {
-		f := structfield(n)
-		f.Funarg = funarg
-
-		// esc.go needs to find f given a PPARAM to add the tag.
-		if n.Left != nil && n.Left.Class == PPARAM {
-			n.Left.Name.Param.Field = f
-		}
-		if f.Broke {
-			t.Broke = true
-		}
-		fields[i] = f
-	}
-	t.SetFields(fields)
-	return t
-}
-
-func tofunargsfield(fields []*Field, funarg Funarg) *Type {
-	t := typ(TSTRUCT)
-	t.StructType().Funarg = funarg
-
-	for _, f := range fields {
-		f.Funarg = funarg
-
-		// esc.go needs to find f given a PPARAM to add the tag.
-		if f.Nname != nil && f.Nname.Class == PPARAM {
-			f.Nname.Name.Param.Field = f
-		}
-	}
-	t.SetFields(fields)
-	return t
-}
-
-func interfacefield(n *Node) *Field {
-	lno := lineno
-	lineno = n.Lineno
-
-	if n.Op != ODCLFIELD {
-		Fatalf("interfacefield: oops %v\n", n)
-	}
-
-	if n.Val().Ctype() != CTxxx {
-		yyerror("interface method cannot have annotation")
-	}
-
-	f := newField()
-	f.Isddd = n.Isddd
-
-	if n.Right != nil {
-		if n.Left != nil {
-			// queue resolution of method type for later.
-			// right now all we need is the name list.
-			// avoids cycles for recursive interface types.
-			n.Type = typ(TINTERMETH)
-			n.Type.SetNname(n.Right)
-			n.Left.Type = n.Type
-			queuemethod(n)
-
-			if n.Left.Op == ONAME {
-				f.Nname = n.Left
-				f.Embedded = n.Embedded
-				f.Sym = f.Nname.Sym
-			}
-		} else {
-			n.Right = typecheck(n.Right, Etype)
-			n.Type = n.Right.Type
-
-			if n.Embedded != 0 {
-				checkembeddedtype(n.Type)
-			}
-
-			if n.Type != nil {
-				switch n.Type.Etype {
-				case TINTER:
-					break
-
-				case TFORW:
-					yyerror("interface type loop involving %v", n.Type)
-					f.Broke = true
-
-				default:
-					yyerror("interface contains embedded non-interface %v", n.Type)
-					f.Broke = true
-				}
-			}
-		}
-	}
-
-	n.Right = nil
-
-	f.Type = n.Type
-	if f.Type == nil {
-		f.Broke = true
-	}
-
-	lineno = lno
-	return f
-}
-
-func tointerface(l []*Node) *Type {
-	t := typ(TINTER)
-	tointerface0(t, l)
-	return t
-}
-
-func tointerface0(t *Type, l []*Node) *Type {
-	if t == nil || !t.IsInterface() {
-		Fatalf("interface expected")
-	}
-
-	var fields []*Field
-	for _, n := range l {
-		f := interfacefield(n)
-
-		if n.Left == nil && f.Type.IsInterface() {
-			// embedded interface, inline methods
-			for _, t1 := range f.Type.Fields().Slice() {
-				f = newField()
-				f.Type = t1.Type
-				f.Broke = t1.Broke
-				f.Sym = t1.Sym
-				if f.Sym != nil {
-					f.Nname = newname(f.Sym)
-				}
-				fields = append(fields, f)
-			}
-		} else {
-			fields = append(fields, f)
-		}
-		if f.Broke {
-			t.Broke = true
-		}
-	}
-	sort.Sort(methcmp(fields))
-	t.SetFields(fields)
-
-	checkdupfields("method", t)
-	checkwidth(t)
-
-	return t
-}
-
-func embedded(s *Sym, pkg *Pkg) *Node {
-	const (
-		CenterDot = 0xB7
-	)
-	// Names sometimes have disambiguation junk
-	// appended after a center dot. Discard it when
-	// making the name for the embedded struct field.
-	name := s.Name
-
-	if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
-		name = s.Name[:i]
-	}
-
-	var n *Node
-	if exportname(name) {
-		n = newname(lookup(name))
-	} else if s.Pkg == builtinpkg {
-		// The name of embedded builtins belongs to pkg.
-		n = newname(Pkglookup(name, pkg))
-	} else {
-		n = newname(Pkglookup(name, s.Pkg))
-	}
-	n = nod(ODCLFIELD, n, oldname(s))
-	n.Embedded = 1
-	return n
-}
-
-// thisT is the singleton type used for interface method receivers.
-var thisT *Type
-
-func fakethis() *Node {
-	if thisT == nil {
-		thisT = ptrto(typ(TSTRUCT))
-	}
-	return nod(ODCLFIELD, nil, typenod(thisT))
-}
-
-func fakethisfield() *Field {
-	if thisT == nil {
-		thisT = ptrto(typ(TSTRUCT))
-	}
-	f := newField()
-	f.Type = thisT
-	return f
-}
-
-// Is this field a method on an interface?
-// Those methods have thisT as the receiver.
-// (See fakethis above.)
-func isifacemethod(f *Type) bool {
-	return f.Recv().Type == thisT
-}
-
-// turn a parsed function declaration into a type
-func functype(this *Node, in, out []*Node) *Type {
-	t := typ(TFUNC)
-	functype0(t, this, in, out)
-	return t
-}
-
-func functype0(t *Type, this *Node, in, out []*Node) {
-	if t == nil || t.Etype != TFUNC {
-		Fatalf("function type expected")
-	}
-
-	var rcvr []*Node
-	if this != nil {
-		rcvr = []*Node{this}
-	}
-	t.FuncType().Receiver = tofunargs(rcvr, FunargRcvr)
-	t.FuncType().Results = tofunargs(out, FunargResults)
-	t.FuncType().Params = tofunargs(in, FunargParams)
-
-	checkdupfields("argument", t.Recvs(), t.Results(), t.Params())
-
-	if t.Recvs().Broke || t.Results().Broke || t.Params().Broke {
-		t.Broke = true
-	}
-
-	t.FuncType().Outnamed = false
-	if len(out) > 0 && out[0].Left != nil && out[0].Left.Orig != nil {
-		s := out[0].Left.Orig.Sym
-		if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
-			t.FuncType().Outnamed = true
-		}
-	}
-}
-
-func functypefield(this *Field, in, out []*Field) *Type {
-	t := typ(TFUNC)
-	functypefield0(t, this, in, out)
-	return t
-}
-
-func functypefield0(t *Type, this *Field, in, out []*Field) {
-	var rcvr []*Field
-	if this != nil {
-		rcvr = []*Field{this}
-	}
-	t.FuncType().Receiver = tofunargsfield(rcvr, FunargRcvr)
-	t.FuncType().Results = tofunargsfield(out, FunargRcvr)
-	t.FuncType().Params = tofunargsfield(in, FunargRcvr)
-
-	t.FuncType().Outnamed = false
-	if len(out) > 0 && out[0].Nname != nil && out[0].Nname.Orig != nil {
-		s := out[0].Nname.Orig.Sym
-		if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
-			t.FuncType().Outnamed = true
-		}
-	}
-}
-
-var methodsym_toppkg *Pkg
-
-func methodsym(nsym *Sym, t0 *Type, iface int) *Sym {
-	var s *Sym
-	var p string
-	var suffix string
-	var spkg *Pkg
-
-	t := t0
-	if t == nil {
-		goto bad
-	}
-	s = t.Sym
-	if s == nil && t.IsPtr() {
-		t = t.Elem()
-		if t == nil {
-			goto bad
-		}
-		s = t.Sym
-	}
-
-	spkg = nil
-	if s != nil {
-		spkg = s.Pkg
-	}
-
-	// if t0 == *t and t0 has a sym,
-	// we want to see *t, not t0, in the method name.
-	if t != t0 && t0.Sym != nil {
-		t0 = ptrto(t)
-	}
-
-	suffix = ""
-	if iface != 0 {
-		dowidth(t0)
-		if t0.Width < Types[Tptr].Width {
-			suffix = "·i"
-		}
-	}
-
-	if (spkg == nil || nsym.Pkg != spkg) && !exportname(nsym.Name) {
-		if t0.Sym == nil && t0.IsPtr() {
-			p = fmt.Sprintf("(%-S).%s.%s%s", t0, nsym.Pkg.Prefix, nsym.Name, suffix)
-		} else {
-			p = fmt.Sprintf("%-S.%s.%s%s", t0, nsym.Pkg.Prefix, nsym.Name, suffix)
-		}
-	} else {
-		if t0.Sym == nil && t0.IsPtr() {
-			p = fmt.Sprintf("(%-S).%s%s", t0, nsym.Name, suffix)
-		} else {
-			p = fmt.Sprintf("%-S.%s%s", t0, nsym.Name, suffix)
-		}
-	}
-
-	if spkg == nil {
-		if methodsym_toppkg == nil {
-			methodsym_toppkg = mkpkg("go")
-		}
-		spkg = methodsym_toppkg
-	}
-
-	s = Pkglookup(p, spkg)
-
-	return s
-
-bad:
-	yyerror("illegal receiver type: %v", t0)
-	return nil
-}
-
-func methodname(n *Node, t *Node) *Node {
-	star := false
-	if t.Op == OIND {
-		star = true
-		t = t.Left
-	}
-
-	return methodname0(n.Sym, star, t.Sym)
-}
-
-func methodname0(s *Sym, star bool, tsym *Sym) *Node {
-	if tsym == nil || isblanksym(s) {
-		return newfuncname(s)
-	}
-
-	var p string
-	if star {
-		p = fmt.Sprintf("(*%v).%v", tsym, s)
-	} else {
-		p = fmt.Sprintf("%v.%v", tsym, s)
-	}
-
-	if exportname(tsym.Name) {
-		s = lookup(p)
-	} else {
-		s = Pkglookup(p, tsym.Pkg)
-	}
-
-	return newfuncname(s)
-}
-
-// Add a method, declared as a function.
-// - msym is the method symbol
-// - t is function type (with receiver)
-func addmethod(msym *Sym, t *Type, local, nointerface bool) {
-	// get field sym
-	if msym == nil {
-		Fatalf("no method symbol")
-	}
-
-	// get parent type sym
-	rf := t.Recv() // ptr to this structure
-	if rf == nil {
-		yyerror("missing receiver")
-		return
-	}
-
-	mt := methtype(rf.Type)
-	if mt == nil || mt.Sym == nil {
-		pa := rf.Type
-		t := pa
-		if t != nil && t.IsPtr() {
-			if t.Sym != nil {
-				yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
-				return
-			}
-			t = t.Elem()
-		}
-
-		switch {
-		case t == nil || t.Broke:
-			// rely on typecheck having complained before
-		case t.Sym == nil:
-			yyerror("invalid receiver type %v (%v is an unnamed type)", pa, t)
-		case t.IsPtr():
-			yyerror("invalid receiver type %v (%v is a pointer type)", pa, t)
-		case t.IsInterface():
-			yyerror("invalid receiver type %v (%v is an interface type)", pa, t)
-		default:
-			// Should have picked off all the reasons above,
-			// but just in case, fall back to generic error.
-			yyerror("invalid receiver type %v (%L / %L)", pa, pa, t)
-		}
-		return
-	}
-
-	if local && !mt.Local {
-		yyerror("cannot define new methods on non-local type %v", mt)
-		return
-	}
-
-	if isblanksym(msym) {
-		return
-	}
-
-	if mt.IsStruct() {
-		for _, f := range mt.Fields().Slice() {
-			if f.Sym == msym {
-				yyerror("type %v has both field and method named %v", mt, msym)
-				return
-			}
-		}
-	}
-
-	for _, f := range mt.Methods().Slice() {
-		if msym.Name != f.Sym.Name {
-			continue
-		}
-		// eqtype only checks that incoming and result parameters match,
-		// so explicitly check that the receiver parameters match too.
-		if !eqtype(t, f.Type) || !eqtype(t.Recv().Type, f.Type.Recv().Type) {
-			yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
-		}
-		return
-	}
-
-	f := newField()
-	f.Sym = msym
-	f.Nname = newname(msym)
-	f.Type = t
-	f.Nointerface = nointerface
-
-	mt.Methods().Append(f)
-}
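// addmethod's receiver checks correspond to the usual method-declaration rules:
// the receiver's base type must be a named, local, non-pointer, non-interface
// type, and the method name may not collide with a field. A standalone sketch
// (not part of this file; error strings quoted from the checks above, rejected
// declarations kept as comments so the sketch compiles):
package main

type MyInt int
type MyIface interface{ M() }
type MyPtr *MyInt

func (m MyInt) Double() MyInt { return 2 * m } // OK

// func (t time.Time) Hi() {} // "cannot define new methods on non-local type time.Time"
// func (i MyIface) Hi() {}   // "invalid receiver type MyIface (MyIface is an interface type)"
// func (p MyPtr) Hi() {}     // "invalid receiver type MyPtr (MyPtr is a pointer type)"

func main() { _ = MyInt(21).Double() }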
-
-func funccompile(n *Node) {
-	Stksize = BADWIDTH
-	Maxarg = 0
-
-	if n.Type == nil {
-		if nerrors == 0 {
-			Fatalf("funccompile missing type")
-		}
-		return
-	}
-
-	// assign parameter offsets
-	checkwidth(n.Type)
-
-	if Curfn != nil {
-		Fatalf("funccompile %v inside %v", n.Func.Nname.Sym, Curfn.Func.Nname.Sym)
-	}
-
-	Stksize = 0
-	dclcontext = PAUTO
-	funcdepth = n.Func.Depth + 1
-	compile(n)
-	Curfn = nil
-	pc = nil
-	funcdepth = 0
-	dclcontext = PEXTERN
-	if nerrors != 0 {
-		// If we have compile errors, ignore any assembler/linker errors.
-		Ctxt.DiagFunc = func(string, ...interface{}) {}
-	}
-	obj.Flushplist(Ctxt) // convert from Prog list to machine code
-}
-
-func funcsym(s *Sym) *Sym {
-	if s.Fsym != nil {
-		return s.Fsym
-	}
-
-	s1 := Pkglookup(s.Name+"·f", s.Pkg)
-	if !Ctxt.Flag_dynlink && s1.Def == nil {
-		s1.Def = newfuncname(s1)
-		s1.Def.Func.Shortname = newname(s)
-		funcsyms = append(funcsyms, s1.Def)
-	}
-	s.Fsym = s1
-	return s1
-}
-
-func makefuncsym(s *Sym) {
-	if isblanksym(s) {
-		return
-	}
-	if compiling_runtime && s.Name == "getg" {
-		// runtime.getg() is not a real function and so does
-		// not get a funcsym.
-		return
-	}
-	s1 := funcsym(s)
-	s1.Def = newfuncname(s1)
-	s1.Def.Func.Shortname = newname(s)
-	funcsyms = append(funcsyms, s1.Def)
-}
-
-type nowritebarrierrecChecker struct {
-	curfn  *Node
-	stable bool
-
-	// best maps from the ODCLFUNC of each visited function that
-	// recursively invokes a write barrier to the called function
-	// on the shortest path to a write barrier.
-	best map[*Node]nowritebarrierrecCall
-}
-
-type nowritebarrierrecCall struct {
-	target *Node
-	depth  int
-	lineno int32
-}
-
-func checknowritebarrierrec() {
-	c := nowritebarrierrecChecker{
-		best: make(map[*Node]nowritebarrierrecCall),
-	}
-	visitBottomUp(xtop, func(list []*Node, recursive bool) {
-		// Functions with write barriers have depth 0.
-		for _, n := range list {
-			if n.Func.WBLineno != 0 && n.Func.Pragma&Yeswritebarrierrec == 0 {
-				c.best[n] = nowritebarrierrecCall{target: nil, depth: 0, lineno: n.Func.WBLineno}
-			}
-		}
-
-		// Propagate write barrier depth up from callees. In
-		// the recursive case, we have to update this at most
-		// len(list) times and can stop as soon as an iteration
-		// doesn't change anything.
-		for _ = range list {
-			c.stable = false
-			for _, n := range list {
-				if n.Func.Pragma&Yeswritebarrierrec != 0 {
-					// Don't propagate write
-					// barrier up to a
-					// yeswritebarrierrec function.
-					continue
-				}
-				if n.Func.WBLineno == 0 {
-					c.curfn = n
-					c.visitcodelist(n.Nbody)
-				}
-			}
-			if c.stable {
-				break
-			}
-		}
-
-		// Check nowritebarrierrec functions.
-		for _, n := range list {
-			if n.Func.Pragma&Nowritebarrierrec == 0 {
-				continue
-			}
-			call, hasWB := c.best[n]
-			if !hasWB {
-				continue
-			}
-
-			// Build the error message in reverse.
-			err := ""
-			for call.target != nil {
-				err = fmt.Sprintf("\n\t%v: called by %v%s", linestr(call.lineno), n.Func.Nname, err)
-				n = call.target
-				call = c.best[n]
-			}
-			err = fmt.Sprintf("write barrier prohibited by caller; %v%s", n.Func.Nname, err)
-			yyerrorl(n.Func.WBLineno, err)
-		}
-	})
-}
-
-func (c *nowritebarrierrecChecker) visitcodelist(l Nodes) {
-	for _, n := range l.Slice() {
-		c.visitcode(n)
-	}
-}
-
-func (c *nowritebarrierrecChecker) visitcode(n *Node) {
-	if n == nil {
-		return
-	}
-
-	if n.Op == OCALLFUNC || n.Op == OCALLMETH {
-		c.visitcall(n)
-	}
-
-	c.visitcodelist(n.Ninit)
-	c.visitcode(n.Left)
-	c.visitcode(n.Right)
-	c.visitcodelist(n.List)
-	c.visitcodelist(n.Nbody)
-	c.visitcodelist(n.Rlist)
-}
-
-func (c *nowritebarrierrecChecker) visitcall(n *Node) {
-	fn := n.Left
-	if n.Op == OCALLMETH {
-		fn = n.Left.Sym.Def
-	}
-	if fn == nil || fn.Op != ONAME || fn.Class != PFUNC || fn.Name.Defn == nil {
-		return
-	}
-	defn := fn.Name.Defn
-
-	fnbest, ok := c.best[defn]
-	if !ok {
-		return
-	}
-	best, ok := c.best[c.curfn]
-	if ok && fnbest.depth+1 >= best.depth {
-		return
-	}
-	c.best[c.curfn] = nowritebarrierrecCall{target: defn, depth: fnbest.depth + 1, lineno: n.Lineno}
-	c.stable = false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/esc.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/esc.go
deleted file mode 100644
index 8b22f61..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/esc.go
+++ /dev/null
@@ -1,2117 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/esc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/esc.go:1
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// Run analysis on minimal sets of mutually recursive functions
-// or single non-recursive functions, bottom up.
-//
-// Finding these sets is finding strongly connected components
-// by reverse topological order in the static call graph.
-// The algorithm (known as Tarjan's algorithm) for doing that is taken from
-// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
-//
-// First, a hidden closure function (n.Func.IsHiddenClosure) cannot be the
-// root of a connected component. Refusing to use it as a root
-// forces it into the component of the function in which it appears.
-// This is more convenient for escape analysis.
-//
-// Second, each function becomes two virtual nodes in the graph,
-// with numbers n and n+1. We record the function's node number as n
-// but search from node n+1. If the search tells us that the component
-// number (min) is n+1, we know that this is a trivial component: one function
-// plus its closures. If the search tells us that the component number is
-// n, then there was a path from node n+1 back to node n, meaning that
-// the function set is mutually recursive. The escape analysis can be
-// more precise when analyzing a single non-recursive function than
-// when analyzing a set of mutually recursive functions.
-
-type bottomUpVisitor struct {
-	analyze  func([]*Node, bool)
-	visitgen uint32
-	nodeID   map[*Node]uint32
-	stack    []*Node
-}
-
-// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
-// It calls analyze with successive groups of functions, working from
-// the bottom of the call graph upward. Each time analyze is called with
-// a list of functions, every function on that list only calls other functions
-// on the list or functions that have been passed in previous invocations of
-// analyze. Closures appear in the same list as their outer functions.
-// The lists are as short as possible while preserving those requirements.
-// (In a typical program, many invocations of analyze will be passed just
-// a single function.) The boolean argument 'recursive' passed to analyze
-// specifies whether the functions on the list are mutually recursive.
-// If recursive is false, the list consists of only a single function and its closures.
-// If recursive is true, the list may still contain only a single function,
-// if that function is itself recursive.
-func visitBottomUp(list []*Node, analyze func(list []*Node, recursive bool)) {
-	var v bottomUpVisitor
-	v.analyze = analyze
-	v.nodeID = make(map[*Node]uint32)
-	for _, n := range list {
-		if n.Op == ODCLFUNC && !n.Func.IsHiddenClosure {
-			v.visit(n)
-		}
-	}
-}
-
-func (v *bottomUpVisitor) visit(n *Node) uint32 {
-	if id := v.nodeID[n]; id > 0 {
-		// already visited
-		return id
-	}
-
-	v.visitgen++
-	id := v.visitgen
-	v.nodeID[n] = id
-	v.visitgen++
-	min := v.visitgen
-
-	v.stack = append(v.stack, n)
-	min = v.visitcodelist(n.Nbody, min)
-	if (min == id || min == id+1) && !n.Func.IsHiddenClosure {
-		// This node is the root of a strongly connected component.
-
-		// The original min passed to visitcodelist was v.nodeID[n]+1.
-		// If visitcodelist found its way back to v.nodeID[n], then this
-		// block is a set of mutually recursive functions.
-		// Otherwise it's just a lone function that does not recurse.
-		recursive := min == id
-
-		// Remove connected component from stack.
-		// Mark the node IDs so that future visits return a large number
-		// so as not to affect the caller's min.
-
-		var i int
-		for i = len(v.stack) - 1; i >= 0; i-- {
-			x := v.stack[i]
-			if x == n {
-				break
-			}
-			v.nodeID[x] = ^uint32(0)
-		}
-		v.nodeID[n] = ^uint32(0)
-		block := v.stack[i:]
-		// Run escape analysis on this set of functions.
-		v.stack = v.stack[:i]
-		v.analyze(block, recursive)
-	}
-
-	return min
-}
-
-func (v *bottomUpVisitor) visitcodelist(l Nodes, min uint32) uint32 {
-	for _, n := range l.Slice() {
-		min = v.visitcode(n, min)
-	}
-	return min
-}
-
-func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
-	if n == nil {
-		return min
-	}
-
-	min = v.visitcodelist(n.Ninit, min)
-	min = v.visitcode(n.Left, min)
-	min = v.visitcode(n.Right, min)
-	min = v.visitcodelist(n.List, min)
-	min = v.visitcodelist(n.Nbody, min)
-	min = v.visitcodelist(n.Rlist, min)
-
-	if n.Op == OCALLFUNC || n.Op == OCALLMETH {
-		fn := n.Left
-		if n.Op == OCALLMETH {
-			fn = n.Left.Sym.Def
-		}
-		if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Name.Defn != nil {
-			m := v.visit(fn.Name.Defn)
-			if m < min {
-				min = m
-			}
-		}
-	}
-
-	if n.Op == OCLOSURE {
-		m := v.visit(n.Func.Closure)
-		if m < min {
-			min = m
-		}
-	}
-
-	return min
-}
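// To make the n/n+1 trick described above concrete, here is a standalone sketch
// of the same scheme applied to a toy call graph (not part of this file): f and
// g are mutually recursive, h calls g, and main calls f and h. analyze is
// invoked bottom up, with the recursive flag set only for the [f g] component.
package main

import "fmt"

var callGraph = map[string][]string{
	"main": {"f", "h"},
	"f":    {"g"},
	"g":    {"f"},
	"h":    {"g"},
}

type visitor struct {
	visitgen uint32
	nodeID   map[string]uint32
	stack    []string
	analyze  func(group []string, recursive bool)
}

func (v *visitor) visit(n string) uint32 {
	if id := v.nodeID[n]; id > 0 {
		return id // already visited
	}
	v.visitgen++
	id := v.visitgen
	v.nodeID[n] = id
	v.visitgen++
	min := v.visitgen

	v.stack = append(v.stack, n)
	for _, callee := range callGraph[n] {
		if m := v.visit(callee); m < min {
			min = m
		}
	}
	if min == id || min == id+1 {
		recursive := min == id // the search found its way back to node n itself
		var i int
		for i = len(v.stack) - 1; i >= 0; i-- {
			x := v.stack[i]
			if x == n {
				break
			}
			v.nodeID[x] = ^uint32(0)
		}
		v.nodeID[n] = ^uint32(0)
		group := v.stack[i:]
		v.stack = v.stack[:i]
		v.analyze(group, recursive)
	}
	return min
}

func main() {
	v := &visitor{
		nodeID: make(map[string]uint32),
		analyze: func(group []string, recursive bool) {
			fmt.Println(group, "recursive =", recursive)
		},
	}
	v.visit("main")
	// Output:
	// [f g] recursive = true
	// [h] recursive = false
	// [main] recursive = false
}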
-
-// Escape analysis.
-
-// An escape analysis pass for a set of functions.
-// The analysis assumes that closures and the functions in which they
-// appear are analyzed together, so that the aliasing between their
-// variables can be modeled more precisely.
-//
-// First escfunc, esc and escassign recurse over the ast of each
-// function to dig out flow(dst,src) edges between any
-// pointer-containing nodes and store them in e.nodeEscState(dst).Flowsrc. For
-// variables assigned to a variable in an outer scope or used as a
-// return value, they store a flow(theSink, src) edge to a fake node
-// 'the Sink'.  For variables referenced in closures, an edge
-// flow(closure, &var) is recorded and the flow of a closure itself to
-// an outer scope is tracked the same way as other variables.
-//
-// Then escflood walks the graph starting at theSink and tags all
-// variables it can reach through an & node as escaping and all function
-// parameters it can reach as leaking.
-//
-// If a value's address is taken but the address does not escape,
-// then the value can stay on the stack. If the value new(T) does
-// not escape, then new(T) can be rewritten into a stack allocation.
-// The same is true of slice literals.
-//
-// If optimizations are disabled (-N), this code is not used.
-// Instead, the compiler assumes that any value whose address
-// is taken without being immediately dereferenced
-// needs to be moved to the heap, and new(T) and slice
-// literals are always real allocations.
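// A source-level illustration of the behavior described above (standalone
// sketch, not part of this file). Building it with "go build -gcflags=-m"
// reports that t is moved to the heap, while the new(T) in stays does not escape.
package main

type T struct{ x int }

// The address of t escapes through the return value, so t must be heap allocated.
func escapes() *T {
	t := T{x: 1}
	return &t
}

// p's address never leaves the function, so the new(T) can live on the stack.
func stays() int {
	p := new(T)
	p.x = 2
	return p.x
}

func main() {
	_ = escapes()
	_ = stays()
}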
-
-func escapes(all []*Node) {
-	visitBottomUp(all, escAnalyze)
-}
-
-const (
-	EscFuncUnknown = 0 + iota
-	EscFuncPlanned
-	EscFuncStarted
-	EscFuncTagged
-)
-
-// There appear to be some loops in the escape graph, causing
-// arbitrary recursion into deeper and deeper levels.
-// Cut this off safely by making minLevel sticky: once you
-// get that deep, you cannot go down any further but you also
-// cannot go up any further. This is a conservative fix.
-// Making minLevel smaller (more negative) would handle more
-// complex chains of indirections followed by address-of operations,
-// at the cost of repeating the traversal once for each additional
-// allowed level when a loop is encountered. Using -2 suffices to
-// pass all the tests we have written so far, which we assume matches
-// the level of complexity we want the escape analysis code to handle.
-const (
-	MinLevel = -2
-)
-
-// A Level encodes the reference state and context applied to
-// (stack, heap) allocated memory.
-//
-// value is the overall sum of *(1) and &(-1) operations encountered
-// along a path from a destination (sink, return value) to a source
-// (allocation, parameter).
-//
-// suffixValue is the maximum-copy-started-suffix-level applied to a sink.
-// For example:
-// sink = x.left.left --> level=2, x is dereferenced twice and does not escape to sink.
-// sink = &Node{x} --> level=-1, x is accessible from sink via one "address of"
-// sink = &Node{&Node{x}} --> level=-2, x is accessible from sink via two "address of"
-// sink = &Node{&Node{x.left}} --> level=-1, but x is NOT accessible from sink because it was indirected and then copied.
-// (The copy operations are sometimes implicit in the source code; in this case,
-// value of x.left was copied into a field of a newly allocated Node)
-//
-// There's one of these for each Node, and the integer values
-// rarely exceed even what can be stored in 4 bits, never mind 8.
-type Level struct {
-	value, suffixValue int8
-}
-
-func (l Level) int() int {
-	return int(l.value)
-}
-
-func levelFrom(i int) Level {
-	if i <= MinLevel {
-		return Level{value: MinLevel}
-	}
-	return Level{value: int8(i)}
-}
-
-func satInc8(x int8) int8 {
-	if x == 127 {
-		return 127
-	}
-	return x + 1
-}
-
-func min8(a, b int8) int8 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func max8(a, b int8) int8 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-// inc returns the level l + 1, representing the effect of an indirect (*) operation.
-func (l Level) inc() Level {
-	if l.value <= MinLevel {
-		return Level{value: MinLevel}
-	}
-	return Level{value: satInc8(l.value), suffixValue: satInc8(l.suffixValue)}
-}
-
-// dec returns the level l - 1, representing the effect of an address-of (&) operation.
-func (l Level) dec() Level {
-	if l.value <= MinLevel {
-		return Level{value: MinLevel}
-	}
-	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
-}
-
-// copy returns the level for a copy of a value with level l.
-func (l Level) copy() Level {
-	return Level{value: l.value, suffixValue: max8(l.suffixValue, 0)}
-}
-
-func (l1 Level) min(l2 Level) Level {
-	return Level{
-		value:       min8(l1.value, l2.value),
-		suffixValue: min8(l1.suffixValue, l2.suffixValue)}
-}
-
-// guaranteedDereference returns the number of dereferences
-// applied to a pointer before addresses are taken/generated.
-// This is the maximum level computed from path suffixes starting
-// with copies where paths flow from destination to source.
-func (l Level) guaranteedDereference() int {
-	return int(l.suffixValue)
-}
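// A standalone sketch of the Level arithmetic (a simplified copy of the type
// above, not part of this file; satInc8 omitted for brevity): & lowers the
// level, * raises it, and values saturate at MinLevel, which is what keeps
// cyclic escape graphs from recursing forever.
package main

import "fmt"

const MinLevel = -2

type Level struct{ value, suffixValue int8 }

// dec models an address-of (&) operation.
func (l Level) dec() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: l.value - 1, suffixValue: l.suffixValue - 1}
}

// inc models an indirect (*) operation.
func (l Level) inc() Level {
	if l.value <= MinLevel {
		return Level{value: MinLevel}
	}
	return Level{value: l.value + 1, suffixValue: l.suffixValue + 1}
}

func main() {
	var l Level                      // the destination (sink) starts at level 0
	fmt.Println(l.inc().inc())       // sink = x.left.left     -> {2 2}
	fmt.Println(l.dec())             // sink = &Node{x}        -> {-1 -1}
	fmt.Println(l.dec().dec())       // sink = &Node{&Node{x}} -> {-2 -2}
	fmt.Println(l.dec().dec().dec()) // saturates at MinLevel  -> {-2 0}
}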
-
-// An EscStep documents one step in the path from memory
-// that is heap allocated to the (alleged) reason for the
-// heap allocation.
-type EscStep struct {
-	src, dst *Node    // the endpoints of this edge in the escape-to-heap chain.
-	where    *Node    // sometimes the endpoints don't match source locations; set 'where' to make that right
-	parent   *EscStep // used in flood to record path
-	why      string   // explanation for this step in the escape-to-heap chain
-	busy     bool     // used to snip cycles and prevent infinite recursion.
-}
-
-type NodeEscState struct {
-	Curfn             *Node
-	Flowsrc           []EscStep // flow(this, src)
-	Retval            Nodes     // on OCALLxxx, list of dummy return values
-	Loopdepth         int32     // -1: global, 0: return variables, 1:function top level, increased inside function for every loop or label to mark scopes
-	Level             Level
-	Walkgen           uint32
-	Maxextraloopdepth int32
-}
-
-func (e *EscState) nodeEscState(n *Node) *NodeEscState {
-	if nE, ok := n.Opt().(*NodeEscState); ok {
-		return nE
-	}
-	if n.Opt() != nil {
-		Fatalf("nodeEscState: opt in use (%T)", n.Opt())
-	}
-	nE := &NodeEscState{
-		Curfn: Curfn,
-	}
-	n.SetOpt(nE)
-	e.opts = append(e.opts, n)
-	return nE
-}
-
-func (e *EscState) track(n *Node) {
-	if Curfn == nil {
-		Fatalf("EscState.track: Curfn nil")
-	}
-	n.Esc = EscNone // until proven otherwise
-	nE := e.nodeEscState(n)
-	nE.Loopdepth = e.loopdepth
-	e.noesc = append(e.noesc, n)
-}
-
-// Escape constants are numbered in order of increasing "escapiness"
-// to help make inferences monotonic. With the exception of
-// EscNever which is sticky, eX < eY means that eY is more exposed
-// than eX, and hence replaces it in a conservative analysis.
-const (
-	EscUnknown        = iota
-	EscNone           // Does not escape to heap, result, or parameters.
-	EscReturn         // Is returned or reachable from returned.
-	EscHeap           // Reachable from the heap
-	EscNever          // By construction will not escape.
-	EscBits           = 3
-	EscMask           = (1 << EscBits) - 1
-	EscContentEscapes = 1 << EscBits // value obtained by indirect of parameter escapes to heap
-	EscReturnBits     = EscBits + 1
-	// Node.esc encoding = | escapeReturnEncoding:(width-4) | contentEscapes:1 | escEnum:3
-)
-
-// escMax returns the maximum of an existing escape value
-// (and its additional parameter flow flags) and a new escape type.
-func escMax(e, etype uint16) uint16 {
-	if e&EscMask >= EscHeap {
-		// normalize
-		if e&^EscMask != 0 {
-			Fatalf("Escape information had unexpected return encoding bits (w/ EscHeap, EscNever), e&EscMask=%v", e&EscMask)
-		}
-	}
-	if e&EscMask > etype {
-		return e
-	}
-	if etype == EscNone || etype == EscReturn {
-		return (e &^ EscMask) | etype
-	}
-	return etype
-}
-
-// For each input parameter to a function, the escapeReturnEncoding describes
-// how the parameter may leak to the function's outputs. This is currently the
-// "level" of the leak where level is 0 or larger (negative level means stored into
-// something whose address is returned -- but that implies stored into the heap,
-// hence EscHeap, which means that the details are not currently relevant.)
-const (
-	bitsPerOutputInTag = 3                                 // For each output, the number of bits for a tag
-	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
-	maxEncodedLevel    = int(bitsMaskForTag - 1)           // The largest level that can be stored in a tag.
-)
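// A standalone sketch of the packing these constants describe (not the
// compiler's actual tag helpers; constant values duplicated here so the sketch
// compiles on its own): the low 3 bits hold the escape enum, bit 3 records
// whether the contents escape, and each result gets a 3-bit level tag above
// that, stored in this sketch as level+1 so that 0 means "does not leak".
package main

import "fmt"

const (
	EscNone           = 1
	EscReturn         = 2
	EscBits           = 3
	EscMask           = (1 << EscBits) - 1
	EscContentEscapes = 1 << EscBits
	EscReturnBits     = EscBits + 1

	bitsPerOutputInTag = 3
	bitsMaskForTag     = uint16(1<<bitsPerOutputInTag) - 1
)

// noteLeak records that a parameter leaks to result i at the given level.
func noteLeak(esc uint16, i int, level int) uint16 {
	esc = esc&^EscMask | EscReturn
	shift := uint(EscReturnBits + i*bitsPerOutputInTag)
	esc &^= bitsMaskForTag << shift
	return esc | uint16(level+1)<<shift
}

func main() {
	var esc uint16 = EscNone
	esc = noteLeak(esc, 1, 0) // leaks directly (level 0) to result #1
	fmt.Printf("enum=%d contentEscapes=%v tag0=%d tag1=%d\n",
		esc&EscMask,
		esc&EscContentEscapes != 0,
		esc>>EscReturnBits&bitsMaskForTag,
		esc>>(EscReturnBits+bitsPerOutputInTag)&bitsMaskForTag)
	// Output: enum=2 contentEscapes=false tag0=0 tag1=1
}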
-
-type EscState struct {
-	// Fake node that all
-	//   - return values and output variables
-	//   - parameters on imported functions not marked 'safe'
-	//   - assignments to global variables
-	// flow to.
-	theSink Node
-
-	dsts      []*Node // all dst nodes
-	loopdepth int32   // for detecting nested loop scopes
-	pdepth    int     // for debug printing in recursions.
-	dstcount  int     // diagnostic
-	edgecount int     // diagnostic
-	noesc     []*Node // list of possible non-escaping nodes, for printing
-	recursive bool    // recursive function or group of mutually recursive functions.
-	opts      []*Node // nodes with .Opt initialized
-	walkgen   uint32
-}
-
-func newEscState(recursive bool) *EscState {
-	e := new(EscState)
-	e.theSink.Op = ONAME
-	e.theSink.Orig = &e.theSink
-	e.theSink.Class = PEXTERN
-	e.theSink.Sym = lookup(".sink")
-	e.nodeEscState(&e.theSink).Loopdepth = -1
-	e.recursive = recursive
-	return e
-}
-
-func (e *EscState) stepWalk(dst, src *Node, why string, parent *EscStep) *EscStep {
-	// TODO: keep a cache of these, mark entry/exit in escwalk to avoid allocation
-	// Or perhaps never mind, since it is disabled unless printing is on.
-	// We may want to revisit this, since the EscStep nodes would make
-	// an excellent replacement for the poorly-separated graph-build/graph-flood
-	// stages.
-	if Debug['m'] == 0 {
-		return nil
-	}
-	return &EscStep{src: src, dst: dst, why: why, parent: parent}
-}
-
-func (e *EscState) stepAssign(step *EscStep, dst, src *Node, why string) *EscStep {
-	if Debug['m'] == 0 {
-		return nil
-	}
-	if step != nil { // Caller may have known better.
-		if step.why == "" {
-			step.why = why
-		}
-		if step.dst == nil {
-			step.dst = dst
-		}
-		if step.src == nil {
-			step.src = src
-		}
-		return step
-	}
-	return &EscStep{src: src, dst: dst, why: why}
-}
-
-func (e *EscState) stepAssignWhere(dst, src *Node, why string, where *Node) *EscStep {
-	if Debug['m'] == 0 {
-		return nil
-	}
-	return &EscStep{src: src, dst: dst, why: why, where: where}
-}
-
-// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
-func funcSym(fn *Node) *Sym {
-	if fn == nil || fn.Func.Nname == nil {
-		return nil
-	}
-	return fn.Func.Nname.Sym
-}
-
-// curfnSym returns n.Curfn.Nname.Sym if no nils are encountered along the way.
-func (e *EscState) curfnSym(n *Node) *Sym {
-	nE := e.nodeEscState(n)
-	return funcSym(nE.Curfn)
-}
-
-func escAnalyze(all []*Node, recursive bool) {
-	e := newEscState(recursive)
-
-	for _, n := range all {
-		if n.Op == ODCLFUNC {
-			n.Esc = EscFuncPlanned
-		}
-	}
-
-	// flow-analyze functions
-	for _, n := range all {
-		if n.Op == ODCLFUNC {
-			e.escfunc(n)
-		}
-	}
-
-	// print("escapes: %d e.dsts, %d edges\n", e.dstcount, e.edgecount);
-
-	// visit the upstream of each dst, mark address nodes with
-	// addrescapes, mark parameters unsafe
-	escapes := make([]uint16, len(e.dsts))
-	for i, n := range e.dsts {
-		escapes[i] = n.Esc
-	}
-	for _, n := range e.dsts {
-		e.escflood(n)
-	}
-	for {
-		done := true
-		for i, n := range e.dsts {
-			if n.Esc != escapes[i] {
-				done = false
-				if Debug['m'] > 2 {
-					Warnl(n.Lineno, "Reflooding %v %S", e.curfnSym(n), n)
-				}
-				escapes[i] = n.Esc
-				e.escflood(n)
-			}
-		}
-		if done {
-			break
-		}
-	}
-
-	// for all top level functions, tag the typenodes corresponding to the param nodes
-	for _, n := range all {
-		if n.Op == ODCLFUNC {
-			e.esctag(n)
-		}
-	}
-
-	if Debug['m'] != 0 {
-		for _, n := range e.noesc {
-			if n.Esc == EscNone {
-				Warnl(n.Lineno, "%v %S does not escape", e.curfnSym(n), n)
-			}
-		}
-	}
-
-	for _, x := range e.opts {
-		x.SetOpt(nil)
-	}
-}
-
-func (e *EscState) escfunc(fn *Node) {
-	//	print("escfunc %N %s\n", fn.Func.Nname, e.recursive?"(recursive)":"");
-	if fn.Esc != EscFuncPlanned {
-		Fatalf("repeat escfunc %v", fn.Func.Nname)
-	}
-	fn.Esc = EscFuncStarted
-
-	saveld := e.loopdepth
-	e.loopdepth = 1
-	savefn := Curfn
-	Curfn = fn
-
-	for _, ln := range Curfn.Func.Dcl {
-		if ln.Op != ONAME {
-			continue
-		}
-		lnE := e.nodeEscState(ln)
-		switch ln.Class {
-		// out params are in a loopdepth between the sink and all local variables
-		case PPARAMOUT:
-			lnE.Loopdepth = 0
-
-		case PPARAM:
-			lnE.Loopdepth = 1
-			if ln.Type != nil && !haspointers(ln.Type) {
-				break
-			}
-			if Curfn.Nbody.Len() == 0 && !Curfn.Noescape {
-				ln.Esc = EscHeap
-			} else {
-				ln.Esc = EscNone // prime for escflood later
-			}
-			e.noesc = append(e.noesc, ln)
-		}
-	}
-
-	// in a mutually recursive group we lose track of the return values
-	if e.recursive {
-		for _, ln := range Curfn.Func.Dcl {
-			if ln.Op == ONAME && ln.Class == PPARAMOUT {
-				e.escflows(&e.theSink, ln, e.stepAssign(nil, ln, ln, "returned from recursive function"))
-			}
-		}
-	}
-
-	e.escloopdepthlist(Curfn.Nbody)
-	e.esclist(Curfn.Nbody, Curfn)
-	Curfn = savefn
-	e.loopdepth = saveld
-}
-
-// Mark labels that have no backjumps to them as not increasing e.loopdepth.
-// Walk hasn't generated (goto|label).Left.Sym.Label yet, so we'll cheat
-// and set it to one of the following two. Then in esc we'll clear it again.
-var (
-	looping    Node
-	nonlooping Node
-)
-
-func (e *EscState) escloopdepthlist(l Nodes) {
-	for _, n := range l.Slice() {
-		e.escloopdepth(n)
-	}
-}
-
-func (e *EscState) escloopdepth(n *Node) {
-	if n == nil {
-		return
-	}
-
-	e.escloopdepthlist(n.Ninit)
-
-	switch n.Op {
-	case OLABEL:
-		if n.Left == nil || n.Left.Sym == nil {
-			Fatalf("esc:label without label: %+v", n)
-		}
-
-		// Walk will complain about this label being already defined, but that's not until
-		// after escape analysis. In the future, maybe pull label & goto analysis out of walk and put it before esc.
-		// if(n.Left.Sym.Label != nil)
-		//	fatal("escape analysis messed up analyzing label: %+N", n);
-		n.Left.Sym.Label = &nonlooping
-
-	case OGOTO:
-		if n.Left == nil || n.Left.Sym == nil {
-			Fatalf("esc:goto without label: %+v", n)
-		}
-
-		// If we come past one that's uninitialized, this must be a (harmless) forward jump
-		// but if it's set to nonlooping the label must have preceded this goto.
-		if n.Left.Sym.Label == &nonlooping {
-			n.Left.Sym.Label = &looping
-		}
-	}
-
-	e.escloopdepth(n.Left)
-	e.escloopdepth(n.Right)
-	e.escloopdepthlist(n.List)
-	e.escloopdepthlist(n.Nbody)
-	e.escloopdepthlist(n.Rlist)
-}
-
-func (e *EscState) esclist(l Nodes, parent *Node) {
-	for _, n := range l.Slice() {
-		e.esc(n, parent)
-	}
-}
-
-func (e *EscState) esc(n *Node, parent *Node) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-
-	// ninit logically runs at a different loopdepth than the rest of the for loop.
-	e.esclist(n.Ninit, n)
-
-	if n.Op == OFOR || n.Op == ORANGE {
-		e.loopdepth++
-	}
-
-	// type switch variables have no ODCL.
-	// process type switch as declaration.
-	// must happen before processing of switch body,
-	// so before recursion.
-	if n.Op == OSWITCH && n.Left != nil && n.Left.Op == OTYPESW {
-		for _, n1 := range n.List.Slice() { // cases
-			// it.N().Rlist is the variable per case
-			if n1.Rlist.Len() != 0 {
-				e.nodeEscState(n1.Rlist.First()).Loopdepth = e.loopdepth
-			}
-		}
-	}
-
-	// Big stuff escapes unconditionally
-	// "Big" conditions that were scattered around in walk have been gathered here
-	if n.Esc != EscHeap && n.Type != nil &&
-		(n.Type.Width > MaxStackVarSize ||
-			(n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 ||
-			n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
-		if Debug['m'] > 2 {
-			Warnl(n.Lineno, "%v is too large for stack", n)
-		}
-		n.Esc = EscHeap
-		addrescapes(n)
-		e.escassignSinkWhy(n, n, "too large for stack") // TODO category: tooLarge
-	}
-
-	e.esc(n.Left, n)
-	e.esc(n.Right, n)
-	e.esclist(n.Nbody, n)
-	e.esclist(n.List, n)
-	e.esclist(n.Rlist, n)
-
-	if n.Op == OFOR || n.Op == ORANGE {
-		e.loopdepth--
-	}
-
-	if Debug['m'] > 2 {
-		fmt.Printf("%v:[%d] %v esc: %v\n", linestr(lineno), e.loopdepth, funcSym(Curfn), n)
-	}
-
-	switch n.Op {
-	// Record loop depth at declaration.
-	case ODCL:
-		if n.Left != nil {
-			e.nodeEscState(n.Left).Loopdepth = e.loopdepth
-		}
-
-	case OLABEL:
-		if n.Left.Sym.Label == &nonlooping {
-			if Debug['m'] > 2 {
-				fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
-			}
-		} else if n.Left.Sym.Label == &looping {
-			if Debug['m'] > 2 {
-				fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
-			}
-			e.loopdepth++
-		}
-
-		// See case OLABEL in escloopdepth above
-		// else if(n.Left.Sym.Label == nil)
-		//	fatal("escape analysis missed or messed up a label: %+N", n);
-
-		n.Left.Sym.Label = nil
-
-	case ORANGE:
-		if n.List.Len() >= 2 {
-			// Everything but fixed array is a dereference.
-
-			// If fixed array is really the address of fixed array,
-			// it is also a dereference, because it is implicitly
-			// dereferenced (see #12588)
-			if n.Type.IsArray() &&
-				!(n.Right.Type.IsPtr() && eqtype(n.Right.Type.Elem(), n.Type)) {
-				e.escassignWhyWhere(n.List.Second(), n.Right, "range", n)
-			} else {
-				e.escassignDereference(n.List.Second(), n.Right, e.stepAssignWhere(n.List.Second(), n.Right, "range-deref", n))
-			}
-		}
-
-	case OSWITCH:
-		if n.Left != nil && n.Left.Op == OTYPESW {
-			for _, n2 := range n.List.Slice() {
-				// cases
-				// n.Left.Right is the argument of the .(type),
-				// it.N().Rlist is the variable per case
-				if n2.Rlist.Len() != 0 {
-					e.escassignWhyWhere(n2.Rlist.First(), n.Left.Right, "switch case", n)
-				}
-			}
-		}
-
-	// Filter out the following special case.
-	//
-	//	func (b *Buffer) Foo() {
-	//		n, m := ...
-	//		b.buf = b.buf[n:m]
-	//	}
-	//
-	// This assignment is a no-op for escape analysis;
-	// it does not store any new pointers into b that were not already there.
-	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
-	case OAS, OASOP, OASWB:
-		if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
-			(n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
-			(n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
-			n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME
-
-			// Here we also assume that the statement will not contain calls,
-			// that is, that order will move any calls to init.
-			// Otherwise base ONAME value could change between the moments
-			// when we evaluate it for dst and for src.
-			//
-			// Note, this optimization does not apply to OSLICEARR,
-			// because it does introduce a new pointer into b that was not already there
-			// (pointer to b itself). After such assignment, if b contents escape,
-			// b escapes as well. If we ignore such OSLICEARR, we will conclude
-			// that b does not escape when b contents do.
-			if Debug['m'] != 0 {
-				Warnl(n.Lineno, "%v ignoring self-assignment to %S", e.curfnSym(n), n.Left)
-			}
-
-			break
-		}
-
-		e.escassign(n.Left, n.Right, e.stepAssignWhere(nil, nil, "", n))
-
-	case OAS2: // x,y = a,b
-		if n.List.Len() == n.Rlist.Len() {
-			rs := n.Rlist.Slice()
-			for i, n := range n.List.Slice() {
-				e.escassignWhyWhere(n, rs[i], "assign-pair", n)
-			}
-		}
-
-	case OAS2RECV: // v, ok = <-ch
-		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-receive", n)
-	case OAS2MAPR: // v, ok = m[k]
-		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-mapr", n)
-	case OAS2DOTTYPE: // v, ok = x.(type)
-		e.escassignWhyWhere(n.List.First(), n.Rlist.First(), "assign-pair-dot-type", n)
-
-	case OSEND: // ch <- x
-		e.escassignSinkWhy(n, n.Right, "send")
-
-	case ODEFER:
-		if e.loopdepth == 1 { // top level
-			break
-		}
-		// arguments leak out of scope
-		// TODO: leak to a dummy node instead
-		// defer f(x) - f and x escape
-		e.escassignSinkWhy(n, n.Left.Left, "defer func")
-
-		e.escassignSinkWhy(n, n.Left.Right, "defer func ...") // ODDDARG for call
-		for _, n4 := range n.Left.List.Slice() {
-			e.escassignSinkWhy(n, n4, "defer func arg")
-		}
-
-	case OPROC:
-		// go f(x) - f and x escape
-		e.escassignSinkWhy(n, n.Left.Left, "go func")
-
-		e.escassignSinkWhy(n, n.Left.Right, "go func ...") // ODDDARG for call
-		for _, n4 := range n.Left.List.Slice() {
-			e.escassignSinkWhy(n, n4, "go func arg")
-		}
-
-	case OCALLMETH, OCALLFUNC, OCALLINTER:
-		e.esccall(n, parent)
-
-		// esccall already done on n.Rlist.First(); tie its Retval to n.List.
-	case OAS2FUNC: // x,y = f()
-		rs := e.nodeEscState(n.Rlist.First()).Retval.Slice()
-		for i, n := range n.List.Slice() {
-			if i >= len(rs) {
-				break
-			}
-			e.escassignWhyWhere(n, rs[i], "assign-pair-func-call", n)
-		}
-		if n.List.Len() != len(rs) {
-			Fatalf("esc oas2func")
-		}
-
-	case ORETURN:
-		retList := n.List
-		if retList.Len() == 1 && Curfn.Type.Results().NumFields() > 1 {
-			// OAS2FUNC in disguise
-			// esccall already done on n.List.First()
-			// tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's
-			retList = e.nodeEscState(n.List.First()).Retval
-		}
-
-		i := 0
-		for _, lrn := range Curfn.Func.Dcl {
-			if i >= retList.Len() {
-				break
-			}
-			if lrn.Op != ONAME || lrn.Class != PPARAMOUT {
-				continue
-			}
-			e.escassignWhyWhere(lrn, retList.Index(i), "return", n)
-			i++
-		}
-
-		if i < retList.Len() {
-			Fatalf("esc return list")
-		}
-
-		// Argument could leak through recover.
-	case OPANIC:
-		e.escassignSinkWhy(n, n.Left, "panic")
-
-	case OAPPEND:
-		if !n.Isddd {
-			for _, nn := range n.List.Slice()[1:] {
-				e.escassignSinkWhy(n, nn, "appended to slice") // lose track of assign to dereference
-			}
-		} else {
-			// append(slice1, slice2...) -- slice2 itself does not escape, but contents do.
-			slice2 := n.List.Second()
-			e.escassignDereference(&e.theSink, slice2, e.stepAssignWhere(n, slice2, "appended slice...", n)) // lose track of assign of dereference
-			if Debug['m'] > 3 {
-				Warnl(n.Lineno, "%v special treatment of append(slice1, slice2...) %S", e.curfnSym(n), n)
-			}
-		}
-		e.escassignDereference(&e.theSink, n.List.First(), e.stepAssignWhere(n, n.List.First(), "appendee slice", n)) // The original elements are now leaked, too
-
-	case OCOPY:
-		e.escassignDereference(&e.theSink, n.Right, e.stepAssignWhere(n, n.Right, "copied slice", n)) // lose track of assign of dereference
-
-	case OCONV, OCONVNOP:
-		e.escassignWhyWhere(n, n.Left, "converted", n)
-
-	case OCONVIFACE:
-		e.track(n)
-		e.escassignWhyWhere(n, n.Left, "interface-converted", n)
-
-	case OARRAYLIT:
-		// Link values to array
-		for _, n2 := range n.List.Slice() {
-			if n2.Op == OKEY {
-				n2 = n2.Right
-			}
-			e.escassign(n, n2, e.stepAssignWhere(n, n2, "array literal element", n))
-		}
-
-	case OSLICELIT:
-		// Slice is not leaked until proven otherwise
-		e.track(n)
-		// Link values to slice
-		for _, n2 := range n.List.Slice() {
-			if n2.Op == OKEY {
-				n2 = n2.Right
-			}
-			e.escassign(n, n2, e.stepAssignWhere(n, n2, "slice literal element", n))
-		}
-
-		// Link values to struct.
-	case OSTRUCTLIT:
-		for _, n6 := range n.List.Slice() {
-			e.escassignWhyWhere(n, n6.Left, "struct literal element", n)
-		}
-
-	case OPTRLIT:
-		e.track(n)
-
-		// Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too.
-		e.escassignWhyWhere(n, n.Left, "pointer literal [assign]", n)
-
-	case OCALLPART:
-		e.track(n)
-
-		// Contents make it to memory, lose track.
-		e.escassignSinkWhy(n, n.Left, "call part")
-
-	case OMAPLIT:
-		e.track(n)
-		// Keys and values make it to memory, lose track.
-		for _, n7 := range n.List.Slice() {
-			e.escassignSinkWhy(n, n7.Left, "map literal key")
-			e.escassignSinkWhy(n, n7.Right, "map literal value")
-		}
-
-	case OCLOSURE:
-		// Link addresses of captured variables to closure.
-		for _, v := range n.Func.Cvars.Slice() {
-			if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
-				continue
-			}
-			a := v.Name.Defn
-			if !v.Name.Byval {
-				a = nod(OADDR, a, nil)
-				a.Lineno = v.Lineno
-				e.nodeEscState(a).Loopdepth = e.loopdepth
-				a = typecheck(a, Erv)
-			}
-
-			e.escassignWhyWhere(n, a, "captured by a closure", n)
-		}
-		fallthrough
-
-	case OMAKECHAN,
-		OMAKEMAP,
-		OMAKESLICE,
-		ONEW,
-		OARRAYRUNESTR,
-		OARRAYBYTESTR,
-		OSTRARRAYRUNE,
-		OSTRARRAYBYTE,
-		ORUNESTR:
-		e.track(n)
-
-	case OADDSTR:
-		e.track(n)
-		// Arguments of OADDSTR do not escape.
-
-	case OADDR:
-		// current loop depth is an upper bound on actual loop depth
-		// of addressed value.
-		e.track(n)
-
-		// for &x, use loop depth of x if known.
-		// it should always be known, but if not, be conservative
-		// and keep the current loop depth.
-		if n.Left.Op == ONAME {
-			switch n.Left.Class {
-			case PAUTO:
-				nE := e.nodeEscState(n)
-				leftE := e.nodeEscState(n.Left)
-				if leftE.Loopdepth != 0 {
-					nE.Loopdepth = leftE.Loopdepth
-				}
-
-			// PPARAM is loop depth 1 always.
-			// PPARAMOUT is loop depth 0 for writes
-			// but considered loop depth 1 for address-of,
-			// so that writing the address of one result
-			// to another (or the same) result makes the
-			// first result move to the heap.
-			case PPARAM, PPARAMOUT:
-				nE := e.nodeEscState(n)
-				nE.Loopdepth = 1
-			}
-		}
-	}
-
-	lineno = lno
-}
-
-// escassignWhyWhere bundles a common case of
-// escassign(e, dst, src, e.stepAssignWhere(dst, src, reason, where))
-func (e *EscState) escassignWhyWhere(dst, src *Node, reason string, where *Node) {
-	var step *EscStep
-	if Debug['m'] != 0 {
-		step = e.stepAssignWhere(dst, src, reason, where)
-	}
-	e.escassign(dst, src, step)
-}
-
-// escassignSinkWhy bundles a common case of
-// escassign(e, &e.theSink, src, e.stepAssign(nil, dst, src, reason))
-func (e *EscState) escassignSinkWhy(dst, src *Node, reason string) {
-	var step *EscStep
-	if Debug['m'] != 0 {
-		step = e.stepAssign(nil, dst, src, reason)
-	}
-	e.escassign(&e.theSink, src, step)
-}
-
-// escassignSinkWhyWhere is escassignSinkWhy but includes a call site
-// for accurate location reporting.
-func (e *EscState) escassignSinkWhyWhere(dst, src *Node, reason string, call *Node) {
-	var step *EscStep
-	if Debug['m'] != 0 {
-		step = e.stepAssignWhere(dst, src, reason, call)
-	}
-	e.escassign(&e.theSink, src, step)
-}
-
-// Assert that expr somehow gets assigned to dst, if non-nil. For
-// dst==nil, any name node expr still must be marked as being
-// evaluated in curfn. For expr==nil, dst must still be examined for
-// evaluations inside it (e.g. *f(x) = y).
-func (e *EscState) escassign(dst, src *Node, step *EscStep) {
-	if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
-		return
-	}
-
-	if Debug['m'] > 2 {
-		fmt.Printf("%v:[%d] %v escassign: %S(%0j)[%v] = %S(%0j)[%v]\n",
-			linestr(lineno), e.loopdepth, funcSym(Curfn),
-			dst, dst, dst.Op,
-			src, src, src.Op)
-	}
-
-	setlineno(dst)
-
-	originalDst := dst
-	dstwhy := "assigned"
-
-	// Analyze lhs of assignment.
-	// Replace dst with &e.theSink if we can't track it.
-	switch dst.Op {
-	default:
-		Dump("dst", dst)
-		Fatalf("escassign: unexpected dst")
-
-	case OARRAYLIT,
-		OSLICELIT,
-		OCLOSURE,
-		OCONV,
-		OCONVIFACE,
-		OCONVNOP,
-		OMAPLIT,
-		OSTRUCTLIT,
-		OPTRLIT,
-		ODDDARG,
-		OCALLPART:
-
-	case ONAME:
-		if dst.Class == PEXTERN {
-			dstwhy = "assigned to top level variable"
-			dst = &e.theSink
-		}
-
-	case ODOT: // treat "dst.x = src" as "dst = src"
-		e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "dot-equals"))
-		return
-
-	case OINDEX:
-		if dst.Left.Type.IsArray() {
-			e.escassign(dst.Left, src, e.stepAssign(step, originalDst, src, "array-element-equals"))
-			return
-		}
-
-		dstwhy = "slice-element-equals"
-		dst = &e.theSink // lose track of dereference
-
-	case OIND:
-		dstwhy = "star-equals"
-		dst = &e.theSink // lose track of dereference
-
-	case ODOTPTR:
-		dstwhy = "star-dot-equals"
-		dst = &e.theSink // lose track of dereference
-
-		// lose track of key and value
-	case OINDEXMAP:
-		e.escassign(&e.theSink, dst.Right, e.stepAssign(nil, originalDst, src, "key of map put"))
-		dstwhy = "value of map put"
-		dst = &e.theSink
-	}
-
-	lno := setlineno(src)
-	e.pdepth++
-
-	switch src.Op {
-	case OADDR, // dst = &x
-		OIND,    // dst = *x
-		ODOTPTR, // dst = (*x).f
-		ONAME,
-		ODDDARG,
-		OPTRLIT,
-		OARRAYLIT,
-		OSLICELIT,
-		OMAPLIT,
-		OSTRUCTLIT,
-		OMAKECHAN,
-		OMAKEMAP,
-		OMAKESLICE,
-		OARRAYRUNESTR,
-		OARRAYBYTESTR,
-		OSTRARRAYRUNE,
-		OSTRARRAYBYTE,
-		OADDSTR,
-		ONEW,
-		OCALLPART,
-		ORUNESTR,
-		OCONVIFACE:
-		e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))
-
-	case OCLOSURE:
-		// OCLOSURE is lowered to OPTRLIT,
-		// insert OADDR to account for the additional indirection.
-		a := nod(OADDR, src, nil)
-		a.Lineno = src.Lineno
-		e.nodeEscState(a).Loopdepth = e.nodeEscState(src).Loopdepth
-		a.Type = ptrto(src.Type)
-		e.escflows(dst, a, e.stepAssign(nil, originalDst, src, dstwhy))
-
-	// Flowing multiple returns to a single dst happens when
-	// analyzing "go f(g())": here g() flows to sink (issue 4529).
-	case OCALLMETH, OCALLFUNC, OCALLINTER:
-		for _, n := range e.nodeEscState(src).Retval.Slice() {
-			e.escflows(dst, n, e.stepAssign(nil, originalDst, n, dstwhy))
-		}
-
-		// A non-pointer escaping from a struct does not concern us.
-	case ODOT:
-		if src.Type != nil && !haspointers(src.Type) {
-			break
-		}
-		fallthrough
-
-		// Conversions, field access, slice all preserve the input value.
-	case OCONV,
-		OCONVNOP,
-		ODOTMETH,
-		// treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
-		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
-		OSLICE,
-		OSLICE3,
-		OSLICEARR,
-		OSLICE3ARR,
-		OSLICESTR:
-		// Conversions, field access, slice all preserve the input value.
-		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
-
-	case ODOTTYPE,
-		ODOTTYPE2:
-		if src.Type != nil && !haspointers(src.Type) {
-			break
-		}
-		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
-
-	case OAPPEND:
-		// Append returns first argument.
-		// Subsequent arguments are already leaked because they are operands to append.
-		e.escassign(dst, src.List.First(), e.stepAssign(step, dst, src.List.First(), dstwhy))
-
-	case OINDEX:
-		// Index of array preserves input value.
-		if src.Left.Type.IsArray() {
-			e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
-		} else {
-			e.escflows(dst, src, e.stepAssign(step, originalDst, src, dstwhy))
-		}
-
-	// Might be pointer arithmetic, in which case
-	// the operands flow into the result.
-	// TODO(rsc): Decide what the story is here. This is unsettling.
-	case OADD,
-		OSUB,
-		OOR,
-		OXOR,
-		OMUL,
-		ODIV,
-		OMOD,
-		OLSH,
-		ORSH,
-		OAND,
-		OANDNOT,
-		OPLUS,
-		OMINUS,
-		OCOM:
-		e.escassign(dst, src.Left, e.stepAssign(step, originalDst, src, dstwhy))
-
-		e.escassign(dst, src.Right, e.stepAssign(step, originalDst, src, dstwhy))
-	}
-
-	e.pdepth--
-	lineno = lno
-}
-
-// The common case for an escape tag is 16 bits of the form 000000000xxxEEEE,
-// where the most common xxx encodings of in-to-out pointer flow are
-// 000, 001, 010, and 011, and EEEE is the computed Esc bits.
-// Note that the width of xxx depends on the value of the constant
-// bitsPerOutputInTag -- expect 2 or 3, so in practice the
-// tag cache array is 64 or 128 entries long. Some entries will
-// never be populated.
-var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string
-
-// mktag returns the string representation for an escape analysis tag.
-func mktag(mask int) string {
-	switch mask & EscMask {
-	case EscNone, EscReturn:
-	default:
-		Fatalf("escape mktag")
-	}
-
-	if mask < len(tags) && tags[mask] != "" {
-		return tags[mask]
-	}
-
-	s := fmt.Sprintf("esc:0x%x", mask)
-	if mask < len(tags) {
-		tags[mask] = s
-	}
-	return s
-}
-
-// parsetag decodes an escape analysis tag and returns the esc value.
-func parsetag(note string) uint16 {
-	if !strings.HasPrefix(note, "esc:") {
-		return EscUnknown
-	}
-	n, _ := strconv.ParseInt(note[4:], 0, 0)
-	em := uint16(n)
-	if em == 0 {
-		return EscNone
-	}
-	return em
-}
-
-// describeEscape returns a string describing the escape tag.
-// The result is either one of {EscUnknown, EscNone, EscHeap}, none of which carries further annotation,
-// or a description of parameter flow, which takes the form of an optional "contentToHeap"
-// indicating that the content of this parameter is leaked to the heap, followed by a sequence
-// of level encodings separated by spaces, one for each output, where _ means no flow,
-// = means direct flow, and N asterisks (*) encode content flow (obtained by N indirections).
-// For example, "contentToHeap _ =" means that a parameter's content (one or more dereferences)
-// escapes to the heap, the parameter does not leak to the first output, but does leak directly
-// to the second output (and if there are more than two outputs, there is no flow to those).
-func describeEscape(em uint16) string {
-	var s string
-	if em&EscMask == EscUnknown {
-		s = "EscUnknown"
-	}
-	if em&EscMask == EscNone {
-		s = "EscNone"
-	}
-	if em&EscMask == EscHeap {
-		s = "EscHeap"
-	}
-	if em&EscMask == EscReturn {
-		s = "EscReturn"
-	}
-	if em&EscContentEscapes != 0 {
-		if s != "" {
-			s += " "
-		}
-		s += "contentToHeap"
-	}
-	for em >>= EscReturnBits; em != 0; em = em >> bitsPerOutputInTag {
-		// See encoding description above
-		if s != "" {
-			s += " "
-		}
-		switch embits := em & bitsMaskForTag; embits {
-		case 0:
-			s += "_"
-		case 1:
-			s += "="
-		default:
-			for i := uint16(0); i < embits-1; i++ {
-				s += "*"
-			}
-		}
-
-	}
-	return s
-}
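As a worked example of the encoding that mktag, parsetag, and describeEscape operate on, the sketch below builds one mask by hand. The concrete constant values are assumptions chosen for illustration (the compiler defines the real constants elsewhere in this package); only the bit layout is taken from the code above.

	package main

	import "fmt"

	// Assumed values for the constants referenced above.
	const (
		escReturn          = 2      // low Esc bits: reachable from a result
		escContentEscapes  = 1 << 3 // content (after a dereference) escapes to heap
		escReturnBits      = 4      // per-result fields start above this bit
		bitsPerOutputInTag = 3      // assumed width of each per-result field
	)

	func main() {
		// No flow to result 1 (field value 0), direct level-0 flow to
		// result 2 (field value 0+1 = 1).
		mask := escReturn | escContentEscapes |
			0<<escReturnBits | 1<<(escReturnBits+bitsPerOutputInTag)
		fmt.Printf("esc:%#x\n", mask) // esc:0x8a, described as "EscReturn contentToHeap _ ="
	}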
-
-// escassignfromtag models the input-to-output assignment flow of one of a function
-// calls arguments, where the flow is encoded in "note".
-func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) uint16 {
-	em := parsetag(note)
-	if src.Op == OLITERAL {
-		return em
-	}
-
-	if Debug['m'] > 3 {
-		fmt.Printf("%v::assignfromtag:: src=%S, em=%s\n",
-			linestr(lineno), src, describeEscape(em))
-	}
-
-	if em == EscUnknown {
-		e.escassignSinkWhyWhere(src, src, "passed to call[argument escapes]", call)
-		return em
-	}
-
-	if em == EscNone {
-		return em
-	}
-
-	// If content inside parameter (reached via indirection)
-	// escapes to heap, mark as such.
-	if em&EscContentEscapes != 0 {
-		e.escassign(&e.theSink, e.addDereference(src), e.stepAssignWhere(src, src, "passed to call[argument content escapes]", call))
-	}
-
-	em0 := em
-	dstsi := 0
-	for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em = em >> bitsPerOutputInTag {
-		// Prefer the lowest-level path to the reference (for escape purposes).
-		// Two-bit encoding (for example; 1, 3, and 4 bits are other options):
-		//  01 = 0-level
-		//  10 = 1-level (content escapes)
-		//  11 = 2-level (content of content escapes)
-		embits := em & bitsMaskForTag
-		if embits > 0 {
-			n := src
-			for i := uint16(0); i < embits-1; i++ {
-				n = e.addDereference(n) // encode level>0 as indirections
-			}
-			e.escassign(dsts.Index(dstsi), n, e.stepAssignWhere(dsts.Index(dstsi), src, "passed-to-and-returned-from-call", call))
-		}
-		dstsi++
-	}
-	// If there are too many outputs to fit in the tag,
-	// that is handled at the encoding end as EscHeap,
-	// so there is no need to check here.
-
-	if em != 0 && dstsi >= dsts.Len() {
-		Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
-	}
-	return em0
-}
-
-func (e *EscState) escassignDereference(dst *Node, src *Node, step *EscStep) {
-	if src.Op == OLITERAL {
-		return
-	}
-	e.escassign(dst, e.addDereference(src), step)
-}
-
-// addDereference constructs a suitable OIND node applied to n.
-// Because this is for purposes of escape accounting, not execution,
-// some semantically dubious node combinations are (currently) possible.
-func (e *EscState) addDereference(n *Node) *Node {
-	ind := nod(OIND, n, nil)
-	e.nodeEscState(ind).Loopdepth = e.nodeEscState(n).Loopdepth
-	ind.Lineno = n.Lineno
-	t := n.Type
-	if t.IsKind(Tptr) {
-		// This should model our own sloppy use of OIND to encode
-		// decreasing levels of indirection; i.e., "indirecting" an array
-		// might yield the type of an element. To be enhanced...
-		t = t.Elem()
-	}
-	ind.Type = t
-	return ind
-}
-
-// escNoteOutputParamFlow encodes maxEncodedLevel/.../1/0-level flow to the vargen'th parameter.
-// Levels greater than maxEncodedLevel are replaced with maxEncodedLevel.
-// If the encoding cannot describe the modified input level and output number, then EscHeap is returned.
-func escNoteOutputParamFlow(e uint16, vargen int32, level Level) uint16 {
-	// Flow+level is encoded in bitsPerOutputInTag bits per output.
-	// 0 = no flow, x = level+1 for 0 <= level <= maxEncodedLevel.
-	// 16 bits for Esc allows 6x2bits or 4x3bits or 3x4bits if additional information would be useful.
-	if level.int() <= 0 && level.guaranteedDereference() > 0 {
-		return escMax(e|EscContentEscapes, EscNone) // At least one deref, thus only content.
-	}
-	if level.int() < 0 {
-		return EscHeap
-	}
-	if level.int() > maxEncodedLevel {
-		// Cannot encode larger values than maxEncodedLevel.
-		level = levelFrom(maxEncodedLevel)
-	}
-	encoded := uint16(level.int() + 1)
-
-	shift := uint(bitsPerOutputInTag*(vargen-1) + EscReturnBits)
-	old := (e >> shift) & bitsMaskForTag
-	if old == 0 || encoded != 0 && encoded < old {
-		old = encoded
-	}
-
-	encodedFlow := old << shift
-	if (encodedFlow>>shift)&bitsMaskForTag != old {
-		// Encoding failure defaults to heap.
-		return EscHeap
-	}
-
-	return (e &^ (bitsMaskForTag << shift)) | encodedFlow
-}
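A minimal standalone sketch of the per-result update rule implemented above, under assumed field widths: each result gets a small field holding level+1 (0 meaning no flow), and a new flow only replaces an existing one if it is at a lower level. The helper name and constant values are hypothetical; the shape of the encoding is taken from escNoteOutputParamFlow.

	package main

	import "fmt"

	const (
		escReturnBits      = 4 // assumed
		bitsPerOutputInTag = 3 // assumed
		bitsMaskForTag     = 1<<bitsPerOutputInTag - 1
	)

	// noteFlow records flow from the parameter to result #vargen (1-based)
	// at the given dereference level, keeping the lower of old and new levels.
	func noteFlow(e uint16, vargen, level int) uint16 {
		shift := uint(bitsPerOutputInTag*(vargen-1) + escReturnBits)
		old := e >> shift & bitsMaskForTag
		encoded := uint16(level + 1)
		if old == 0 || encoded < old {
			old = encoded
		}
		return e&^(bitsMaskForTag<<shift) | old<<shift
	}

	func main() {
		var e uint16
		e = noteFlow(e, 1, 1)    // content flows to result 1 (one dereference)
		e = noteFlow(e, 1, 0)    // a direct flow is lower-level, so it wins
		e = noteFlow(e, 2, 1)    // content flows to result 2
		fmt.Printf("%010b\n", e) // 0100010000 with the assumed widths
	}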
-
-func (e *EscState) initEscRetval(call *Node, fntype *Type) {
-	cE := e.nodeEscState(call)
-	cE.Retval.Set(nil) // Suspect this is not nil for indirect calls.
-	for i, f := range fntype.Results().Fields().Slice() {
-		ret := nod(ONAME, nil, nil)
-		buf := fmt.Sprintf(".out%d", i)
-		ret.Sym = lookup(buf)
-		ret.Type = f.Type
-		ret.Class = PAUTO
-		ret.Name.Curfn = Curfn
-		e.nodeEscState(ret).Loopdepth = e.loopdepth
-		ret.Used = true
-		ret.Lineno = call.Lineno
-		cE.Retval.Append(ret)
-	}
-}
-
-// esccall is a bit messier than fortunate; it is pulled out of esc's big
-// switch for clarity. We either have the param nodes, which may be
-// connected to other things through flows, or we have the parameter type
-// nodes, which may be marked "noescape". Navigating the AST is slightly
-// different for methods vs. plain functions and for imported vs.
-// this-package functions.
-func (e *EscState) esccall(call *Node, parent *Node) {
-	var fntype *Type
-	var indirect bool
-	var fn *Node
-	switch call.Op {
-	default:
-		Fatalf("esccall")
-
-	case OCALLFUNC:
-		fn = call.Left
-		fntype = fn.Type
-		indirect = fn.Op != ONAME || fn.Class != PFUNC
-
-	case OCALLMETH:
-		fn = call.Left.Sym.Def
-		if fn != nil {
-			fntype = fn.Type
-		} else {
-			fntype = call.Left.Type
-		}
-
-	case OCALLINTER:
-		fntype = call.Left.Type
-		indirect = true
-	}
-
-	argList := call.List
-	if argList.Len() == 1 {
-		arg := argList.First()
-		if arg.Type.IsFuncArgStruct() { // f(g())
-			argList = e.nodeEscState(arg).Retval
-		}
-	}
-
-	args := argList.Slice()
-
-	if indirect {
-		// We know nothing!
-		// Leak all the parameters
-		for _, arg := range args {
-			e.escassignSinkWhy(call, arg, "parameter to indirect call")
-			if Debug['m'] > 3 {
-				fmt.Printf("%v::esccall:: indirect call <- %S, untracked\n", linestr(lineno), arg)
-			}
-		}
-		// Set up bogus outputs
-		e.initEscRetval(call, fntype)
-		// If there is a receiver, it also leaks to heap.
-		if call.Op != OCALLFUNC {
-			rf := fntype.Recv()
-			r := call.Left.Left
-			if haspointers(rf.Type) {
-				e.escassignSinkWhy(call, r, "receiver in indirect call")
-			}
-		} else { // indirect and OCALLFUNC = could be captured variables, too. (#14409)
-			rets := e.nodeEscState(call).Retval.Slice()
-			for _, ret := range rets {
-				e.escassignDereference(ret, fn, e.stepAssignWhere(ret, fn, "captured by called closure", call))
-			}
-		}
-		return
-	}
-
-	cE := e.nodeEscState(call)
-	if fn != nil && fn.Op == ONAME && fn.Class == PFUNC &&
-		fn.Name.Defn != nil && fn.Name.Defn.Nbody.Len() != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged {
-		if Debug['m'] > 3 {
-			fmt.Printf("%v::esccall:: %S in recursive group\n", linestr(lineno), call)
-		}
-
-		// function in same mutually recursive group. Incorporate into flow graph.
-		//		print("esc local fn: %N\n", fn.Func.Ntype);
-		if fn.Name.Defn.Esc == EscFuncUnknown || cE.Retval.Len() != 0 {
-			Fatalf("graph inconsistency")
-		}
-
-		sawRcvr := false
-		for _, n := range fn.Name.Defn.Func.Dcl {
-			switch n.Class {
-			case PPARAM:
-				if call.Op != OCALLFUNC && !sawRcvr {
-					e.escassignWhyWhere(n, call.Left.Left, "call receiver", call)
-					sawRcvr = true
-					continue
-				}
-				if len(args) == 0 {
-					continue
-				}
-				arg := args[0]
-				if n.Isddd && !call.Isddd {
-					// Introduce ODDDARG node to represent ... allocation.
-					arg = nod(ODDDARG, nil, nil)
-					arr := typArray(n.Type.Elem(), int64(len(args)))
-					arg.Type = ptrto(arr) // make pointer so it will be tracked
-					arg.Lineno = call.Lineno
-					e.track(arg)
-					call.Right = arg
-				}
-				e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help.
-				if arg != args[0] {
-					// "..." arguments are untracked
-					for _, a := range args {
-						if Debug['m'] > 3 {
-							fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a)
-						}
-						e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call)
-					}
-					// No more PPARAM processing, but keep
-					// going for PPARAMOUT.
-					args = nil
-					continue
-				}
-				args = args[1:]
-
-			case PPARAMOUT:
-				cE.Retval.Append(n)
-			}
-		}
-
-		return
-	}
-
-	// Imported or completely analyzed function. Use the escape tags.
-	if cE.Retval.Len() != 0 {
-		Fatalf("esc already decorated call %+v\n", call)
-	}
-
-	if Debug['m'] > 3 {
-		fmt.Printf("%v::esccall:: %S not recursive\n", linestr(lineno), call)
-	}
-
-	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
-	e.initEscRetval(call, fntype)
-
-	//	print("esc analyzed fn: %#N (%+T) returning (%+H)\n", fn, fntype, e.nodeEscState(call).Retval);
-
-	// Receiver.
-	if call.Op != OCALLFUNC {
-		rf := fntype.Recv()
-		r := call.Left.Left
-		if haspointers(rf.Type) {
-			e.escassignfromtag(rf.Note, cE.Retval, r, call)
-		}
-	}
-
-	var arg *Node
-	var note string
-	param, it := iterFields(fntype.Params())
-	i := 0
-	for ; i < len(args); i++ {
-		arg = args[i]
-		note = param.Note
-		if param.Isddd && !call.Isddd {
-			// Introduce ODDDARG node to represent ... allocation.
-			arg = nod(ODDDARG, nil, nil)
-			arg.Lineno = call.Lineno
-			arr := typArray(param.Type.Elem(), int64(len(args)-i))
-			arg.Type = ptrto(arr) // make pointer so it will be tracked
-			e.track(arg)
-			call.Right = arg
-		}
-
-		if haspointers(param.Type) {
-			if e.escassignfromtag(note, cE.Retval, arg, call)&EscMask == EscNone && parent.Op != ODEFER && parent.Op != OPROC {
-				a := arg
-				for a.Op == OCONVNOP {
-					a = a.Left
-				}
-				switch a.Op {
-				// The callee has already been analyzed, so its arguments have esc tags.
-				// The argument is marked as not escaping at all.
-				// Record that fact so that any temporary used for
-				// synthesizing this expression can be reclaimed when
-				// the function returns.
-				// This 'noescape' is even stronger than the usual esc == EscNone.
-				// arg.Esc == EscNone means that arg does not escape the current function.
-				// arg.Noescape = true here means that arg does not escape this statement
-				// in the current function.
-				case OCALLPART,
-					OCLOSURE,
-					ODDDARG,
-					OARRAYLIT,
-					OSLICELIT,
-					OPTRLIT,
-					OSTRUCTLIT:
-					a.Noescape = true
-				}
-			}
-		}
-
-		if arg != args[i] {
-			// This occurs when the function parameter is variadic (Isddd)
-			// but the call is not.
-		}
-
-		if note == uintptrEscapesTag {
-			e.escassignSinkWhy(arg, arg, "escaping uintptr")
-		}
-
-		param = it.Next()
-	}
-
-	// Store arguments into slice for ... arg.
-	for ; i < len(args); i++ {
-		if Debug['m'] > 3 {
-			fmt.Printf("%v::esccall:: ... <- %S\n", linestr(lineno), args[i])
-		}
-		if note == uintptrEscapesTag {
-			e.escassignSinkWhyWhere(arg, args[i], "arg to uintptrescapes ...", call)
-		} else {
-			e.escassignWhyWhere(arg, args[i], "arg to ...", call)
-		}
-	}
-}
-
-// escflows records the link src->dst in dst, throwing out some quick wins,
-// and also ensuring that dst is noted as a flow destination.
-func (e *EscState) escflows(dst, src *Node, why *EscStep) {
-	if dst == nil || src == nil || dst == src {
-		return
-	}
-
-	// Don't bother building a graph for scalars.
-	if src.Type != nil && !haspointers(src.Type) {
-		return
-	}
-
-	if Debug['m'] > 3 {
-		fmt.Printf("%v::flows:: %S <- %S\n", linestr(lineno), dst, src)
-	}
-
-	dstE := e.nodeEscState(dst)
-	if len(dstE.Flowsrc) == 0 {
-		e.dsts = append(e.dsts, dst)
-		e.dstcount++
-	}
-
-	e.edgecount++
-
-	if why == nil {
-		dstE.Flowsrc = append(dstE.Flowsrc, EscStep{src: src})
-	} else {
-		starwhy := *why
-		starwhy.src = src // TODO: need to reconcile this w/ needs of explanations.
-		dstE.Flowsrc = append(dstE.Flowsrc, starwhy)
-	}
-}
-
-// Whenever we hit a reference node, the level goes up by one, and whenever
-// we hit an OADDR, the level goes down by one. As long as we're at a level > 0,
-// finding an OADDR just means we're following the upstream of a dereference,
-// so this address doesn't leak (yet).
-// If level == 0, the /value/ of this node can reach the root of this flood,
-// so if this node is an OADDR, its argument should be marked as escaping iff
-// its curfn/e.loopdepth differ from the flood's root.
-// Once an object has been moved to the heap, all of its upstream should be
-// considered to escape to the global scope.
-func (e *EscState) escflood(dst *Node) {
-	switch dst.Op {
-	case ONAME, OCLOSURE:
-	default:
-		return
-	}
-
-	dstE := e.nodeEscState(dst)
-	if Debug['m'] > 2 {
-		fmt.Printf("\nescflood:%d: dst %S scope:%v[%d]\n", e.walkgen, dst, e.curfnSym(dst), dstE.Loopdepth)
-	}
-
-	for i := range dstE.Flowsrc {
-		e.walkgen++
-		s := &dstE.Flowsrc[i]
-		s.parent = nil
-		e.escwalk(levelFrom(0), dst, s.src, s)
-	}
-}
-
-// funcOutputAndInput reports whether dst and src correspond to output and input parameters of the same function.
-func funcOutputAndInput(dst, src *Node) bool {
-	// Note if dst is marked as escaping, then "returned" is too weak.
-	return dst.Op == ONAME && dst.Class == PPARAMOUT &&
-		src.Op == ONAME && src.Class == PPARAM && src.Name.Curfn == dst.Name.Curfn
-}
-
-func (es *EscStep) describe(src *Node) {
-	if Debug['m'] < 2 {
-		return
-	}
-	step0 := es
-	for step := step0; step != nil && !step.busy; step = step.parent {
-		// TODO: We get cycles. Trigger is i = &i (where var i interface{})
-		step.busy = true
-		// The trail is a little odd because of how the
-		// graph is constructed.  The link to the current
-		// Node is parent.src unless parent is nil in which
-		// case it is step.dst.
-		nextDest := step.parent
-		dst := step.dst
-		where := step.where
-		if nextDest != nil {
-			dst = nextDest.src
-		}
-		if where == nil {
-			where = dst
-		}
-		Warnl(src.Lineno, "\tfrom %v (%s) at %s", dst, step.why, where.Line())
-	}
-	for step := step0; step != nil && step.busy; step = step.parent {
-		step.busy = false
-	}
-}
-
-const NOTALOOPDEPTH = -1
-
-func (e *EscState) escwalk(level Level, dst *Node, src *Node, step *EscStep) {
-	e.escwalkBody(level, dst, src, step, NOTALOOPDEPTH)
-}
-
-func (e *EscState) escwalkBody(level Level, dst *Node, src *Node, step *EscStep, extraloopdepth int32) {
-	if src.Op == OLITERAL {
-		return
-	}
-	srcE := e.nodeEscState(src)
-	if srcE.Walkgen == e.walkgen {
-		// Esclevels are vectors, do not compare as integers,
-		// and must use "min" of old and new to guarantee
-		// convergence.
-		level = level.min(srcE.Level)
-		if level == srcE.Level {
-			// Have we been here already with an extraloopdepth,
-			// or is the extraloopdepth provided no improvement on
-			// what's already been seen?
-			if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Loopdepth >= extraloopdepth {
-				return
-			}
-			srcE.Maxextraloopdepth = extraloopdepth
-		}
-	} else { // srcE.Walkgen < e.walkgen -- first time, reset this.
-		srcE.Maxextraloopdepth = NOTALOOPDEPTH
-	}
-
-	srcE.Walkgen = e.walkgen
-	srcE.Level = level
-	modSrcLoopdepth := srcE.Loopdepth
-
-	if extraloopdepth > modSrcLoopdepth {
-		modSrcLoopdepth = extraloopdepth
-	}
-
-	if Debug['m'] > 2 {
-		fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %S(%0j) scope:%v[%d] extraloopdepth=%v\n",
-			level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", src.Op, src, src, e.curfnSym(src), srcE.Loopdepth, extraloopdepth)
-	}
-
-	e.pdepth++
-
-	// Input parameter flowing to output parameter?
-	var leaks bool
-	var osrcesc uint16 // used to prevent duplicate error messages
-
-	dstE := e.nodeEscState(dst)
-	if funcOutputAndInput(dst, src) && src.Esc&EscMask < EscHeap && dst.Esc != EscHeap {
-		// This case handles:
-		// 1. return in
-		// 2. return &in
-		// 3. tmp := in; return &tmp
-		// 4. return *in
-		if Debug['m'] != 0 {
-			if Debug['m'] <= 2 {
-				Warnl(src.Lineno, "leaking param: %S to result %v level=%v", src, dst.Sym, level.int())
-				step.describe(src)
-			} else {
-				Warnl(src.Lineno, "leaking param: %S to result %v level=%v", src, dst.Sym, level)
-			}
-		}
-		if src.Esc&EscMask != EscReturn {
-			src.Esc = EscReturn | src.Esc&EscContentEscapes
-		}
-		src.Esc = escNoteOutputParamFlow(src.Esc, dst.Name.Vargen, level)
-		goto recurse
-	}
-
-	// If parameter content escapes to heap, set EscContentEscapes
-	// Note minor confusion around escape from pointer-to-struct vs escape from struct
-	if dst.Esc == EscHeap &&
-		src.Op == ONAME && src.Class == PPARAM && src.Esc&EscMask < EscHeap &&
-		level.int() > 0 {
-		src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
-		if Debug['m'] != 0 {
-			Warnl(src.Lineno, "mark escaped content: %S", src)
-			step.describe(src)
-		}
-	}
-
-	leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Loopdepth < modSrcLoopdepth
-	leaks = leaks || level.int() <= 0 && dst.Esc&EscMask == EscHeap
-
-	osrcesc = src.Esc
-	switch src.Op {
-	case ONAME:
-		if src.Class == PPARAM && (leaks || dstE.Loopdepth < 0) && src.Esc&EscMask < EscHeap {
-			if level.guaranteedDereference() > 0 {
-				src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
-				if Debug['m'] != 0 {
-					if Debug['m'] <= 2 {
-						if osrcesc != src.Esc {
-							Warnl(src.Lineno, "leaking param content: %S", src)
-							step.describe(src)
-						}
-					} else {
-						Warnl(src.Lineno, "leaking param content: %S level=%v dst.eld=%v src.eld=%v dst=%S",
-							src, level, dstE.Loopdepth, modSrcLoopdepth, dst)
-					}
-				}
-			} else {
-				src.Esc = EscHeap
-				if Debug['m'] != 0 {
-					if Debug['m'] <= 2 {
-						Warnl(src.Lineno, "leaking param: %S", src)
-						step.describe(src)
-					} else {
-						Warnl(src.Lineno, "leaking param: %S level=%v dst.eld=%v src.eld=%v dst=%S",
-							src, level, dstE.Loopdepth, modSrcLoopdepth, dst)
-					}
-				}
-			}
-		}
-
-		// Treat a captured closure variable as equivalent to the
-		// original variable.
-		if src.isClosureVar() {
-			if leaks && Debug['m'] != 0 {
-				Warnl(src.Lineno, "leaking closure reference %S", src)
-				step.describe(src)
-			}
-			e.escwalk(level, dst, src.Name.Defn, e.stepWalk(dst, src.Name.Defn, "closure-var", step))
-		}
-
-	case OPTRLIT, OADDR:
-		why := "pointer literal"
-		if src.Op == OADDR {
-			why = "address-of"
-		}
-		if leaks {
-			src.Esc = EscHeap
-			if Debug['m'] != 0 && osrcesc != src.Esc {
-				p := src
-				if p.Left.Op == OCLOSURE {
-					p = p.Left // merely to satisfy error messages in tests
-				}
-				if Debug['m'] > 2 {
-					Warnl(src.Lineno, "%S escapes to heap, level=%v, dst=%v dst.eld=%v, src.eld=%v",
-						p, level, dst, dstE.Loopdepth, modSrcLoopdepth)
-				} else {
-					Warnl(src.Lineno, "%S escapes to heap", p)
-					step.describe(src)
-				}
-			}
-			addrescapes(src.Left)
-			e.escwalkBody(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step), modSrcLoopdepth)
-			extraloopdepth = modSrcLoopdepth // passes to recursive case, seems likely a no-op
-		} else {
-			e.escwalk(level.dec(), dst, src.Left, e.stepWalk(dst, src.Left, why, step))
-		}
-
-	case OAPPEND:
-		e.escwalk(level, dst, src.List.First(), e.stepWalk(dst, src.List.First(), "append-first-arg", step))
-
-	case ODDDARG:
-		if leaks {
-			src.Esc = EscHeap
-			if Debug['m'] != 0 && osrcesc != src.Esc {
-				Warnl(src.Lineno, "%S escapes to heap", src)
-				step.describe(src)
-			}
-			extraloopdepth = modSrcLoopdepth
-		}
-		// similar to a slice arraylit and its args.
-		level = level.dec()
-
-	case OSLICELIT:
-		for _, n1 := range src.List.Slice() {
-			if n1.Op == OKEY {
-				n1 = n1.Right
-			}
-			e.escwalk(level.dec(), dst, n1, e.stepWalk(dst, n1, "slice-literal-element", step))
-		}
-
-		fallthrough
-
-	case OMAKECHAN,
-		OMAKEMAP,
-		OMAKESLICE,
-		OARRAYRUNESTR,
-		OARRAYBYTESTR,
-		OSTRARRAYRUNE,
-		OSTRARRAYBYTE,
-		OADDSTR,
-		OMAPLIT,
-		ONEW,
-		OCLOSURE,
-		OCALLPART,
-		ORUNESTR,
-		OCONVIFACE:
-		if leaks {
-			src.Esc = EscHeap
-			if Debug['m'] != 0 && osrcesc != src.Esc {
-				Warnl(src.Lineno, "%S escapes to heap", src)
-				step.describe(src)
-			}
-			extraloopdepth = modSrcLoopdepth
-		}
-
-	case ODOT,
-		ODOTTYPE:
-		e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "dot", step))
-
-	case
-		OSLICE,
-		OSLICEARR,
-		OSLICE3,
-		OSLICE3ARR,
-		OSLICESTR:
-		e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "slice", step))
-
-	case OINDEX:
-		if src.Left.Type.IsArray() {
-			e.escwalk(level, dst, src.Left, e.stepWalk(dst, src.Left, "fixed-array-index-of", step))
-			break
-		}
-		fallthrough
-
-	case ODOTPTR:
-		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "dot of pointer", step))
-	case OINDEXMAP:
-		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "map index", step))
-	case OIND:
-		e.escwalk(level.inc(), dst, src.Left, e.stepWalk(dst, src.Left, "indirection", step))
-
-	// In this case a link went directly to a call, but should really go
-	// to the dummy .outN outputs that were created for the call and that
-	// themselves link to the inputs with levels adjusted.
-	// See e.g. #10466.
-	// This can only happen with functions returning a single result.
-	case OCALLMETH, OCALLFUNC, OCALLINTER:
-		if srcE.Retval.Len() != 0 {
-			if Debug['m'] > 2 {
-				fmt.Printf("%v:[%d] dst %S escwalk replace src: %S with %S\n",
-					linestr(lineno), e.loopdepth,
-					dst, src, srcE.Retval.First())
-			}
-			src = srcE.Retval.First()
-			srcE = e.nodeEscState(src)
-		}
-	}
-
-recurse:
-	level = level.copy()
-
-	for i := range srcE.Flowsrc {
-		s := &srcE.Flowsrc[i]
-		s.parent = step
-		e.escwalkBody(level, dst, s.src, s, extraloopdepth)
-		s.parent = nil
-	}
-
-	e.pdepth--
-}
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-// It is logically a constant, but using a var
-// lets us take the address below to get a *string.
-var unsafeUintptrTag = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const uintptrEscapesTag = "uintptr-escapes"
-
-func (e *EscState) esctag(fn *Node) {
-	fn.Esc = EscFuncTagged
-
-	name := func(s *Sym, narg int) string {
-		if s != nil {
-			return s.Name
-		}
-		return fmt.Sprintf("arg#%d", narg)
-	}
-
-	// External functions are assumed unsafe,
-	// unless //go:noescape is given before the declaration.
-	if fn.Nbody.Len() == 0 {
-		if fn.Noescape {
-			for _, f := range fn.Type.Params().Fields().Slice() {
-				if haspointers(f.Type) {
-					f.Note = mktag(EscNone)
-				}
-			}
-		}
-
-		// Assume that uintptr arguments must be held live across the call.
-		// This is most important for syscall.Syscall.
-		// See golang.org/issue/13372.
-		// This really doesn't have much to do with escape analysis per se,
-		// but we are reusing the ability to annotate an individual function
-		// argument and pass those annotations along to importing code.
-		narg := 0
-		for _, f := range fn.Type.Params().Fields().Slice() {
-			narg++
-			if f.Type.Etype == TUINTPTR {
-				if Debug['m'] != 0 {
-					Warnl(fn.Lineno, "%v assuming %v is unsafe uintptr", funcSym(fn), name(f.Sym, narg))
-				}
-				f.Note = unsafeUintptrTag
-			}
-		}
-
-		return
-	}
-
-	if fn.Func.Pragma&UintptrEscapes != 0 {
-		narg := 0
-		for _, f := range fn.Type.Params().Fields().Slice() {
-			narg++
-			if f.Type.Etype == TUINTPTR {
-				if Debug['m'] != 0 {
-					Warnl(fn.Lineno, "%v marking %v as escaping uintptr", funcSym(fn), name(f.Sym, narg))
-				}
-				f.Note = uintptrEscapesTag
-			}
-
-			if f.Isddd && f.Type.Elem().Etype == TUINTPTR {
-				// final argument is ...uintptr.
-				if Debug['m'] != 0 {
-					Warnl(fn.Lineno, "%v marking %v as escaping ...uintptr", funcSym(fn), name(f.Sym, narg))
-				}
-				f.Note = uintptrEscapesTag
-			}
-		}
-	}
-
-	for _, ln := range fn.Func.Dcl {
-		if ln.Op != ONAME {
-			continue
-		}
-
-		switch ln.Esc & EscMask {
-		case EscNone, // not touched by escflood
-			EscReturn:
-			if haspointers(ln.Type) { // don't bother tagging for scalars
-				if ln.Name.Param.Field.Note != uintptrEscapesTag {
-					ln.Name.Param.Field.Note = mktag(int(ln.Esc))
-				}
-			}
-
-		case EscHeap: // touched by escflood, moved to heap
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/export.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/export.go
deleted file mode 100644
index d330d4d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/export.go
+++ /dev/null
@@ -1,383 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/export.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/export.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bufio"
-	"bytes"
-	"bootstrap/cmd/internal/bio"
-	"fmt"
-	"unicode"
-	"unicode/utf8"
-)
-
-var (
-	Debug_export int // if set, print debugging information about export data
-	exportsize   int
-)
-
-func exportf(format string, args ...interface{}) {
-	n, _ := fmt.Fprintf(bout, format, args...)
-	exportsize += n
-	if Debug_export != 0 {
-		fmt.Printf(format, args...)
-	}
-}
-
-var asmlist []*Node
-
-// Mark n's symbol as exported
-func exportsym(n *Node) {
-	if n == nil || n.Sym == nil {
-		return
-	}
-	if n.Sym.Flags&(SymExport|SymPackage) != 0 {
-		if n.Sym.Flags&SymPackage != 0 {
-			yyerror("export/package mismatch: %v", n.Sym)
-		}
-		return
-	}
-
-	n.Sym.Flags |= SymExport
-	if Debug['E'] != 0 {
-		fmt.Printf("export symbol %v\n", n.Sym)
-	}
-
-	// Ensure original object is on exportlist before aliases.
-	if n.Sym.Flags&SymAlias != 0 {
-		exportlist = append(exportlist, n.Sym.Def)
-	}
-
-	exportlist = append(exportlist, n)
-}
-
-func exportname(s string) bool {
-	if r := s[0]; r < utf8.RuneSelf {
-		return 'A' <= r && r <= 'Z'
-	}
-	r, _ := utf8.DecodeRuneInString(s)
-	return unicode.IsUpper(r)
-}
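exportname implements the usual rule that an identifier is exported exactly when its first rune is upper case, with the ASCII branch avoiding a UTF-8 decode. A standalone copy, runnable on its own, behaves as follows (the sample identifiers are made up):

	package main

	import (
		"fmt"
		"unicode"
		"unicode/utf8"
	)

	// exportname is copied verbatim from the function above.
	func exportname(s string) bool {
		if r := s[0]; r < utf8.RuneSelf {
			return 'A' <= r && r <= 'Z'
		}
		r, _ := utf8.DecodeRuneInString(s)
		return unicode.IsUpper(r)
	}

	func main() {
		fmt.Println(exportname("Foo"), exportname("foo"), exportname("Äther"))
		// Output: true false true
	}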
-
-func initname(s string) bool {
-	return s == "init"
-}
-
-// exportedsym reports whether a symbol will be visible
-// to files that import our package.
-func exportedsym(sym *Sym) bool {
-	// Builtins are visible everywhere.
-	if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
-		return true
-	}
-
-	return sym.Pkg == localpkg && exportname(sym.Name)
-}
-
-func autoexport(n *Node, ctxt Class) {
-	if n == nil || n.Sym == nil {
-		return
-	}
-	if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
-		return
-	}
-	if n.Name.Param != nil && n.Name.Param.Ntype != nil && n.Name.Param.Ntype.Op == OTFUNC && n.Name.Param.Ntype.Left != nil { // method
-		return
-	}
-
-	if exportname(n.Sym.Name) || initname(n.Sym.Name) {
-		exportsym(n)
-	}
-	if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
-		n.Sym.Flags |= SymAsm
-		asmlist = append(asmlist, n)
-	}
-}
-
-// Look for anything we need for the inline body
-func reexportdeplist(ll Nodes) {
-	for _, n := range ll.Slice() {
-		reexportdep(n)
-	}
-}
-
-func reexportdep(n *Node) {
-	if n == nil {
-		return
-	}
-
-	//print("reexportdep %+hN\n", n);
-	switch n.Op {
-	case ONAME:
-		switch n.Class {
-		// methods will be printed along with their type
-		// nodes for T.Method expressions
-		case PFUNC:
-			if n.Left != nil && n.Left.Op == OTYPE {
-				break
-			}
-
-			// nodes for method calls.
-			if n.Type == nil || n.IsMethod() {
-				break
-			}
-			fallthrough
-
-		case PEXTERN:
-			if n.Sym != nil && !exportedsym(n.Sym) {
-				if Debug['E'] != 0 {
-					fmt.Printf("reexport name %v\n", n.Sym)
-				}
-				exportlist = append(exportlist, n)
-			}
-		}
-
-	// Local variables in the bodies need their type.
-	case ODCL:
-		t := n.Left.Type
-
-		if t != Types[t.Etype] && t != idealbool && t != idealstring {
-			if t.IsPtr() {
-				t = t.Elem()
-			}
-			if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
-				if Debug['E'] != 0 {
-					fmt.Printf("reexport type %v from declaration\n", t.Sym)
-				}
-				exportlist = append(exportlist, t.Sym.Def)
-			}
-		}
-
-	case OLITERAL:
-		t := n.Type
-		if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
-			if t.IsPtr() {
-				t = t.Elem()
-			}
-			if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
-				if Debug['E'] != 0 {
-					fmt.Printf("reexport literal type %v\n", t.Sym)
-				}
-				exportlist = append(exportlist, t.Sym.Def)
-			}
-		}
-		fallthrough
-
-	case OTYPE:
-		if n.Sym != nil && n.Sym.Def != nil && !exportedsym(n.Sym) {
-			if Debug['E'] != 0 {
-				fmt.Printf("reexport literal/type %v\n", n.Sym)
-			}
-			exportlist = append(exportlist, n)
-		}
-
-	// for operations that need a type when rendered, put the type on the export list.
-	case OCONV,
-		OCONVIFACE,
-		OCONVNOP,
-		ORUNESTR,
-		OARRAYBYTESTR,
-		OARRAYRUNESTR,
-		OSTRARRAYBYTE,
-		OSTRARRAYRUNE,
-		ODOTTYPE,
-		ODOTTYPE2,
-		OSTRUCTLIT,
-		OARRAYLIT,
-		OSLICELIT,
-		OPTRLIT,
-		OMAKEMAP,
-		OMAKESLICE,
-		OMAKECHAN:
-		t := n.Type
-
-		switch t.Etype {
-		case TARRAY, TCHAN, TPTR32, TPTR64, TSLICE:
-			if t.Sym == nil {
-				t = t.Elem()
-			}
-		}
-		if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
-			if Debug['E'] != 0 {
-				fmt.Printf("reexport type for expression %v\n", t.Sym)
-			}
-			exportlist = append(exportlist, t.Sym.Def)
-		}
-	}
-
-	reexportdep(n.Left)
-	reexportdep(n.Right)
-	reexportdeplist(n.List)
-	reexportdeplist(n.Rlist)
-	reexportdeplist(n.Ninit)
-	reexportdeplist(n.Nbody)
-}
-
-// methodbyname sorts method fields by symbol name.
-type methodbyname []*Field
-
-func (x methodbyname) Len() int           { return len(x) }
-func (x methodbyname) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-func (x methodbyname) Less(i, j int) bool { return x[i].Sym.Name < x[j].Sym.Name }
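methodbyname is the standard sort.Interface idiom. A self-contained sketch of the same pattern follows; the field type here is made up, since the compiler's *Field is internal to the package.

	package main

	import (
		"fmt"
		"sort"
	)

	type field struct{ name string }

	// byName mirrors the methodbyname pattern above for a made-up field type.
	type byName []*field

	func (x byName) Len() int           { return len(x) }
	func (x byName) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
	func (x byName) Less(i, j int) bool { return x[i].name < x[j].name }

	func main() {
		methods := []*field{{"Write"}, {"Close"}, {"Read"}}
		sort.Sort(byName(methods))
		for _, m := range methods {
			fmt.Println(m.name) // Close, Read, Write
		}
	}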
-
-func dumpexport() {
-	if buildid != "" {
-		exportf("build id %q\n", buildid)
-	}
-
-	size := 0 // size of export section without enclosing markers
-	// The linker also looks for the $$ marker - use char after $$ to distinguish format.
-	exportf("\n$$B\n") // indicate binary export format
-	if debugFormat {
-		// save a copy of the export data
-		var copy bytes.Buffer
-		bcopy := bufio.NewWriter(&copy)
-		size = export(bcopy, Debug_export != 0)
-		bcopy.Flush() // flushing to bytes.Buffer cannot fail
-		if n, err := bout.Write(copy.Bytes()); n != size || err != nil {
-			Fatalf("error writing export data: got %d bytes, want %d bytes, err = %v", n, size, err)
-		}
-		// export data must contain no '$' so that we can find the end by searching for "$$"
-		// TODO(gri) is this still needed?
-		if bytes.IndexByte(copy.Bytes(), '$') >= 0 {
-			Fatalf("export data contains $")
-		}
-
-		// verify that we can read the copied export data back in
-		// (use empty package map to avoid collisions)
-		savedPkgMap := pkgMap
-		savedPkgs := pkgs
-		pkgMap = make(map[string]*Pkg)
-		pkgs = nil
-		importpkg = mkpkg("")
-		Import(bufio.NewReader(&copy)) // must not die
-		importpkg = nil
-		pkgs = savedPkgs
-		pkgMap = savedPkgMap
-	} else {
-		size = export(bout.Writer, Debug_export != 0)
-	}
-	exportf("\n$$\n")
-
-	if Debug_export != 0 {
-		fmt.Printf("export data size = %d bytes\n", size)
-	}
-}
-
-// importsym declares symbol s as an imported object representable by op.
-func importsym(s *Sym, op Op) {
-	if s.Def != nil && s.Def.Op != op {
-		pkgstr := fmt.Sprintf("during import %q", importpkg.Path)
-		redeclare(s, pkgstr)
-	}
-
-	// mark the symbol so it is not reexported
-	if s.Def == nil {
-		if exportname(s.Name) || initname(s.Name) {
-			s.Flags |= SymExport
-		} else {
-			s.Flags |= SymPackage // package scope
-		}
-	}
-}
-
-// pkgtype returns the named type declared by symbol s.
-// If no such type has been declared yet, a forward declaration is returned.
-func pkgtype(s *Sym) *Type {
-	importsym(s, OTYPE)
-	if s.Def == nil || s.Def.Op != OTYPE {
-		t := typ(TFORW)
-		t.Sym = s
-		s.Def = typenod(t)
-		s.Def.Name = new(Name)
-	}
-
-	if s.Def.Type == nil {
-		yyerror("pkgtype %v", s)
-	}
-	return s.Def.Type
-}
-
-// importconst declares symbol s as an imported constant with type t and value n.
-func importconst(s *Sym, t *Type, n *Node) {
-	importsym(s, OLITERAL)
-	n = convlit(n, t)
-
-	if s.Def != nil { // TODO: check if already the same.
-		return
-	}
-
-	if n.Op != OLITERAL {
-		yyerror("expression must be a constant")
-		return
-	}
-
-	if n.Sym != nil {
-		n1 := *n
-		n = &n1
-	}
-
-	n.Orig = newname(s)
-	n.Sym = s
-	declare(n, PEXTERN)
-
-	if Debug['E'] != 0 {
-		fmt.Printf("import const %v\n", s)
-	}
-}
-
-// importvar declares symbol s as an imported variable with type t.
-func importvar(s *Sym, t *Type) {
-	importsym(s, ONAME)
-	if s.Def != nil && s.Def.Op == ONAME {
-		if eqtype(t, s.Def.Type) {
-			return
-		}
-		yyerror("inconsistent definition for var %v during import\n\t%v (in %q)\n\t%v (in %q)", s, s.Def.Type, s.Importdef.Path, t, importpkg.Path)
-	}
-
-	n := newname(s)
-	s.Importdef = importpkg
-	n.Type = t
-	declare(n, PEXTERN)
-
-	if Debug['E'] != 0 {
-		fmt.Printf("import var %v %L\n", s, t)
-	}
-}
-
-func dumpasmhdr() {
-	b, err := bio.Create(asmhdr)
-	if err != nil {
-		Fatalf("%v", err)
-	}
-	fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
-	for _, n := range asmlist {
-		if isblanksym(n.Sym) {
-			continue
-		}
-		switch n.Op {
-		case OLITERAL:
-			fmt.Fprintf(b, "#define const_%s %#v\n", n.Sym.Name, n.Val())
-
-		case OTYPE:
-			t := n.Type
-			if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
-				break
-			}
-			fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
-			for _, t := range t.Fields().Slice() {
-				if !isblanksym(t.Sym) {
-					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Offset))
-				}
-			}
-		}
-	}
-
-	b.Close()
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/fixedbugs_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/fixedbugs_test.go
deleted file mode 100644
index ca6a583..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/fixedbugs_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/fixedbugs_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/fixedbugs_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "testing"
-
-type T struct {
-	x [2]int64 // field that will be clobbered. Also makes type not SSAable.
-	p *byte    // has a pointer
-}
-
-//go:noinline
-func makeT() T {
-	return T{}
-}
-
-var g T
-
-var sink interface{}
-
-func TestIssue15854(t *testing.T) {
-	for i := 0; i < 10000; i++ {
-		if g.x[0] != 0 {
-			t.Fatalf("g.x[0] clobbered with %x\n", g.x[0])
-		}
-		// The bug was in the following assignment. The return
-		// value of makeT() is not copied out of the args area of
-		// stack frame in a timely fashion. So when write barriers
-		// are enabled, the marshaling of the args for the write
-		// barrier call clobbers the result of makeT() before it is
-		// read by the write barrier code.
-		g = makeT()
-		sink = make([]byte, 1000) // force write barriers to eventually happen
-	}
-}
-func TestIssue15854b(t *testing.T) {
-	const N = 10000
-	a := make([]T, N)
-	for i := 0; i < N; i++ {
-		a = append(a, makeT())
-		sink = make([]byte, 1000) // force write barriers to eventually happen
-	}
-	for i, v := range a {
-		if v.x[0] != 0 {
-			t.Fatalf("a[%d].x[0] clobbered with %x\n", i, v.x[0])
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/float_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/float_test.go
deleted file mode 100644
index 93613cf..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/float_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/float_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/float_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "testing"
-
-// For GO386=387, make sure fucomi* opcodes are not used
-// for comparison operations.
-// Note that this test will fail only on a Pentium MMX
-// processor (with GOARCH=386 GO386=387), as it just runs
-// some code and looks for an unimplemented instruction fault.
-
-//go:noinline
-func compare1(a, b float64) bool {
-	return a < b
-}
-
-//go:noinline
-func compare2(a, b float32) bool {
-	return a < b
-}
-
-func TestFloatCompare(t *testing.T) {
-	if !compare1(3, 5) {
-		t.Errorf("compare1 returned false")
-	}
-	if !compare2(3, 5) {
-		t.Errorf("compare2 returned false")
-	}
-}
-
-// For GO386=387, make sure fucomi* opcodes are not used
-// for float->int conversions.
-
-//go:noinline
-func cvt1(a float64) uint64 {
-	return uint64(a)
-}
-
-//go:noinline
-func cvt2(a float64) uint32 {
-	return uint32(a)
-}
-
-//go:noinline
-func cvt3(a float32) uint64 {
-	return uint64(a)
-}
-
-//go:noinline
-func cvt4(a float32) uint32 {
-	return uint32(a)
-}
-
-//go:noinline
-func cvt5(a float64) int64 {
-	return int64(a)
-}
-
-//go:noinline
-func cvt6(a float64) int32 {
-	return int32(a)
-}
-
-//go:noinline
-func cvt7(a float32) int64 {
-	return int64(a)
-}
-
-//go:noinline
-func cvt8(a float32) int32 {
-	return int32(a)
-}
-
-// make sure to cover int, uint cases (issue #16738)
-//go:noinline
-func cvt9(a float64) int {
-	return int(a)
-}
-
-//go:noinline
-func cvt10(a float64) uint {
-	return uint(a)
-}
-
-//go:noinline
-func cvt11(a float32) int {
-	return int(a)
-}
-
-//go:noinline
-func cvt12(a float32) uint {
-	return uint(a)
-}
-
-func TestFloatConvert(t *testing.T) {
-	if got := cvt1(3.5); got != 3 {
-		t.Errorf("cvt1 got %d, wanted 3", got)
-	}
-	if got := cvt2(3.5); got != 3 {
-		t.Errorf("cvt2 got %d, wanted 3", got)
-	}
-	if got := cvt3(3.5); got != 3 {
-		t.Errorf("cvt3 got %d, wanted 3", got)
-	}
-	if got := cvt4(3.5); got != 3 {
-		t.Errorf("cvt4 got %d, wanted 3", got)
-	}
-	if got := cvt5(3.5); got != 3 {
-		t.Errorf("cvt5 got %d, wanted 3", got)
-	}
-	if got := cvt6(3.5); got != 3 {
-		t.Errorf("cvt6 got %d, wanted 3", got)
-	}
-	if got := cvt7(3.5); got != 3 {
-		t.Errorf("cvt7 got %d, wanted 3", got)
-	}
-	if got := cvt8(3.5); got != 3 {
-		t.Errorf("cvt8 got %d, wanted 3", got)
-	}
-	if got := cvt9(3.5); got != 3 {
-		t.Errorf("cvt9 got %d, wanted 3", got)
-	}
-	if got := cvt10(3.5); got != 3 {
-		t.Errorf("cvt10 got %d, wanted 3", got)
-	}
-	if got := cvt11(3.5); got != 3 {
-		t.Errorf("cvt11 got %d, wanted 3", got)
-	}
-	if got := cvt12(3.5); got != 3 {
-		t.Errorf("cvt12 got %d, wanted 3", got)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/fmt.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/fmt.go
deleted file mode 100644
index dd70113..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/fmt.go
+++ /dev/null
@@ -1,1822 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/fmt.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/fmt.go:1
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-)
-
-// A FmtFlag value is a set of flags (or 0).
-// They control how the Xconv functions format their values.
-// See the respective function's documentation for details.
-type FmtFlag int
-
-// TODO(gri) The ' ' flag is not used anymore in %-formats.
-//           Eliminate eventually.
-
-const ( //                                 fmt.Format flag/prec or verb
-	FmtLeft     FmtFlag = 1 << iota // '-'
-	FmtSharp                        // '#'
-	FmtSign                         // '+'
-	FmtUnsigned                     // ' '               (historic: u flag)
-	FmtShort                        // verb == 'S'       (historic: h flag)
-	FmtLong                         // verb == 'L'       (historic: l flag)
-	FmtComma                        // '.' (== hasPrec)  (historic: , flag)
-	FmtByte                         // '0'               (historic: hh flag)
-)
-
-// fmtFlag computes the (internal) FmtFlag
-// value given the fmt.State and format verb.
-func fmtFlag(s fmt.State, verb rune) FmtFlag {
-	var flag FmtFlag
-	if s.Flag('-') {
-		flag |= FmtLeft
-	}
-	if s.Flag('#') {
-		flag |= FmtSharp
-	}
-	if s.Flag('+') {
-		flag |= FmtSign
-	}
-	if s.Flag(' ') {
-		flag |= FmtUnsigned
-	}
-	if _, ok := s.Precision(); ok {
-		flag |= FmtComma
-	}
-	if s.Flag('0') {
-		flag |= FmtByte
-	}
-	switch verb {
-	case 'S':
-		flag |= FmtShort
-	case 'L':
-		flag |= FmtLong
-	}
-	return flag
-}
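A small sketch of the fmt.State side of this translation: the flags and precision that fmtFlag inspects are whatever the caller wrote in the format directive. The type below is hypothetical and simply echoes which flags fmt passes through to a Formatter.

	package main

	import "fmt"

	// flagEcho is a hypothetical Formatter that prints the flags it receives,
	// i.e. exactly the inputs fmtFlag above folds into a FmtFlag value.
	type flagEcho struct{}

	func (flagEcho) Format(s fmt.State, verb rune) {
		for _, f := range "-#+ 0" {
			if s.Flag(int(f)) {
				fmt.Fprintf(s, "%c", f)
			}
		}
		if _, ok := s.Precision(); ok {
			fmt.Fprint(s, ".") // would become FmtComma
		}
		fmt.Fprintf(s, "%c", verb)
	}

	func main() {
		fmt.Printf("%#v %+.2S %-0L\n", flagEcho{}, flagEcho{}, flagEcho{})
		// Prints: #v +.S -0L
	}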
-
-// Format conversions:
-// TODO(gri) verify these; eliminate those not used anymore
-//
-//	%v Op		Node opcodes
-//		Flags:  #: print Go syntax (automatic unless fmtmode == FDbg)
-//
-//	%j *Node	Node details
-//		Flags:  0: suppresses things not relevant until walk
-//
-//	%v *Val		Constant values
-//
-//	%v *Sym		Symbols
-//	%S              unqualified identifier in any mode
-//		Flags:  +,- #: mode (see below)
-//			0: in export mode: unqualified identifier if exported, qualified if not
-//
-//	%v *Type	Types
-//	%S              omit "func" and receiver in function types
-//	%L              definition instead of name.
-//		Flags:  +,- #: mode (see below)
-//			' ' (only in -/Sym mode) print type identifiers with package name instead of prefix.
-//
-//	%v *Node	Nodes
-//	%S              (only in +/debug mode) suppress recursion
-//	%L              (only in Error mode) print "foo (type Bar)"
-//		Flags:  +,- #: mode (see below)
-//
-//	%v Nodes	Node lists
-//		Flags:  those of *Node
-//			.: separate items with ',' instead of ';'
-
-// *Sym, *Type, and *Node types use the flags below to set the format mode
-const (
-	FErr = iota
-	FDbg
-	FTypeId
-)
-
-var fmtmode int = FErr
-
-var fmtpkgpfx int // "% v" stickiness for *Type objects
-
-// The mode flags '+', '-', and '#' are sticky; they persist through
-// recursions of *Node, *Type, and *Sym values. The ' ' flag is
-// sticky only on *Type recursions and only used in %-/*Sym mode.
-//
-// Example: given a *Sym: %+v %#v %-v print an identifier properly qualified for debug/export/internal mode
-
-// Useful format combinations:
-// TODO(gri): verify these
-//
-// *Node, Nodes:
-//   %+v    multiline recursive debug dump of *Node/Nodes
-//   %+S    non-recursive debug dump
-//
-// *Node:
-//   %#v    Go format
-//   %L     "foo (type Bar)" for error messages
-//
-// *Type:
-//   %#v    Go format
-//   %#L    type definition instead of name
-//   %#S    omit "func" and receiver in function signature
-//
-//   %-v    type identifiers
-//   %-S    type identifiers without "func" and arg names in type signatures (methodsym)
-//   %- v   type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
-
-func setfmode(flags *FmtFlag) (fm int) {
-	fm = fmtmode
-	if *flags&FmtSign != 0 {
-		fmtmode = FDbg
-	} else if *flags&FmtSharp != 0 {
-		// ignore (textual export format no longer supported)
-	} else if *flags&FmtLeft != 0 {
-		fmtmode = FTypeId
-	}
-
-	*flags &^= (FmtSharp | FmtLeft | FmtSign)
-	return
-}
-
-var goopnames = []string{
-	OADDR:     "&",
-	OADD:      "+",
-	OADDSTR:   "+",
-	OALIGNOF:  "unsafe.Alignof",
-	OANDAND:   "&&",
-	OANDNOT:   "&^",
-	OAND:      "&",
-	OAPPEND:   "append",
-	OAS:       "=",
-	OAS2:      "=",
-	OBREAK:    "break",
-	OCALL:     "function call", // not actual syntax
-	OCAP:      "cap",
-	OCASE:     "case",
-	OCLOSE:    "close",
-	OCOMPLEX:  "complex",
-	OCOM:      "^",
-	OCONTINUE: "continue",
-	OCOPY:     "copy",
-	ODEC:      "--",
-	ODELETE:   "delete",
-	ODEFER:    "defer",
-	ODIV:      "/",
-	OEQ:       "==",
-	OFALL:     "fallthrough",
-	OFOR:      "for",
-	OGE:       ">=",
-	OGOTO:     "goto",
-	OGT:       ">",
-	OIF:       "if",
-	OIMAG:     "imag",
-	OINC:      "++",
-	OIND:      "*",
-	OLEN:      "len",
-	OLE:       "<=",
-	OLSH:      "<<",
-	OLT:       "<",
-	OMAKE:     "make",
-	OMINUS:    "-",
-	OMOD:      "%",
-	OMUL:      "*",
-	ONEW:      "new",
-	ONE:       "!=",
-	ONOT:      "!",
-	OOFFSETOF: "unsafe.Offsetof",
-	OOROR:     "||",
-	OOR:       "|",
-	OPANIC:    "panic",
-	OPLUS:     "+",
-	OPRINTN:   "println",
-	OPRINT:    "print",
-	ORANGE:    "range",
-	OREAL:     "real",
-	ORECV:     "<-",
-	ORECOVER:  "recover",
-	ORETURN:   "return",
-	ORSH:      ">>",
-	OSELECT:   "select",
-	OSEND:     "<-",
-	OSIZEOF:   "unsafe.Sizeof",
-	OSUB:      "-",
-	OSWITCH:   "switch",
-	OXOR:      "^",
-	OXFALL:    "fallthrough",
-}
-
-func (o Op) String() string {
-	return fmt.Sprint(o)
-}
-
-func (o Op) GoString() string {
-	return fmt.Sprintf("%#v", o)
-}
-
-func (o Op) Format(s fmt.State, verb rune) {
-	switch verb {
-	case 'v':
-		o.oconv(s, fmtFlag(s, verb))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
-	}
-}
-
-func (o Op) oconv(s fmt.State, flag FmtFlag) {
-	if (flag&FmtSharp != 0) || fmtmode != FDbg {
-		if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" {
-			fmt.Fprint(s, goopnames[o])
-			return
-		}
-	}
-
-	if o >= 0 && int(o) < len(opnames) && opnames[o] != "" {
-		fmt.Fprint(s, opnames[o])
-		return
-	}
-
-	fmt.Fprintf(s, "O-%d", int(o))
-}
-
-var classnames = []string{
-	"Pxxx",
-	"PEXTERN",
-	"PAUTO",
-	"PAUTOHEAP",
-	"PPARAM",
-	"PPARAMOUT",
-	"PFUNC",
-}
-
-func (n *Node) Format(s fmt.State, verb rune) {
-	switch verb {
-	case 'v', 'S', 'L':
-		n.Nconv(s, fmtFlag(s, verb))
-
-	case 'j':
-		n.jconv(s, fmtFlag(s, verb))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
-	}
-}
-
-// *Node details
-func (n *Node) jconv(s fmt.State, flag FmtFlag) {
-	c := flag & FmtShort
-
-	if c == 0 && n.Ullman != 0 {
-		fmt.Fprintf(s, " u(%d)", n.Ullman)
-	}
-
-	if c == 0 && n.Addable {
-		fmt.Fprintf(s, " a(%v)", n.Addable)
-	}
-
-	if c == 0 && n.Name != nil && n.Name.Vargen != 0 {
-		fmt.Fprintf(s, " g(%d)", n.Name.Vargen)
-	}
-
-	if n.Lineno != 0 {
-		fmt.Fprintf(s, " l(%d)", n.Lineno)
-	}
-
-	if c == 0 && n.Xoffset != BADWIDTH {
-		fmt.Fprintf(s, " x(%d)", n.Xoffset)
-	}
-
-	if n.Class != 0 {
-		if int(n.Class) < len(classnames) {
-			fmt.Fprintf(s, " class(%s)", classnames[n.Class])
-		} else {
-			fmt.Fprintf(s, " class(%d?)", n.Class)
-		}
-	}
-
-	if n.Colas {
-		fmt.Fprintf(s, " colas(%v)", n.Colas)
-	}
-
-	if n.Name != nil && n.Name.Funcdepth != 0 {
-		fmt.Fprintf(s, " f(%d)", n.Name.Funcdepth)
-	}
-	if n.Func != nil && n.Func.Depth != 0 {
-		fmt.Fprintf(s, " ff(%d)", n.Func.Depth)
-	}
-
-	switch n.Esc {
-	case EscUnknown:
-		break
-
-	case EscHeap:
-		fmt.Fprint(s, " esc(h)")
-
-	case EscNone:
-		fmt.Fprint(s, " esc(no)")
-
-	case EscNever:
-		if c == 0 {
-			fmt.Fprint(s, " esc(N)")
-		}
-
-	default:
-		fmt.Fprintf(s, " esc(%d)", n.Esc)
-	}
-
-	if e, ok := n.Opt().(*NodeEscState); ok && e.Loopdepth != 0 {
-		fmt.Fprintf(s, " ld(%d)", e.Loopdepth)
-	}
-
-	if c == 0 && n.Typecheck != 0 {
-		fmt.Fprintf(s, " tc(%d)", n.Typecheck)
-	}
-
-	if c == 0 && n.IsStatic {
-		fmt.Fprint(s, " static")
-	}
-
-	if n.Isddd {
-		fmt.Fprintf(s, " isddd(%v)", n.Isddd)
-	}
-
-	if n.Implicit {
-		fmt.Fprintf(s, " implicit(%v)", n.Implicit)
-	}
-
-	if n.Embedded != 0 {
-		fmt.Fprintf(s, " embedded(%d)", n.Embedded)
-	}
-
-	if n.Addrtaken {
-		fmt.Fprint(s, " addrtaken")
-	}
-
-	if n.Assigned {
-		fmt.Fprint(s, " assigned")
-	}
-	if n.Bounded {
-		fmt.Fprint(s, " bounded")
-	}
-	if n.NonNil {
-		fmt.Fprint(s, " nonnil")
-	}
-
-	if c == 0 && n.Used {
-		fmt.Fprintf(s, " used(%v)", n.Used)
-	}
-}
-
-func (v Val) Format(s fmt.State, verb rune) {
-	switch verb {
-	case 'v':
-		v.vconv(s, fmtFlag(s, verb))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(Val=%T)", verb, v)
-	}
-}
-
-func (v Val) vconv(s fmt.State, flag FmtFlag) {
-	switch u := v.U.(type) {
-	case *Mpint:
-		if !u.Rune {
-			if flag&FmtSharp != 0 {
-				fmt.Fprint(s, bconv(u, FmtSharp))
-				return
-			}
-			fmt.Fprint(s, bconv(u, 0))
-			return
-		}
-
-		switch x := u.Int64(); {
-		case ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'':
-			fmt.Fprintf(s, "'%c'", int(x))
-
-		case 0 <= x && x < 1<<16:
-			fmt.Fprintf(s, "'\\u%04x'", uint(int(x)))
-
-		case 0 <= x && x <= utf8.MaxRune:
-			fmt.Fprintf(s, "'\\U%08x'", uint64(x))
-
-		default:
-			fmt.Fprintf(s, "('\\x00' + %v)", u)
-		}
-
-	case *Mpflt:
-		if flag&FmtSharp != 0 {
-			fmt.Fprint(s, fconv(u, 0))
-			return
-		}
-		fmt.Fprint(s, fconv(u, FmtSharp))
-		return
-
-	case *Mpcplx:
-		switch {
-		case flag&FmtSharp != 0:
-			fmt.Fprintf(s, "(%v+%vi)", &u.Real, &u.Imag)
-
-		case v.U.(*Mpcplx).Real.CmpFloat64(0) == 0:
-			fmt.Fprintf(s, "%vi", fconv(&u.Imag, FmtSharp))
-
-		case v.U.(*Mpcplx).Imag.CmpFloat64(0) == 0:
-			fmt.Fprint(s, fconv(&u.Real, FmtSharp))
-
-		case v.U.(*Mpcplx).Imag.CmpFloat64(0) < 0:
-			fmt.Fprintf(s, "(%v%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
-
-		default:
-			fmt.Fprintf(s, "(%v+%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
-		}
-
-	case string:
-		fmt.Fprint(s, strconv.Quote(u))
-
-	case bool:
-		t := "false"
-		if u {
-			t = "true"
-		}
-		fmt.Fprint(s, t)
-
-	case *NilVal:
-		fmt.Fprint(s, "nil")
-
-	default:
-		fmt.Fprintf(s, "<ctype=%d>", v.Ctype())
-	}
-}
-
-/*
-s%,%,\n%g
-s%\n+%\n%g
-s%^[	]*T%%g
-s%,.*%%g
-s%.+%	[T&]		= "&",%g
-s%^	........*\]%&~%g
-s%~	%%g
-*/
-var etnames = []string{
-	Txxx:        "Txxx",
-	TINT:        "INT",
-	TUINT:       "UINT",
-	TINT8:       "INT8",
-	TUINT8:      "UINT8",
-	TINT16:      "INT16",
-	TUINT16:     "UINT16",
-	TINT32:      "INT32",
-	TUINT32:     "UINT32",
-	TINT64:      "INT64",
-	TUINT64:     "UINT64",
-	TUINTPTR:    "UINTPTR",
-	TFLOAT32:    "FLOAT32",
-	TFLOAT64:    "FLOAT64",
-	TCOMPLEX64:  "COMPLEX64",
-	TCOMPLEX128: "COMPLEX128",
-	TBOOL:       "BOOL",
-	TPTR32:      "PTR32",
-	TPTR64:      "PTR64",
-	TFUNC:       "FUNC",
-	TARRAY:      "ARRAY",
-	TSLICE:      "SLICE",
-	TSTRUCT:     "STRUCT",
-	TCHAN:       "CHAN",
-	TMAP:        "MAP",
-	TINTER:      "INTER",
-	TFORW:       "FORW",
-	TSTRING:     "STRING",
-	TUNSAFEPTR:  "TUNSAFEPTR",
-	TANY:        "ANY",
-	TIDEAL:      "TIDEAL",
-	TNIL:        "TNIL",
-	TBLANK:      "TBLANK",
-	TFUNCARGS:   "TFUNCARGS",
-	TCHANARGS:   "TCHANARGS",
-	TINTERMETH:  "TINTERMETH",
-	TDDDFIELD:   "TDDDFIELD",
-}
-
-func (et EType) String() string {
-	if int(et) < len(etnames) && etnames[et] != "" {
-		return etnames[et]
-	}
-	return fmt.Sprintf("E-%d", et)
-}
-
-func (s *Sym) symfmt(flag FmtFlag) string {
-	if s.Pkg != nil && flag&FmtShort == 0 {
-		switch fmtmode {
-		case FErr: // This is for the user
-			if s.Pkg == builtinpkg || s.Pkg == localpkg {
-				return s.Name
-			}
-
-			// If the name was used by multiple packages, display the full path,
-			if s.Pkg.Name != "" && numImport[s.Pkg.Name] > 1 {
-				return fmt.Sprintf("%q.%s", s.Pkg.Path, s.Name)
-			}
-			return s.Pkg.Name + "." + s.Name
-
-		case FDbg:
-			return s.Pkg.Name + "." + s.Name
-
-		case FTypeId:
-			if flag&FmtUnsigned != 0 {
-				return s.Pkg.Name + "." + s.Name // dcommontype, typehash
-			}
-			return s.Pkg.Prefix + "." + s.Name // (methodsym), typesym, weaksym
-		}
-	}
-
-	if flag&FmtByte != 0 {
-		// FmtByte (hh) implies FmtShort (h)
-		// skip leading "type." in method name
-		name := s.Name
-		if i := strings.LastIndex(name, "."); i >= 0 {
-			name = name[i+1:]
-		}
-
-		if fmtmode == FDbg {
-			return fmt.Sprintf("@%q.%s", s.Pkg.Path, name)
-		}
-
-		return name
-	}
-
-	return s.Name
-}
-
-var basicnames = []string{
-	TINT:        "int",
-	TUINT:       "uint",
-	TINT8:       "int8",
-	TUINT8:      "uint8",
-	TINT16:      "int16",
-	TUINT16:     "uint16",
-	TINT32:      "int32",
-	TUINT32:     "uint32",
-	TINT64:      "int64",
-	TUINT64:     "uint64",
-	TUINTPTR:    "uintptr",
-	TFLOAT32:    "float32",
-	TFLOAT64:    "float64",
-	TCOMPLEX64:  "complex64",
-	TCOMPLEX128: "complex128",
-	TBOOL:       "bool",
-	TANY:        "any",
-	TSTRING:     "string",
-	TNIL:        "nil",
-	TIDEAL:      "untyped number",
-	TBLANK:      "blank",
-}
-
-func (t *Type) typefmt(flag FmtFlag) string {
-	if t == nil {
-		return "<T>"
-	}
-
-	if t == bytetype || t == runetype {
-		// in %-T mode collapse rune and byte with their originals.
-		if fmtmode != FTypeId {
-			return t.Sym.sconv(FmtShort)
-		}
-		t = Types[t.Etype]
-	}
-
-	if t == errortype {
-		return "error"
-	}
-
-	// Unless the 'l' flag was specified, if the type has a name, just print that name.
-	if flag&FmtLong == 0 && t.Sym != nil && t != Types[t.Etype] {
-		switch fmtmode {
-		case FTypeId:
-			if flag&FmtShort != 0 {
-				if t.Vargen != 0 {
-					return fmt.Sprintf("%v·%d", t.Sym.sconv(FmtShort), t.Vargen)
-				}
-				return t.Sym.sconv(FmtShort)
-			}
-
-			if flag&FmtUnsigned != 0 {
-				return t.Sym.sconv(FmtUnsigned)
-			}
-
-			if t.Sym.Pkg == localpkg && t.Vargen != 0 {
-				return fmt.Sprintf("%v·%d", t.Sym, t.Vargen)
-			}
-		}
-
-		return t.Sym.String()
-	}
-
-	if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
-		prefix := ""
-		if fmtmode == FErr && (t == idealbool || t == idealstring) {
-			prefix = "untyped "
-		}
-		return prefix + basicnames[t.Etype]
-	}
-
-	if fmtmode == FDbg {
-		fmtmode = 0
-		str := t.Etype.String() + "-" + t.typefmt(flag)
-		fmtmode = FDbg
-		return str
-	}
-
-	switch t.Etype {
-	case TPTR32, TPTR64:
-		if fmtmode == FTypeId && (flag&FmtShort != 0) {
-			return "*" + t.Elem().tconv(FmtShort)
-		}
-		return "*" + t.Elem().String()
-
-	case TARRAY:
-		if t.isDDDArray() {
-			return "[...]" + t.Elem().String()
-		}
-		return fmt.Sprintf("[%d]%v", t.NumElem(), t.Elem())
-
-	case TSLICE:
-		return "[]" + t.Elem().String()
-
-	case TCHAN:
-		switch t.ChanDir() {
-		case Crecv:
-			return "<-chan " + t.Elem().String()
-
-		case Csend:
-			return "chan<- " + t.Elem().String()
-		}
-
-		if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == Crecv {
-			return "chan (" + t.Elem().String() + ")"
-		}
-		return "chan " + t.Elem().String()
-
-	case TMAP:
-		return "map[" + t.Key().String() + "]" + t.Val().String()
-
-	case TINTER:
-		if t.IsEmptyInterface() {
-			return "interface {}"
-		}
-		buf := make([]byte, 0, 64)
-		buf = append(buf, "interface {"...)
-		for i, f := range t.Fields().Slice() {
-			if i != 0 {
-				buf = append(buf, ';')
-			}
-			buf = append(buf, ' ')
-			switch {
-			case f.Sym == nil:
-				// Check first that a symbol is defined for this type.
-				// Wrong interface definitions may have types lacking a symbol.
-				break
-			case exportname(f.Sym.Name):
-				buf = append(buf, f.Sym.sconv(FmtShort)...)
-			default:
-				buf = append(buf, f.Sym.sconv(FmtUnsigned)...)
-			}
-			buf = append(buf, f.Type.tconv(FmtShort)...)
-		}
-		if t.NumFields() != 0 {
-			buf = append(buf, ' ')
-		}
-		buf = append(buf, '}')
-		return string(buf)
-
-	case TFUNC:
-		buf := make([]byte, 0, 64)
-		if flag&FmtShort != 0 {
-			// no leading func
-		} else {
-			if t.Recv() != nil {
-				buf = append(buf, "method"...)
-				buf = append(buf, t.Recvs().String()...)
-				buf = append(buf, ' ')
-			}
-			buf = append(buf, "func"...)
-		}
-		buf = append(buf, t.Params().String()...)
-
-		switch t.Results().NumFields() {
-		case 0:
-			// nothing to do
-
-		case 1:
-			buf = append(buf, ' ')
-			buf = append(buf, t.Results().Field(0).Type.String()...) // struct->field->field's type
-
-		default:
-			buf = append(buf, ' ')
-			buf = append(buf, t.Results().String()...)
-		}
-		return string(buf)
-
-	case TSTRUCT:
-		if m := t.StructType().Map; m != nil {
-			mt := m.MapType()
-			// Format the bucket struct for map[x]y as map.bucket[x]y.
-			// This avoids a recursive print that generates very long names.
-			if mt.Bucket == t {
-				return "map.bucket[" + m.Key().String() + "]" + m.Val().String()
-			}
-
-			if mt.Hmap == t {
-				return "map.hdr[" + m.Key().String() + "]" + m.Val().String()
-			}
-
-			if mt.Hiter == t {
-				return "map.iter[" + m.Key().String() + "]" + m.Val().String()
-			}
-
-			yyerror("unknown internal map type")
-		}
-
-		buf := make([]byte, 0, 64)
-		if t.IsFuncArgStruct() {
-			buf = append(buf, '(')
-			var flag1 FmtFlag
-			if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
-				flag1 = FmtShort
-			}
-			for i, f := range t.Fields().Slice() {
-				if i != 0 {
-					buf = append(buf, ", "...)
-				}
-				buf = append(buf, fldconv(f, flag1)...)
-			}
-			buf = append(buf, ')')
-		} else {
-			buf = append(buf, "struct {"...)
-			for i, f := range t.Fields().Slice() {
-				if i != 0 {
-					buf = append(buf, ';')
-				}
-				buf = append(buf, ' ')
-				buf = append(buf, fldconv(f, FmtLong)...)
-			}
-			if t.NumFields() != 0 {
-				buf = append(buf, ' ')
-			}
-			buf = append(buf, '}')
-		}
-		return string(buf)
-
-	case TFORW:
-		if t.Sym != nil {
-			return "undefined " + t.Sym.String()
-		}
-		return "undefined"
-
-	case TUNSAFEPTR:
-		return "unsafe.Pointer"
-
-	case TDDDFIELD:
-		return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.DDDField())
-
-	case Txxx:
-		return "Txxx"
-	}
-
-	// Don't know how to handle - fall back to detailed prints.
-	return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.Elem())
-}
-
-// Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op Op) bool {
-	switch op {
-	case OIF, OFOR, OSWITCH:
-		return true
-	}
-
-	return false
-}
-
-func (n *Node) stmtfmt(s fmt.State) {
-	// some statements allow for an init, but at most one,
-	// but we may have an arbitrary number added, eg by typecheck
-	// and inlining. If it doesn't fit the syntax, emit an enclosing
-	// block starting with the init statements.
-
-	// if we can just say "for" n->ninit; ... then do so
-	simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)
-
-	// otherwise, print the inits as separate statements
-	complexinit := n.Ninit.Len() != 0 && !simpleinit && (fmtmode != FErr)
-
-	// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
-	extrablock := complexinit && stmtwithinit(n.Op)
-
-	if extrablock {
-		fmt.Fprint(s, "{")
-	}
-
-	if complexinit {
-		fmt.Fprintf(s, " %v; ", n.Ninit)
-	}
-
-	switch n.Op {
-	case ODCL:
-		fmt.Fprintf(s, "var %v %v", n.Left.Sym, n.Left.Type)
-
-	case ODCLFIELD:
-		if n.Left != nil {
-			fmt.Fprintf(s, "%v %v", n.Left, n.Right)
-		} else {
-			fmt.Fprintf(s, "%v", n.Right)
-		}
-
-	// Don't export "v = <N>" initializing statements, hope they're always
-	// preceded by the DCL which will be re-parsed and typechecked to reproduce
-	// the "v = <N>" again.
-	case OAS, OASWB:
-		if n.Colas && !complexinit {
-			fmt.Fprintf(s, "%v := %v", n.Left, n.Right)
-		} else {
-			fmt.Fprintf(s, "%v = %v", n.Left, n.Right)
-		}
-
-	case OASOP:
-		if n.Implicit {
-			if Op(n.Etype) == OADD {
-				fmt.Fprintf(s, "%v++", n.Left)
-			} else {
-				fmt.Fprintf(s, "%v--", n.Left)
-			}
-			break
-		}
-
-		fmt.Fprintf(s, "%v %#v= %v", n.Left, Op(n.Etype), n.Right)
-
-	case OAS2:
-		if n.Colas && !complexinit {
-			fmt.Fprintf(s, "%.v := %.v", n.List, n.Rlist)
-			break
-		}
-		fallthrough
-
-	case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
-		fmt.Fprintf(s, "%.v = %.v", n.List, n.Rlist)
-
-	case ORETURN:
-		fmt.Fprintf(s, "return %.v", n.List)
-
-	case ORETJMP:
-		fmt.Fprintf(s, "retjmp %v", n.Sym)
-
-	case OPROC:
-		fmt.Fprintf(s, "go %v", n.Left)
-
-	case ODEFER:
-		fmt.Fprintf(s, "defer %v", n.Left)
-
-	case OIF:
-		if simpleinit {
-			fmt.Fprintf(s, "if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
-		} else {
-			fmt.Fprintf(s, "if %v { %v }", n.Left, n.Nbody)
-		}
-		if n.Rlist.Len() != 0 {
-			fmt.Fprintf(s, " else { %v }", n.Rlist)
-		}
-
-	case OFOR:
-		if fmtmode == FErr { // TODO maybe only if FmtShort, same below
-			fmt.Fprint(s, "for loop")
-			break
-		}
-
-		fmt.Fprint(s, "for")
-		if simpleinit {
-			fmt.Fprintf(s, " %v;", n.Ninit.First())
-		} else if n.Right != nil {
-			fmt.Fprint(s, " ;")
-		}
-
-		if n.Left != nil {
-			fmt.Fprintf(s, " %v", n.Left)
-		}
-
-		if n.Right != nil {
-			fmt.Fprintf(s, "; %v", n.Right)
-		} else if simpleinit {
-			fmt.Fprint(s, ";")
-		}
-
-		fmt.Fprintf(s, " { %v }", n.Nbody)
-
-	case ORANGE:
-		if fmtmode == FErr {
-			fmt.Fprint(s, "for loop")
-			break
-		}
-
-		if n.List.Len() == 0 {
-			fmt.Fprintf(s, "for range %v { %v }", n.Right, n.Nbody)
-			break
-		}
-
-		fmt.Fprintf(s, "for %.v = range %v { %v }", n.List, n.Right, n.Nbody)
-
-	case OSELECT, OSWITCH:
-		if fmtmode == FErr {
-			fmt.Fprintf(s, "%v statement", n.Op)
-			break
-		}
-
-		fmt.Fprint(s, n.Op.GoString()) // %#v
-		if simpleinit {
-			fmt.Fprintf(s, " %v;", n.Ninit.First())
-		}
-		if n.Left != nil {
-			fmt.Fprintf(s, " %v ", n.Left)
-		}
-
-		fmt.Fprintf(s, " { %v }", n.List)
-
-	case OXCASE:
-		if n.List.Len() != 0 {
-			fmt.Fprintf(s, "case %.v", n.List)
-		} else {
-			fmt.Fprint(s, "default")
-		}
-		fmt.Fprintf(s, ": %v", n.Nbody)
-
-	case OCASE:
-		switch {
-		case n.Left != nil:
-			// single element
-			fmt.Fprintf(s, "case %v", n.Left)
-		case n.List.Len() > 0:
-			// range
-			if n.List.Len() != 2 {
-				Fatalf("bad OCASE list length %d", n.List.Len())
-			}
-			fmt.Fprintf(s, "case %v..%v", n.List.First(), n.List.Second())
-		default:
-			fmt.Fprint(s, "default")
-		}
-		fmt.Fprintf(s, ": %v", n.Nbody)
-
-	case OBREAK,
-		OCONTINUE,
-		OGOTO,
-		OFALL,
-		OXFALL:
-		if n.Left != nil {
-			fmt.Fprintf(s, "%#v %v", n.Op, n.Left)
-		} else {
-			fmt.Fprint(s, n.Op.GoString()) // %#v
-		}
-
-	case OEMPTY:
-		break
-
-	case OLABEL:
-		fmt.Fprintf(s, "%v: ", n.Left)
-	}
-
-	if extrablock {
-		fmt.Fprint(s, "}")
-	}
-}
-
-var opprec = []int{
-	OALIGNOF:      8,
-	OAPPEND:       8,
-	OARRAYBYTESTR: 8,
-	OARRAYLIT:     8,
-	OSLICELIT:     8,
-	OARRAYRUNESTR: 8,
-	OCALLFUNC:     8,
-	OCALLINTER:    8,
-	OCALLMETH:     8,
-	OCALL:         8,
-	OCAP:          8,
-	OCLOSE:        8,
-	OCONVIFACE:    8,
-	OCONVNOP:      8,
-	OCONV:         8,
-	OCOPY:         8,
-	ODELETE:       8,
-	OGETG:         8,
-	OLEN:          8,
-	OLITERAL:      8,
-	OMAKESLICE:    8,
-	OMAKE:         8,
-	OMAPLIT:       8,
-	ONAME:         8,
-	ONEW:          8,
-	ONONAME:       8,
-	OOFFSETOF:     8,
-	OPACK:         8,
-	OPANIC:        8,
-	OPAREN:        8,
-	OPRINTN:       8,
-	OPRINT:        8,
-	ORUNESTR:      8,
-	OSIZEOF:       8,
-	OSTRARRAYBYTE: 8,
-	OSTRARRAYRUNE: 8,
-	OSTRUCTLIT:    8,
-	OTARRAY:       8,
-	OTCHAN:        8,
-	OTFUNC:        8,
-	OTINTER:       8,
-	OTMAP:         8,
-	OTSTRUCT:      8,
-	OINDEXMAP:     8,
-	OINDEX:        8,
-	OSLICE:        8,
-	OSLICESTR:     8,
-	OSLICEARR:     8,
-	OSLICE3:       8,
-	OSLICE3ARR:    8,
-	ODOTINTER:     8,
-	ODOTMETH:      8,
-	ODOTPTR:       8,
-	ODOTTYPE2:     8,
-	ODOTTYPE:      8,
-	ODOT:          8,
-	OXDOT:         8,
-	OCALLPART:     8,
-	OPLUS:         7,
-	ONOT:          7,
-	OCOM:          7,
-	OMINUS:        7,
-	OADDR:         7,
-	OIND:          7,
-	ORECV:         7,
-	OMUL:          6,
-	ODIV:          6,
-	OMOD:          6,
-	OLSH:          6,
-	ORSH:          6,
-	OAND:          6,
-	OANDNOT:       6,
-	OADD:          5,
-	OSUB:          5,
-	OOR:           5,
-	OXOR:          5,
-	OEQ:           4,
-	OLT:           4,
-	OLE:           4,
-	OGE:           4,
-	OGT:           4,
-	ONE:           4,
-	OCMPSTR:       4,
-	OCMPIFACE:     4,
-	OSEND:         3,
-	OANDAND:       2,
-	OOROR:         1,
-	// Statements handled by stmtfmt
-	OAS:         -1,
-	OAS2:        -1,
-	OAS2DOTTYPE: -1,
-	OAS2FUNC:    -1,
-	OAS2MAPR:    -1,
-	OAS2RECV:    -1,
-	OASOP:       -1,
-	OBREAK:      -1,
-	OCASE:       -1,
-	OCONTINUE:   -1,
-	ODCL:        -1,
-	ODCLFIELD:   -1,
-	ODEFER:      -1,
-	OEMPTY:      -1,
-	OFALL:       -1,
-	OFOR:        -1,
-	OGOTO:       -1,
-	OIF:         -1,
-	OLABEL:      -1,
-	OPROC:       -1,
-	ORANGE:      -1,
-	ORETURN:     -1,
-	OSELECT:     -1,
-	OSWITCH:     -1,
-	OXCASE:      -1,
-	OXFALL:      -1,
-	OEND:        0,
-}
-
-func (n *Node) exprfmt(s fmt.State, prec int) {
-	for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) {
-		n = n.Left
-	}
-
-	if n == nil {
-		fmt.Fprint(s, "<N>")
-		return
-	}
-
-	nprec := opprec[n.Op]
-	if n.Op == OTYPE && n.Sym != nil {
-		nprec = 8
-	}
-
-	if prec > nprec {
-		fmt.Fprintf(s, "(%v)", n)
-		return
-	}
-
-	switch n.Op {
-	case OPAREN:
-		fmt.Fprintf(s, "(%v)", n.Left)
-
-	case ODDDARG:
-		fmt.Fprint(s, "... argument")
-
-	case OLITERAL: // this is a bit of a mess
-		if fmtmode == FErr {
-			if n.Orig != nil && n.Orig != n {
-				n.Orig.exprfmt(s, prec)
-				return
-			}
-			if n.Sym != nil {
-				fmt.Fprint(s, n.Sym.String())
-				return
-			}
-		}
-		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
-			n.Orig.exprfmt(s, prec)
-			return
-		}
-		if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring {
-			// Need parens when type begins with what might
-			// be misinterpreted as a unary operator: * or <-.
-			if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == Crecv) {
-				fmt.Fprintf(s, "(%v)(%v)", n.Type, n.Val())
-				return
-			} else {
-				fmt.Fprintf(s, "%v(%v)", n.Type, n.Val())
-				return
-			}
-		}
-
-		fmt.Fprintf(s, "%v", n.Val())
-
-	// Special case: name used as local variable in export.
-	// _ becomes ~b%d internally; print as _ for export
-	case ONAME:
-		if fmtmode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
-			fmt.Fprint(s, "_")
-			return
-		}
-		fallthrough
-	case OPACK, ONONAME:
-		fmt.Fprint(s, n.Sym.String())
-
-	case OTYPE:
-		if n.Type == nil && n.Sym != nil {
-			fmt.Fprint(s, n.Sym.String())
-			return
-		}
-		fmt.Fprintf(s, "%v", n.Type)
-
-	case OTARRAY:
-		if n.Left != nil {
-			fmt.Fprintf(s, "[]%v", n.Left)
-			return
-		}
-		fmt.Fprintf(s, "[]%v", n.Right) // happens before typecheck
-
-	case OTMAP:
-		fmt.Fprintf(s, "map[%v]%v", n.Left, n.Right)
-
-	case OTCHAN:
-		switch ChanDir(n.Etype) {
-		case Crecv:
-			fmt.Fprintf(s, "<-chan %v", n.Left)
-
-		case Csend:
-			fmt.Fprintf(s, "chan<- %v", n.Left)
-
-		default:
-			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && ChanDir(n.Left.Etype) == Crecv {
-				fmt.Fprintf(s, "chan (%v)", n.Left)
-			} else {
-				fmt.Fprintf(s, "chan %v", n.Left)
-			}
-		}
-
-	case OTSTRUCT:
-		fmt.Fprint(s, "<struct>")
-
-	case OTINTER:
-		fmt.Fprint(s, "<inter>")
-
-	case OTFUNC:
-		fmt.Fprint(s, "<func>")
-
-	case OCLOSURE:
-		if fmtmode == FErr {
-			fmt.Fprint(s, "func literal")
-			return
-		}
-		if n.Nbody.Len() != 0 {
-			fmt.Fprintf(s, "%v { %v }", n.Type, n.Nbody)
-			return
-		}
-		fmt.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
-
-	case OCOMPLIT:
-		ptrlit := n.Right != nil && n.Right.Implicit && n.Right.Type != nil && n.Right.Type.IsPtr()
-		if fmtmode == FErr {
-			if n.Right != nil && n.Right.Type != nil && !n.Implicit {
-				if ptrlit {
-					fmt.Fprintf(s, "&%v literal", n.Right.Type.Elem())
-					return
-				} else {
-					fmt.Fprintf(s, "%v literal", n.Right.Type)
-					return
-				}
-			}
-
-			fmt.Fprint(s, "composite literal")
-			return
-		}
-		fmt.Fprintf(s, "(%v{ %.v })", n.Right, n.List)
-
-	case OPTRLIT:
-		fmt.Fprintf(s, "&%v", n.Left)
-
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
-		if fmtmode == FErr {
-			fmt.Fprintf(s, "%v literal", n.Type)
-			return
-		}
-		fmt.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
-
-	case OKEY:
-		if n.Left != nil && n.Right != nil {
-			fmt.Fprintf(s, "%v:%v", n.Left, n.Right)
-			return
-		}
-
-		if n.Left == nil && n.Right != nil {
-			fmt.Fprintf(s, ":%v", n.Right)
-			return
-		}
-		if n.Left != nil && n.Right == nil {
-			fmt.Fprintf(s, "%v:", n.Left)
-			return
-		}
-		fmt.Fprint(s, ":")
-
-	case OSTRUCTKEY:
-		fmt.Fprintf(s, "%v:%v", n.Sym, n.Left)
-
-	case OCALLPART:
-		n.Left.exprfmt(s, nprec)
-		if n.Right == nil || n.Right.Sym == nil {
-			fmt.Fprint(s, ".<nil>")
-			return
-		}
-		fmt.Fprintf(s, ".%0S", n.Right.Sym)
-
-	case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
-		n.Left.exprfmt(s, nprec)
-		if n.Sym == nil {
-			fmt.Fprint(s, ".<nil>")
-			return
-		}
-		fmt.Fprintf(s, ".%0S", n.Sym)
-
-	case ODOTTYPE, ODOTTYPE2:
-		n.Left.exprfmt(s, nprec)
-		if n.Right != nil {
-			fmt.Fprintf(s, ".(%v)", n.Right)
-			return
-		}
-		fmt.Fprintf(s, ".(%v)", n.Type)
-
-	case OINDEX, OINDEXMAP:
-		n.Left.exprfmt(s, nprec)
-		fmt.Fprintf(s, "[%v]", n.Right)
-
-	case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
-		n.Left.exprfmt(s, nprec)
-		fmt.Fprint(s, "[")
-		low, high, max := n.SliceBounds()
-		if low != nil {
-			fmt.Fprint(s, low.String())
-		}
-		fmt.Fprint(s, ":")
-		if high != nil {
-			fmt.Fprint(s, high.String())
-		}
-		if n.Op.IsSlice3() {
-			fmt.Fprint(s, ":")
-			if max != nil {
-				fmt.Fprint(s, max.String())
-			}
-		}
-		fmt.Fprint(s, "]")
-
-	case OCOPY, OCOMPLEX:
-		fmt.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right)
-
-	case OCONV,
-		OCONVIFACE,
-		OCONVNOP,
-		OARRAYBYTESTR,
-		OARRAYRUNESTR,
-		OSTRARRAYBYTE,
-		OSTRARRAYRUNE,
-		ORUNESTR:
-		if n.Type == nil || n.Type.Sym == nil {
-			fmt.Fprintf(s, "(%v)(%v)", n.Type, n.Left)
-			return
-		}
-		if n.Left != nil {
-			fmt.Fprintf(s, "%v(%v)", n.Type, n.Left)
-			return
-		}
-		fmt.Fprintf(s, "%v(%.v)", n.Type, n.List)
-
-	case OREAL,
-		OIMAG,
-		OAPPEND,
-		OCAP,
-		OCLOSE,
-		ODELETE,
-		OLEN,
-		OMAKE,
-		ONEW,
-		OPANIC,
-		ORECOVER,
-		OALIGNOF,
-		OOFFSETOF,
-		OSIZEOF,
-		OPRINT,
-		OPRINTN:
-		if n.Left != nil {
-			fmt.Fprintf(s, "%#v(%v)", n.Op, n.Left)
-			return
-		}
-		if n.Isddd {
-			fmt.Fprintf(s, "%#v(%.v...)", n.Op, n.List)
-			return
-		}
-		fmt.Fprintf(s, "%#v(%.v)", n.Op, n.List)
-
-	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
-		n.Left.exprfmt(s, nprec)
-		if n.Isddd {
-			fmt.Fprintf(s, "(%.v...)", n.List)
-			return
-		}
-		fmt.Fprintf(s, "(%.v)", n.List)
-
-	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
-		if n.List.Len() != 0 { // pre-typecheck
-			fmt.Fprintf(s, "make(%v, %.v)", n.Type, n.List)
-			return
-		}
-		if n.Right != nil {
-			fmt.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right)
-			return
-		}
-		if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) {
-			fmt.Fprintf(s, "make(%v, %v)", n.Type, n.Left)
-			return
-		}
-		fmt.Fprintf(s, "make(%v)", n.Type)
-
-		// Unary
-	case OPLUS,
-		OMINUS,
-		OADDR,
-		OCOM,
-		OIND,
-		ONOT,
-		ORECV:
-		fmt.Fprint(s, n.Op.GoString()) // %#v
-		if n.Left.Op == n.Op {
-			fmt.Fprint(s, " ")
-		}
-		n.Left.exprfmt(s, nprec+1)
-
-		// Binary
-	case OADD,
-		OAND,
-		OANDAND,
-		OANDNOT,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OLE,
-		OLT,
-		OLSH,
-		OMOD,
-		OMUL,
-		ONE,
-		OOR,
-		OOROR,
-		ORSH,
-		OSEND,
-		OSUB,
-		OXOR:
-		n.Left.exprfmt(s, nprec)
-		fmt.Fprintf(s, " %#v ", n.Op)
-		n.Right.exprfmt(s, nprec+1)
-
-	case OADDSTR:
-		i := 0
-		for _, n1 := range n.List.Slice() {
-			if i != 0 {
-				fmt.Fprint(s, " + ")
-			}
-			n1.exprfmt(s, nprec)
-			i++
-		}
-
-	case OCMPSTR, OCMPIFACE:
-		n.Left.exprfmt(s, nprec)
-		// TODO(marvin): Fix Node.EType type union.
-		fmt.Fprintf(s, " %#v ", Op(n.Etype))
-		n.Right.exprfmt(s, nprec+1)
-
-	default:
-		fmt.Fprintf(s, "<node %v>", n.Op)
-	}
-}
-
-func (n *Node) nodefmt(s fmt.State, flag FmtFlag) {
-	t := n.Type
-
-	// we almost always want the original, except in export mode for literals
-	// this saves the importer some work, and avoids us having to redo some
-	// special casing for package unsafe
-	if n.Op != OLITERAL && n.Orig != nil {
-		n = n.Orig
-	}
-
-	if flag&FmtLong != 0 && t != nil {
-		if t.Etype == TNIL {
-			fmt.Fprint(s, "nil")
-		} else {
-			fmt.Fprintf(s, "%v (type %v)", n, t)
-		}
-		return
-	}
-
-	// TODO inlining produces expressions with ninits. we can't print these yet.
-
-	if opprec[n.Op] < 0 {
-		n.stmtfmt(s)
-		return
-	}
-
-	n.exprfmt(s, 0)
-}
-
-func (n *Node) nodedump(s fmt.State, flag FmtFlag) {
-	if n == nil {
-		return
-	}
-
-	recur := flag&FmtShort == 0
-
-	if recur {
-		indent(s)
-		if dumpdepth > 10 {
-			fmt.Fprint(s, "...")
-			return
-		}
-
-		if n.Ninit.Len() != 0 {
-			fmt.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
-			indent(s)
-		}
-	}
-
-	switch n.Op {
-	default:
-		fmt.Fprintf(s, "%v%j", n.Op, n)
-
-	case OINDREGSP:
-		fmt.Fprintf(s, "%v-SP%j", n.Op, n)
-
-	case OLITERAL:
-		fmt.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)
-
-	case ONAME, ONONAME:
-		if n.Sym != nil {
-			fmt.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
-		} else {
-			fmt.Fprintf(s, "%v%j", n.Op, n)
-		}
-		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
-			indent(s)
-			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
-		}
-
-	case OASOP:
-		fmt.Fprintf(s, "%v-%v%j", n.Op, Op(n.Etype), n)
-
-	case OTYPE:
-		fmt.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
-		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
-			indent(s)
-			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
-		}
-	}
-
-	if n.Sym != nil && n.Op != ONAME {
-		fmt.Fprintf(s, " %v", n.Sym)
-	}
-
-	if n.Type != nil {
-		fmt.Fprintf(s, " %v", n.Type)
-	}
-
-	if recur {
-		if n.Left != nil {
-			fmt.Fprintf(s, "%v", n.Left)
-		}
-		if n.Right != nil {
-			fmt.Fprintf(s, "%v", n.Right)
-		}
-		if n.List.Len() != 0 {
-			indent(s)
-			fmt.Fprintf(s, "%v-list%v", n.Op, n.List)
-		}
-
-		if n.Rlist.Len() != 0 {
-			indent(s)
-			fmt.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
-		}
-
-		if n.Nbody.Len() != 0 {
-			indent(s)
-			fmt.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
-		}
-	}
-}
-
-// "%S" suppresses qualifying with package
-func (s *Sym) Format(f fmt.State, verb rune) {
-	switch verb {
-	case 'v', 'S':
-		fmt.Fprint(f, s.sconv(fmtFlag(f, verb)))
-
-	default:
-		fmt.Fprintf(f, "%%!%c(*Sym=%p)", verb, s)
-	}
-}
-
-func (s *Sym) String() string {
-	return s.sconv(0)
-}
-
-// See #16897 before changing the implementation of sconv.
-func (s *Sym) sconv(flag FmtFlag) string {
-	if flag&FmtLong != 0 {
-		panic("linksymfmt")
-	}
-
-	if s == nil {
-		return "<S>"
-	}
-
-	if s.Name == "_" {
-		return "_"
-	}
-
-	sf := flag
-	sm := setfmode(&flag)
-	str := s.symfmt(flag)
-	flag = sf
-	fmtmode = sm
-	return str
-}
-
-func (t *Type) String() string {
-	return t.tconv(0)
-}
-
-func fldconv(f *Field, flag FmtFlag) string {
-	if f == nil {
-		return "<T>"
-	}
-
-	sf := flag
-	sm := setfmode(&flag)
-
-	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
-		fmtpkgpfx++
-	}
-	if fmtpkgpfx != 0 {
-		flag |= FmtUnsigned
-	}
-
-	var name string
-	if flag&FmtShort == 0 {
-		s := f.Sym
-
-		// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
-		// ~r%d is a (formerly) unnamed result.
-		if fmtmode == FErr && f.Nname != nil {
-			if f.Nname.Orig != nil {
-				s = f.Nname.Orig.Sym
-				if s != nil && s.Name[0] == '~' {
-					if s.Name[1] == 'r' { // originally an unnamed result
-						s = nil
-					} else if s.Name[1] == 'b' { // originally the blank identifier _
-						s = lookup("_")
-					}
-				}
-			} else {
-				s = nil
-			}
-		}
-
-		if s != nil && f.Embedded == 0 {
-			if f.Funarg != FunargNone {
-				name = f.Nname.String()
-			} else if flag&FmtLong != 0 {
-				name = fmt.Sprintf("%0S", s)
-				if !exportname(name) && flag&FmtUnsigned == 0 {
-					name = s.String() // qualify non-exported names (used on structs, not on funarg)
-				}
-			} else {
-				name = s.String()
-			}
-		}
-	}
-
-	var typ string
-	if f.Isddd {
-		typ = fmt.Sprintf("...%v", f.Type.Elem())
-	} else {
-		typ = fmt.Sprintf("%v", f.Type)
-	}
-
-	str := typ
-	if name != "" {
-		str = name + " " + typ
-	}
-
-	if flag&FmtShort == 0 && f.Funarg == FunargNone && f.Note != "" {
-		str += " " + strconv.Quote(f.Note)
-	}
-
-	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
-		fmtpkgpfx--
-	}
-
-	flag = sf
-	fmtmode = sm
-	return str
-}
-
-// "%L"  print definition, not name
-// "%S"  omit 'func' and receiver from function types, short type names
-// "% v" package name, not prefix (FTypeId mode, sticky)
-func (t *Type) Format(s fmt.State, verb rune) {
-	switch verb {
-	case 'v', 'S', 'L':
-		fmt.Fprint(s, t.tconv(fmtFlag(s, verb)))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
-	}
-}
-
-// See #16897 before changing the implementation of tconv.
-func (t *Type) tconv(flag FmtFlag) string {
-	if t == nil {
-		return "<T>"
-	}
-
-	if t.Trecur > 4 {
-		return "<...>"
-	}
-
-	t.Trecur++
-	sf := flag
-	sm := setfmode(&flag)
-
-	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
-		fmtpkgpfx++
-	}
-	if fmtpkgpfx != 0 {
-		flag |= FmtUnsigned
-	}
-
-	str := t.typefmt(flag)
-
-	if fmtmode == FTypeId && (sf&FmtUnsigned != 0) {
-		fmtpkgpfx--
-	}
-
-	flag = sf
-	fmtmode = sm
-	t.Trecur--
-	return str
-}
-
-func (n *Node) String() string {
-	return fmt.Sprint(n)
-}
-
-// "%L"  suffix with "(type %T)" where possible
-// "%+S" in debug mode, don't recurse, no multiline output
-func (n *Node) Nconv(s fmt.State, flag FmtFlag) {
-	if n == nil {
-		fmt.Fprint(s, "<N>")
-		return
-	}
-
-	sf := flag
-	sm := setfmode(&flag)
-
-	switch fmtmode {
-	case FErr:
-		n.nodefmt(s, flag)
-
-	case FDbg:
-		dumpdepth++
-		n.nodedump(s, flag)
-		dumpdepth--
-
-	default:
-		Fatalf("unhandled %%N mode: %d", fmtmode)
-	}
-
-	flag = sf
-	fmtmode = sm
-}
-
-func (l Nodes) Format(s fmt.State, verb rune) {
-	switch verb {
-	case 'v':
-		l.hconv(s, fmtFlag(s, verb))
-
-	default:
-		fmt.Fprintf(s, "%%!%c(Nodes)", verb)
-	}
-}
-
-func (n Nodes) String() string {
-	return fmt.Sprint(n)
-}
-
-// Flags: all those of %N plus '.': separate with comma's instead of semicolons.
-func (l Nodes) hconv(s fmt.State, flag FmtFlag) {
-	if l.Len() == 0 && fmtmode == FDbg {
-		fmt.Fprint(s, "<nil>")
-		return
-	}
-
-	sf := flag
-	sm := setfmode(&flag)
-	sep := "; "
-	if fmtmode == FDbg {
-		sep = "\n"
-	} else if flag&FmtComma != 0 {
-		sep = ", "
-	}
-
-	for i, n := range l.Slice() {
-		fmt.Fprint(s, n)
-		if i+1 < l.Len() {
-			fmt.Fprint(s, sep)
-		}
-	}
-
-	flag = sf
-	fmtmode = sm
-}
-
-func dumplist(s string, l Nodes) {
-	fmt.Printf("%s%+v\n", s, l)
-}
-
-func Dump(s string, n *Node) {
-	fmt.Printf("%s [%p]%+v\n", s, n, n)
-}
-
-// TODO(gri) make variable local somehow
-var dumpdepth int
-
-// indent prints indentation to s.
-func indent(s fmt.State) {
-	fmt.Fprint(s, "\n")
-	for i := 0; i < dumpdepth; i++ {
-		fmt.Fprint(s, ".   ")
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/gen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/gen.go
deleted file mode 100644
index 7e54745..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/gen.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/gen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/gen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Portable half of code generator; mainly statements and control flow.
-
-package gc
-
-import "fmt"
-
-func Sysfunc(name string) *Node {
-	n := newname(Pkglookup(name, Runtimepkg))
-	n.Class = PFUNC
-	return n
-}
-
-// addrescapes tags node n as having had its address taken
-// by "increasing" the "value" of n.Esc to EscHeap.
-// Storage is allocated as necessary to allow the address
-// to be taken.
-func addrescapes(n *Node) {
-	switch n.Op {
-	// probably a type error already.
-	// dump("addrescapes", n);
-	default:
-		break
-
-	case ONAME:
-		if n == nodfp {
-			break
-		}
-
-		// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
-		// on PPARAM it means something different.
-		if n.Class == PAUTO && n.Esc == EscNever {
-			break
-		}
-
-		// If a closure reference escapes, mark the outer variable as escaping.
-		if n.isClosureVar() {
-			addrescapes(n.Name.Defn)
-			break
-		}
-
-		if n.Class != PPARAM && n.Class != PPARAMOUT && n.Class != PAUTO {
-			break
-		}
-
-		// This is a plain parameter or local variable that needs to move to the heap,
-		// but possibly for the function outside the one we're compiling.
-		// That is, if we have:
-		//
-		//	func f(x int) {
-		//		func() {
-		//			global = &x
-		//		}
-		//	}
-		//
-		// then we're analyzing the inner closure but we need to move x to the
-		// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
-		oldfn := Curfn
-		Curfn = n.Name.Curfn
-		if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
-			Curfn = Curfn.Func.Closure
-		}
-		ln := lineno
-		lineno = Curfn.Lineno
-		moveToHeap(n)
-		Curfn = oldfn
-		lineno = ln
-
-	case OIND, ODOTPTR:
-		break
-
-	// ODOTPTR has already been introduced,
-	// so these are the non-pointer ODOT and OINDEX.
-	// In &x[0], if x is a slice, then x does not
-	// escape--the pointer inside x does, but that
-	// is always a heap pointer anyway.
-	case ODOT, OINDEX, OPAREN, OCONVNOP:
-		if !n.Left.Type.IsSlice() {
-			addrescapes(n.Left)
-		}
-	}
-}
-
-// isParamStackCopy reports whether this is the on-stack copy of a
-// function parameter that moved to the heap.
-func (n *Node) isParamStackCopy() bool {
-	return n.Op == ONAME && (n.Class == PPARAM || n.Class == PPARAMOUT) && n.Name.Heapaddr != nil
-}
-
-// isParamHeapCopy reports whether this is the on-heap copy of
-// a function parameter that moved to the heap.
-func (n *Node) isParamHeapCopy() bool {
-	return n.Op == ONAME && n.Class == PAUTOHEAP && n.Name.Param.Stackcopy != nil
-}
-
-// moveToHeap records the parameter or local variable n as moved to the heap.
-func moveToHeap(n *Node) {
-	if Debug['r'] != 0 {
-		Dump("MOVE", n)
-	}
-	if compiling_runtime {
-		yyerror("%v escapes to heap, not allowed in runtime.", n)
-	}
-	if n.Class == PAUTOHEAP {
-		Dump("n", n)
-		Fatalf("double move to heap")
-	}
-
-	// Allocate a local stack variable to hold the pointer to the heap copy.
-	// temp will add it to the function declaration list automatically.
-	heapaddr := temp(ptrto(n.Type))
-	heapaddr.Sym = lookup("&" + n.Sym.Name)
-	heapaddr.Orig.Sym = heapaddr.Sym
-
-	// Unset AutoTemp to persist the &foo variable name through SSA to
-	// liveness analysis.
-	// TODO(mdempsky/drchase): Cleaner solution?
-	heapaddr.Name.AutoTemp = false
-
-	// Parameters have a local stack copy used at function start/end
-	// in addition to the copy in the heap that may live longer than
-	// the function.
-	if n.Class == PPARAM || n.Class == PPARAMOUT {
-		if n.Xoffset == BADWIDTH {
-			Fatalf("addrescapes before param assignment")
-		}
-
-		// We rewrite n below to be a heap variable (indirection of heapaddr).
-		// Preserve a copy so we can still write code referring to the original,
-		// and substitute that copy into the function declaration list
-		// so that analyses of the local (on-stack) variables use it.
-		stackcopy := nod(ONAME, nil, nil)
-		stackcopy.Sym = n.Sym
-		stackcopy.Type = n.Type
-		stackcopy.Xoffset = n.Xoffset
-		stackcopy.Class = n.Class
-		stackcopy.Name.Heapaddr = heapaddr
-		if n.Class == PPARAMOUT {
-			// Make sure the pointer to the heap copy is kept live throughout the function.
-			// The function could panic at any point, and then a defer could recover.
-			// Thus, we need the pointer to the heap copy always available so the
-			// post-deferreturn code can copy the return value back to the stack.
-			// See issue 16095.
-			heapaddr.setIsOutputParamHeapAddr(true)
-		}
-		n.Name.Param.Stackcopy = stackcopy
-
-		// Substitute the stackcopy into the function variable list so that
-		// liveness and other analyses use the underlying stack slot
-		// and not the now-pseudo-variable n.
-		found := false
-		for i, d := range Curfn.Func.Dcl {
-			if d == n {
-				Curfn.Func.Dcl[i] = stackcopy
-				found = true
-				break
-			}
-			// Parameters are before locals, so can stop early.
-			// This limits the search even in functions with many local variables.
-			if d.Class == PAUTO {
-				break
-			}
-		}
-		if !found {
-			Fatalf("cannot find %v in local variable list", n)
-		}
-		Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	}
-
-	// Modify n in place so that uses of n now mean indirection of the heapaddr.
-	n.Class = PAUTOHEAP
-	n.Ullman = 2
-	n.Xoffset = 0
-	n.Name.Heapaddr = heapaddr
-	n.Esc = EscHeap
-	if Debug['m'] != 0 {
-		fmt.Printf("%v: moved to heap: %v\n", n.Line(), n)
-	}
-}
-
-// make a new Node off the books
-func tempname(nn *Node, t *Type) {
-	if Curfn == nil {
-		Fatalf("no curfn for tempname")
-	}
-	if Curfn.Func.Closure != nil && Curfn.Op == OCLOSURE {
-		Dump("tempname", Curfn)
-		Fatalf("adding tempname to wrong closure function")
-	}
-
-	if t == nil {
-		yyerror("tempname called with nil type")
-		t = Types[TINT32]
-	}
-
-	// give each tmp a different name so that there
-	// a chance to registerizer them.
-	// Add a preceding . to avoid clash with legal names.
-	s := lookupN(".autotmp_", statuniqgen)
-	statuniqgen++
-	n := nod(ONAME, nil, nil)
-	n.Sym = s
-	s.Def = n
-	n.Type = t
-	n.Class = PAUTO
-	n.Addable = true
-	n.Ullman = 1
-	n.Esc = EscNever
-	n.Name.Curfn = Curfn
-	n.Name.AutoTemp = true
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-
-	dowidth(t)
-	n.Xoffset = 0
-	*nn = *n
-}
-
-func temp(t *Type) *Node {
-	var n Node
-	tempname(&n, t)
-	n.Sym.Def.Used = true
-	return n.Orig
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/global_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/global_test.go
deleted file mode 100644
index ae86ded..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/global_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/global_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/global_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"internal/testenv"
-	"io/ioutil"
-	"log"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"testing"
-)
-
-// Make sure "hello world" does not link in all the
-// fmt.scanf routines. See issue 6853.
-func TestScanfRemoval(t *testing.T) {
-	testenv.MustHaveGoBuild(t)
-
-	// Make a directory to work in.
-	dir, err := ioutil.TempDir("", "issue6853a-")
-	if err != nil {
-		log.Fatalf("could not create directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	// Create source.
-	src := filepath.Join(dir, "test.go")
-	f, err := os.Create(src)
-	if err != nil {
-		log.Fatalf("could not create source file: %v", err)
-	}
-	f.Write([]byte(`
-package main
-import "fmt"
-func main() {
-	fmt.Println("hello world")
-}
-`))
-	f.Close()
-
-	// Name of destination.
-	dst := filepath.Join(dir, "test")
-
-	// Compile source.
-	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dst, src)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		log.Fatalf("could not build target: %v", err)
-	}
-
-	// Check destination to see if scanf code was included.
-	cmd = exec.Command(testenv.GoToolPath(t), "tool", "nm", dst)
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		log.Fatalf("could not read target: %v", err)
-	}
-	if bytes.Contains(out, []byte("scanInt")) {
-		log.Fatalf("scanf code not removed from helloworld")
-	}
-}
-
-// Make sure -S prints assembly code. See issue 14515.
-func TestDashS(t *testing.T) {
-	testenv.MustHaveGoBuild(t)
-
-	// Make a directory to work in.
-	dir, err := ioutil.TempDir("", "issue14515-")
-	if err != nil {
-		log.Fatalf("could not create directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	// Create source.
-	src := filepath.Join(dir, "test.go")
-	f, err := os.Create(src)
-	if err != nil {
-		log.Fatalf("could not create source file: %v", err)
-	}
-	f.Write([]byte(`
-package main
-import "fmt"
-func main() {
-	fmt.Println("hello world")
-}
-`))
-	f.Close()
-
-	// Compile source.
-	cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags", "-S", "-o", filepath.Join(dir, "test"), src)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		log.Fatalf("could not build target: %v", err)
-	}
-
-	patterns := []string{
-		// It is hard to look for actual instructions in an
-		// arch-independent way. So we'll just look for
-		// pseudo-ops that are arch-independent.
-		"\tTEXT\t",
-		"\tFUNCDATA\t",
-		"\tPCDATA\t",
-	}
-	outstr := string(out)
-	for _, p := range patterns {
-		if !strings.Contains(outstr, p) {
-			println(outstr)
-			panic("can't find pattern " + p)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/go.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/go.go
deleted file mode 100644
index f221cc5..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/go.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/go.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/go.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-)
-
-const (
-	UINF            = 100
-	BADWIDTH        = -1000000000
-	MaxStackVarSize = 10 * 1024 * 1024
-)
-
-type Pkg struct {
-	Name     string // package name, e.g. "sys"
-	Path     string // string literal used in import statement, e.g. "runtime/internal/sys"
-	Pathsym  *obj.LSym
-	Prefix   string // escaped path for use in symbol table
-	Imported bool   // export data of this package was parsed
-	Direct   bool   // imported directly
-	Syms     map[string]*Sym
-}
-
-// Sym represents an object name. Most commonly, this is a Go identifier naming
-// an object declared within a package, but Syms are also used to name internal
-// synthesized objects.
-//
-// As an exception, field and method names that are exported use the Sym
-// associated with localpkg instead of the package that declared them. This
-// allows using Sym pointer equality to test for Go identifier uniqueness when
-// handling selector expressions.
-type Sym struct {
-	Flags     SymFlags
-	Link      *Sym
-	Importdef *Pkg   // where imported definition was found
-	Linkname  string // link name
-
-	// saved and restored by dcopy
-	Pkg        *Pkg
-	Name       string // object name
-	Def        *Node  // definition: ONAME OTYPE OPACK or OLITERAL
-	Block      int32  // blocknumber to catch redeclaration
-	Lastlineno int32  // last declaration for diagnostic
-
-	Label   *Node // corresponding label (ephemeral)
-	Origpkg *Pkg  // original package for . import
-	Lsym    *obj.LSym
-	Fsym    *Sym // funcsym
-}
-
-type SymFlags uint8
-
-const (
-	SymExport SymFlags = 1 << iota // to be exported
-	SymPackage
-	SymExported // already written out by export
-	SymUniq
-	SymSiggen
-	SymAsm
-	SymAlgGen
-	SymAlias // alias, original is Sym.Def.Sym
-)
-
-// The Class of a variable/function describes the "storage class"
-// of a variable or function. During parsing, storage classes are
-// called declaration contexts.
-type Class uint8
-
-const (
-	Pxxx      Class = iota
-	PEXTERN         // global variable
-	PAUTO           // local variables
-	PAUTOHEAP       // local variable or parameter moved to heap
-	PPARAM          // input arguments
-	PPARAMOUT       // output results
-	PFUNC           // global function
-
-	PDISCARD // discard during parse of duplicate import
-)
-
-// note this is the runtime representation
-// of the compilers arrays.
-//
-// typedef	struct
-// {					// must not move anything
-// 	uchar	array[8];	// pointer to data
-// 	uchar	nel[4];		// number of elements
-// 	uchar	cap[4];		// allocated number of elements
-// } Array;
-var array_array int // runtime offsetof(Array,array) - same for String
-
-var array_nel int // runtime offsetof(Array,nel) - same for String
-
-var array_cap int // runtime offsetof(Array,cap)
-
-var sizeof_Array int // runtime sizeof(Array)
-
-// note this is the runtime representation
-// of the compilers strings.
-//
-// typedef	struct
-// {					// must not move anything
-// 	uchar	array[8];	// pointer to data
-// 	uchar	nel[4];		// number of elements
-// } String;
-var sizeof_String int // runtime sizeof(String)
-
-var pragcgobuf string
-
-var infile string
-
-var outfile string
-var linkobj string
-
-var bout *bio.Writer
-
-// nerrors is the number of compiler errors reported
-// since the last call to saveerrors.
-var nerrors int
-
-// nsavederrors is the total number of compiler errors
-// reported before the last call to saveerrors.
-var nsavederrors int
-
-var nsyntaxerrors int
-
-var decldepth int32
-
-var safemode bool
-
-var nolocalimports bool
-
-var Debug [256]int
-
-var debugstr string
-
-var Debug_checknil int
-var Debug_typeassert int
-
-var localpkg *Pkg // package being compiled
-
-var importpkg *Pkg // package being imported
-
-var itabpkg *Pkg // fake pkg for itab entries
-
-var itablinkpkg *Pkg // fake package for runtime itab entries
-
-var Runtimepkg *Pkg // package runtime
-
-var racepkg *Pkg // package runtime/race
-
-var msanpkg *Pkg // package runtime/msan
-
-var typepkg *Pkg // fake package for runtime type info (headers)
-
-var unsafepkg *Pkg // package unsafe
-
-var trackpkg *Pkg // fake package for field tracking
-
-var mappkg *Pkg // fake package for map zero value
-var zerosize int64
-
-var Tptr EType // either TPTR32 or TPTR64
-
-var myimportpath string
-
-var localimport string
-
-var asmhdr string
-
-var simtype [NTYPE]EType
-
-var (
-	isforw    [NTYPE]bool
-	isInt     [NTYPE]bool
-	isFloat   [NTYPE]bool
-	isComplex [NTYPE]bool
-	issimple  [NTYPE]bool
-)
-
-var (
-	okforeq    [NTYPE]bool
-	okforadd   [NTYPE]bool
-	okforand   [NTYPE]bool
-	okfornone  [NTYPE]bool
-	okforcmp   [NTYPE]bool
-	okforbool  [NTYPE]bool
-	okforcap   [NTYPE]bool
-	okforlen   [NTYPE]bool
-	okforarith [NTYPE]bool
-	okforconst [NTYPE]bool
-)
-
-var (
-	okfor [OEND][]bool
-	iscmp [OEND]bool
-)
-
-var minintval [NTYPE]*Mpint
-
-var maxintval [NTYPE]*Mpint
-
-var minfltval [NTYPE]*Mpflt
-
-var maxfltval [NTYPE]*Mpflt
-
-var xtop []*Node
-
-var exportlist []*Node
-
-var importlist []*Node // imported functions and methods with inlinable bodies
-
-var funcsyms []*Node
-
-var dclcontext Class // PEXTERN/PAUTO
-
-var statuniqgen int // name generator for static temps
-
-var iota_ int64
-
-var lastconst []*Node
-
-var lasttype *Node
-
-var Maxarg int64
-
-var Stksize int64 // stack size for current frame
-
-var stkptrsize int64 // prefix of stack containing pointers
-
-var hasdefer bool // flag that curfn has defer statement
-
-var Curfn *Node
-
-var Widthptr int
-
-var Widthint int
-
-var Widthreg int
-
-var nblank *Node
-
-var typecheckok bool
-
-var compiling_runtime bool
-
-var compiling_wrappers int
-
-var use_writebarrier bool
-
-var pure_go bool
-
-var flag_installsuffix string
-
-var flag_race bool
-
-var flag_msan bool
-
-var flag_largemodel bool
-
-// Whether we are adding any sort of code instrumentation, such as
-// when the race detector is enabled.
-var instrumenting bool
-
-var debuglive int
-
-var Ctxt *obj.Link
-
-var writearchive bool
-
-var Nacl bool
-
-var pc *obj.Prog
-
-var nodfp *Node
-
-var disable_checknil int
-
-// interface to back end
-
-const (
-	// Pseudo-op, like TEXT, GLOBL, TYPE, PCDATA, FUNCDATA.
-	Pseudo = 1 << 1
-
-	// There's nothing to say about the instruction,
-	// but it's still okay to see.
-	OK = 1 << 2
-
-	// Size of right-side write, or right-side read if no write.
-	SizeB = 1 << 3
-	SizeW = 1 << 4
-	SizeL = 1 << 5
-	SizeQ = 1 << 6
-	SizeF = 1 << 7
-	SizeD = 1 << 8
-
-	// Left side (Prog.from): address taken, read, write.
-	LeftAddr  = 1 << 9
-	LeftRead  = 1 << 10
-	LeftWrite = 1 << 11
-
-	// Register in middle (Prog.reg); only ever read. (arm, ppc64)
-	RegRead    = 1 << 12
-	CanRegRead = 1 << 13
-
-	// Right side (Prog.to): address taken, read, write.
-	RightAddr  = 1 << 14
-	RightRead  = 1 << 15
-	RightWrite = 1 << 16
-
-	// Instruction kinds
-	Move  = 1 << 17 // straight move
-	Conv  = 1 << 18 // size conversion
-	Cjmp  = 1 << 19 // conditional jump
-	Break = 1 << 20 // breaks control flow (no fallthrough)
-	Call  = 1 << 21 // function call
-	Jump  = 1 << 22 // jump
-	Skip  = 1 << 23 // data instruction
-
-	// Set, use, or kill of carry bit.
-	// Kill means we never look at the carry bit after this kind of instruction.
-	// Originally for understanding ADC, RCR, and so on, but now also
-	// tracks set, use, and kill of the zero and overflow bits as well.
-	// TODO rename to {Set,Use,Kill}Flags
-	SetCarry  = 1 << 24
-	UseCarry  = 1 << 25
-	KillCarry = 1 << 26
-
-	// Special cases for register use. (amd64, 386)
-	ShiftCX  = 1 << 27 // possible shift by CX
-	ImulAXDX = 1 << 28 // possible multiply into DX:AX
-
-	// Instruction updates whichever of from/to is type D_OREG. (ppc64)
-	PostInc = 1 << 29
-
-	// Optional 3rd input operand, only ever read.
-	From3Read = 1 << 30
-)
-
-type Arch struct {
-	LinkArch *obj.LinkArch
-
-	REGSP    int
-	MAXWIDTH int64
-
-	Defframe func(*obj.Prog)
-	Proginfo func(*obj.Prog) ProgInfo
-	Use387   bool // should 8g use 387 FP instructions instead of sse2.
-
-	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
-	SSAMarkMoves func(*SSAGenState, *ssa.Block)
-
-	// SSAGenValue emits Prog(s) for the Value.
-	SSAGenValue func(*SSAGenState, *ssa.Value)
-
-	// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
-	// for all values in the block before SSAGenBlock.
-	SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
-}
-
-var pcloc int32
-
-var Thearch Arch
-
-var (
-	Newproc,
-	Deferproc,
-	Deferreturn,
-	panicindex,
-	panicslice,
-	panicdivide,
-	growslice,
-	panicdottype,
-	panicnildottype,
-	assertE2I,
-	assertE2I2,
-	assertI2I,
-	assertI2I2 *Node
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/gsubr.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/gsubr.go
deleted file mode 100644
index 9ff6ac8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/gsubr.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/gsubr.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/gsubr.go:1
-// Derived from Inferno utils/6c/txt.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/txt.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package gc
-
-import "bootstrap/cmd/internal/obj"
-
-func Prog(as obj.As) *obj.Prog {
-	var p *obj.Prog
-
-	p = pc
-	pc = Ctxt.NewProg()
-	Clearp(pc)
-	p.Link = pc
-
-	if lineno == 0 && Debug['K'] != 0 {
-		Warn("prog: line 0")
-	}
-
-	p.As = as
-	p.Lineno = lineno
-	return p
-}
-
-func Clearp(p *obj.Prog) {
-	obj.Nopout(p)
-	p.As = obj.AEND
-	p.Pc = int64(pcloc)
-	pcloc++
-}
-
-func Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
-	q := Ctxt.NewProg()
-	Clearp(q)
-	q.As = as
-	q.Lineno = p.Lineno
-	q.From.Type = ftype
-	q.From.Reg = freg
-	q.From.Offset = foffset
-	q.To.Type = ttype
-	q.To.Reg = treg
-	q.To.Offset = toffset
-	q.Link = p.Link
-	p.Link = q
-	return q
-}
-
-func ggloblnod(nam *Node) {
-	s := Linksym(nam.Sym)
-	s.Gotype = Linksym(ngotype(nam))
-	flags := 0
-	if nam.Name.Readonly {
-		flags = obj.RODATA
-	}
-	if nam.Type != nil && !haspointers(nam.Type) {
-		flags |= obj.NOPTR
-	}
-	Ctxt.Globl(s, nam.Type.Width, flags)
-}
-
-func ggloblsym(s *Sym, width int32, flags int16) {
-	ggloblLSym(Linksym(s), width, flags)
-}
-
-func ggloblLSym(s *obj.LSym, width int32, flags int16) {
-	if flags&obj.LOCAL != 0 {
-		s.Set(obj.AttrLocal, true)
-		flags &^= obj.LOCAL
-	}
-	Ctxt.Globl(s, int64(width), int(flags))
-}
-
-func gtrack(s *Sym) {
-	p := Gins(obj.AUSEFIELD, nil, nil)
-	p.From.Type = obj.TYPE_MEM
-	p.From.Name = obj.NAME_EXTERN
-	p.From.Sym = Linksym(s)
-}
-
-func isfat(t *Type) bool {
-	if t != nil {
-		switch t.Etype {
-		case TSTRUCT, TARRAY, TSLICE, TSTRING,
-			TINTER: // maybe remove later
-			return true
-		}
-	}
-
-	return false
-}
-
-// Naddr rewrites a to refer to n.
-// It assumes that a is zeroed on entry.
-func Naddr(a *obj.Addr, n *Node) {
-	if n == nil {
-		return
-	}
-
-	if n.Op != ONAME {
-		Debug['h'] = 1
-		Dump("naddr", n)
-		Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))
-	}
-
-	a.Offset = n.Xoffset
-	s := n.Sym
-	a.Node = n.Orig
-
-	if s == nil {
-		Fatalf("naddr: nil sym %v", n)
-	}
-
-	a.Type = obj.TYPE_MEM
-	switch n.Class {
-	default:
-		Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)
-
-	case PEXTERN, PFUNC:
-		a.Name = obj.NAME_EXTERN
-
-	case PAUTO:
-		a.Name = obj.NAME_AUTO
-
-	case PPARAM, PPARAMOUT:
-		a.Name = obj.NAME_PARAM
-	}
-
-	a.Sym = Linksym(s)
-}
-
-func Addrconst(a *obj.Addr, v int64) {
-	a.Sym = nil
-	a.Type = obj.TYPE_CONST
-	a.Offset = v
-}
-
-func newplist() *obj.Plist {
-	pl := obj.Linknewplist(Ctxt)
-
-	pc = Ctxt.NewProg()
-	Clearp(pc)
-	pl.Firstpc = pc
-
-	return pl
-}
-
-// nodarg returns a Node for the function argument denoted by t,
-// which is either the entire function argument or result struct (t is a  struct *Type)
-// or a specific argument (t is a *Field within a struct *Type).
-//
-// If fp is 0, the node is for use by a caller invoking the given
-// function, preparing the arguments before the call
-// or retrieving the results after the call.
-// In this case, the node will correspond to an outgoing argument
-// slot like 8(SP).
-//
-// If fp is 1, the node is for use by the function itself
-// (the callee), to retrieve its arguments or write its results.
-// In this case the node will be an ONAME with an appropriate
-// type and offset.
-func nodarg(t interface{}, fp int) *Node {
-	var n *Node
-
-	var funarg Funarg
-	switch t := t.(type) {
-	default:
-		Fatalf("bad nodarg %T(%v)", t, t)
-
-	case *Type:
-		// Entire argument struct, not just one arg
-		if !t.IsFuncArgStruct() {
-			Fatalf("nodarg: bad type %v", t)
-		}
-		funarg = t.StructType().Funarg
-
-		// Build fake variable name for whole arg struct.
-		n = nod(ONAME, nil, nil)
-		n.Sym = lookup(".args")
-		n.Type = t
-		first := t.Field(0)
-		if first == nil {
-			Fatalf("nodarg: bad struct")
-		}
-		if first.Offset == BADWIDTH {
-			Fatalf("nodarg: offset not computed for %v", t)
-		}
-		n.Xoffset = first.Offset
-		n.Addable = true
-
-	case *Field:
-		funarg = t.Funarg
-		if fp == 1 {
-			// NOTE(rsc): This should be using t.Nname directly,
-			// except in the case where t.Nname.Sym is the blank symbol and
-			// so the assignment would be discarded during code generation.
-			// In that case we need to make a new node, and there is no harm
-			// in optimization passes to doing so. But otherwise we should
-			// definitely be using the actual declaration and not a newly built node.
-			// The extra Fatalf checks here are verifying that this is the case,
-			// without changing the actual logic (at time of writing, it's getting
-			// toward time for the Go 1.7 beta).
-			// At some quieter time (assuming we've never seen these Fatalfs happen)
-			// we could change this code to use "expect" directly.
-			expect := t.Nname
-			if expect.isParamHeapCopy() {
-				expect = expect.Name.Param.Stackcopy
-			}
-
-			for _, n := range Curfn.Func.Dcl {
-				if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
-					if n != expect {
-						Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, t.Nname, t.Nname, t.Nname.Op)
-					}
-					return n
-				}
-			}
-
-			if !isblanksym(expect.Sym) {
-				Fatalf("nodarg: did not find node in dcl list: %v", expect)
-			}
-		}
-
-		// Build fake name for individual variable.
-		// This is safe because if there was a real declared name
-		// we'd have used it above.
-		n = nod(ONAME, nil, nil)
-		n.Type = t.Type
-		n.Sym = t.Sym
-		if t.Offset == BADWIDTH {
-			Fatalf("nodarg: offset not computed for %v", t)
-		}
-		n.Xoffset = t.Offset
-		n.Addable = true
-		n.Orig = t.Nname
-	}
-
-	// Rewrite argument named _ to __,
-	// or else the assignment to _ will be
-	// discarded during code generation.
-	if isblank(n) {
-		n.Sym = lookup("__")
-	}
-
-	switch fp {
-	default:
-		Fatalf("bad fp")
-
-	case 0: // preparing arguments for call
-		n.Op = OINDREGSP
-		n.Xoffset += Ctxt.FixedFrameSize()
-
-	case 1: // reading arguments inside call
-		n.Class = PPARAM
-		if funarg == FunargResults {
-			n.Class = PPARAMOUT
-		}
-	}
-
-	n.Typecheck = 1
-	n.Addrtaken = true // keep optimizers at bay
-	return n
-}
-
-func Patch(p *obj.Prog, to *obj.Prog) {
-	if p.To.Type != obj.TYPE_BRANCH {
-		Fatalf("patch: not a branch")
-	}
-	p.To.Val = to
-	p.To.Offset = to.Pc
-}
-
-// Gins inserts instruction as. f is from, t is to.
-func Gins(as obj.As, f, t *Node) *obj.Prog {
-	switch as {
-	case obj.AVARKILL, obj.AVARLIVE, obj.AVARDEF, obj.ATYPE,
-		obj.ATEXT, obj.AFUNCDATA, obj.AUSEFIELD:
-	default:
-		Fatalf("unhandled gins op %v", as)
-	}
-
-	p := Prog(as)
-	Naddr(&p.From, f)
-	Naddr(&p.To, t)
-	return p
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/iface_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/iface_test.go
deleted file mode 100644
index 978a5ba..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/iface_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/iface_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/iface_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// Test to make sure we make copies of the values we
-// put in interfaces.
-
-import (
-	"testing"
-)
-
-var x int
-
-func TestEfaceConv1(t *testing.T) {
-	a := 5
-	i := interface{}(a)
-	a += 2
-	if got := i.(int); got != 5 {
-		t.Errorf("wanted 5, got %d\n", got)
-	}
-}
-
-func TestEfaceConv2(t *testing.T) {
-	a := 5
-	sink = &a
-	i := interface{}(a)
-	a += 2
-	if got := i.(int); got != 5 {
-		t.Errorf("wanted 5, got %d\n", got)
-	}
-}
-
-func TestEfaceConv3(t *testing.T) {
-	x = 5
-	if got := e2int3(x); got != 5 {
-		t.Errorf("wanted 5, got %d\n", got)
-	}
-}
-
-//go:noinline
-func e2int3(i interface{}) int {
-	x = 7
-	return i.(int)
-}
-
-func TestEfaceConv4(t *testing.T) {
-	a := 5
-	if got := e2int4(a, &a); got != 5 {
-		t.Errorf("wanted 5, got %d\n", got)
-	}
-}
-
-//go:noinline
-func e2int4(i interface{}, p *int) int {
-	*p = 7
-	return i.(int)
-}
-
-type Int int
-
-var y Int
-
-type I interface {
-	foo()
-}
-
-func (i Int) foo() {
-}
-
-func TestIfaceConv1(t *testing.T) {
-	a := Int(5)
-	i := interface{}(a)
-	a += 2
-	if got := i.(Int); got != 5 {
-		t.Errorf("wanted 5, got %d\n", int(got))
-	}
-}
-
-func TestIfaceConv2(t *testing.T) {
-	a := Int(5)
-	sink = &a
-	i := interface{}(a)
-	a += 2
-	if got := i.(Int); got != 5 {
-		t.Errorf("wanted 5, got %d\n", int(got))
-	}
-}
-
-func TestIfaceConv3(t *testing.T) {
-	y = 5
-	if got := i2Int3(y); got != 5 {
-		t.Errorf("wanted 5, got %d\n", int(got))
-	}
-}
-
-//go:noinline
-func i2Int3(i I) Int {
-	y = 7
-	return i.(Int)
-}
-
-func TestIfaceConv4(t *testing.T) {
-	a := Int(5)
-	if got := i2Int4(a, &a); got != 5 {
-		t.Errorf("wanted 5, got %d\n", int(got))
-	}
-}
-
-//go:noinline
-func i2Int4(i I, p *Int) Int {
-	*p = 7
-	return i.(Int)
-}
-
-func BenchmarkEfaceInteger(b *testing.B) {
-	sum := 0
-	for i := 0; i < b.N; i++ {
-		sum += i2int(i)
-	}
-	sink = sum
-}
-
-//go:noinline
-func i2int(i interface{}) int {
-	return i.(int)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/init.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/init.go
deleted file mode 100644
index 1b7a73c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/init.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/init.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/init.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// a function named init is a special case.
-// it is called by the initialization before
-// main is run. to make it unique within a
-// package and also uncallable, the name,
-// normally "pkg.init", is altered to "pkg.init.1".
-
-var renameinit_initgen int
-
-func renameinit() *Sym {
-	renameinit_initgen++
-	return lookupN("init.", renameinit_initgen)
-}
-
-// hand-craft the following initialization code
-//      var initdone· uint8                             (1)
-//      func init() {                                   (2)
-//              if initdone· > 1 {                      (3)
-//                      return                          (3a)
-//              }
-//              if initdone· == 1 {                     (4)
-//                      throw()                         (4a)
-//              }
-//              initdone· = 1                           (5)
-//              // over all matching imported symbols
-//                      <pkg>.init()                    (6)
-//              { <init stmts> }                        (7)
-//              init.<n>() // if any                    (8)
-//              initdone· = 2                           (9)
-//              return                                  (10)
-//      }
-func anyinit(n []*Node) bool {
-	// are there any interesting init statements
-	for _, ln := range n {
-		switch ln.Op {
-		case ODCLFUNC, ODCLCONST, ODCLTYPE, OEMPTY:
-			break
-
-		case OAS, OASWB:
-			if isblank(ln.Left) && candiscard(ln.Right) {
-				break
-			}
-			fallthrough
-		default:
-			return true
-		}
-	}
-
-	// is this main
-	if localpkg.Name == "main" {
-		return true
-	}
-
-	// is there an explicit init function
-	s := lookup("init.1")
-
-	if s.Def != nil {
-		return true
-	}
-
-	// are there any imported init functions
-	for _, s := range initSyms {
-		if s.Def != nil {
-			return true
-		}
-	}
-
-	// then none
-	return false
-}
-
-func fninit(n []*Node) {
-	nf := initfix(n)
-	if !anyinit(nf) {
-		return
-	}
-
-	var r []*Node
-
-	// (1)
-	gatevar := newname(lookup("initdone·"))
-	addvar(gatevar, Types[TUINT8], PEXTERN)
-
-	// (2)
-	Maxarg = 0
-
-	fn := nod(ODCLFUNC, nil, nil)
-	initsym := lookup("init")
-	fn.Func.Nname = newname(initsym)
-	fn.Func.Nname.Name.Defn = fn
-	fn.Func.Nname.Name.Param.Ntype = nod(OTFUNC, nil, nil)
-	declare(fn.Func.Nname, PFUNC)
-	funchdr(fn)
-
-	// (3)
-	a := nod(OIF, nil, nil)
-	a.Left = nod(OGT, gatevar, nodintconst(1))
-	a.Likely = 1
-	r = append(r, a)
-	// (3a)
-	a.Nbody.Set1(nod(ORETURN, nil, nil))
-
-	// (4)
-	b := nod(OIF, nil, nil)
-	b.Left = nod(OEQ, gatevar, nodintconst(1))
-	// this actually isn't likely, but code layout is better
-	// like this: no JMP needed after the call.
-	b.Likely = 1
-	r = append(r, b)
-	// (4a)
-	b.Nbody.Set1(nod(OCALL, syslook("throwinit"), nil))
-
-	// (5)
-	a = nod(OAS, gatevar, nodintconst(1))
-
-	r = append(r, a)
-
-	// (6)
-	for _, s := range initSyms {
-		if s.Def != nil && s != initsym {
-			// could check that it is fn of no args/returns
-			a = nod(OCALL, s.Def, nil)
-			r = append(r, a)
-		}
-	}
-
-	// (7)
-	r = append(r, nf...)
-
-	// (8)
-	// could check that it is fn of no args/returns
-	for i := 1; ; i++ {
-		s := lookupN("init.", i)
-		if s.Def == nil {
-			break
-		}
-		a = nod(OCALL, s.Def, nil)
-		r = append(r, a)
-	}
-
-	// (9)
-	a = nod(OAS, gatevar, nodintconst(2))
-
-	r = append(r, a)
-
-	// (10)
-	a = nod(ORETURN, nil, nil)
-
-	r = append(r, a)
-	exportsym(fn.Func.Nname)
-
-	fn.Nbody.Set(r)
-	funcbody(fn)
-
-	Curfn = fn
-	fn = typecheck(fn, Etop)
-	typecheckslice(r, Etop)
-	Curfn = nil
-	funccompile(fn)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/inl.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/inl.go
deleted file mode 100644
index 958a7b3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/inl.go
+++ /dev/null
@@ -1,1046 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/inl.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/inl.go:1
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-// The inlining facility makes 2 passes: first caninl determines which
-// functions are suitable for inlining, and for those that are it
-// saves a copy of the body. Then inlcalls walks each function body to
-// expand calls to inlinable functions.
-//
-// The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1,
-// making 1 the default and -l disable.  -ll and more is useful to flush out bugs.
-// These additional levels (beyond -l) may be buggy and are not supported.
-//      0: disabled
-//      1: 40-nodes leaf functions, oneliners, lazy typechecking (default)
-//      2: early typechecking of all imported bodies
-//      3: allow variadic functions
-//      4: allow non-leaf functions , (breaks runtime.Caller)
-//
-//  At some point this may get another default and become switch-offable with -N.
-//
-//  The debug['m'] flag enables diagnostic output.  a single -m is useful for verifying
-//  which calls get inlined or not, more is for debugging, and may go away at any point.
-//
-// TODO:
-//   - inline functions with ... args
-//   - handle T.meth(f()) with func f() (t T, arg, arg, )
-
-package gc
-
-import "fmt"
-
-// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
-// the ->sym can be re-used in the local package, so peel it off the receiver's type.
-func fnpkg(fn *Node) *Pkg {
-	if fn.IsMethod() {
-		// method
-		rcvr := fn.Type.Recv().Type
-
-		if rcvr.IsPtr() {
-			rcvr = rcvr.Elem()
-		}
-		if rcvr.Sym == nil {
-			Fatalf("receiver with no sym: [%v] %L  (%v)", fn.Sym, fn, rcvr)
-		}
-		return rcvr.Sym.Pkg
-	}
-
-	// non-method
-	return fn.Sym.Pkg
-}
-
-// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
-// because they're a copy of an already checked body.
-func typecheckinl(fn *Node) {
-	lno := setlineno(fn)
-
-	// typecheckinl is only for imported functions;
-	// their bodies may refer to unsafe as long as the package
-	// was marked safe during import (which was checked then).
-	// the ->inl of a local function has been typechecked before caninl copied it.
-	pkg := fnpkg(fn)
-
-	if pkg == localpkg || pkg == nil {
-		return // typecheckinl on local function
-	}
-
-	if Debug['m'] > 2 || Debug_export != 0 {
-		fmt.Printf("typecheck import [%v] %L { %#v }\n", fn.Sym, fn, fn.Func.Inl)
-	}
-
-	save_safemode := safemode
-	safemode = false
-
-	savefn := Curfn
-	Curfn = fn
-	typecheckslice(fn.Func.Inl.Slice(), Etop)
-	Curfn = savefn
-
-	safemode = save_safemode
-
-	lineno = lno
-}
-
-// Caninl determines whether fn is inlineable.
-// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
-// fn and ->nbody will already have been typechecked.
-func caninl(fn *Node) {
-	if fn.Op != ODCLFUNC {
-		Fatalf("caninl %v", fn)
-	}
-	if fn.Func.Nname == nil {
-		Fatalf("caninl no nname %+v", fn)
-	}
-
-	var reason string // reason, if any, that the function was not inlined
-	if Debug['m'] > 1 {
-		defer func() {
-			if reason != "" {
-				fmt.Printf("%v: cannot inline %v: %s\n", fn.Line(), fn.Func.Nname, reason)
-			}
-		}()
-	}
-
-	// If marked "go:noinline", don't inline
-	if fn.Func.Pragma&Noinline != 0 {
-		reason = "marked go:noinline"
-		return
-	}
-
-	// If marked "go:cgo_unsafe_args", don't inline
-	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
-		reason = "marked go:cgo_unsafe_args"
-		return
-	}
-
-	// If fn has no body (is defined outside of Go), cannot inline it.
-	if fn.Nbody.Len() == 0 {
-		reason = "no function body"
-		return
-	}
-
-	if fn.Typecheck == 0 {
-		Fatalf("caninl on non-typechecked function %v", fn)
-	}
-
-	// can't handle ... args yet
-	if Debug['l'] < 3 {
-		f := fn.Type.Params().Fields()
-		if len := f.Len(); len > 0 {
-			if t := f.Index(len - 1); t.Isddd {
-				reason = "has ... args"
-				return
-			}
-		}
-	}
-
-	// Runtime package must not be instrumented.
-	// Instrument skips runtime package. However, some runtime code can be
-	// inlined into other packages and instrumented there. To avoid this,
-	// we disable inlining of runtime functions when instrumenting.
-	// The example that we observed is inlining of LockOSThread,
-	// which lead to false race reports on m contents.
-	if instrumenting && myimportpath == "runtime" {
-		reason = "instrumenting and is runtime function"
-		return
-	}
-
-	const maxBudget = 80
-	budget := int32(maxBudget) // allowed hairyness
-	if ishairylist(fn.Nbody, &budget, &reason) {
-		return
-	}
-	if budget < 0 {
-		reason = "function too complex"
-		return
-	}
-
-	savefn := Curfn
-	Curfn = fn
-
-	n := fn.Func.Nname
-
-	n.Func.Inl.Set(fn.Nbody.Slice())
-	fn.Nbody.Set(inlcopylist(n.Func.Inl.Slice()))
-	inldcl := inlcopylist(n.Name.Defn.Func.Dcl)
-	n.Func.Inldcl.Set(inldcl)
-	n.Func.InlCost = maxBudget - budget
-
-	// hack, TODO, check for better way to link method nodes back to the thing with the ->inl
-	// this is so export can find the body of a method
-	fn.Type.SetNname(n)
-
-	if Debug['m'] > 1 {
-		fmt.Printf("%v: can inline %#v as: %#v { %#v }\n", fn.Line(), n, fn.Type, n.Func.Inl)
-	} else if Debug['m'] != 0 {
-		fmt.Printf("%v: can inline %v\n", fn.Line(), n)
-	}
-
-	Curfn = savefn
-}
-
-// Look for anything we want to punt on.
-func ishairylist(ll Nodes, budget *int32, reason *string) bool {
-	for _, n := range ll.Slice() {
-		if ishairy(n, budget, reason) {
-			return true
-		}
-	}
-	return false
-}
-
-func ishairy(n *Node, budget *int32, reason *string) bool {
-	if n == nil {
-		return false
-	}
-
-	switch n.Op {
-	// Call is okay if inlinable and we have the budget for the body.
-	case OCALLFUNC:
-		if fn := n.Left.Func; fn != nil && fn.Inl.Len() != 0 {
-			*budget -= fn.InlCost
-			break
-		}
-
-		if n.isMethodCalledAsFunction() {
-			if d := n.Left.Sym.Def; d != nil && d.Func.Inl.Len() != 0 {
-				*budget -= d.Func.InlCost
-				break
-			}
-		}
-		if Debug['l'] < 4 {
-			*reason = "non-leaf function"
-			return true
-		}
-
-	// Call is okay if inlinable and we have the budget for the body.
-	case OCALLMETH:
-		t := n.Left.Type
-		if t == nil {
-			Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
-		}
-		if t.Nname() == nil {
-			Fatalf("no function definition for [%p] %+v\n", t, t)
-		}
-		if inlfn := t.Nname().Func; inlfn.Inl.Len() != 0 {
-			*budget -= inlfn.InlCost
-			break
-		}
-		if Debug['l'] < 4 {
-			*reason = "non-leaf method"
-			return true
-		}
-
-	// Things that are too hairy, irrespective of the budget
-	case OCALL, OCALLINTER, OPANIC, ORECOVER:
-		if Debug['l'] < 4 {
-			*reason = "non-leaf op " + n.Op.String()
-			return true
-		}
-
-	case OCLOSURE,
-		OCALLPART,
-		ORANGE,
-		OFOR,
-		OSELECT,
-		OTYPESW,
-		OPROC,
-		ODEFER,
-		ODCLTYPE, // can't print yet
-		OBREAK,
-		ORETJMP:
-		*reason = "unhandled op " + n.Op.String()
-		return true
-	}
-
-	(*budget)--
-	// TODO(mdempsky/josharian): Hacks to appease toolstash; remove.
-	// See issue 17566 and CL 31674 for discussion.
-	switch n.Op {
-	case OSTRUCTKEY:
-		(*budget)--
-	case OSLICE, OSLICEARR, OSLICESTR:
-		(*budget)--
-	case OSLICE3, OSLICE3ARR:
-		*budget -= 2
-	}
-
-	return *budget < 0 || ishairy(n.Left, budget, reason) || ishairy(n.Right, budget, reason) ||
-		ishairylist(n.List, budget, reason) || ishairylist(n.Rlist, budget, reason) ||
-		ishairylist(n.Ninit, budget, reason) || ishairylist(n.Nbody, budget, reason)
-}
-
-// Inlcopy and inlcopylist recursively copy the body of a function.
-// Any name-like node of non-local class is marked for re-export by adding it to
-// the exportlist.
-func inlcopylist(ll []*Node) []*Node {
-	s := make([]*Node, 0, len(ll))
-	for _, n := range ll {
-		s = append(s, inlcopy(n))
-	}
-	return s
-}
-
-func inlcopy(n *Node) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	case ONAME, OTYPE, OLITERAL:
-		return n
-	}
-
-	m := *n
-	if m.Func != nil {
-		m.Func.Inl.Set(nil)
-	}
-	m.Left = inlcopy(n.Left)
-	m.Right = inlcopy(n.Right)
-	m.List.Set(inlcopylist(n.List.Slice()))
-	m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
-	m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
-	m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
-
-	return &m
-}
-
-// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
-// calls made to inlineable functions. This is the external entry point.
-func inlcalls(fn *Node) {
-	savefn := Curfn
-	Curfn = fn
-	fn = inlnode(fn)
-	if fn != Curfn {
-		Fatalf("inlnode replaced curfn")
-	}
-	Curfn = savefn
-}
-
-// Turn an OINLCALL into a statement.
-func inlconv2stmt(n *Node) {
-	n.Op = OBLOCK
-
-	// n->ninit stays
-	n.List.Set(n.Nbody.Slice())
-
-	n.Nbody.Set(nil)
-	n.Rlist.Set(nil)
-}
-
-// Turn an OINLCALL into a single valued expression.
-// The result of inlconv2expr MUST be assigned back to n, e.g.
-// 	n.Left = inlconv2expr(n.Left)
-func inlconv2expr(n *Node) *Node {
-	r := n.Rlist.First()
-	return addinit(r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
-}
-
-// Turn the rlist (with the return values) of the OINLCALL in
-// n into an expression list lumping the ninit and body
-// containing the inlined statements on the first list element so
-// order will be preserved Used in return, oas2func and call
-// statements.
-func inlconv2list(n *Node) []*Node {
-	if n.Op != OINLCALL || n.Rlist.Len() == 0 {
-		Fatalf("inlconv2list %+v\n", n)
-	}
-
-	s := n.Rlist.Slice()
-	s[0] = addinit(s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
-	return s
-}
-
-func inlnodelist(l Nodes) {
-	s := l.Slice()
-	for i := range s {
-		s[i] = inlnode(s[i])
-	}
-}
-
-// inlnode recurses over the tree to find inlineable calls, which will
-// be turned into OINLCALLs by mkinlcall. When the recursion comes
-// back up will examine left, right, list, rlist, ninit, ntest, nincr,
-// nbody and nelse and use one of the 4 inlconv/glue functions above
-// to turn the OINLCALL into an expression, a statement, or patch it
-// in to this nodes list or rlist as appropriate.
-// NOTE it makes no sense to pass the glue functions down the
-// recursion to the level where the OINLCALL gets created because they
-// have to edit /this/ n, so you'd have to push that one down as well,
-// but then you may as well do it here.  so this is cleaner and
-// shorter and less complicated.
-// The result of inlnode MUST be assigned back to n, e.g.
-// 	n.Left = inlnode(n.Left)
-func inlnode(n *Node) *Node {
-	if n == nil {
-		return n
-	}
-
-	switch n.Op {
-	// inhibit inlining of their argument
-	case ODEFER, OPROC:
-		switch n.Left.Op {
-		case OCALLFUNC, OCALLMETH:
-			n.Left.setNoInline(true)
-		}
-		fallthrough
-
-	// TODO do them here (or earlier),
-	// so escape analysis can avoid more heapmoves.
-	case OCLOSURE:
-		return n
-	}
-
-	lno := setlineno(n)
-
-	inlnodelist(n.Ninit)
-	for _, n1 := range n.Ninit.Slice() {
-		if n1.Op == OINLCALL {
-			inlconv2stmt(n1)
-		}
-	}
-
-	n.Left = inlnode(n.Left)
-	if n.Left != nil && n.Left.Op == OINLCALL {
-		n.Left = inlconv2expr(n.Left)
-	}
-
-	n.Right = inlnode(n.Right)
-	if n.Right != nil && n.Right.Op == OINLCALL {
-		if n.Op == OFOR {
-			inlconv2stmt(n.Right)
-		} else {
-			n.Right = inlconv2expr(n.Right)
-		}
-	}
-
-	inlnodelist(n.List)
-	switch n.Op {
-	case OBLOCK:
-		for _, n2 := range n.List.Slice() {
-			if n2.Op == OINLCALL {
-				inlconv2stmt(n2)
-			}
-		}
-
-	// if we just replaced arg in f(arg()) or return arg with an inlined call
-	// and arg returns multiple values, glue as list
-	case ORETURN,
-		OCALLFUNC,
-		OCALLMETH,
-		OCALLINTER,
-		OAPPEND,
-		OCOMPLEX:
-		if n.List.Len() == 1 && n.List.First().Op == OINLCALL && n.List.First().Rlist.Len() > 1 {
-			n.List.Set(inlconv2list(n.List.First()))
-			break
-		}
-		fallthrough
-
-	default:
-		s := n.List.Slice()
-		for i1, n1 := range s {
-			if n1 != nil && n1.Op == OINLCALL {
-				s[i1] = inlconv2expr(s[i1])
-			}
-		}
-	}
-
-	inlnodelist(n.Rlist)
-	switch n.Op {
-	case OAS2FUNC:
-		if n.Rlist.First().Op == OINLCALL {
-			n.Rlist.Set(inlconv2list(n.Rlist.First()))
-			n.Op = OAS2
-			n.Typecheck = 0
-			n = typecheck(n, Etop)
-			break
-		}
-		fallthrough
-
-	default:
-		s := n.Rlist.Slice()
-		for i1, n1 := range s {
-			if n1.Op == OINLCALL {
-				if n.Op == OIF {
-					inlconv2stmt(n1)
-				} else {
-					s[i1] = inlconv2expr(s[i1])
-				}
-			}
-		}
-	}
-
-	inlnodelist(n.Nbody)
-	for _, n := range n.Nbody.Slice() {
-		if n.Op == OINLCALL {
-			inlconv2stmt(n)
-		}
-	}
-
-	// with all the branches out of the way, it is now time to
-	// transmogrify this node itself unless inhibited by the
-	// switch at the top of this function.
-	switch n.Op {
-	case OCALLFUNC, OCALLMETH:
-		if n.noInline() {
-			return n
-		}
-	}
-
-	switch n.Op {
-	case OCALLFUNC:
-		if Debug['m'] > 3 {
-			fmt.Printf("%v:call to func %+v\n", n.Line(), n.Left)
-		}
-		if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 && !isIntrinsicCall(n) { // normal case
-			n = mkinlcall(n, n.Left, n.Isddd)
-		} else if n.isMethodCalledAsFunction() && n.Left.Sym.Def != nil {
-			n = mkinlcall(n, n.Left.Sym.Def, n.Isddd)
-		}
-
-	case OCALLMETH:
-		if Debug['m'] > 3 {
-			fmt.Printf("%v:call to meth %L\n", n.Line(), n.Left.Right)
-		}
-
-		// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
-		if n.Left.Type == nil {
-			Fatalf("no function type for [%p] %+v\n", n.Left, n.Left)
-		}
-
-		if n.Left.Type.Nname() == nil {
-			Fatalf("no function definition for [%p] %+v\n", n.Left.Type, n.Left.Type)
-		}
-
-		n = mkinlcall(n, n.Left.Type.Nname(), n.Isddd)
-	}
-
-	lineno = lno
-	return n
-}
-
-// The result of mkinlcall MUST be assigned back to n, e.g.
-// 	n.Left = mkinlcall(n.Left, fn, isddd)
-func mkinlcall(n *Node, fn *Node, isddd bool) *Node {
-	save_safemode := safemode
-
-	// imported functions may refer to unsafe as long as the
-	// package was marked safe during import (already checked).
-	pkg := fnpkg(fn)
-
-	if pkg != localpkg && pkg != nil {
-		safemode = false
-	}
-	n = mkinlcall1(n, fn, isddd)
-	safemode = save_safemode
-	return n
-}
-
-func tinlvar(t *Field, inlvars map[*Node]*Node) *Node {
-	if t.Nname != nil && !isblank(t.Nname) {
-		inlvar := inlvars[t.Nname]
-		if inlvar == nil {
-			Fatalf("missing inlvar for %v\n", t.Nname)
-		}
-		return inlvar
-	}
-
-	return typecheck(nblank, Erv|Easgn)
-}
-
-var inlgen int
-
-// if *np is a call, and fn is a function with an inlinable body, substitute *np with an OINLCALL.
-// On return ninit has the parameter assignments, the nbody is the
-// inlined function body and list, rlist contain the input, output
-// parameters.
-// The result of mkinlcall1 MUST be assigned back to n, e.g.
-// 	n.Left = mkinlcall1(n.Left, fn, isddd)
-func mkinlcall1(n *Node, fn *Node, isddd bool) *Node {
-	// For variadic fn.
-	if fn.Func.Inl.Len() == 0 {
-		return n
-	}
-
-	if fn == Curfn || fn.Name.Defn == Curfn {
-		return n
-	}
-
-	inlvars := make(map[*Node]*Node)
-
-	if Debug['l'] < 2 {
-		typecheckinl(fn)
-	}
-
-	// Bingo, we have a function node, and it has an inlineable body
-	if Debug['m'] > 1 {
-		fmt.Printf("%v: inlining call to %v %#v { %#v }\n", n.Line(), fn.Sym, fn.Type, fn.Func.Inl)
-	} else if Debug['m'] != 0 {
-		fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
-	}
-
-	if Debug['m'] > 2 {
-		fmt.Printf("%v: Before inlining: %+v\n", n.Line(), n)
-	}
-
-	ninit := n.Ninit
-
-	//dumplist("ninit pre", ninit);
-
-	var dcl []*Node
-	if fn.Name.Defn != nil {
-		// local function
-		dcl = fn.Func.Inldcl.Slice()
-	} else {
-		// imported function
-		dcl = fn.Func.Dcl
-	}
-
-	var retvars []*Node
-	i := 0
-
-	// Make temp names to use instead of the originals
-	for _, ln := range dcl {
-		if ln.Class == PPARAMOUT { // return values handled below.
-			continue
-		}
-		if ln.isParamStackCopy() { // ignore the on-stack copy of a parameter that moved to the heap
-			continue
-		}
-		if ln.Op == ONAME {
-			inlvars[ln] = typecheck(inlvar(ln), Erv)
-			if ln.Class == PPARAM || ln.Name.Param.Stackcopy != nil && ln.Name.Param.Stackcopy.Class == PPARAM {
-				ninit.Append(nod(ODCL, inlvars[ln], nil))
-			}
-		}
-	}
-
-	// temporaries for return values.
-	var m *Node
-	for _, t := range fn.Type.Results().Fields().Slice() {
-		if t != nil && t.Nname != nil && !isblank(t.Nname) {
-			m = inlvar(t.Nname)
-			m = typecheck(m, Erv)
-			inlvars[t.Nname] = m
-		} else {
-			// anonymous return values, synthesize names for use in assignment that replaces return
-			m = retvar(t, i)
-			i++
-		}
-
-		ninit.Append(nod(ODCL, m, nil))
-		retvars = append(retvars, m)
-	}
-
-	// assign receiver.
-	if fn.IsMethod() && n.Left.Op == ODOTMETH {
-		// method call with a receiver.
-		t := fn.Type.Recv()
-
-		if t != nil && t.Nname != nil && !isblank(t.Nname) && inlvars[t.Nname] == nil {
-			Fatalf("missing inlvar for %v\n", t.Nname)
-		}
-		if n.Left.Left == nil {
-			Fatalf("method call without receiver: %+v", n)
-		}
-		if t == nil {
-			Fatalf("method call unknown receiver type: %+v", n)
-		}
-		as := nod(OAS, tinlvar(t, inlvars), n.Left.Left)
-		if as != nil {
-			as = typecheck(as, Etop)
-			ninit.Append(as)
-		}
-	}
-
-	// check if inlined function is variadic.
-	variadic := false
-
-	var varargtype *Type
-	varargcount := 0
-	for _, t := range fn.Type.Params().Fields().Slice() {
-		if t.Isddd {
-			variadic = true
-			varargtype = t.Type
-		}
-	}
-
-	// but if argument is dotted too forget about variadicity.
-	if variadic && isddd {
-		variadic = false
-	}
-
-	// check if argument is actually a returned tuple from call.
-	multiret := 0
-
-	if n.List.Len() == 1 {
-		switch n.List.First().Op {
-		case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH:
-			if n.List.First().Left.Type.Results().NumFields() > 1 {
-				multiret = n.List.First().Left.Type.Results().NumFields() - 1
-			}
-		}
-	}
-
-	if variadic {
-		varargcount = n.List.Len() + multiret
-		if n.Left.Op != ODOTMETH {
-			varargcount -= fn.Type.Recvs().NumFields()
-		}
-		varargcount -= fn.Type.Params().NumFields() - 1
-	}
-
-	// assign arguments to the parameters' temp names
-	as := nod(OAS2, nil, nil)
-
-	as.Rlist.Set(n.List.Slice())
-	li := 0
-
-	// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
-	if fn.IsMethod() && n.Left.Op != ODOTMETH {
-		// non-method call to method
-		if n.List.Len() == 0 {
-			Fatalf("non-method call to method without first arg: %+v", n)
-		}
-
-		// append receiver inlvar to LHS.
-		t := fn.Type.Recv()
-
-		if t != nil && t.Nname != nil && !isblank(t.Nname) && inlvars[t.Nname] == nil {
-			Fatalf("missing inlvar for %v\n", t.Nname)
-		}
-		if t == nil {
-			Fatalf("method call unknown receiver type: %+v", n)
-		}
-		as.List.Append(tinlvar(t, inlvars))
-		li++
-	}
-
-	// append ordinary arguments to LHS.
-	chkargcount := n.List.Len() > 1
-
-	var vararg *Node    // the slice argument to a variadic call
-	var varargs []*Node // the list of LHS names to put in vararg.
-	if !chkargcount {
-		// 0 or 1 expression on RHS.
-		var i int
-		for _, t := range fn.Type.Params().Fields().Slice() {
-			if variadic && t.Isddd {
-				vararg = tinlvar(t, inlvars)
-				for i = 0; i < varargcount && li < n.List.Len(); i++ {
-					m = argvar(varargtype, i)
-					varargs = append(varargs, m)
-					as.List.Append(m)
-				}
-
-				break
-			}
-
-			as.List.Append(tinlvar(t, inlvars))
-		}
-	} else {
-		// match arguments except final variadic (unless the call is dotted itself)
-		t, it := iterFields(fn.Type.Params())
-		for t != nil {
-			if li >= n.List.Len() {
-				break
-			}
-			if variadic && t.Isddd {
-				break
-			}
-			as.List.Append(tinlvar(t, inlvars))
-			t = it.Next()
-			li++
-		}
-
-		// match varargcount arguments with variadic parameters.
-		if variadic && t != nil && t.Isddd {
-			vararg = tinlvar(t, inlvars)
-			var i int
-			for i = 0; i < varargcount && li < n.List.Len(); i++ {
-				m = argvar(varargtype, i)
-				varargs = append(varargs, m)
-				as.List.Append(m)
-				li++
-			}
-
-			if i == varargcount {
-				t = it.Next()
-			}
-		}
-
-		if li < n.List.Len() || t != nil {
-			Fatalf("arg count mismatch: %#v vs %.v\n", fn.Type.Params(), n.List)
-		}
-	}
-
-	if as.Rlist.Len() != 0 {
-		as = typecheck(as, Etop)
-		ninit.Append(as)
-	}
-
-	// turn the variadic args into a slice.
-	if variadic {
-		as = nod(OAS, vararg, nil)
-		if varargcount == 0 {
-			as.Right = nodnil()
-			as.Right.Type = varargtype
-		} else {
-			varslicetype := typSlice(varargtype.Elem())
-			as.Right = nod(OCOMPLIT, nil, typenod(varslicetype))
-			as.Right.List.Set(varargs)
-		}
-
-		as = typecheck(as, Etop)
-		ninit.Append(as)
-	}
-
-	// zero the outparams
-	for _, n := range retvars {
-		as = nod(OAS, n, nil)
-		as = typecheck(as, Etop)
-		ninit.Append(as)
-	}
-
-	retlabel := autolabel(".i")
-	retlabel.Etype = 1 // flag 'safe' for escape analysis (no backjumps)
-
-	inlgen++
-
-	subst := inlsubst{
-		retlabel: retlabel,
-		retvars:  retvars,
-		inlvars:  inlvars,
-	}
-
-	body := subst.list(fn.Func.Inl)
-
-	lab := nod(OLABEL, retlabel, nil)
-	lab.Used = true // avoid 'not used' when function doesn't have return
-	body = append(body, lab)
-
-	typecheckslice(body, Etop)
-
-	//dumplist("ninit post", ninit);
-
-	call := nod(OINLCALL, nil, nil)
-
-	call.Ninit.Set(ninit.Slice())
-	call.Nbody.Set(body)
-	call.Rlist.Set(retvars)
-	call.Type = n.Type
-	call.Typecheck = 1
-
-	// Hide the args from setlno -- the parameters to the inlined
-	// call already have good line numbers that should be preserved.
-	args := as.Rlist
-	as.Rlist.Set(nil)
-
-	setlno(call, n.Lineno)
-
-	as.Rlist.Set(args.Slice())
-
-	//dumplist("call body", body);
-
-	n = call
-
-	// transitive inlining
-	// might be nice to do this before exporting the body,
-	// but can't emit the body with inlining expanded.
-	// instead we emit the things that the body needs
-	// and each use must redo the inlining.
-	// luckily these are small.
-	body = fn.Func.Inl.Slice()
-	fn.Func.Inl.Set(nil) // prevent infinite recursion (shouldn't happen anyway)
-	inlnodelist(call.Nbody)
-	for _, n := range call.Nbody.Slice() {
-		if n.Op == OINLCALL {
-			inlconv2stmt(n)
-		}
-	}
-	fn.Func.Inl.Set(body)
-
-	if Debug['m'] > 2 {
-		fmt.Printf("%v: After inlining %+v\n\n", n.Line(), n)
-	}
-
-	return n
-}
-
-// Every time we expand a function we generate a new set of tmpnames,
-// PAUTO's in the calling functions, and link them off of the
-// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
-func inlvar(var_ *Node) *Node {
-	if Debug['m'] > 3 {
-		fmt.Printf("inlvar %+v\n", var_)
-	}
-
-	n := newname(var_.Sym)
-	n.Type = var_.Type
-	n.Class = PAUTO
-	n.Used = true
-	n.Name.Curfn = Curfn // the calling function, not the called one
-	n.Addrtaken = var_.Addrtaken
-
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	return n
-}
-
-// Synthesize a variable to store the inlined function's results in.
-func retvar(t *Field, i int) *Node {
-	n := newname(lookupN("~r", i))
-	n.Type = t.Type
-	n.Class = PAUTO
-	n.Used = true
-	n.Name.Curfn = Curfn // the calling function, not the called one
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	return n
-}
-
-// Synthesize a variable to store the inlined function's arguments
-// when they come from a multiple return call.
-func argvar(t *Type, i int) *Node {
-	n := newname(lookupN("~arg", i))
-	n.Type = t.Elem()
-	n.Class = PAUTO
-	n.Used = true
-	n.Name.Curfn = Curfn // the calling function, not the called one
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-	return n
-}
-
-// The inlsubst type implements the actual inlining of a single
-// function call.
-type inlsubst struct {
-	// Target of the goto substituted in place of a return.
-	retlabel *Node
-
-	// Temporary result variables.
-	retvars []*Node
-
-	inlvars map[*Node]*Node
-}
-
-// list inlines a list of nodes.
-func (subst *inlsubst) list(ll Nodes) []*Node {
-	s := make([]*Node, 0, ll.Len())
-	for _, n := range ll.Slice() {
-		s = append(s, subst.node(n))
-	}
-	return s
-}
-
-// node recursively copies a node from the saved pristine body of the
-// inlined function, substituting references to input/output
-// parameters with ones to the tmpnames, and substituting returns with
-// assignments to the output.
-func (subst *inlsubst) node(n *Node) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	case ONAME:
-		if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
-			if Debug['m'] > 2 {
-				fmt.Printf("substituting name %+v  ->  %+v\n", n, inlvar)
-			}
-			return inlvar
-		}
-
-		if Debug['m'] > 2 {
-			fmt.Printf("not substituting name %+v\n", n)
-		}
-		return n
-
-	case OLITERAL, OTYPE:
-		return n
-
-		// Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
-
-	//		dump("Return before substitution", n);
-	case ORETURN:
-		m := nod(OGOTO, subst.retlabel, nil)
-
-		m.Ninit.Set(subst.list(n.Ninit))
-
-		if len(subst.retvars) != 0 && n.List.Len() != 0 {
-			as := nod(OAS2, nil, nil)
-
-			// Make a shallow copy of retvars.
-			// Otherwise OINLCALL.Rlist will be the same list,
-			// and later walk and typecheck may clobber it.
-			for _, n := range subst.retvars {
-				as.List.Append(n)
-			}
-			as.Rlist.Set(subst.list(n.List))
-			as = typecheck(as, Etop)
-			m.Ninit.Append(as)
-		}
-
-		typecheckslice(m.Ninit.Slice(), Etop)
-		m = typecheck(m, Etop)
-
-		//		dump("Return after substitution", m);
-		return m
-
-	case OGOTO, OLABEL:
-		m := nod(OXXX, nil, nil)
-		*m = *n
-		m.Ninit.Set(nil)
-		p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
-		m.Left = newname(lookup(p))
-
-		return m
-	default:
-		m := nod(OXXX, nil, nil)
-		*m = *n
-		m.Ninit.Set(nil)
-
-		if n.Op == OCLOSURE {
-			Fatalf("cannot inline function containing closure: %+v", n)
-		}
-
-		m.Left = subst.node(n.Left)
-		m.Right = subst.node(n.Right)
-		m.List.Set(subst.list(n.List))
-		m.Rlist.Set(subst.list(n.Rlist))
-		m.Ninit.Set(append(m.Ninit.Slice(), subst.list(n.Ninit)...))
-		m.Nbody.Set(subst.list(n.Nbody))
-
-		return m
-	}
-}
-
-// Plaster over linenumbers
-func setlnolist(ll Nodes, lno int32) {
-	for _, n := range ll.Slice() {
-		setlno(n, lno)
-	}
-}
-
-func setlno(n *Node, lno int32) {
-	if n == nil {
-		return
-	}
-
-	// don't clobber names, unless they're freshly synthesized
-	if n.Op != ONAME || n.Lineno == 0 {
-		n.Lineno = lno
-	}
-
-	setlno(n.Left, lno)
-	setlno(n.Right, lno)
-	setlnolist(n.List, lno)
-	setlnolist(n.Rlist, lno)
-	setlnolist(n.Ninit, lno)
-	setlnolist(n.Nbody, lno)
-}
-
-func (n *Node) isMethodCalledAsFunction() bool {
-	return n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/lex.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/lex.go
deleted file mode 100644
index 6f46ecf..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/lex.go
+++ /dev/null
@@ -1,241 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/lex.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/lex.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/compile/internal/syntax"
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"strings"
-)
-
-// lexlineno is the line number _after_ the most recently read rune.
-// In particular, it's advanced (or rewound) as newlines are read (or unread).
-var lexlineno int32
-
-// lineno is the line number at the start of the most recently lexed token.
-var lineno int32
-
-func isSpace(c rune) bool {
-	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
-}
-
-func isQuoted(s string) bool {
-	return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
-}
-
-func plan9quote(s string) string {
-	if s == "" {
-		return "''"
-	}
-	for _, c := range s {
-		if c <= ' ' || c == '\'' {
-			return "'" + strings.Replace(s, "'", "''", -1) + "'"
-		}
-	}
-	return s
-}
-
-type Pragma syntax.Pragma
-
-const (
-	// Func pragmas.
-	Nointerface    Pragma = 1 << iota
-	Noescape              // func parameters don't escape
-	Norace                // func must not have race detector annotations
-	Nosplit               // func should not execute on separate stack
-	Noinline              // func should not be inlined
-	CgoUnsafeArgs         // treat a pointer to one arg as a pointer to them all
-	UintptrEscapes        // pointers converted to uintptr escape
-
-	// Runtime-only func pragmas.
-	// See ../../../../runtime/README.md for detailed descriptions.
-	Systemstack        // func must run on system stack
-	Nowritebarrier     // emit compiler error instead of write barrier
-	Nowritebarrierrec  // error on write barrier in this or recursive callees
-	Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
-
-	// Runtime-only type pragmas
-	NotInHeap // values of this type must not be heap allocated
-)
-
-func pragmaValue(verb string) Pragma {
-	switch verb {
-	case "go:nointerface":
-		if obj.Fieldtrack_enabled != 0 {
-			return Nointerface
-		}
-	case "go:noescape":
-		return Noescape
-	case "go:norace":
-		return Norace
-	case "go:nosplit":
-		return Nosplit
-	case "go:noinline":
-		return Noinline
-	case "go:systemstack":
-		if !compiling_runtime {
-			yyerror("//go:systemstack only allowed in runtime")
-		}
-		return Systemstack
-	case "go:nowritebarrier":
-		if !compiling_runtime {
-			yyerror("//go:nowritebarrier only allowed in runtime")
-		}
-		return Nowritebarrier
-	case "go:nowritebarrierrec":
-		if !compiling_runtime {
-			yyerror("//go:nowritebarrierrec only allowed in runtime")
-		}
-		return Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier
-	case "go:yeswritebarrierrec":
-		if !compiling_runtime {
-			yyerror("//go:yeswritebarrierrec only allowed in runtime")
-		}
-		return Yeswritebarrierrec
-	case "go:cgo_unsafe_args":
-		return CgoUnsafeArgs
-	case "go:uintptrescapes":
-		// For the next function declared in the file
-		// any uintptr arguments may be pointer values
-		// converted to uintptr. This directive
-		// ensures that the referenced allocated
-		// object, if any, is retained and not moved
-		// until the call completes, even though from
-		// the types alone it would appear that the
-		// object is no longer needed during the
-		// call. The conversion to uintptr must appear
-		// in the argument list.
-		// Used in syscall/dll_windows.go.
-		return UintptrEscapes
-	case "go:notinheap":
-		return NotInHeap
-	}
-	return 0
-}
-
-var internedStrings = map[string]string{}
-
-func internString(b []byte) string {
-	s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
-	if !ok {
-		s = string(b)
-		internedStrings[s] = s
-	}
-	return s
-}
-
-func pragcgo(text string) string {
-	f := pragmaFields(text)
-
-	verb := f[0][3:] // skip "go:"
-	switch verb {
-	case "cgo_export_static", "cgo_export_dynamic":
-		switch {
-		case len(f) == 2 && !isQuoted(f[1]):
-			local := plan9quote(f[1])
-			return fmt.Sprintln(verb, local)
-
-		case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
-			local := plan9quote(f[1])
-			remote := plan9quote(f[2])
-			return fmt.Sprintln(verb, local, remote)
-
-		default:
-			yyerror(`usage: //go:%s local [remote]`, verb)
-		}
-	case "cgo_import_dynamic":
-		switch {
-		case len(f) == 2 && !isQuoted(f[1]):
-			local := plan9quote(f[1])
-			return fmt.Sprintln(verb, local)
-
-		case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
-			local := plan9quote(f[1])
-			remote := plan9quote(f[2])
-			return fmt.Sprintln(verb, local, remote)
-
-		case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]):
-			local := plan9quote(f[1])
-			remote := plan9quote(f[2])
-			library := plan9quote(strings.Trim(f[3], `"`))
-			return fmt.Sprintln(verb, local, remote, library)
-
-		default:
-			yyerror(`usage: //go:cgo_import_dynamic local [remote ["library"]]`)
-		}
-	case "cgo_import_static":
-		switch {
-		case len(f) == 2 && !isQuoted(f[1]):
-			local := plan9quote(f[1])
-			return fmt.Sprintln(verb, local)
-
-		default:
-			yyerror(`usage: //go:cgo_import_static local`)
-		}
-	case "cgo_dynamic_linker":
-		switch {
-		case len(f) == 2 && isQuoted(f[1]):
-			path := plan9quote(strings.Trim(f[1], `"`))
-			return fmt.Sprintln(verb, path)
-
-		default:
-			yyerror(`usage: //go:cgo_dynamic_linker "path"`)
-		}
-	case "cgo_ldflag":
-		switch {
-		case len(f) == 2 && isQuoted(f[1]):
-			arg := plan9quote(strings.Trim(f[1], `"`))
-			return fmt.Sprintln(verb, arg)
-
-		default:
-			yyerror(`usage: //go:cgo_ldflag "arg"`)
-		}
-	}
-	return ""
-}
-
-// pragmaFields is similar to strings.FieldsFunc(s, isSpace)
-// but does not split when inside double quoted regions and always
-// splits before the start and after the end of a double quoted region.
-// pragmaFields does not recognize escaped quotes. If a quote in s is not
-// closed the part after the opening quote will not be returned as a field.
-func pragmaFields(s string) []string {
-	var a []string
-	inQuote := false
-	fieldStart := -1 // Set to -1 when looking for start of field.
-	for i, c := range s {
-		switch {
-		case c == '"':
-			if inQuote {
-				inQuote = false
-				a = append(a, s[fieldStart:i+1])
-				fieldStart = -1
-			} else {
-				inQuote = true
-				if fieldStart >= 0 {
-					a = append(a, s[fieldStart:i])
-				}
-				fieldStart = i
-			}
-		case !inQuote && isSpace(c):
-			if fieldStart >= 0 {
-				a = append(a, s[fieldStart:i])
-				fieldStart = -1
-			}
-		default:
-			if fieldStart == -1 {
-				fieldStart = i
-			}
-		}
-	}
-	if !inQuote && fieldStart >= 0 { // Last field might end at the end of the string.
-		a = append(a, s[fieldStart:])
-	}
-	return a
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/lex_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/lex_test.go
deleted file mode 100644
index 2eb6f80..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/lex_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/lex_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/lex_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "testing"
-
-func eq(a, b []string) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i := 0; i < len(a); i++ {
-		if a[i] != b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func TestPragmaFields(t *testing.T) {
-
-	var tests = []struct {
-		in   string
-		want []string
-	}{
-		{"", []string{}},
-		{" \t ", []string{}},
-		{`""""`, []string{`""`, `""`}},
-		{"  a'b'c  ", []string{"a'b'c"}},
-		{"1 2 3 4", []string{"1", "2", "3", "4"}},
-		{"\n☺\t☹\n", []string{"☺", "☹"}},
-		{`"1 2 "  3  " 4 5"`, []string{`"1 2 "`, `3`, `" 4 5"`}},
-		{`"1""2 3""4"`, []string{`"1"`, `"2 3"`, `"4"`}},
-		{`12"34"`, []string{`12`, `"34"`}},
-		{`12"34 `, []string{`12`}},
-	}
-
-	for _, tt := range tests {
-		got := pragmaFields(tt.in)
-		if !eq(got, tt.want) {
-			t.Errorf("pragmaFields(%q) = %v; want %v", tt.in, got, tt.want)
-			continue
-		}
-	}
-}
-
-func TestPragcgo(t *testing.T) {
-
-	var tests = []struct {
-		in   string
-		want string
-	}{
-		{`go:cgo_export_dynamic local`, "cgo_export_dynamic local\n"},
-		{`go:cgo_export_dynamic local remote`, "cgo_export_dynamic local remote\n"},
-		{`go:cgo_export_dynamic local' remote'`, "cgo_export_dynamic 'local''' 'remote'''\n"},
-		{`go:cgo_export_static local`, "cgo_export_static local\n"},
-		{`go:cgo_export_static local remote`, "cgo_export_static local remote\n"},
-		{`go:cgo_export_static local' remote'`, "cgo_export_static 'local''' 'remote'''\n"},
-		{`go:cgo_import_dynamic local`, "cgo_import_dynamic local\n"},
-		{`go:cgo_import_dynamic local remote`, "cgo_import_dynamic local remote\n"},
-		{`go:cgo_import_dynamic local remote "library"`, "cgo_import_dynamic local remote library\n"},
-		{`go:cgo_import_dynamic local' remote' "lib rary"`, "cgo_import_dynamic 'local''' 'remote''' 'lib rary'\n"},
-		{`go:cgo_import_static local`, "cgo_import_static local\n"},
-		{`go:cgo_import_static local'`, "cgo_import_static 'local'''\n"},
-		{`go:cgo_dynamic_linker "/path/"`, "cgo_dynamic_linker /path/\n"},
-		{`go:cgo_dynamic_linker "/p ath/"`, "cgo_dynamic_linker '/p ath/'\n"},
-		{`go:cgo_ldflag "arg"`, "cgo_ldflag arg\n"},
-		{`go:cgo_ldflag "a rg"`, "cgo_ldflag 'a rg'\n"},
-	}
-
-	for _, tt := range tests {
-		got := pragcgo(tt.in)
-		if got != tt.want {
-			t.Errorf("pragcgo(%q) = %q; want %q", tt.in, got, tt.want)
-			continue
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/logic_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/logic_test.go
deleted file mode 100644
index 91a24eb..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/logic_test.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/logic_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/logic_test.go:1
-package gc
-
-import "testing"
-
-// Tests to make sure logic simplification rules are correct.
-
-func TestLogic64(t *testing.T) {
-	// test values to determine function equality
-	values := [...]int64{-1 << 63, 1<<63 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
-
-	// golden functions we use repeatedly
-	zero := func(x int64) int64 { return 0 }
-	id := func(x int64) int64 { return x }
-	or := func(x, y int64) int64 { return x | y }
-	and := func(x, y int64) int64 { return x & y }
-	y := func(x, y int64) int64 { return y }
-
-	for _, test := range [...]struct {
-		name   string
-		f      func(int64) int64
-		golden func(int64) int64
-	}{
-		{"x|x", func(x int64) int64 { return x | x }, id},
-		{"x|0", func(x int64) int64 { return x | 0 }, id},
-		{"x|-1", func(x int64) int64 { return x | -1 }, func(x int64) int64 { return -1 }},
-		{"x&x", func(x int64) int64 { return x & x }, id},
-		{"x&0", func(x int64) int64 { return x & 0 }, zero},
-		{"x&-1", func(x int64) int64 { return x & -1 }, id},
-		{"x^x", func(x int64) int64 { return x ^ x }, zero},
-		{"x^0", func(x int64) int64 { return x ^ 0 }, id},
-		{"x^-1", func(x int64) int64 { return x ^ -1 }, func(x int64) int64 { return ^x }},
-		{"x+0", func(x int64) int64 { return x + 0 }, id},
-		{"x-x", func(x int64) int64 { return x - x }, zero},
-		{"x*0", func(x int64) int64 { return x * 0 }, zero},
-		{"^^x", func(x int64) int64 { return ^^x }, id},
-	} {
-		for _, v := range values {
-			got := test.f(v)
-			want := test.golden(v)
-			if want != got {
-				t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
-			}
-		}
-	}
-	for _, test := range [...]struct {
-		name   string
-		f      func(int64, int64) int64
-		golden func(int64, int64) int64
-	}{
-		{"x|(x|y)", func(x, y int64) int64 { return x | (x | y) }, or},
-		{"x|(y|x)", func(x, y int64) int64 { return x | (y | x) }, or},
-		{"(x|y)|x", func(x, y int64) int64 { return (x | y) | x }, or},
-		{"(y|x)|x", func(x, y int64) int64 { return (y | x) | x }, or},
-		{"x&(x&y)", func(x, y int64) int64 { return x & (x & y) }, and},
-		{"x&(y&x)", func(x, y int64) int64 { return x & (y & x) }, and},
-		{"(x&y)&x", func(x, y int64) int64 { return (x & y) & x }, and},
-		{"(y&x)&x", func(x, y int64) int64 { return (y & x) & x }, and},
-		{"x^(x^y)", func(x, y int64) int64 { return x ^ (x ^ y) }, y},
-		{"x^(y^x)", func(x, y int64) int64 { return x ^ (y ^ x) }, y},
-		{"(x^y)^x", func(x, y int64) int64 { return (x ^ y) ^ x }, y},
-		{"(y^x)^x", func(x, y int64) int64 { return (y ^ x) ^ x }, y},
-		{"-(y-x)", func(x, y int64) int64 { return -(y - x) }, func(x, y int64) int64 { return x - y }},
-		{"(x+y)-x", func(x, y int64) int64 { return (x + y) - x }, y},
-		{"(y+x)-x", func(x, y int64) int64 { return (y + x) - x }, y},
-	} {
-		for _, v := range values {
-			for _, w := range values {
-				got := test.f(v, w)
-				want := test.golden(v, w)
-				if want != got {
-					t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
-				}
-			}
-		}
-	}
-}
-
-func TestLogic32(t *testing.T) {
-	// test values to determine function equality
-	values := [...]int32{-1 << 31, 1<<31 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
-
-	// golden functions we use repeatedly
-	zero := func(x int32) int32 { return 0 }
-	id := func(x int32) int32 { return x }
-	or := func(x, y int32) int32 { return x | y }
-	and := func(x, y int32) int32 { return x & y }
-	y := func(x, y int32) int32 { return y }
-
-	for _, test := range [...]struct {
-		name   string
-		f      func(int32) int32
-		golden func(int32) int32
-	}{
-		{"x|x", func(x int32) int32 { return x | x }, id},
-		{"x|0", func(x int32) int32 { return x | 0 }, id},
-		{"x|-1", func(x int32) int32 { return x | -1 }, func(x int32) int32 { return -1 }},
-		{"x&x", func(x int32) int32 { return x & x }, id},
-		{"x&0", func(x int32) int32 { return x & 0 }, zero},
-		{"x&-1", func(x int32) int32 { return x & -1 }, id},
-		{"x^x", func(x int32) int32 { return x ^ x }, zero},
-		{"x^0", func(x int32) int32 { return x ^ 0 }, id},
-		{"x^-1", func(x int32) int32 { return x ^ -1 }, func(x int32) int32 { return ^x }},
-		{"x+0", func(x int32) int32 { return x + 0 }, id},
-		{"x-x", func(x int32) int32 { return x - x }, zero},
-		{"x*0", func(x int32) int32 { return x * 0 }, zero},
-		{"^^x", func(x int32) int32 { return ^^x }, id},
-	} {
-		for _, v := range values {
-			got := test.f(v)
-			want := test.golden(v)
-			if want != got {
-				t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
-			}
-		}
-	}
-	for _, test := range [...]struct {
-		name   string
-		f      func(int32, int32) int32
-		golden func(int32, int32) int32
-	}{
-		{"x|(x|y)", func(x, y int32) int32 { return x | (x | y) }, or},
-		{"x|(y|x)", func(x, y int32) int32 { return x | (y | x) }, or},
-		{"(x|y)|x", func(x, y int32) int32 { return (x | y) | x }, or},
-		{"(y|x)|x", func(x, y int32) int32 { return (y | x) | x }, or},
-		{"x&(x&y)", func(x, y int32) int32 { return x & (x & y) }, and},
-		{"x&(y&x)", func(x, y int32) int32 { return x & (y & x) }, and},
-		{"(x&y)&x", func(x, y int32) int32 { return (x & y) & x }, and},
-		{"(y&x)&x", func(x, y int32) int32 { return (y & x) & x }, and},
-		{"x^(x^y)", func(x, y int32) int32 { return x ^ (x ^ y) }, y},
-		{"x^(y^x)", func(x, y int32) int32 { return x ^ (y ^ x) }, y},
-		{"(x^y)^x", func(x, y int32) int32 { return (x ^ y) ^ x }, y},
-		{"(y^x)^x", func(x, y int32) int32 { return (y ^ x) ^ x }, y},
-		{"-(y-x)", func(x, y int32) int32 { return -(y - x) }, func(x, y int32) int32 { return x - y }},
-		{"(x+y)-x", func(x, y int32) int32 { return (x + y) - x }, y},
-		{"(y+x)-x", func(x, y int32) int32 { return (y + x) - x }, y},
-	} {
-		for _, v := range values {
-			for _, w := range values {
-				got := test.f(v, w)
-				want := test.golden(v, w)
-				if want != got {
-					t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
-				}
-			}
-		}
-	}
-}
-
-func TestLogic16(t *testing.T) {
-	// test values to determine function equality
-	values := [...]int16{-1 << 15, 1<<15 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
-
-	// golden functions we use repeatedly
-	zero := func(x int16) int16 { return 0 }
-	id := func(x int16) int16 { return x }
-	or := func(x, y int16) int16 { return x | y }
-	and := func(x, y int16) int16 { return x & y }
-	y := func(x, y int16) int16 { return y }
-
-	for _, test := range [...]struct {
-		name   string
-		f      func(int16) int16
-		golden func(int16) int16
-	}{
-		{"x|x", func(x int16) int16 { return x | x }, id},
-		{"x|0", func(x int16) int16 { return x | 0 }, id},
-		{"x|-1", func(x int16) int16 { return x | -1 }, func(x int16) int16 { return -1 }},
-		{"x&x", func(x int16) int16 { return x & x }, id},
-		{"x&0", func(x int16) int16 { return x & 0 }, zero},
-		{"x&-1", func(x int16) int16 { return x & -1 }, id},
-		{"x^x", func(x int16) int16 { return x ^ x }, zero},
-		{"x^0", func(x int16) int16 { return x ^ 0 }, id},
-		{"x^-1", func(x int16) int16 { return x ^ -1 }, func(x int16) int16 { return ^x }},
-		{"x+0", func(x int16) int16 { return x + 0 }, id},
-		{"x-x", func(x int16) int16 { return x - x }, zero},
-		{"x*0", func(x int16) int16 { return x * 0 }, zero},
-		{"^^x", func(x int16) int16 { return ^^x }, id},
-	} {
-		for _, v := range values {
-			got := test.f(v)
-			want := test.golden(v)
-			if want != got {
-				t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
-			}
-		}
-	}
-	for _, test := range [...]struct {
-		name   string
-		f      func(int16, int16) int16
-		golden func(int16, int16) int16
-	}{
-		{"x|(x|y)", func(x, y int16) int16 { return x | (x | y) }, or},
-		{"x|(y|x)", func(x, y int16) int16 { return x | (y | x) }, or},
-		{"(x|y)|x", func(x, y int16) int16 { return (x | y) | x }, or},
-		{"(y|x)|x", func(x, y int16) int16 { return (y | x) | x }, or},
-		{"x&(x&y)", func(x, y int16) int16 { return x & (x & y) }, and},
-		{"x&(y&x)", func(x, y int16) int16 { return x & (y & x) }, and},
-		{"(x&y)&x", func(x, y int16) int16 { return (x & y) & x }, and},
-		{"(y&x)&x", func(x, y int16) int16 { return (y & x) & x }, and},
-		{"x^(x^y)", func(x, y int16) int16 { return x ^ (x ^ y) }, y},
-		{"x^(y^x)", func(x, y int16) int16 { return x ^ (y ^ x) }, y},
-		{"(x^y)^x", func(x, y int16) int16 { return (x ^ y) ^ x }, y},
-		{"(y^x)^x", func(x, y int16) int16 { return (y ^ x) ^ x }, y},
-		{"-(y-x)", func(x, y int16) int16 { return -(y - x) }, func(x, y int16) int16 { return x - y }},
-		{"(x+y)-x", func(x, y int16) int16 { return (x + y) - x }, y},
-		{"(y+x)-x", func(x, y int16) int16 { return (y + x) - x }, y},
-	} {
-		for _, v := range values {
-			for _, w := range values {
-				got := test.f(v, w)
-				want := test.golden(v, w)
-				if want != got {
-					t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
-				}
-			}
-		}
-	}
-}
-
-func TestLogic8(t *testing.T) {
-	// test values to determine function equality
-	values := [...]int8{-1 << 7, 1<<7 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
-
-	// golden functions we use repeatedly
-	zero := func(x int8) int8 { return 0 }
-	id := func(x int8) int8 { return x }
-	or := func(x, y int8) int8 { return x | y }
-	and := func(x, y int8) int8 { return x & y }
-	y := func(x, y int8) int8 { return y }
-
-	for _, test := range [...]struct {
-		name   string
-		f      func(int8) int8
-		golden func(int8) int8
-	}{
-		{"x|x", func(x int8) int8 { return x | x }, id},
-		{"x|0", func(x int8) int8 { return x | 0 }, id},
-		{"x|-1", func(x int8) int8 { return x | -1 }, func(x int8) int8 { return -1 }},
-		{"x&x", func(x int8) int8 { return x & x }, id},
-		{"x&0", func(x int8) int8 { return x & 0 }, zero},
-		{"x&-1", func(x int8) int8 { return x & -1 }, id},
-		{"x^x", func(x int8) int8 { return x ^ x }, zero},
-		{"x^0", func(x int8) int8 { return x ^ 0 }, id},
-		{"x^-1", func(x int8) int8 { return x ^ -1 }, func(x int8) int8 { return ^x }},
-		{"x+0", func(x int8) int8 { return x + 0 }, id},
-		{"x-x", func(x int8) int8 { return x - x }, zero},
-		{"x*0", func(x int8) int8 { return x * 0 }, zero},
-		{"^^x", func(x int8) int8 { return ^^x }, id},
-	} {
-		for _, v := range values {
-			got := test.f(v)
-			want := test.golden(v)
-			if want != got {
-				t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
-			}
-		}
-	}
-	for _, test := range [...]struct {
-		name   string
-		f      func(int8, int8) int8
-		golden func(int8, int8) int8
-	}{
-		{"x|(x|y)", func(x, y int8) int8 { return x | (x | y) }, or},
-		{"x|(y|x)", func(x, y int8) int8 { return x | (y | x) }, or},
-		{"(x|y)|x", func(x, y int8) int8 { return (x | y) | x }, or},
-		{"(y|x)|x", func(x, y int8) int8 { return (y | x) | x }, or},
-		{"x&(x&y)", func(x, y int8) int8 { return x & (x & y) }, and},
-		{"x&(y&x)", func(x, y int8) int8 { return x & (y & x) }, and},
-		{"(x&y)&x", func(x, y int8) int8 { return (x & y) & x }, and},
-		{"(y&x)&x", func(x, y int8) int8 { return (y & x) & x }, and},
-		{"x^(x^y)", func(x, y int8) int8 { return x ^ (x ^ y) }, y},
-		{"x^(y^x)", func(x, y int8) int8 { return x ^ (y ^ x) }, y},
-		{"(x^y)^x", func(x, y int8) int8 { return (x ^ y) ^ x }, y},
-		{"(y^x)^x", func(x, y int8) int8 { return (y ^ x) ^ x }, y},
-		{"-(y-x)", func(x, y int8) int8 { return -(y - x) }, func(x, y int8) int8 { return x - y }},
-		{"(x+y)-x", func(x, y int8) int8 { return (x + y) - x }, y},
-		{"(y+x)-x", func(x, y int8) int8 { return (y + x) - x }, y},
-	} {
-		for _, v := range values {
-			for _, w := range values {
-				got := test.f(v, w)
-				want := test.golden(v, w)
-				if want != got {
-					t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
-				}
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/magic.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/magic.go
deleted file mode 100644
index 834d0f2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/magic.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/magic.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/magic.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// Transmogrify slow integer division into fast multiplication using magic.
-
-// argument passing to/from
-// smagic and umagic
-type Magic struct {
-	W   int // input for both - width
-	S   int // output for both - shift
-	Bad int // output for both - unexpected failure
-
-	// magic multiplier for signed literal divisors
-	Sd int64 // input - literal divisor
-	Sm int64 // output - multiplier
-
-	// magic multiplier for unsigned literal divisors
-	Ud uint64 // input - literal divisor
-	Um uint64 // output - multiplier
-	Ua int    // output - adder
-}
-
-// magic number for signed division
-// see hacker's delight chapter 10
-func smagic(m *Magic) {
-	var mask uint64
-
-	m.Bad = 0
-	switch m.W {
-	default:
-		m.Bad = 1
-		return
-
-	case 8:
-		mask = 0xff
-
-	case 16:
-		mask = 0xffff
-
-	case 32:
-		mask = 0xffffffff
-
-	case 64:
-		mask = 0xffffffffffffffff
-	}
-
-	two31 := mask ^ (mask >> 1)
-
-	p := m.W - 1
-	ad := uint64(m.Sd)
-	if m.Sd < 0 {
-		ad = -uint64(m.Sd)
-	}
-
-	// bad denominators
-	if ad == 0 || ad == 1 || ad == two31 {
-		m.Bad = 1
-		return
-	}
-
-	t := two31
-	ad &= mask
-
-	anc := t - 1 - t%ad
-	anc &= mask
-
-	q1 := two31 / anc
-	r1 := two31 - q1*anc
-	q1 &= mask
-	r1 &= mask
-
-	q2 := two31 / ad
-	r2 := two31 - q2*ad
-	q2 &= mask
-	r2 &= mask
-
-	var delta uint64
-	for {
-		p++
-		q1 <<= 1
-		r1 <<= 1
-		q1 &= mask
-		r1 &= mask
-		if r1 >= anc {
-			q1++
-			r1 -= anc
-			q1 &= mask
-			r1 &= mask
-		}
-
-		q2 <<= 1
-		r2 <<= 1
-		q2 &= mask
-		r2 &= mask
-		if r2 >= ad {
-			q2++
-			r2 -= ad
-			q2 &= mask
-			r2 &= mask
-		}
-
-		delta = ad - r2
-		delta &= mask
-		if q1 < delta || (q1 == delta && r1 == 0) {
-			continue
-		}
-
-		break
-	}
-
-	m.Sm = int64(q2 + 1)
-	if uint64(m.Sm)&two31 != 0 {
-		m.Sm |= ^int64(mask)
-	}
-	m.S = p - m.W
-}
-
-// magic number for unsigned division
-// see hacker's delight chapter 10
-func umagic(m *Magic) {
-	var mask uint64
-
-	m.Bad = 0
-	m.Ua = 0
-
-	switch m.W {
-	default:
-		m.Bad = 1
-		return
-
-	case 8:
-		mask = 0xff
-
-	case 16:
-		mask = 0xffff
-
-	case 32:
-		mask = 0xffffffff
-
-	case 64:
-		mask = 0xffffffffffffffff
-	}
-
-	two31 := mask ^ (mask >> 1)
-
-	m.Ud &= mask
-	if m.Ud == 0 || m.Ud == two31 {
-		m.Bad = 1
-		return
-	}
-
-	nc := mask - (-m.Ud&mask)%m.Ud
-	p := m.W - 1
-
-	q1 := two31 / nc
-	r1 := two31 - q1*nc
-	q1 &= mask
-	r1 &= mask
-
-	q2 := (two31 - 1) / m.Ud
-	r2 := (two31 - 1) - q2*m.Ud
-	q2 &= mask
-	r2 &= mask
-
-	var delta uint64
-	for {
-		p++
-		if r1 >= nc-r1 {
-			q1 <<= 1
-			q1++
-			r1 <<= 1
-			r1 -= nc
-		} else {
-			q1 <<= 1
-			r1 <<= 1
-		}
-
-		q1 &= mask
-		r1 &= mask
-		if r2+1 >= m.Ud-r2 {
-			if q2 >= two31-1 {
-				m.Ua = 1
-			}
-
-			q2 <<= 1
-			q2++
-			r2 <<= 1
-			r2++
-			r2 -= m.Ud
-		} else {
-			if q2 >= two31 {
-				m.Ua = 1
-			}
-
-			q2 <<= 1
-			r2 <<= 1
-			r2++
-		}
-
-		q2 &= mask
-		r2 &= mask
-
-		delta = m.Ud - 1 - r2
-		delta &= mask
-
-		if p < m.W+m.W {
-			if q1 < delta || (q1 == delta && r1 == 0) {
-				continue
-			}
-		}
-
-		break
-	}
-
-	m.Um = q2 + 1
-	m.S = p - m.W
-}
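
The deleted magic.go computes, per the Hacker's Delight chapter it cites, the multiplier and shift that let the compiler replace integer division by a constant with a multiply and shift. The following standalone sketch (not taken from the diff, and not the compiler's code) demonstrates the effect of such a transformation for unsigned 32-bit division by 3, where a suitable magic constant is ceil(2^33/3) = 0xAAAAAAAB with a shift of 33:

```go
// Division by a constant rewritten as multiply-and-shift, the kind of
// rewrite the deleted magic.go computes multipliers for.
package main

import "fmt"

func div3(x uint32) uint32 {
	const m = 0xAAAAAAAB // ceil(2^33 / 3); note 3*m = 2^33 + 1
	return uint32((uint64(x) * m) >> 33)
}

func main() {
	for _, x := range []uint32{0, 1, 2, 3, 7, 100, 1<<31 - 1, 1<<32 - 1} {
		if div3(x) != x/3 {
			fmt.Println("mismatch at", x)
			return
		}
	}
	fmt.Println("multiply-and-shift agrees with x/3 on all samples")
}
```

Because 3*0xAAAAAAAB = 2^33 + 1, the product overshoots x/3 by less than 1/6 for any 32-bit x, so the floor after the shift is exact.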
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/main.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/main.go
deleted file mode 100644
index 2e4f363..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/main.go
+++ /dev/null
@@ -1,963 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/main.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/main.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run mkbuiltin.go
-
-package gc
-
-import (
-	"bufio"
-	"bytes"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"flag"
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"path"
-	"runtime"
-	"strconv"
-	"strings"
-)
-
-var imported_unsafe bool
-
-var (
-	buildid string
-)
-
-var (
-	Debug_append   int
-	Debug_closure  int
-	debug_dclstack int
-	Debug_panic    int
-	Debug_slice    int
-	Debug_wb       int
-)
-
-// Debug arguments.
-// These can be specified with the -d flag, as in "-d nil"
-// to set the debug_checknil variable. In general the list passed
-// to -d can be comma-separated.
-var debugtab = []struct {
-	name string
-	val  *int
-}{
-	{"append", &Debug_append},         // print information about append compilation
-	{"closure", &Debug_closure},       // print information about closure compilation
-	{"disablenil", &disable_checknil}, // disable nil checks
-	{"dclstack", &debug_dclstack},     // run internal dclstack checks
-	{"gcprog", &Debug_gcprog},         // print dump of GC programs
-	{"nil", &Debug_checknil},          // print information about nil checks
-	{"panic", &Debug_panic},           // do not hide any compiler panic
-	{"slice", &Debug_slice},           // print information about slice compilation
-	{"typeassert", &Debug_typeassert}, // print information about type assertion inlining
-	{"wb", &Debug_wb},                 // print information about write barriers
-	{"export", &Debug_export},         // print export data
-}
-
-func usage() {
-	fmt.Printf("usage: compile [options] file.go...\n")
-	obj.Flagprint(1)
-	Exit(2)
-}
-
-func hidePanic() {
-	if Debug_panic == 0 && nsavederrors+nerrors > 0 {
-		// If we've already complained about things
-		// in the program, don't bother complaining
-		// about a panic too; let the user clean up
-		// the code and try again.
-		if err := recover(); err != nil {
-			errorexit()
-		}
-	}
-}
-
-func doversion() {
-	p := obj.Expstring()
-	if p == "X:none" {
-		p = ""
-	}
-	sep := ""
-	if p != "" {
-		sep = " "
-	}
-	fmt.Printf("compile version %s%s%s\n", obj.Version, sep, p)
-	os.Exit(0)
-}
-
-// supportsDynlink reports whether or not the code generator for the given
-// architecture supports the -shared and -dynlink flags.
-func supportsDynlink(arch *sys.Arch) bool {
-	return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.S390X)
-}
-
-// timing data for compiler phases
-var timings Timings
-var benchfile string
-
-// Main parses flags and Go source files specified in the command-line
-// arguments, type-checks the parsed Go package, compiles functions to machine
-// code, and finally writes the compiled package definition to disk.
-func Main() {
-	timings.Start("fe", "init")
-
-	defer hidePanic()
-
-	Ctxt = obj.Linknew(Thearch.LinkArch)
-	Ctxt.DiagFunc = yyerror
-	Ctxt.Bso = bufio.NewWriter(os.Stdout)
-
-	localpkg = mkpkg("")
-	localpkg.Prefix = "\"\""
-
-	// pseudo-package, for scoping
-	builtinpkg = mkpkg("go.builtin")
-	builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
-
-	// pseudo-package, accessed by import "unsafe"
-	unsafepkg = mkpkg("unsafe")
-	unsafepkg.Name = "unsafe"
-
-	// real package, referred to by generated runtime calls
-	Runtimepkg = mkpkg("runtime")
-	Runtimepkg.Name = "runtime"
-
-	// pseudo-packages used in symbol tables
-	itabpkg = mkpkg("go.itab")
-	itabpkg.Name = "go.itab"
-	itabpkg.Prefix = "go.itab" // not go%2eitab
-
-	itablinkpkg = mkpkg("go.itablink")
-	itablinkpkg.Name = "go.itablink"
-	itablinkpkg.Prefix = "go.itablink" // not go%2eitablink
-
-	trackpkg = mkpkg("go.track")
-	trackpkg.Name = "go.track"
-	trackpkg.Prefix = "go.track" // not go%2etrack
-
-	typepkg = mkpkg("type")
-	typepkg.Name = "type"
-
-	// pseudo-package used for map zero values
-	mappkg = mkpkg("go.map")
-	mappkg.Name = "go.map"
-	mappkg.Prefix = "go.map"
-
-	Nacl = obj.GOOS == "nacl"
-	if Nacl {
-		flag_largemodel = true
-	}
-
-	flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime")
-	obj.Flagcount("%", "debug non-static initializers", &Debug['%'])
-	obj.Flagcount("B", "disable bounds checking", &Debug['B'])
-	flag.StringVar(&localimport, "D", "", "set relative `path` for local imports")
-	obj.Flagcount("E", "debug symbol export", &Debug['E'])
-	obj.Flagfn1("I", "add `directory` to import search path", addidir)
-	obj.Flagcount("K", "debug missing line numbers", &Debug['K'])
-	obj.Flagcount("N", "disable optimizations", &Debug['N'])
-	obj.Flagcount("S", "print assembly listing", &Debug['S'])
-	obj.Flagfn0("V", "print compiler version", doversion)
-	obj.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
-	flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`")
-	flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata")
-	flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)")
-	flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`")
-	obj.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
-	obj.Flagcount("f", "debug stack frames", &Debug['f'])
-	obj.Flagcount("h", "halt on error", &Debug['h'])
-	obj.Flagcount("i", "debug line number stack", &Debug['i'])
-	obj.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
-	flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`")
-	obj.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
-	obj.Flagcount("l", "disable inlining", &Debug['l'])
-	flag.StringVar(&linkobj, "linkobj", "", "write linker-specific object to `file`")
-	obj.Flagcount("live", "debug liveness analysis", &debuglive)
-	obj.Flagcount("m", "print optimization decisions", &Debug['m'])
-	flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer")
-	flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports")
-	flag.StringVar(&outfile, "o", "", "write output to `file`")
-	flag.StringVar(&myimportpath, "p", "", "set expected package import `path`")
-	flag.BoolVar(&writearchive, "pack", false, "write package file instead of object file")
-	obj.Flagcount("r", "debug generated wrappers", &Debug['r'])
-	flag.BoolVar(&flag_race, "race", false, "enable race detector")
-	obj.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s'])
-	flag.StringVar(&Ctxt.LineHist.TrimPathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths")
-	flag.BoolVar(&safemode, "u", false, "reject unsafe code")
-	obj.Flagcount("v", "increase debug verbosity", &Debug['v'])
-	obj.Flagcount("w", "debug type checking", &Debug['w'])
-	flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier")
-	var flag_shared bool
-	var flag_dynlink bool
-	if supportsDynlink(Thearch.LinkArch.Arch) {
-		flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library")
-		flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
-	}
-	if Thearch.LinkArch.Family == sys.AMD64 {
-		flag.BoolVar(&flag_largemodel, "largemodel", false, "generate code that assumes a large memory model")
-	}
-	flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
-	flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`")
-	flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-	flag.StringVar(&traceprofile, "traceprofile", "", "write an execution trace to `file`")
-	flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
-	obj.Flagparse(usage)
-
-	Ctxt.Flag_shared = flag_dynlink || flag_shared
-	Ctxt.Flag_dynlink = flag_dynlink
-	Ctxt.Flag_optimize = Debug['N'] == 0
-
-	Ctxt.Debugasm = int32(Debug['S'])
-	Ctxt.Debugvlog = int32(Debug['v'])
-
-	if flag.NArg() < 1 {
-		usage()
-	}
-
-	startProfile()
-
-	if flag_race {
-		racepkg = mkpkg("runtime/race")
-		racepkg.Name = "race"
-	}
-	if flag_msan {
-		msanpkg = mkpkg("runtime/msan")
-		msanpkg.Name = "msan"
-	}
-	if flag_race && flag_msan {
-		log.Fatal("cannot use both -race and -msan")
-	} else if flag_race || flag_msan {
-		instrumenting = true
-	}
-
-	// parse -d argument
-	if debugstr != "" {
-	Split:
-		for _, name := range strings.Split(debugstr, ",") {
-			if name == "" {
-				continue
-			}
-			val := 1
-			valstring := ""
-			if i := strings.Index(name, "="); i >= 0 {
-				var err error
-				val, err = strconv.Atoi(name[i+1:])
-				if err != nil {
-					log.Fatalf("invalid debug value %v", name)
-				}
-				name = name[:i]
-			} else if i := strings.Index(name, ":"); i >= 0 {
-				valstring = name[i+1:]
-				name = name[:i]
-			}
-			for _, t := range debugtab {
-				if t.name == name {
-					if t.val != nil {
-						*t.val = val
-						continue Split
-					}
-				}
-			}
-			// special case for ssa for now
-			if strings.HasPrefix(name, "ssa/") {
-				// expect form ssa/phase/flag
-				// e.g. -d=ssa/generic_cse/time
-				// _ in phase name also matches space
-				phase := name[4:]
-				flag := "debug" // default flag is debug
-				if i := strings.Index(phase, "/"); i >= 0 {
-					flag = phase[i+1:]
-					phase = phase[:i]
-				}
-				err := ssa.PhaseOption(phase, flag, val, valstring)
-				if err != "" {
-					log.Fatalf(err)
-				}
-				continue Split
-			}
-			log.Fatalf("unknown debug key -d %s\n", name)
-		}
-	}
-
-	// enable inlining.  for now:
-	//	default: inlining on.  (debug['l'] == 1)
-	//	-l: inlining off  (debug['l'] == 0)
-	//	-ll, -lll: inlining on again, with extra debugging (debug['l'] > 1)
-	if Debug['l'] <= 1 {
-		Debug['l'] = 1 - Debug['l']
-	}
-
-	Widthint = Thearch.LinkArch.IntSize
-	Widthptr = Thearch.LinkArch.PtrSize
-	Widthreg = Thearch.LinkArch.RegSize
-
-	initUniverse()
-
-	blockgen = 1
-	dclcontext = PEXTERN
-	nerrors = 0
-	lexlineno = 1
-
-	timings.Start("fe", "loadsys")
-	loadsys()
-
-	timings.Start("fe", "parse")
-	lexlineno0 := lexlineno
-	for _, infile = range flag.Args() {
-		linehistpush(infile)
-		block = 1
-		iota_ = -1000000
-		imported_unsafe = false
-		parseFile(infile)
-		if nsyntaxerrors != 0 {
-			errorexit()
-		}
-
-		// Instead of converting EOF into '\n' in getc and count it as an extra line
-		// for the line history to work, and which then has to be corrected elsewhere,
-		// just add a line here.
-		lexlineno++
-		linehistpop()
-	}
-	timings.Stop()
-	timings.AddEvent(int64(lexlineno-lexlineno0), "lines")
-
-	mkpackage(localpkg.Name) // final import not used checks
-	finishUniverse()
-
-	typecheckok = true
-	if Debug['f'] != 0 {
-		frame(1)
-	}
-
-	// Process top-level declarations in phases.
-
-	// Phase 1: const, type, and names and types of funcs.
-	//   This will gather all the information about types
-	//   and methods but doesn't depend on any of it.
-	defercheckwidth()
-
-	// Don't use range--typecheck can add closures to xtop.
-	timings.Start("fe", "typecheck", "top1")
-	for i := 0; i < len(xtop); i++ {
-		if xtop[i].Op != ODCL && xtop[i].Op != OAS && xtop[i].Op != OAS2 {
-			xtop[i] = typecheck(xtop[i], Etop)
-		}
-	}
-
-	// Phase 2: Variable assignments.
-	//   To check interface assignments, depends on phase 1.
-
-	// Don't use range--typecheck can add closures to xtop.
-	timings.Start("fe", "typecheck", "top2")
-	for i := 0; i < len(xtop); i++ {
-		if xtop[i].Op == ODCL || xtop[i].Op == OAS || xtop[i].Op == OAS2 {
-			xtop[i] = typecheck(xtop[i], Etop)
-		}
-	}
-	resumecheckwidth()
-
-	// Phase 3: Type check function bodies.
-	// Don't use range--typecheck can add closures to xtop.
-	timings.Start("fe", "typecheck", "func")
-	var fcount int64
-	for i := 0; i < len(xtop); i++ {
-		if xtop[i].Op == ODCLFUNC || xtop[i].Op == OCLOSURE {
-			Curfn = xtop[i]
-			decldepth = 1
-			saveerrors()
-			typecheckslice(Curfn.Nbody.Slice(), Etop)
-			checkreturn(Curfn)
-			if nerrors != 0 {
-				Curfn.Nbody.Set(nil) // type errors; do not compile
-			}
-			fcount++
-		}
-	}
-	timings.AddEvent(fcount, "funcs")
-
-	// Phase 4: Decide how to capture closed variables.
-	// This needs to run before escape analysis,
-	// because variables captured by value do not escape.
-	timings.Start("fe", "capturevars")
-	for _, n := range xtop {
-		if n.Op == ODCLFUNC && n.Func.Closure != nil {
-			Curfn = n
-			capturevars(n)
-		}
-	}
-
-	Curfn = nil
-
-	if nsavederrors+nerrors != 0 {
-		errorexit()
-	}
-
-	// Phase 5: Inlining
-	timings.Start("fe", "inlining")
-	if Debug['l'] > 1 {
-		// Typecheck imported function bodies if debug['l'] > 1,
-		// otherwise lazily when used or re-exported.
-		for _, n := range importlist {
-			if n.Func.Inl.Len() != 0 {
-				saveerrors()
-				typecheckinl(n)
-			}
-		}
-
-		if nsavederrors+nerrors != 0 {
-			errorexit()
-		}
-	}
-
-	if Debug['l'] != 0 {
-		// Find functions that can be inlined and clone them before walk expands them.
-		visitBottomUp(xtop, func(list []*Node, recursive bool) {
-			for _, n := range list {
-				if !recursive {
-					caninl(n)
-				} else {
-					if Debug['m'] > 1 {
-						fmt.Printf("%v: cannot inline %v: recursive\n", n.Line(), n.Func.Nname)
-					}
-				}
-				inlcalls(n)
-			}
-		})
-	}
-
-	// Phase 6: Escape analysis.
-	// Required for moving heap allocations onto stack,
-	// which in turn is required by the closure implementation,
-	// which stores the addresses of stack variables into the closure.
-	// If the closure does not escape, it needs to be on the stack
-	// or else the stack copier will not update it.
-	// Large values are also moved off stack in escape analysis;
-	// because large values may contain pointers, it must happen early.
-	timings.Start("fe", "escapes")
-	escapes(xtop)
-
-	// Phase 7: Transform closure bodies to properly reference captured variables.
-	// This needs to happen before walk, because closures must be transformed
-	// before walk reaches a call of a closure.
-	timings.Start("fe", "xclosures")
-	for _, n := range xtop {
-		if n.Op == ODCLFUNC && n.Func.Closure != nil {
-			Curfn = n
-			transformclosure(n)
-		}
-	}
-
-	Curfn = nil
-
-	// Phase 8: Compile top level functions.
-	// Don't use range--walk can add functions to xtop.
-	timings.Start("be", "compilefuncs")
-	fcount = 0
-	for i := 0; i < len(xtop); i++ {
-		if xtop[i].Op == ODCLFUNC {
-			funccompile(xtop[i])
-			fcount++
-		}
-	}
-	timings.AddEvent(fcount, "funcs")
-
-	if nsavederrors+nerrors == 0 {
-		fninit(xtop)
-	}
-
-	if compiling_runtime {
-		checknowritebarrierrec()
-	}
-
-	// Phase 9: Check external declarations.
-	timings.Start("be", "externaldcls")
-	for i, n := range externdcl {
-		if n.Op == ONAME {
-			externdcl[i] = typecheck(externdcl[i], Erv)
-		}
-	}
-
-	if nerrors+nsavederrors != 0 {
-		errorexit()
-	}
-
-	// Write object data to disk.
-	timings.Start("be", "dumpobj")
-	dumpobj()
-	if asmhdr != "" {
-		dumpasmhdr()
-	}
-
-	if nerrors+nsavederrors != 0 {
-		errorexit()
-	}
-
-	flusherrors()
-	timings.Stop()
-
-	if benchfile != "" {
-		if err := writebench(benchfile); err != nil {
-			log.Fatalf("cannot write benchmark data: %v", err)
-		}
-	}
-}
-
-func writebench(filename string) error {
-	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
-	if err != nil {
-		return err
-	}
-
-	var buf bytes.Buffer
-	fmt.Fprintln(&buf, "commit:", obj.Version)
-	fmt.Fprintln(&buf, "goos:", runtime.GOOS)
-	fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
-	timings.Write(&buf, "BenchmarkCompile:"+myimportpath+":")
-
-	n, err := f.Write(buf.Bytes())
-	if err != nil {
-		return err
-	}
-	if n != buf.Len() {
-		panic("bad writer")
-	}
-
-	return f.Close()
-}
-
-var importMap = map[string]string{}
-
-func addImportMap(s string) {
-	if strings.Count(s, "=") != 1 {
-		log.Fatal("-importmap argument must be of the form source=actual")
-	}
-	i := strings.Index(s, "=")
-	source, actual := s[:i], s[i+1:]
-	if source == "" || actual == "" {
-		log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
-	}
-	importMap[source] = actual
-}
-
-func saveerrors() {
-	nsavederrors += nerrors
-	nerrors = 0
-}
-
-func arsize(b *bufio.Reader, name string) int {
-	var buf [ArhdrSize]byte
-	if _, err := io.ReadFull(b, buf[:]); err != nil {
-		return -1
-	}
-	aname := strings.Trim(string(buf[0:16]), " ")
-	if !strings.HasPrefix(aname, name) {
-		return -1
-	}
-	asize := strings.Trim(string(buf[48:58]), " ")
-	i, _ := strconv.Atoi(asize)
-	return i
-}
-
-func skiptopkgdef(b *bufio.Reader) bool {
-	// archive header
-	p, err := b.ReadString('\n')
-	if err != nil {
-		log.Fatalf("reading input: %v", err)
-	}
-	if p != "!<arch>\n" {
-		return false
-	}
-
-	// package export block should be first
-	sz := arsize(b, "__.PKGDEF")
-	return sz > 0
-}
-
-var idirs []string
-
-func addidir(dir string) {
-	if dir != "" {
-		idirs = append(idirs, dir)
-	}
-}
-
-func isDriveLetter(b byte) bool {
-	return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
-}
-
-// is this path a local name?  begins with ./ or ../ or /
-func islocalname(name string) bool {
-	return strings.HasPrefix(name, "/") ||
-		runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
-		strings.HasPrefix(name, "./") || name == "." ||
-		strings.HasPrefix(name, "../") || name == ".."
-}
-
-func findpkg(name string) (file string, ok bool) {
-	if islocalname(name) {
-		if safemode || nolocalimports {
-			return "", false
-		}
-
-		// try .a before .6.  important for building libraries:
-		// if there is an array.6 in the array.a library,
-		// want to find all of array.a, not just array.6.
-		file = fmt.Sprintf("%s.a", name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		file = fmt.Sprintf("%s.o", name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		return "", false
-	}
-
-	// local imports should be canonicalized already.
-	// don't want to see "encoding/../encoding/base64"
-	// as different from "encoding/base64".
-	if q := path.Clean(name); q != name {
-		yyerror("non-canonical import path %q (should be %q)", name, q)
-		return "", false
-	}
-
-	for _, dir := range idirs {
-		file = fmt.Sprintf("%s/%s.a", dir, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		file = fmt.Sprintf("%s/%s.o", dir, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-	}
-
-	if obj.GOROOT != "" {
-		suffix := ""
-		suffixsep := ""
-		if flag_installsuffix != "" {
-			suffixsep = "_"
-			suffix = flag_installsuffix
-		} else if flag_race {
-			suffixsep = "_"
-			suffix = "race"
-		} else if flag_msan {
-			suffixsep = "_"
-			suffix = "msan"
-		}
-
-		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", obj.GOROOT, obj.GOOS, obj.GOARCH, suffixsep, suffix, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-		file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", obj.GOROOT, obj.GOOS, obj.GOARCH, suffixsep, suffix, name)
-		if _, err := os.Stat(file); err == nil {
-			return file, true
-		}
-	}
-
-	return "", false
-}
-
-// loadsys loads the definitions for the low-level runtime functions,
-// so that the compiler can generate calls to them,
-// but does not make them visible to user code.
-func loadsys() {
-	block = 1
-	iota_ = -1000000
-
-	importpkg = Runtimepkg
-	typecheckok = true
-	defercheckwidth()
-
-	typs := runtimeTypes()
-	for _, d := range runtimeDecls {
-		sym := Pkglookup(d.name, importpkg)
-		typ := typs[d.typ]
-		switch d.tag {
-		case funcTag:
-			importsym(sym, ONAME)
-			n := newfuncname(sym)
-			n.Type = typ
-			declare(n, PFUNC)
-		case varTag:
-			importvar(sym, typ)
-		default:
-			Fatalf("unhandled declaration tag %v", d.tag)
-		}
-	}
-
-	typecheckok = false
-	resumecheckwidth()
-	importpkg = nil
-}
-
-func importfile(f *Val, indent []byte) {
-	if importpkg != nil {
-		Fatalf("importpkg not nil")
-	}
-
-	path_, ok := f.U.(string)
-	if !ok {
-		yyerror("import statement not a string")
-		return
-	}
-
-	if len(path_) == 0 {
-		yyerror("import path is empty")
-		return
-	}
-
-	if isbadimport(path_) {
-		return
-	}
-
-	// The package name main is no longer reserved,
-	// but we reserve the import path "main" to identify
-	// the main package, just as we reserve the import
-	// path "math" to identify the standard math package.
-	if path_ == "main" {
-		yyerror("cannot import \"main\"")
-		errorexit()
-	}
-
-	if myimportpath != "" && path_ == myimportpath {
-		yyerror("import %q while compiling that package (import cycle)", path_)
-		errorexit()
-	}
-
-	if mapped, ok := importMap[path_]; ok {
-		path_ = mapped
-	}
-
-	if path_ == "unsafe" {
-		if safemode {
-			yyerror("cannot import package unsafe")
-			errorexit()
-		}
-
-		importpkg = unsafepkg
-		imported_unsafe = true
-		return
-	}
-
-	if islocalname(path_) {
-		if path_[0] == '/' {
-			yyerror("import path cannot be absolute path")
-			return
-		}
-
-		prefix := Ctxt.Pathname
-		if localimport != "" {
-			prefix = localimport
-		}
-		path_ = path.Join(prefix, path_)
-
-		if isbadimport(path_) {
-			return
-		}
-	}
-
-	file, found := findpkg(path_)
-	if !found {
-		yyerror("can't find import: %q", path_)
-		errorexit()
-	}
-
-	importpkg = mkpkg(path_)
-
-	if importpkg.Imported {
-		return
-	}
-
-	importpkg.Imported = true
-
-	impf, err := os.Open(file)
-	if err != nil {
-		yyerror("can't open import: %q: %v", path_, err)
-		errorexit()
-	}
-	defer impf.Close()
-	imp := bufio.NewReader(impf)
-
-	if strings.HasSuffix(file, ".a") {
-		if !skiptopkgdef(imp) {
-			yyerror("import %s: not a package file", file)
-			errorexit()
-		}
-	}
-
-	// check object header
-	p, err := imp.ReadString('\n')
-	if err != nil {
-		log.Fatalf("reading input: %v", err)
-	}
-	if len(p) > 0 {
-		p = p[:len(p)-1]
-	}
-
-	if p != "empty archive" {
-		if !strings.HasPrefix(p, "go object ") {
-			yyerror("import %s: not a go object file: %s", file, p)
-			errorexit()
-		}
-
-		q := fmt.Sprintf("%s %s %s %s", obj.GOOS, obj.GOARCH, obj.Version, obj.Expstring())
-		if p[10:] != q {
-			yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
-			errorexit()
-		}
-	}
-
-	// process header lines
-	safe := false
-	for {
-		p, err = imp.ReadString('\n')
-		if err != nil {
-			log.Fatalf("reading input: %v", err)
-		}
-		if p == "\n" {
-			break // header ends with blank line
-		}
-		if strings.HasPrefix(p, "safe") {
-			safe = true
-			break // ok to ignore rest
-		}
-	}
-	if safemode && !safe {
-		yyerror("cannot import unsafe package %q", importpkg.Path)
-	}
-
-	// assume files move (get installed)
-	// so don't record the full path.
-	linehistpragma(file[len(file)-len(path_)-2:]) // acts as #pragma lib
-
-	// In the importfile, if we find:
-	// $$\n  (textual format): not supported anymore
-	// $$B\n (binary format) : import directly, then feed the lexer a dummy statement
-
-	// look for $$
-	var c byte
-	for {
-		c, err = imp.ReadByte()
-		if err != nil {
-			break
-		}
-		if c == '$' {
-			c, err = imp.ReadByte()
-			if c == '$' || err != nil {
-				break
-			}
-		}
-	}
-
-	// get character after $$
-	if err == nil {
-		c, _ = imp.ReadByte()
-	}
-
-	switch c {
-	case '\n':
-		yyerror("cannot import %s: old export format no longer supported (recompile library)", path_)
-
-	case 'B':
-		if Debug_export != 0 {
-			fmt.Printf("importing %s (%s)\n", path_, file)
-		}
-		imp.ReadByte() // skip \n after $$B
-		Import(imp)
-
-	default:
-		yyerror("no import in %q", path_)
-		errorexit()
-	}
-}
-
-func pkgnotused(lineno int32, path string, name string) {
-	// If the package was imported with a name other than the final
-	// import path element, show it explicitly in the error message.
-	// Note that this handles both renamed imports and imports of
-	// packages containing unconventional package declarations.
-	// Note that this uses / always, even on Windows, because Go import
-	// paths always use forward slashes.
-	elem := path
-	if i := strings.LastIndex(elem, "/"); i >= 0 {
-		elem = elem[i+1:]
-	}
-	if name == "" || elem == name {
-		yyerrorl(lineno, "imported and not used: %q", path)
-	} else {
-		yyerrorl(lineno, "imported and not used: %q as %s", path, name)
-	}
-}
-
-func mkpackage(pkgname string) {
-	if localpkg.Name == "" {
-		if pkgname == "_" {
-			yyerror("invalid package name _")
-		}
-		localpkg.Name = pkgname
-	} else {
-		if pkgname != localpkg.Name {
-			yyerror("package %s; expected %s", pkgname, localpkg.Name)
-		}
-		for _, s := range localpkg.Syms {
-			if s.Def == nil {
-				continue
-			}
-			if s.Def.Op == OPACK {
-				// throw away top-level package name leftover
-				// from previous file.
-				// leave s->block set to cause redeclaration
-				// errors if a conflicting top-level name is
-				// introduced by a different file.
-				if !s.Def.Used && nsyntaxerrors == 0 {
-					pkgnotused(s.Def.Lineno, s.Def.Name.Pkg.Path, s.Name)
-				}
-				s.Def = nil
-				continue
-			}
-
-			if s.Def.Sym != s && s.Flags&SymAlias == 0 {
-				// throw away top-level name left over
-				// from previous import . "x"
-				if s.Def.Name != nil && s.Def.Name.Pack != nil && !s.Def.Name.Pack.Used && nsyntaxerrors == 0 {
-					pkgnotused(s.Def.Name.Pack.Lineno, s.Def.Name.Pack.Name.Pkg.Path, "")
-					s.Def.Name.Pack.Used = true
-				}
-
-				s.Def = nil
-				continue
-			}
-		}
-	}
-
-	if outfile == "" {
-		p := infile
-		if i := strings.LastIndex(p, "/"); i >= 0 {
-			p = p[i+1:]
-		}
-		if runtime.GOOS == "windows" {
-			if i := strings.LastIndex(p, `\`); i >= 0 {
-				p = p[i+1:]
-			}
-		}
-		if i := strings.LastIndex(p, "."); i >= 0 {
-			p = p[:i]
-		}
-		suffix := ".o"
-		if writearchive {
-			suffix = ".a"
-		}
-		outfile = p + suffix
-	}
-}
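
The deleted Main above parses its -d argument as a comma-separated list where each item is a bare name, name=intvalue, or name:stringvalue (with an extra ssa/phase/flag form handed off to the SSA backend). A simplified sketch of that splitting logic follows; the helper and its names are hypothetical, not the compiler's own code:

```go
// Simplified re-creation of the -d debug-string parsing performed by the
// deleted Main: split on commas, then on "=" (integer value) or ":"
// (string value), defaulting the value to 1 as in "-d nil".
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type debugOpt struct {
	name string
	val  int    // integer form, default 1
	str  string // string form, if any
}

func parseDebug(s string) ([]debugOpt, error) {
	var opts []debugOpt
	for _, item := range strings.Split(s, ",") {
		if item == "" {
			continue
		}
		opt := debugOpt{name: item, val: 1}
		if i := strings.Index(item, "="); i >= 0 {
			v, err := strconv.Atoi(item[i+1:])
			if err != nil {
				return nil, fmt.Errorf("invalid debug value %q", item)
			}
			opt.name, opt.val = item[:i], v
		} else if i := strings.Index(item, ":"); i >= 0 {
			opt.name, opt.str = item[:i], item[i+1:]
		}
		opts = append(opts, opt)
	}
	return opts, nil
}

func main() {
	opts, err := parseDebug("nil,append=2,closure")
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Printf("%s val=%d str=%q\n", o.name, o.val, o.str)
	}
}
```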
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mkbuiltin.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mkbuiltin.go
deleted file mode 100644
index dda749d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mkbuiltin.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/mkbuiltin.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/mkbuiltin.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Generate builtin.go from builtin/runtime.go.
-
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/ast"
-	"go/format"
-	"go/parser"
-	"go/token"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-)
-
-var stdout = flag.Bool("stdout", false, "write to stdout instead of builtin.go")
-
-func main() {
-	flag.Parse()
-
-	var b bytes.Buffer
-	fmt.Fprintln(&b, "// AUTO-GENERATED by mkbuiltin.go; DO NOT EDIT")
-	fmt.Fprintln(&b)
-	fmt.Fprintln(&b, "package gc")
-
-	mkbuiltin(&b, "runtime")
-
-	out, err := format.Source(b.Bytes())
-	if err != nil {
-		log.Fatal(err)
-	}
-	if *stdout {
-		_, err = os.Stdout.Write(out)
-	} else {
-		err = ioutil.WriteFile("builtin.go", out, 0666)
-	}
-	if err != nil {
-		log.Fatal(err)
-	}
-}
-
-func mkbuiltin(w io.Writer, name string) {
-	fset := token.NewFileSet()
-	f, err := parser.ParseFile(fset, filepath.Join("builtin", name+".go"), nil, 0)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	var interner typeInterner
-
-	fmt.Fprintf(w, "var %sDecls = [...]struct { name string; tag int; typ int }{\n", name)
-	for _, decl := range f.Decls {
-		switch decl := decl.(type) {
-		case *ast.FuncDecl:
-			if decl.Recv != nil {
-				log.Fatal("methods unsupported")
-			}
-			if decl.Body != nil {
-				log.Fatal("unexpected function body")
-			}
-			fmt.Fprintf(w, "{%q, funcTag, %d},\n", decl.Name.Name, interner.intern(decl.Type))
-		case *ast.GenDecl:
-			if decl.Tok != token.VAR {
-				log.Fatal("unhandled declaration kind", decl.Tok)
-			}
-			for _, spec := range decl.Specs {
-				spec := spec.(*ast.ValueSpec)
-				if len(spec.Values) != 0 {
-					log.Fatal("unexpected values")
-				}
-				typ := interner.intern(spec.Type)
-				for _, name := range spec.Names {
-					fmt.Fprintf(w, "{%q, varTag, %d},\n", name.Name, typ)
-				}
-			}
-		default:
-			log.Fatal("unhandled decl type", decl)
-		}
-	}
-	fmt.Fprintln(w, "}")
-
-	fmt.Fprintln(w)
-	fmt.Fprintf(w, "func %sTypes() []*Type {\n", name)
-	fmt.Fprintf(w, "var typs [%d]*Type\n", len(interner.typs))
-	for i, typ := range interner.typs {
-		fmt.Fprintf(w, "typs[%d] = %s\n", i, typ)
-	}
-	fmt.Fprintln(w, "return typs[:]")
-	fmt.Fprintln(w, "}")
-}
-
-// typeInterner maps Go type expressions to compiler code that
-// constructs the denoted type. It recognizes and reuses common
-// subtype expressions.
-type typeInterner struct {
-	typs []string
-	hash map[string]int
-}
-
-func (i *typeInterner) intern(t ast.Expr) int {
-	x := i.mktype(t)
-	v, ok := i.hash[x]
-	if !ok {
-		v = len(i.typs)
-		if i.hash == nil {
-			i.hash = make(map[string]int)
-		}
-		i.hash[x] = v
-		i.typs = append(i.typs, x)
-	}
-	return v
-}
-
-func (i *typeInterner) subtype(t ast.Expr) string {
-	return fmt.Sprintf("typs[%d]", i.intern(t))
-}
-
-func (i *typeInterner) mktype(t ast.Expr) string {
-	switch t := t.(type) {
-	case *ast.Ident:
-		switch t.Name {
-		case "byte":
-			return "bytetype"
-		case "rune":
-			return "runetype"
-		}
-		return fmt.Sprintf("Types[T%s]", strings.ToUpper(t.Name))
-
-	case *ast.ArrayType:
-		if t.Len == nil {
-			return fmt.Sprintf("typSlice(%s)", i.subtype(t.Elt))
-		}
-		return fmt.Sprintf("typArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len))
-	case *ast.ChanType:
-		dir := "Cboth"
-		switch t.Dir {
-		case ast.SEND:
-			dir = "Csend"
-		case ast.RECV:
-			dir = "Crecv"
-		}
-		return fmt.Sprintf("typChan(%s, %s)", i.subtype(t.Value), dir)
-	case *ast.FuncType:
-		return fmt.Sprintf("functype(nil, %s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
-	case *ast.InterfaceType:
-		if len(t.Methods.List) != 0 {
-			log.Fatal("non-empty interfaces unsupported")
-		}
-		return "Types[TINTER]"
-	case *ast.MapType:
-		return fmt.Sprintf("typMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
-	case *ast.StarExpr:
-		return fmt.Sprintf("typPtr(%s)", i.subtype(t.X))
-	case *ast.StructType:
-		return fmt.Sprintf("tostruct(%s)", i.fields(t.Fields, true))
-
-	default:
-		log.Fatalf("unhandled type: %#v", t)
-		panic("unreachable")
-	}
-}
-
-func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
-	if fl == nil || len(fl.List) == 0 {
-		return "nil"
-	}
-	var res []string
-	for _, f := range fl.List {
-		typ := i.subtype(f.Type)
-		if len(f.Names) == 0 {
-			res = append(res, fmt.Sprintf("anonfield(%s)", typ))
-		} else {
-			for _, name := range f.Names {
-				if keepNames {
-					res = append(res, fmt.Sprintf("namedfield(%q, %s)", name.Name, typ))
-				} else {
-					res = append(res, fmt.Sprintf("anonfield(%s)", typ))
-				}
-			}
-		}
-	}
-	return fmt.Sprintf("[]*Node{%s}", strings.Join(res, ", "))
-}
-
-func intconst(e ast.Expr) int64 {
-	switch e := e.(type) {
-	case *ast.BasicLit:
-		if e.Kind != token.INT {
-			log.Fatalf("expected INT, got %v", e.Kind)
-		}
-		x, err := strconv.ParseInt(e.Value, 0, 64)
-		if err != nil {
-			log.Fatal(err)
-		}
-		return x
-	default:
-		log.Fatalf("unhandled expr: %#v", e)
-		panic("unreachable")
-	}
-}
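
The deleted mkbuiltin.go drives the standard go/parser and go/ast packages to turn builtin/runtime.go declarations into generated tables. As a standalone illustration of that machinery only (the source fragment below is made up for the example), this sketch parses a small file and lists its top-level function and variable declarations:

```go
// Parse a Go source fragment and walk its top-level declarations using the
// same go/parser + go/ast APIs the deleted generator uses.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

const src = `package runtime

func printstring(s string)
func concatstrings(buf *[32]byte, a []string) string

var writeBarrier struct{ enabled bool }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "runtime.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	for _, decl := range f.Decls {
		switch d := decl.(type) {
		case *ast.FuncDecl:
			fmt.Println("func:", d.Name.Name)
		case *ast.GenDecl:
			for _, spec := range d.Specs {
				if vs, ok := spec.(*ast.ValueSpec); ok {
					for _, name := range vs.Names {
						fmt.Println("var:", name.Name)
					}
				}
			}
		}
	}
}
```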
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mpfloat.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mpfloat.go
deleted file mode 100644
index eed10d7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mpfloat.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/mpfloat.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/mpfloat.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"math"
-	"bootstrap/math/big"
-)
-
-// implements float arithmetic
-
-const (
-	// Maximum size in bits for Mpints before signalling
-	// overflow and also mantissa precision for Mpflts.
-	Mpprec = 512
-	// Turn on for constant arithmetic debugging output.
-	Mpdebug = false
-)
-
-// Mpflt represents a floating-point constant.
-type Mpflt struct {
-	Val big.Float
-}
-
-// Mpcplx represents a complex constant.
-type Mpcplx struct {
-	Real Mpflt
-	Imag Mpflt
-}
-
-func newMpflt() *Mpflt {
-	var a Mpflt
-	a.Val.SetPrec(Mpprec)
-	return &a
-}
-
-func (a *Mpflt) SetInt(b *Mpint) {
-	if b.Ovf {
-		// sign doesn't really matter but copy anyway
-		a.Val.SetInf(b.Val.Sign() < 0)
-		return
-	}
-	a.Val.SetInt(&b.Val)
-}
-
-func (a *Mpflt) Set(b *Mpflt) {
-	a.Val.Set(&b.Val)
-}
-
-func (a *Mpflt) Add(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("\n%v + %v", a, b)
-	}
-
-	a.Val.Add(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) AddFloat64(c float64) {
-	var b Mpflt
-
-	b.SetFloat64(c)
-	a.Add(&b)
-}
-
-func (a *Mpflt) Sub(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("\n%v - %v", a, b)
-	}
-
-	a.Val.Sub(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) Mul(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("%v\n * %v\n", a, b)
-	}
-
-	a.Val.Mul(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) MulFloat64(c float64) {
-	var b Mpflt
-
-	b.SetFloat64(c)
-	a.Mul(&b)
-}
-
-func (a *Mpflt) Quo(b *Mpflt) {
-	if Mpdebug {
-		fmt.Printf("%v\n / %v\n", a, b)
-	}
-
-	a.Val.Quo(&a.Val, &b.Val)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n\n", a)
-	}
-}
-
-func (a *Mpflt) Cmp(b *Mpflt) int {
-	return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpflt) CmpFloat64(c float64) int {
-	if c == 0 {
-		return a.Val.Sign() // common case shortcut
-	}
-	return a.Val.Cmp(big.NewFloat(c))
-}
-
-func (a *Mpflt) Float64() float64 {
-	x, _ := a.Val.Float64()
-
-	// check for overflow
-	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
-		yyerror("ovf in Mpflt Float64")
-	}
-
-	return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) Float32() float64 {
-	x32, _ := a.Val.Float32()
-	x := float64(x32)
-
-	// check for overflow
-	if math.IsInf(x, 0) && nsavederrors+nerrors == 0 {
-		yyerror("ovf in Mpflt Float32")
-	}
-
-	return x + 0 // avoid -0 (should not be needed, but be conservative)
-}
-
-func (a *Mpflt) SetFloat64(c float64) {
-	if Mpdebug {
-		fmt.Printf("\nconst %g", c)
-	}
-
-	// convert -0 to 0
-	if c == 0 {
-		c = 0
-	}
-	a.Val.SetFloat64(c)
-
-	if Mpdebug {
-		fmt.Printf(" = %v\n", a)
-	}
-}
-
-func (a *Mpflt) Neg() {
-	// avoid -0
-	if a.Val.Sign() != 0 {
-		a.Val.Neg(&a.Val)
-	}
-}
-
-//
-// floating point input
-// required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
-//
-func (a *Mpflt) SetString(as string) {
-	for len(as) > 0 && (as[0] == ' ' || as[0] == '\t') {
-		as = as[1:]
-	}
-
-	f, ok := a.Val.SetString(as)
-	if !ok {
-		// At the moment we lose precise error cause;
-		// the old code additionally distinguished between:
-		// - malformed hex constant
-		// - decimal point in hex constant
-		// - constant exponent out of range
-		// - decimal point and binary point in constant
-		// TODO(gri) use different conversion function or check separately
-		yyerror("malformed constant: %s", as)
-		a.Val.SetFloat64(0)
-		return
-	}
-
-	if f.IsInf() {
-		yyerror("constant too large: %s", as)
-		a.Val.SetFloat64(0)
-		return
-	}
-
-	// -0 becomes 0
-	if f.Sign() == 0 && f.Signbit() {
-		a.Val.SetFloat64(0)
-	}
-}
-
-func (f *Mpflt) String() string {
-	return fconv(f, 0)
-}
-
-func fconv(fvp *Mpflt, flag FmtFlag) string {
-	if flag&FmtSharp == 0 {
-		return fvp.Val.Text('b', 0)
-	}
-
-	// use decimal format for error messages
-
-	// determine sign
-	f := &fvp.Val
-	var sign string
-	if f.Sign() < 0 {
-		sign = "-"
-		f = new(big.Float).Abs(f)
-	} else if flag&FmtSign != 0 {
-		sign = "+"
-	}
-
-	// Don't try to convert infinities (will not terminate).
-	if f.IsInf() {
-		return sign + "Inf"
-	}
-
-	// Use exact fmt formatting if in float64 range (common case):
-	// proceed if f doesn't underflow to 0 or overflow to inf.
-	if x, _ := f.Float64(); f.Sign() == 0 == (x == 0) && !math.IsInf(x, 0) {
-		return fmt.Sprintf("%s%.6g", sign, x)
-	}
-
-	// Out of float64 range. Do approximate manual to decimal
-	// conversion to avoid precise but possibly slow Float
-	// formatting.
-	// f = mant * 2**exp
-	var mant big.Float
-	exp := f.MantExp(&mant) // 0.5 <= mant < 1.0
-
-	// approximate float64 mantissa m and decimal exponent d
-	// f ~ m * 10**d
-	m, _ := mant.Float64()                     // 0.5 <= m < 1.0
-	d := float64(exp) * (math.Ln2 / math.Ln10) // log_10(2)
-
-	// adjust m for truncated (integer) decimal exponent e
-	e := int64(d)
-	m *= math.Pow(10, d-float64(e))
-
-	// ensure 1 <= m < 10
-	switch {
-	case m < 1-0.5e-6:
-		// The %.6g format below rounds m to 5 digits after the
-		// decimal point. Make sure that m*10 < 10 even after
-		// rounding up: m*10 + 0.5e-5 < 10 => m < 1 - 0.5e6.
-		m *= 10
-		e--
-	case m >= 10:
-		m /= 10
-		e++
-	}
-
-	return fmt.Sprintf("%s%.6ge%+d", sign, m, e)
-}
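
The deleted mpfloat.go wraps math/big.Float at a fixed 512-bit precision (Mpprec) so constant arithmetic stays exact until a final conversion to float64 or float32. A small standalone illustration of that behavior, not the compiler's code:

```go
// Hold a constant at high precision with math/big.Float and observe that
// it differs from the nearest float64, which is why the deleted code keeps
// constants in big.Float until the last possible conversion.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	const prec = 512 // matches the Mpprec constant in the deleted file

	x := new(big.Float).SetPrec(prec)
	if _, ok := x.SetString("0.1"); !ok {
		panic("SetString failed")
	}

	// The nearest float64 to 0.1, widened back to 512 bits, is a different
	// value from 0.1 rounded directly at 512 bits.
	y := new(big.Float).SetPrec(prec).SetFloat64(0.1)
	fmt.Println("equal at 512 bits:", x.Cmp(y) == 0)

	f, acc := x.Float64()
	fmt.Println("as float64:", f, "accuracy:", acc)
}
```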
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mpint.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mpint.go
deleted file mode 100644
index 4813290..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/mpint.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/mpint.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/mpint.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"bootstrap/math/big"
-)
-
-// implements integer arithmetic
-
-// Mpint represents an integer constant.
-type Mpint struct {
-	Val  big.Int
-	Ovf  bool // set if Val overflowed compiler limit (sticky)
-	Rune bool // set if syntax indicates default type rune
-}
-
-func (a *Mpint) SetOverflow() {
-	a.Val.SetUint64(1) // avoid spurious div-zero errors
-	a.Ovf = true
-}
-
-func (a *Mpint) checkOverflow(extra int) bool {
-	// We don't need to be precise here, any reasonable upper limit would do.
-	// For now, use existing limit so we pass all the tests unchanged.
-	if a.Val.BitLen()+extra > Mpprec {
-		a.SetOverflow()
-	}
-	return a.Ovf
-}
-
-func (a *Mpint) Set(b *Mpint) {
-	a.Val.Set(&b.Val)
-}
-
-func (a *Mpint) SetFloat(b *Mpflt) int {
-	// avoid converting huge floating-point numbers to integers
-	// (2*Mpprec is large enough to permit all tests to pass)
-	if b.Val.MantExp(nil) > 2*Mpprec {
-		return -1
-	}
-
-	if _, acc := b.Val.Int(&a.Val); acc == big.Exact {
-		return 0
-	}
-
-	const delta = 16 // a reasonably small number of bits > 0
-	var t big.Float
-	t.SetPrec(Mpprec - delta)
-
-	// try rounding down a little
-	t.SetMode(big.ToZero)
-	t.Set(&b.Val)
-	if _, acc := t.Int(&a.Val); acc == big.Exact {
-		return 0
-	}
-
-	// try rounding up a little
-	t.SetMode(big.AwayFromZero)
-	t.Set(&b.Val)
-	if _, acc := t.Int(&a.Val); acc == big.Exact {
-		return 0
-	}
-
-	return -1
-}
-
-func (a *Mpint) Add(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Add")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Add(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		yyerror("constant addition overflow")
-	}
-}
-
-func (a *Mpint) Sub(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Sub")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Sub(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		yyerror("constant subtraction overflow")
-	}
-}
-
-func (a *Mpint) Mul(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Mul")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Mul(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		yyerror("constant multiplication overflow")
-	}
-}
-
-func (a *Mpint) Quo(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Quo")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Quo(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		// can only happen for div-0 which should be checked elsewhere
-		yyerror("constant division overflow")
-	}
-}
-
-func (a *Mpint) Rem(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Rem")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Rem(&a.Val, &b.Val)
-
-	if a.checkOverflow(0) {
-		// should never happen
-		yyerror("constant modulo overflow")
-	}
-}
-
-func (a *Mpint) Or(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Or")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Or(&a.Val, &b.Val)
-}
-
-func (a *Mpint) And(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint And")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.And(&a.Val, &b.Val)
-}
-
-func (a *Mpint) AndNot(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint AndNot")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.AndNot(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Xor(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Xor")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	a.Val.Xor(&a.Val, &b.Val)
-}
-
-func (a *Mpint) Lsh(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Lsh")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	s := b.Int64()
-	if s < 0 || s >= Mpprec {
-		msg := "shift count too large"
-		if s < 0 {
-			msg = "invalid negative shift count"
-		}
-		yyerror("%s: %d", msg, s)
-		a.SetInt64(0)
-		return
-	}
-
-	if a.checkOverflow(int(s)) {
-		yyerror("constant shift overflow")
-		return
-	}
-	a.Val.Lsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Rsh(b *Mpint) {
-	if a.Ovf || b.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("ovf in Mpint Rsh")
-		}
-		a.SetOverflow()
-		return
-	}
-
-	s := b.Int64()
-	if s < 0 {
-		yyerror("invalid negative shift count: %d", s)
-		if a.Val.Sign() < 0 {
-			a.SetInt64(-1)
-		} else {
-			a.SetInt64(0)
-		}
-		return
-	}
-
-	a.Val.Rsh(&a.Val, uint(s))
-}
-
-func (a *Mpint) Cmp(b *Mpint) int {
-	return a.Val.Cmp(&b.Val)
-}
-
-func (a *Mpint) CmpInt64(c int64) int {
-	if c == 0 {
-		return a.Val.Sign() // common case shortcut
-	}
-	return a.Val.Cmp(big.NewInt(c))
-}
-
-func (a *Mpint) Neg() {
-	a.Val.Neg(&a.Val)
-}
-
-func (a *Mpint) Int64() int64 {
-	if a.Ovf {
-		if nsavederrors+nerrors == 0 {
-			yyerror("constant overflow")
-		}
-		return 0
-	}
-
-	return a.Val.Int64()
-}
-
-func (a *Mpint) SetInt64(c int64) {
-	a.Val.SetInt64(c)
-}
-
-func (a *Mpint) SetString(as string) {
-	_, ok := a.Val.SetString(as, 0)
-	if !ok {
-		// required syntax is [+-][0[x]]d*
-		// At the moment we lose precise error cause;
-		// the old code distinguished between:
-		// - malformed hex constant
-		// - malformed octal constant
-		// - malformed decimal constant
-		// TODO(gri) use different conversion function
-		yyerror("malformed integer constant: %s", as)
-		a.Val.SetUint64(0)
-		return
-	}
-	if a.checkOverflow(0) {
-		yyerror("constant too large: %s", as)
-	}
-}
-
-func (x *Mpint) String() string {
-	return bconv(x, 0)
-}
-
-func bconv(xval *Mpint, flag FmtFlag) string {
-	if flag&FmtSharp != 0 {
-		return fmt.Sprintf("%#x", &xval.Val)
-	}
-	return xval.Val.String()
-}
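
The deleted mpint.go layers an overflow check on top of math/big.Int: after each operation it rejects results whose bit length exceeds the compiler's constant-precision limit (Mpprec, 512 bits). The following is an illustrative sketch of that discipline only, with a hypothetical helper name, not the compiler's code:

```go
// Mirror the BitLen-based overflow cap the deleted Mpint methods apply
// around big.Int arithmetic.
package main

import (
	"fmt"
	"math/big"
)

const mpprec = 512 // same limit as the Mpprec constant in the deleted files

// checkedMul multiplies a and b and reports whether the result still fits
// under the precision limit, in the spirit of Mpint.Mul plus checkOverflow.
func checkedMul(a, b *big.Int) (*big.Int, bool) {
	r := new(big.Int).Mul(a, b)
	return r, r.BitLen() <= mpprec
}

func main() {
	small := big.NewInt(1 << 62)
	huge := new(big.Int).Lsh(big.NewInt(1), 500) // 2^500

	if _, ok := checkedMul(small, small); ok {
		fmt.Println("2^124 fits under the 512-bit limit")
	}
	if _, ok := checkedMul(huge, huge); !ok {
		fmt.Println("2^1000 would be reported as constant overflow")
	}
}
```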
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/noder.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/noder.go
deleted file mode 100644
index 8b0153f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/noder.go
+++ /dev/null
@@ -1,1090 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/noder.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/noder.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"os"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-
-	"bootstrap/cmd/compile/internal/syntax"
-)
-
-func parseFile(filename string) {
-	src, err := os.Open(filename)
-	if err != nil {
-		fmt.Println(err)
-		errorexit()
-	}
-	defer src.Close()
-
-	p := noder{baseline: lexlineno}
-	file, _ := syntax.Parse(src, p.error, p.pragma, 0) // errors are tracked via p.error
-
-	p.file(file)
-
-	if !imported_unsafe {
-		for _, x := range p.linknames {
-			p.error(syntax.Error{Line: x, Msg: "//go:linkname only allowed in Go files that import \"unsafe\""})
-		}
-	}
-
-	if nsyntaxerrors == 0 {
-		// Always run testdclstack here, even when debug_dclstack is not set, as a sanity measure.
-		testdclstack()
-	}
-}
-
-// noder transforms package syntax's AST into a Nod tree.
-type noder struct {
-	baseline  int32
-	linknames []int // tracks //go:linkname lines
-}
-
-func (p *noder) file(file *syntax.File) {
-	p.lineno(file.PkgName)
-	mkpackage(file.PkgName.Value)
-
-	xtop = append(xtop, p.decls(file.DeclList)...)
-
-	lexlineno = p.baseline + int32(file.Lines) - 1
-	lineno = lexlineno
-}
-
-func (p *noder) decls(decls []syntax.Decl) (l []*Node) {
-	var lastConstGroup *syntax.Group
-	var lastConstRHS []*Node
-	var iotaVal int64
-
-	for _, decl := range decls {
-		p.lineno(decl)
-		switch decl := decl.(type) {
-		case *syntax.ImportDecl:
-			p.importDecl(decl)
-
-		case *syntax.VarDecl:
-			l = append(l, p.varDecl(decl)...)
-
-		case *syntax.ConstDecl:
-			// Tricky to handle golang.org/issue/15550 correctly.
-
-			prevIota := iota_
-
-			if decl.Group == nil || decl.Group != lastConstGroup {
-				iotaVal = 0
-				lastConstRHS = nil
-			}
-
-			iota_ = iotaVal
-			lastconst = lastConstRHS
-
-			l = append(l, p.constDecl(decl)...)
-
-			lastConstRHS = lastconst
-			lastconst = nil
-
-			iota_ = prevIota
-			iotaVal++
-
-			lastConstGroup = decl.Group
-
-		case *syntax.TypeDecl:
-			l = append(l, p.typeDecl(decl))
-
-		case *syntax.FuncDecl:
-			l = append(l, p.funcDecl(decl))
-
-		default:
-			panic("unhandled Decl")
-		}
-	}
-
-	return
-}
-
-func (p *noder) importDecl(imp *syntax.ImportDecl) {
-	val := p.basicLit(imp.Path)
-	importfile(&val, nil)
-	ipkg := importpkg
-	importpkg = nil
-
-	if ipkg == nil {
-		if nerrors == 0 {
-			Fatalf("phase error in import")
-		}
-		return
-	}
-
-	ipkg.Direct = true
-
-	var my *Sym
-	if imp.LocalPkgName != nil {
-		my = p.name(imp.LocalPkgName)
-	} else {
-		my = lookup(ipkg.Name)
-	}
-
-	pack := p.nod(imp, OPACK, nil, nil)
-	pack.Sym = my
-	pack.Name.Pkg = ipkg
-
-	if my.Name == "." {
-		importdot(ipkg, pack)
-		return
-	}
-	if my.Name == "init" {
-		yyerrorl(pack.Lineno, "cannot import package as init - init must be a func")
-		return
-	}
-	if my.Name == "_" {
-		return
-	}
-	if my.Def != nil {
-		lineno = pack.Lineno
-		redeclare(my, "as imported package name")
-	}
-	my.Def = pack
-	my.Lastlineno = pack.Lineno
-	my.Block = 1 // at top level
-}
-
-func (p *noder) varDecl(decl *syntax.VarDecl) []*Node {
-	names := p.declNames(decl.NameList)
-
-	var typ *Node
-	if decl.Type != nil {
-		typ = p.typeExpr(decl.Type)
-	}
-
-	var exprs []*Node
-	if decl.Values != nil {
-		exprs = p.exprList(decl.Values)
-	}
-
-	p.lineno(decl)
-	return variter(names, typ, exprs)
-}
-
-func (p *noder) constDecl(decl *syntax.ConstDecl) []*Node {
-	names := p.declNames(decl.NameList)
-
-	var typ *Node
-	if decl.Type != nil {
-		typ = p.typeExpr(decl.Type)
-	}
-
-	var exprs []*Node
-	if decl.Values != nil {
-		exprs = p.exprList(decl.Values)
-	}
-
-	return constiter(names, typ, exprs)
-}
-
-func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node {
-	name := typedcl0(p.name(decl.Name))
-	name.Name.Param.Pragma = Pragma(decl.Pragma)
-
-	var typ *Node
-	if decl.Type != nil {
-		typ = p.typeExpr(decl.Type)
-	}
-
-	return typedcl1(name, typ, true)
-}
-
-func (p *noder) declNames(names []*syntax.Name) []*Node {
-	var nodes []*Node
-	for _, name := range names {
-		nodes = append(nodes, p.declName(name))
-	}
-	return nodes
-}
-
-func (p *noder) declName(name *syntax.Name) *Node {
-	// TODO(mdempsky): Set lineno?
-	return dclname(p.name(name))
-}
-
-func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node {
-	f := p.funcHeader(fun)
-	if f == nil {
-		return nil
-	}
-
-	var body []*Node
-	if fun.Body != nil {
-		body = p.stmts(fun.Body)
-		if body == nil {
-			body = []*Node{p.nod(fun, OEMPTY, nil, nil)}
-		}
-	}
-
-	pragma := Pragma(fun.Pragma)
-
-	f.Nbody.Set(body)
-	f.Noescape = pragma&Noescape != 0
-	if f.Noescape && len(body) != 0 {
-		yyerror("can only use //go:noescape with external func implementations")
-	}
-	f.Func.Pragma = pragma
-	lineno = p.baseline + int32(fun.EndLine) - 1
-	f.Func.Endlineno = lineno
-
-	funcbody(f)
-
-	return f
-}
-
-func (p *noder) funcHeader(fun *syntax.FuncDecl) *Node {
-	name := p.name(fun.Name)
-	t := p.signature(fun.Recv, fun.Type)
-	f := p.nod(fun, ODCLFUNC, nil, nil)
-
-	if fun.Recv == nil {
-		// FunctionName Signature
-		if name.Name == "init" {
-			name = renameinit()
-			if t.List.Len() > 0 || t.Rlist.Len() > 0 {
-				yyerror("func init must have no arguments and no return values")
-			}
-		}
-
-		if localpkg.Name == "main" && name.Name == "main" {
-			if t.List.Len() > 0 || t.Rlist.Len() > 0 {
-				yyerror("func main must have no arguments and no return values")
-			}
-		}
-
-		f.Func.Nname = newfuncname(name)
-	} else {
-		// Receiver MethodName Signature
-
-		f.Func.Shortname = newfuncname(name)
-		f.Func.Nname = methodname(f.Func.Shortname, t.Left.Right)
-	}
-
-	f.Func.Nname.Name.Defn = f
-	f.Func.Nname.Name.Param.Ntype = t // TODO: check if nname already has an ntype
-
-	declare(f.Func.Nname, PFUNC)
-	funchdr(f)
-	return f
-}
-
-func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *Node {
-	n := p.nod(typ, OTFUNC, nil, nil)
-	if recv != nil {
-		n.Left = p.param(recv, false, false)
-	}
-	n.List.Set(p.params(typ.ParamList, true))
-	n.Rlist.Set(p.params(typ.ResultList, false))
-	return n
-}
-
-func (p *noder) params(params []*syntax.Field, dddOk bool) []*Node {
-	var nodes []*Node
-	for i, param := range params {
-		p.lineno(param)
-		nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
-	}
-	return nodes
-}
-
-func (p *noder) param(param *syntax.Field, dddOk, final bool) *Node {
-	var name *Node
-	if param.Name != nil {
-		name = p.newname(param.Name)
-	}
-
-	typ := p.typeExpr(param.Type)
-	n := p.nod(param, ODCLFIELD, name, typ)
-
-	// rewrite ...T parameter
-	if typ.Op == ODDD {
-		if !dddOk {
-			yyerror("cannot use ... in receiver or result parameter list")
-		} else if !final {
-			yyerror("can only use ... with final parameter in list")
-		}
-		typ.Op = OTARRAY
-		typ.Right = typ.Left
-		typ.Left = nil
-		n.Isddd = true
-		if n.Left != nil {
-			n.Left.Isddd = true
-		}
-	}
-
-	return n
-}
-
-func (p *noder) exprList(expr syntax.Expr) []*Node {
-	if list, ok := expr.(*syntax.ListExpr); ok {
-		return p.exprs(list.ElemList)
-	}
-	return []*Node{p.expr(expr)}
-}
-
-func (p *noder) exprs(exprs []syntax.Expr) []*Node {
-	var nodes []*Node
-	for _, expr := range exprs {
-		nodes = append(nodes, p.expr(expr))
-	}
-	return nodes
-}
-
-func (p *noder) expr(expr syntax.Expr) *Node {
-	p.lineno(expr)
-	switch expr := expr.(type) {
-	case nil:
-		return nil
-	case *syntax.Name:
-		return p.mkname(expr)
-	case *syntax.BasicLit:
-		return p.setlineno(expr, nodlit(p.basicLit(expr)))
-
-	case *syntax.CompositeLit:
-		n := p.nod(expr, OCOMPLIT, nil, nil)
-		if expr.Type != nil {
-			n.Right = p.expr(expr.Type)
-		}
-		l := p.exprs(expr.ElemList)
-		for i, e := range l {
-			l[i] = p.wrapname(expr.ElemList[i], e)
-		}
-		n.List.Set(l)
-		lineno = p.baseline + int32(expr.EndLine) - 1
-		return n
-	case *syntax.KeyValueExpr:
-		return p.nod(expr, OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
-	case *syntax.FuncLit:
-		closurehdr(p.typeExpr(expr.Type))
-		body := p.stmts(expr.Body)
-		lineno = p.baseline + int32(expr.EndLine) - 1
-		return p.setlineno(expr, closurebody(body))
-	case *syntax.ParenExpr:
-		return p.nod(expr, OPAREN, p.expr(expr.X), nil)
-	case *syntax.SelectorExpr:
-		// parser.new_dotname
-		obj := p.expr(expr.X)
-		if obj.Op == OPACK {
-			obj.Used = true
-			return oldname(restrictlookup(expr.Sel.Value, obj.Name.Pkg))
-		}
-		return p.setlineno(expr, nodSym(OXDOT, obj, p.name(expr.Sel)))
-	case *syntax.IndexExpr:
-		return p.nod(expr, OINDEX, p.expr(expr.X), p.expr(expr.Index))
-	case *syntax.SliceExpr:
-		op := OSLICE
-		if expr.Full {
-			op = OSLICE3
-		}
-		n := p.nod(expr, op, p.expr(expr.X), nil)
-		var index [3]*Node
-		for i, x := range expr.Index {
-			if x != nil {
-				index[i] = p.expr(x)
-			}
-		}
-		n.SetSliceBounds(index[0], index[1], index[2])
-		return n
-	case *syntax.AssertExpr:
-		if expr.Type == nil {
-			panic("unexpected AssertExpr")
-		}
-		// TODO(mdempsky): parser.pexpr uses p.expr(), but
-		// seems like the type field should be parsed with
-		// ntype? Shrug, doesn't matter here.
-		return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.expr(expr.Type))
-	case *syntax.Operation:
-		x := p.expr(expr.X)
-		if expr.Y == nil {
-			if expr.Op == syntax.And {
-				x = unparen(x) // TODO(mdempsky): Needed?
-				if x.Op == OCOMPLIT {
-					// Special case for &T{...}: turn into (*T){...}.
-					// TODO(mdempsky): Switch back to p.nod after we
-					// get rid of gcCompat.
-					x.Right = nod(OIND, x.Right, nil)
-					x.Right.Implicit = true
-					return x
-				}
-			}
-			return p.nod(expr, p.unOp(expr.Op), x, nil)
-		}
-		return p.nod(expr, p.binOp(expr.Op), x, p.expr(expr.Y))
-	case *syntax.CallExpr:
-		n := p.nod(expr, OCALL, p.expr(expr.Fun), nil)
-		n.List.Set(p.exprs(expr.ArgList))
-		n.Isddd = expr.HasDots
-		return n
-
-	case *syntax.ArrayType:
-		var len *Node
-		if expr.Len != nil {
-			len = p.expr(expr.Len)
-		} else {
-			len = p.nod(expr, ODDD, nil, nil)
-		}
-		return p.nod(expr, OTARRAY, len, p.typeExpr(expr.Elem))
-	case *syntax.SliceType:
-		return p.nod(expr, OTARRAY, nil, p.typeExpr(expr.Elem))
-	case *syntax.DotsType:
-		return p.nod(expr, ODDD, p.typeExpr(expr.Elem), nil)
-	case *syntax.StructType:
-		return p.structType(expr)
-	case *syntax.InterfaceType:
-		return p.interfaceType(expr)
-	case *syntax.FuncType:
-		return p.signature(nil, expr)
-	case *syntax.MapType:
-		return p.nod(expr, OTMAP, p.typeExpr(expr.Key), p.typeExpr(expr.Value))
-	case *syntax.ChanType:
-		n := p.nod(expr, OTCHAN, p.typeExpr(expr.Elem), nil)
-		n.Etype = EType(p.chanDir(expr.Dir))
-		return n
-
-	case *syntax.TypeSwitchGuard:
-		n := p.nod(expr, OTYPESW, nil, p.expr(expr.X))
-		if expr.Lhs != nil {
-			n.Left = p.declName(expr.Lhs)
-			if isblank(n.Left) {
-				yyerror("invalid variable name %v in type switch", n.Left)
-			}
-		}
-		return n
-	}
-	panic("unhandled Expr")
-}
-
-func (p *noder) typeExpr(typ syntax.Expr) *Node {
-	// TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
-	return p.expr(typ)
-}
-
-func (p *noder) chanDir(dir syntax.ChanDir) ChanDir {
-	switch dir {
-	case 0:
-		return Cboth
-	case syntax.SendOnly:
-		return Csend
-	case syntax.RecvOnly:
-		return Crecv
-	}
-	panic("unhandled ChanDir")
-}
-
-func (p *noder) structType(expr *syntax.StructType) *Node {
-	var l []*Node
-	for i, field := range expr.FieldList {
-		p.lineno(field)
-		var n *Node
-		if field.Name == nil {
-			n = p.embedded(field.Type)
-		} else {
-			n = p.nod(field, ODCLFIELD, p.newname(field.Name), p.typeExpr(field.Type))
-		}
-		if i < len(expr.TagList) && expr.TagList[i] != nil {
-			n.SetVal(p.basicLit(expr.TagList[i]))
-		}
-		l = append(l, n)
-	}
-
-	p.lineno(expr)
-	n := p.nod(expr, OTSTRUCT, nil, nil)
-	n.List.Set(l)
-	return n
-}
-
-func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
-	var l []*Node
-	for _, method := range expr.MethodList {
-		p.lineno(method)
-		var n *Node
-		if method.Name == nil {
-			n = p.nod(method, ODCLFIELD, nil, oldname(p.packname(method.Type)))
-		} else {
-			mname := p.newname(method.Name)
-			sig := p.typeExpr(method.Type)
-			sig.Left = fakethis()
-			n = p.nod(method, ODCLFIELD, mname, sig)
-			ifacedcl(n)
-		}
-		l = append(l, n)
-	}
-
-	n := p.nod(expr, OTINTER, nil, nil)
-	n.List.Set(l)
-	return n
-}
-
-func (p *noder) packname(expr syntax.Expr) *Sym {
-	switch expr := expr.(type) {
-	case *syntax.Name:
-		name := p.name(expr)
-		if n := oldname(name); n.Name != nil && n.Name.Pack != nil {
-			n.Name.Pack.Used = true
-		}
-		return name
-	case *syntax.SelectorExpr:
-		name := p.name(expr.X.(*syntax.Name))
-		var pkg *Pkg
-		if name.Def == nil || name.Def.Op != OPACK {
-			yyerror("%v is not a package", name)
-			pkg = localpkg
-		} else {
-			name.Def.Used = true
-			pkg = name.Def.Name.Pkg
-		}
-		return restrictlookup(expr.Sel.Value, pkg)
-	}
-	panic(fmt.Sprintf("unexpected packname: %#v", expr))
-}
-
-func (p *noder) embedded(typ syntax.Expr) *Node {
-	op, isStar := typ.(*syntax.Operation)
-	if isStar {
-		if op.Op != syntax.Mul || op.Y != nil {
-			panic("unexpected Operation")
-		}
-		typ = op.X
-	}
-	n := embedded(p.packname(typ), localpkg)
-	if isStar {
-		n.Right = p.nod(op, OIND, n.Right, nil)
-	}
-	return n
-}
-
-func (p *noder) stmts(stmts []syntax.Stmt) []*Node {
-	var nodes []*Node
-	for _, stmt := range stmts {
-		s := p.stmt(stmt)
-		if s == nil {
-		} else if s.Op == OBLOCK && s.Ninit.Len() == 0 {
-			nodes = append(nodes, s.List.Slice()...)
-		} else {
-			nodes = append(nodes, s)
-		}
-	}
-	return nodes
-}
-
-func (p *noder) stmt(stmt syntax.Stmt) *Node {
-	p.lineno(stmt)
-	switch stmt := stmt.(type) {
-	case *syntax.EmptyStmt:
-		return nil
-	case *syntax.LabeledStmt:
-		return p.labeledStmt(stmt)
-	case *syntax.BlockStmt:
-		return p.body(stmt.Body)
-	case *syntax.ExprStmt:
-		return p.wrapname(stmt, p.expr(stmt.X))
-	case *syntax.SendStmt:
-		return p.nod(stmt, OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
-	case *syntax.DeclStmt:
-		return liststmt(p.decls(stmt.DeclList))
-	case *syntax.AssignStmt:
-		if stmt.Op != 0 && stmt.Op != syntax.Def {
-			n := p.nod(stmt, OASOP, p.expr(stmt.Lhs), p.expr(stmt.Rhs))
-			n.Implicit = stmt.Rhs == syntax.ImplicitOne
-			n.Etype = EType(p.binOp(stmt.Op))
-			return n
-		}
-
-		lhs := p.exprList(stmt.Lhs)
-		rhs := p.exprList(stmt.Rhs)
-
-		n := p.nod(stmt, OAS, nil, nil) // assume common case
-
-		if stmt.Op == syntax.Def {
-			n.Colas = true
-			colasdefn(lhs, n) // modifies lhs, call before using lhs[0] in common case
-		}
-
-		if len(lhs) == 1 && len(rhs) == 1 {
-			// common case
-			n.Left = lhs[0]
-			n.Right = rhs[0]
-		} else {
-			n.Op = OAS2
-			n.List.Set(lhs)
-			n.Rlist.Set(rhs)
-		}
-		return n
-
-	case *syntax.BranchStmt:
-		var op Op
-		switch stmt.Tok {
-		case syntax.Break:
-			op = OBREAK
-		case syntax.Continue:
-			op = OCONTINUE
-		case syntax.Fallthrough:
-			op = OXFALL
-		case syntax.Goto:
-			op = OGOTO
-		default:
-			panic("unhandled BranchStmt")
-		}
-		n := p.nod(stmt, op, nil, nil)
-		if stmt.Label != nil {
-			n.Left = p.newname(stmt.Label)
-		}
-		if op == OGOTO {
-			n.Sym = dclstack // context, for goto restriction
-		}
-		if op == OXFALL {
-			n.Xoffset = int64(block)
-		}
-		return n
-	case *syntax.CallStmt:
-		var op Op
-		switch stmt.Tok {
-		case syntax.Defer:
-			op = ODEFER
-		case syntax.Go:
-			op = OPROC
-		default:
-			panic("unhandled CallStmt")
-		}
-		return p.nod(stmt, op, p.expr(stmt.Call), nil)
-	case *syntax.ReturnStmt:
-		var results []*Node
-		if stmt.Results != nil {
-			results = p.exprList(stmt.Results)
-		}
-		n := p.nod(stmt, ORETURN, nil, nil)
-		n.List.Set(results)
-		if n.List.Len() == 0 && Curfn != nil {
-			for _, ln := range Curfn.Func.Dcl {
-				if ln.Class == PPARAM {
-					continue
-				}
-				if ln.Class != PPARAMOUT {
-					break
-				}
-				if ln.Sym.Def != ln {
-					yyerror("%s is shadowed during return", ln.Sym.Name)
-				}
-			}
-		}
-		return n
-	case *syntax.IfStmt:
-		return p.ifStmt(stmt)
-	case *syntax.ForStmt:
-		return p.forStmt(stmt)
-	case *syntax.SwitchStmt:
-		return p.switchStmt(stmt)
-	case *syntax.SelectStmt:
-		return p.selectStmt(stmt)
-	}
-	panic("unhandled Stmt")
-}
-
-func (p *noder) body(body []syntax.Stmt) *Node {
-	l := p.bodyList(body)
-	if len(l) == 0 {
-		// TODO(mdempsky): Line number?
-		return nod(OEMPTY, nil, nil)
-	}
-	return liststmt(l)
-}
-
-func (p *noder) bodyList(body []syntax.Stmt) []*Node {
-	markdcl()
-	nodes := p.stmts(body)
-	popdcl()
-	return nodes
-}
-
-func (p *noder) ifStmt(stmt *syntax.IfStmt) *Node {
-	markdcl()
-	n := p.nod(stmt, OIF, nil, nil)
-	if stmt.Init != nil {
-		n.Ninit.Set1(p.stmt(stmt.Init))
-	}
-	if stmt.Cond != nil {
-		n.Left = p.expr(stmt.Cond)
-	}
-	n.Nbody.Set(p.bodyList(stmt.Then))
-	if stmt.Else != nil {
-		e := p.stmt(stmt.Else)
-		if e.Op == OBLOCK && e.Ninit.Len() == 0 {
-			n.Rlist.Set(e.List.Slice())
-		} else {
-			n.Rlist.Set1(e)
-		}
-	}
-	popdcl()
-	return n
-}
-
-func (p *noder) forStmt(stmt *syntax.ForStmt) *Node {
-	markdcl()
-	var n *Node
-	if r, ok := stmt.Init.(*syntax.RangeClause); ok {
-		if stmt.Cond != nil || stmt.Post != nil {
-			panic("unexpected RangeClause")
-		}
-
-		n = p.nod(r, ORANGE, nil, p.expr(r.X))
-		if r.Lhs != nil {
-			lhs := p.exprList(r.Lhs)
-			n.List.Set(lhs)
-			if r.Def {
-				n.Colas = true
-				colasdefn(lhs, n)
-			}
-		}
-	} else {
-		n = p.nod(stmt, OFOR, nil, nil)
-		if stmt.Init != nil {
-			n.Ninit.Set1(p.stmt(stmt.Init))
-		}
-		if stmt.Cond != nil {
-			n.Left = p.expr(stmt.Cond)
-		}
-		if stmt.Post != nil {
-			n.Right = p.stmt(stmt.Post)
-		}
-	}
-	n.Nbody.Set(p.bodyList(stmt.Body))
-	popdcl()
-	return n
-}
-
-func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node {
-	markdcl()
-	n := p.nod(stmt, OSWITCH, nil, nil)
-	if stmt.Init != nil {
-		n.Ninit.Set1(p.stmt(stmt.Init))
-	}
-	if stmt.Tag != nil {
-		n.Left = p.expr(stmt.Tag)
-	}
-
-	tswitch := n.Left
-	if tswitch != nil && (tswitch.Op != OTYPESW || tswitch.Left == nil) {
-		tswitch = nil
-	}
-
-	n.List.Set(p.caseClauses(stmt.Body, tswitch))
-
-	popdcl()
-	return n
-}
-
-func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node) []*Node {
-	var nodes []*Node
-	for _, clause := range clauses {
-		p.lineno(clause)
-		markdcl()
-		n := p.nod(clause, OXCASE, nil, nil)
-		if clause.Cases != nil {
-			n.List.Set(p.exprList(clause.Cases))
-		}
-		if tswitch != nil {
-			nn := newname(tswitch.Left.Sym)
-			declare(nn, dclcontext)
-			n.Rlist.Set1(nn)
-			// keep track of the instances for reporting unused
-			nn.Name.Defn = tswitch
-		}
-		n.Xoffset = int64(block)
-		n.Nbody.Set(p.stmts(clause.Body))
-		popdcl()
-		nodes = append(nodes, n)
-	}
-	return nodes
-}
-
-func (p *noder) selectStmt(stmt *syntax.SelectStmt) *Node {
-	n := p.nod(stmt, OSELECT, nil, nil)
-	n.List.Set(p.commClauses(stmt.Body))
-	return n
-}
-
-func (p *noder) commClauses(clauses []*syntax.CommClause) []*Node {
-	var nodes []*Node
-	for _, clause := range clauses {
-		p.lineno(clause)
-		markdcl()
-		n := p.nod(clause, OXCASE, nil, nil)
-		if clause.Comm != nil {
-			n.List.Set1(p.stmt(clause.Comm))
-		}
-		n.Xoffset = int64(block)
-		n.Nbody.Set(p.stmts(clause.Body))
-		popdcl()
-		nodes = append(nodes, n)
-	}
-	return nodes
-}
-
-func (p *noder) labeledStmt(label *syntax.LabeledStmt) *Node {
-	lhs := p.nod(label, OLABEL, p.newname(label.Label), nil)
-	lhs.Sym = dclstack
-
-	var ls *Node
-	if label.Stmt != nil { // TODO(mdempsky): Should always be present.
-		ls = p.stmt(label.Stmt)
-	}
-
-	lhs.Name.Defn = ls
-	l := []*Node{lhs}
-	if ls != nil {
-		if ls.Op == OBLOCK && ls.Ninit.Len() == 0 {
-			l = append(l, ls.List.Slice()...)
-		} else {
-			l = append(l, ls)
-		}
-	}
-	return liststmt(l)
-}
-
-var unOps = [...]Op{
-	syntax.Recv: ORECV,
-	syntax.Mul:  OIND,
-	syntax.And:  OADDR,
-
-	syntax.Not: ONOT,
-	syntax.Xor: OCOM,
-	syntax.Add: OPLUS,
-	syntax.Sub: OMINUS,
-}
-
-func (p *noder) unOp(op syntax.Operator) Op {
-	if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
-		panic("invalid Operator")
-	}
-	return unOps[op]
-}
-
-var binOps = [...]Op{
-	syntax.OrOr:   OOROR,
-	syntax.AndAnd: OANDAND,
-
-	syntax.Eql: OEQ,
-	syntax.Neq: ONE,
-	syntax.Lss: OLT,
-	syntax.Leq: OLE,
-	syntax.Gtr: OGT,
-	syntax.Geq: OGE,
-
-	syntax.Add: OADD,
-	syntax.Sub: OSUB,
-	syntax.Or:  OOR,
-	syntax.Xor: OXOR,
-
-	syntax.Mul:    OMUL,
-	syntax.Div:    ODIV,
-	syntax.Rem:    OMOD,
-	syntax.And:    OAND,
-	syntax.AndNot: OANDNOT,
-	syntax.Shl:    OLSH,
-	syntax.Shr:    ORSH,
-}
-
-func (p *noder) binOp(op syntax.Operator) Op {
-	if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
-		panic("invalid Operator")
-	}
-	return binOps[op]
-}
-
-func (p *noder) basicLit(lit *syntax.BasicLit) Val {
-	// TODO: Don't try to convert if we had syntax errors (conversions may fail).
-	//       Use dummy values so we can continue to compile. Eventually, use a
-	//       form of "unknown" literals that are ignored during type-checking so
-	//       we can continue type-checking w/o spurious follow-up errors.
-	switch s := lit.Value; lit.Kind {
-	case syntax.IntLit:
-		x := new(Mpint)
-		x.SetString(s)
-		return Val{U: x}
-
-	case syntax.FloatLit:
-		x := newMpflt()
-		x.SetString(s)
-		return Val{U: x}
-
-	case syntax.ImagLit:
-		x := new(Mpcplx)
-		x.Imag.SetString(strings.TrimSuffix(s, "i"))
-		return Val{U: x}
-
-	case syntax.RuneLit:
-		var r rune
-		if u, err := strconv.Unquote(s); err == nil && len(u) > 0 {
-			// Package syntax already reported any errors.
-			// Check for them again though because 0 is a
-			// better fallback value for invalid rune
-			// literals than 0xFFFD.
-			if len(u) == 1 {
-				r = rune(u[0])
-			} else {
-				r, _ = utf8.DecodeRuneInString(u)
-			}
-		}
-		x := new(Mpint)
-		x.SetInt64(int64(r))
-		x.Rune = true
-		return Val{U: x}
-
-	case syntax.StringLit:
-		if len(s) > 0 && s[0] == '`' {
-			// strip carriage returns from raw string
-			s = strings.Replace(s, "\r", "", -1)
-		}
-		// Ignore errors because package syntax already reported them.
-		u, _ := strconv.Unquote(s)
-		return Val{U: u}
-
-	default:
-		panic("unhandled BasicLit kind")
-	}
-}
-
-func (p *noder) name(name *syntax.Name) *Sym {
-	return lookup(name.Value)
-}
-
-func (p *noder) mkname(name *syntax.Name) *Node {
-	// TODO(mdempsky): Set line number?
-	return mkname(p.name(name))
-}
-
-func (p *noder) newname(name *syntax.Name) *Node {
-	// TODO(mdempsky): Set line number?
-	return newname(p.name(name))
-}
-
-func (p *noder) wrapname(n syntax.Node, x *Node) *Node {
-	// These nodes do not carry line numbers.
-	// Introduce a wrapper node to give them the correct line.
-	switch x.Op {
-	case OTYPE, OLITERAL:
-		if x.Sym == nil {
-			break
-		}
-		fallthrough
-	case ONAME, ONONAME, OPACK:
-		x = p.nod(n, OPAREN, x, nil)
-		x.Implicit = true
-	}
-	return x
-}
-
-func (p *noder) nod(orig syntax.Node, op Op, left, right *Node) *Node {
-	return p.setlineno(orig, nod(op, left, right))
-}
-
-func (p *noder) setlineno(src syntax.Node, dst *Node) *Node {
-	l := int32(src.Line())
-	if l == 0 {
-		// TODO(mdempsky): Shouldn't happen. Fix package syntax.
-		return dst
-	}
-	dst.Lineno = p.baseline + l - 1
-	return dst
-}
-
-func (p *noder) lineno(n syntax.Node) {
-	if n == nil {
-		return
-	}
-	l := int32(n.Line())
-	if l == 0 {
-		// TODO(mdempsky): Shouldn't happen. Fix package syntax.
-		return
-	}
-	lineno = p.baseline + l - 1
-}
-
-func (p *noder) error(err error) {
-	line := p.baseline
-	var msg string
-	if err, ok := err.(syntax.Error); ok {
-		line += int32(err.Line) - 1
-		msg = err.Msg
-	} else {
-		msg = err.Error()
-	}
-	yyerrorl(line, "%s", msg)
-}
-
-func (p *noder) pragma(pos, line int, text string) syntax.Pragma {
-	switch {
-	case strings.HasPrefix(text, "line "):
-		// Want to use LastIndexByte below but it's not defined in Go1.4 and bootstrap fails.
-		i := strings.LastIndex(text, ":") // look from right (Windows filenames may contain ':')
-		if i < 0 {
-			break
-		}
-		n, err := strconv.Atoi(text[i+1:])
-		if err != nil {
-			// TODO: make this an error instead? it is almost certainly a bug.
-			break
-		}
-		if n > 1e8 {
-			p.error(syntax.Error{Pos: pos, Line: line, Msg: "line number out of range"})
-			errorexit()
-		}
-		if n <= 0 {
-			break
-		}
-		lexlineno = p.baseline + int32(line)
-		linehistupdate(text[5:i], n)
-
-	case strings.HasPrefix(text, "go:linkname "):
-		// Record line number so we can emit an error later if
-		// the file doesn't import package unsafe.
-		p.linknames = append(p.linknames, line)
-
-		f := strings.Fields(text)
-		if len(f) != 3 {
-			p.error(syntax.Error{Pos: pos, Line: line, Msg: "usage: //go:linkname localname linkname"})
-			break
-		}
-		lookup(f[1]).Linkname = f[2]
-
-	case strings.HasPrefix(text, "go:cgo_"):
-		lineno = p.baseline + int32(line) - 1 // pragcgo may call yyerror
-		pragcgobuf += pragcgo(text)
-		fallthrough // because of //go:cgo_unsafe_args
-	default:
-		verb := text
-		if i := strings.Index(text, " "); i >= 0 {
-			verb = verb[:i]
-		}
-		lineno = p.baseline + int32(line) - 1 // pragmaValue may call yyerror
-		return syntax.Pragma(pragmaValue(verb))
-	}
-
-	return 0
-}
-
-func mkname(sym *Sym) *Node {
-	n := oldname(sym)
-	if n.Name != nil && n.Name.Pack != nil {
-		n.Name.Pack.Used = true
-	}
-	return n
-}
-
-func unparen(x *Node) *Node {
-	for x.Op == OPAREN {
-		x = x.Left
-	}
-	return x
-}
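
The deleted noder.go above lowers the compiler's syntax-package AST into gc's Node IR with one large type switch per syntax form (p.expr, p.stmt). A minimal sketch of that walk-and-lower pattern follows, written against the exported go/ast package since the compiler-internal syntax API is not stable; the Node type and lower function here are illustrative assumptions, not the gc package's real representation.

// Sketch only: mirrors the shape of noder.expr using go/ast instead of
// the internal syntax package. Node is a stand-in for the compiler IR.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

type Node struct {
	Op          string
	Left, Right *Node
}

// lower is a type switch over syntax forms, each case building an IR node
// from recursively lowered children, like noder.expr in the deleted file.
func lower(e ast.Expr) *Node {
	switch e := e.(type) {
	case *ast.Ident:
		return &Node{Op: "NAME:" + e.Name}
	case *ast.BasicLit:
		return &Node{Op: "LITERAL:" + e.Value}
	case *ast.ParenExpr:
		return &Node{Op: "PAREN", Left: lower(e.X)}
	case *ast.UnaryExpr:
		return &Node{Op: "UNARY:" + e.Op.String(), Left: lower(e.X)}
	case *ast.BinaryExpr:
		return &Node{Op: e.Op.String(), Left: lower(e.X), Right: lower(e.Y)}
	default:
		return &Node{Op: fmt.Sprintf("UNHANDLED:%T", e)}
	}
}

func dump(n *Node, indent string) {
	if n == nil {
		return
	}
	fmt.Println(indent + n.Op)
	dump(n.Left, indent+"  ")
	dump(n.Right, indent+"  ")
}

func main() {
	expr, err := parser.ParseExpr("(a + b) * -c")
	if err != nil {
		panic(err)
	}
	dump(lower(expr), "")
}
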
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/obj.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/obj.go
deleted file mode 100644
index ffaa4f2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/obj.go
+++ /dev/null
@@ -1,431 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/obj.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"crypto/sha256"
-	"fmt"
-	"io"
-	"strconv"
-)
-
-// architecture-independent object file output
-const (
-	ArhdrSize = 60
-)
-
-func formathdr(arhdr []byte, name string, size int64) {
-	copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
-}
-
-// These modes say which kind of object file to generate.
-// The default use of the toolchain is to set both bits,
-// generating a combined compiler+linker object, one that
-// serves to describe the package to both the compiler and the linker.
-// In fact the compiler and linker read nearly disjoint sections of
-// that file, though, so in a distributed build setting it can be more
-// efficient to split the output into two files, supplying the compiler
-// object only to future compilations and the linker object only to
-// future links.
-//
-// By default a combined object is written, but if -linkobj is specified
-// on the command line then the default -o output is a compiler object
-// and the -linkobj output is a linker object.
-const (
-	modeCompilerObj = 1 << iota
-	modeLinkerObj
-)
-
-func dumpobj() {
-	if linkobj == "" {
-		dumpobj1(outfile, modeCompilerObj|modeLinkerObj)
-	} else {
-		dumpobj1(outfile, modeCompilerObj)
-		dumpobj1(linkobj, modeLinkerObj)
-	}
-}
-
-func dumpobj1(outfile string, mode int) {
-	var err error
-	bout, err = bio.Create(outfile)
-	if err != nil {
-		flusherrors()
-		fmt.Printf("can't create %s: %v\n", outfile, err)
-		errorexit()
-	}
-
-	startobj := int64(0)
-	var arhdr [ArhdrSize]byte
-	if writearchive {
-		bout.WriteString("!<arch>\n")
-		arhdr = [ArhdrSize]byte{}
-		bout.Write(arhdr[:])
-		startobj = bout.Offset()
-	}
-
-	printheader := func() {
-		fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.GOOS, obj.GOARCH, obj.Version, obj.Expstring())
-		if buildid != "" {
-			fmt.Fprintf(bout, "build id %q\n", buildid)
-		}
-		if localpkg.Name == "main" {
-			fmt.Fprintf(bout, "main\n")
-		}
-		if safemode {
-			fmt.Fprintf(bout, "safe\n")
-		} else {
-			fmt.Fprintf(bout, "----\n") // room for some other tool to write "safe"
-		}
-		fmt.Fprintf(bout, "\n") // header ends with blank line
-	}
-
-	printheader()
-
-	if mode&modeCompilerObj != 0 {
-		dumpexport()
-	}
-
-	if writearchive {
-		bout.Flush()
-		size := bout.Offset() - startobj
-		if size&1 != 0 {
-			bout.WriteByte(0)
-		}
-		bout.Seek(startobj-ArhdrSize, 0)
-		formathdr(arhdr[:], "__.PKGDEF", size)
-		bout.Write(arhdr[:])
-		bout.Flush()
-		bout.Seek(startobj+size+(size&1), 0)
-	}
-
-	if mode&modeLinkerObj == 0 {
-		bout.Close()
-		return
-	}
-
-	if writearchive {
-		// start object file
-		arhdr = [ArhdrSize]byte{}
-		bout.Write(arhdr[:])
-		startobj = bout.Offset()
-		printheader()
-	}
-
-	if pragcgobuf != "" {
-		if writearchive {
-			// write empty export section; must be before cgo section
-			fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
-		}
-
-		fmt.Fprintf(bout, "\n$$  // cgo\n")
-		fmt.Fprintf(bout, "%s\n$$\n\n", pragcgobuf)
-	}
-
-	fmt.Fprintf(bout, "\n!\n")
-
-	externs := len(externdcl)
-
-	dumpglobls()
-	dumpptabs()
-	dumptypestructs()
-
-	// Dump extra globals.
-	tmp := externdcl
-
-	if externdcl != nil {
-		externdcl = externdcl[externs:]
-	}
-	dumpglobls()
-	externdcl = tmp
-
-	if zerosize > 0 {
-		zero := Pkglookup("zero", mappkg)
-		ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
-	}
-
-	obj.Writeobjdirect(Ctxt, bout.Writer)
-
-	if writearchive {
-		bout.Flush()
-		size := bout.Offset() - startobj
-		if size&1 != 0 {
-			bout.WriteByte(0)
-		}
-		bout.Seek(startobj-ArhdrSize, 0)
-		formathdr(arhdr[:], "_go_.o", size)
-		bout.Write(arhdr[:])
-	}
-
-	bout.Close()
-}
-
-func dumpptabs() {
-	if !Ctxt.Flag_dynlink || localpkg.Name != "main" {
-		return
-	}
-	for _, exportn := range exportlist {
-		s := exportn.Sym
-		n := s.Def
-		if n == nil {
-			continue
-		}
-		if n.Op != ONAME {
-			continue
-		}
-		if !exportname(s.Name) {
-			continue
-		}
-		if s.Pkg.Name != "main" {
-			continue
-		}
-		if n.Type.Etype == TFUNC && n.Class == PFUNC {
-			// function
-			ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type})
-		} else {
-			// variable
-			ptabs = append(ptabs, ptabEntry{s: s, t: typPtr(s.Def.Type)})
-		}
-	}
-}
-
-func dumpglobls() {
-	// add globals
-	for _, n := range externdcl {
-		if n.Op != ONAME {
-			continue
-		}
-
-		if n.Type == nil {
-			Fatalf("external %v nil type\n", n)
-		}
-		if n.Class == PFUNC {
-			continue
-		}
-		if n.Sym.Pkg != localpkg {
-			continue
-		}
-		dowidth(n.Type)
-		ggloblnod(n)
-	}
-
-	for _, n := range funcsyms {
-		dsymptr(n.Sym, 0, n.Sym.Def.Func.Shortname.Sym, 0)
-		ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	}
-
-	// Do not reprocess funcsyms on next dumpglobls call.
-	funcsyms = nil
-}
-
-func Linksym(s *Sym) *obj.LSym {
-	if s == nil {
-		return nil
-	}
-	if s.Lsym != nil {
-		return s.Lsym
-	}
-	var name string
-	if isblanksym(s) {
-		name = "_"
-	} else if s.Linkname != "" {
-		name = s.Linkname
-	} else {
-		name = s.Pkg.Prefix + "." + s.Name
-	}
-
-	ls := obj.Linklookup(Ctxt, name, 0)
-	s.Lsym = ls
-	return ls
-}
-
-func duintxx(s *Sym, off int, v uint64, wid int) int {
-	return duintxxLSym(Linksym(s), off, v, wid)
-}
-
-func duintxxLSym(s *obj.LSym, off int, v uint64, wid int) int {
-	// Update symbol data directly instead of generating a
-	// DATA instruction that liblink will have to interpret later.
-	// This reduces compilation time and memory usage.
-	off = int(Rnd(int64(off), int64(wid)))
-
-	return int(obj.Setuintxx(Ctxt, s, int64(off), v, int64(wid)))
-}
-
-func duint8(s *Sym, off int, v uint8) int {
-	return duintxx(s, off, uint64(v), 1)
-}
-
-func duint16(s *Sym, off int, v uint16) int {
-	return duintxx(s, off, uint64(v), 2)
-}
-
-func duint32(s *Sym, off int, v uint32) int {
-	return duintxx(s, off, uint64(v), 4)
-}
-
-func duintptr(s *Sym, off int, v uint64) int {
-	return duintxx(s, off, v, Widthptr)
-}
-
-func dbvec(s *Sym, off int, bv bvec) int {
-	// Runtime reads the bitmaps as byte arrays. Oblige.
-	for j := 0; int32(j) < bv.n; j += 8 {
-		word := bv.b[j/32]
-		off = duint8(s, off, uint8(word>>(uint(j)%32)))
-	}
-	return off
-}
-
-func stringsym(s string) (data *obj.LSym) {
-	var symname string
-	if len(s) > 100 {
-		// Huge strings are hashed to avoid long names in object files.
-		// Indulge in some paranoia by writing the length of s, too,
-		// as protection against length extension attacks.
-		h := sha256.New()
-		io.WriteString(h, s)
-		symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
-	} else {
-		// Small strings get named directly by their contents.
-		symname = strconv.Quote(s)
-	}
-
-	const prefix = "go.string."
-	symdataname := prefix + symname
-
-	symdata := obj.Linklookup(Ctxt, symdataname, 0)
-
-	if !symdata.SeenGlobl() {
-		// string data
-		off := dsnameLSym(symdata, 0, s)
-		ggloblLSym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
-	}
-
-	return symdata
-}
-
-var slicebytes_gen int
-
-func slicebytes(nam *Node, s string, len int) {
-	slicebytes_gen++
-	symname := fmt.Sprintf(".gobytes.%d", slicebytes_gen)
-	sym := Pkglookup(symname, localpkg)
-	sym.Def = newname(sym)
-
-	off := dsname(sym, 0, s)
-	ggloblsym(sym, int32(off), obj.NOPTR|obj.LOCAL)
-
-	if nam.Op != ONAME {
-		Fatalf("slicebytes %v", nam)
-	}
-	off = int(nam.Xoffset)
-	off = dsymptr(nam.Sym, off, sym, 0)
-	off = duintxx(nam.Sym, off, uint64(len), Widthint)
-	duintxx(nam.Sym, off, uint64(len), Widthint)
-}
-
-func dsname(s *Sym, off int, t string) int {
-	return dsnameLSym(Linksym(s), off, t)
-}
-
-func dsnameLSym(s *obj.LSym, off int, t string) int {
-	s.WriteString(Ctxt, int64(off), len(t), t)
-	return off + len(t)
-}
-
-func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
-	return dsymptrLSym(Linksym(s), off, Linksym(x), xoff)
-}
-
-func dsymptrLSym(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
-	off = int(Rnd(int64(off), int64(Widthptr)))
-	s.WriteAddr(Ctxt, int64(off), Widthptr, x, int64(xoff))
-	off += Widthptr
-	return off
-}
-
-func dsymptrOffLSym(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
-	s.WriteOff(Ctxt, int64(off), x, int64(xoff))
-	off += 4
-	return off
-}
-
-func dsymptrWeakOffLSym(s *obj.LSym, off int, x *obj.LSym) int {
-	s.WriteWeakOff(Ctxt, int64(off), x, 0)
-	off += 4
-	return off
-}
-
-func gdata(nam *Node, nr *Node, wid int) {
-	if nam.Op != ONAME {
-		Fatalf("gdata nam op %v", nam.Op)
-	}
-	if nam.Sym == nil {
-		Fatalf("gdata nil nam sym")
-	}
-	s := Linksym(nam.Sym)
-
-	switch nr.Op {
-	case OLITERAL:
-		switch u := nr.Val().U.(type) {
-		case bool:
-			i := int64(obj.Bool2int(u))
-			s.WriteInt(Ctxt, nam.Xoffset, wid, i)
-
-		case *Mpint:
-			s.WriteInt(Ctxt, nam.Xoffset, wid, u.Int64())
-
-		case *Mpflt:
-			f := u.Float64()
-			switch nam.Type.Etype {
-			case TFLOAT32:
-				s.WriteFloat32(Ctxt, nam.Xoffset, float32(f))
-			case TFLOAT64:
-				s.WriteFloat64(Ctxt, nam.Xoffset, f)
-			}
-
-		case *Mpcplx:
-			r := u.Real.Float64()
-			i := u.Imag.Float64()
-			switch nam.Type.Etype {
-			case TCOMPLEX64:
-				s.WriteFloat32(Ctxt, nam.Xoffset, float32(r))
-				s.WriteFloat32(Ctxt, nam.Xoffset+4, float32(i))
-			case TCOMPLEX128:
-				s.WriteFloat64(Ctxt, nam.Xoffset, r)
-				s.WriteFloat64(Ctxt, nam.Xoffset+8, i)
-			}
-
-		case string:
-			symdata := stringsym(u)
-			s.WriteAddr(Ctxt, nam.Xoffset, Widthptr, symdata, 0)
-			s.WriteInt(Ctxt, nam.Xoffset+int64(Widthptr), Widthint, int64(len(u)))
-
-		default:
-			Fatalf("gdata unhandled OLITERAL %v", nr)
-		}
-
-	case OADDR:
-		if nr.Left.Op != ONAME {
-			Fatalf("gdata ADDR left op %v", nr.Left.Op)
-		}
-		to := nr.Left
-		s.WriteAddr(Ctxt, nam.Xoffset, wid, Linksym(to.Sym), to.Xoffset)
-
-	case ONAME:
-		if nr.Class != PFUNC {
-			Fatalf("gdata NAME not PFUNC %d", nr.Class)
-		}
-		s.WriteAddr(Ctxt, nam.Xoffset, wid, Linksym(funcsym(nr.Sym)), nr.Xoffset)
-
-	default:
-		Fatalf("gdata unhandled op %v %v\n", nr, nr.Op)
-	}
-}
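
The deleted obj.go above names string-data symbols in stringsym: short strings by their quoted contents, strings longer than 100 bytes by their length plus a SHA-256 digest so symbol names stay bounded. A small standalone sketch of that naming rule is below; the "go.string." prefix and the hashing scheme come from the deleted code, while the helper name stringSymName and the demo values are illustrative only.

// Sketch only: reproduces the symbol-naming rule from the deleted
// stringsym, not the actual object-file writing.
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strconv"
)

func stringSymName(s string) string {
	var symname string
	if len(s) > 100 {
		// Huge strings are hashed; the length is written too, as cheap
		// protection against length-extension collisions (as in the original).
		h := sha256.New()
		io.WriteString(h, s)
		symname = fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
	} else {
		// Small strings are named directly by their quoted contents.
		symname = strconv.Quote(s)
	}
	return "go.string." + symname
}

func main() {
	fmt.Println(stringSymName("hello"))
	fmt.Println(stringSymName(string(make([]byte, 200))))
}
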
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/opnames.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/opnames.go
deleted file mode 100644
index c187e59..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/opnames.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/opnames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/opnames.go:1
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// auto generated by go tool dist
-var opnames = []string{
-	OXXX:             "XXX",
-	ONAME:            "NAME",
-	ONONAME:          "NONAME",
-	OTYPE:            "TYPE",
-	OPACK:            "PACK",
-	OLITERAL:         "LITERAL",
-	OADD:             "ADD",
-	OSUB:             "SUB",
-	OOR:              "OR",
-	OXOR:             "XOR",
-	OADDSTR:          "ADDSTR",
-	OADDR:            "ADDR",
-	OANDAND:          "ANDAND",
-	OAPPEND:          "APPEND",
-	OARRAYBYTESTR:    "ARRAYBYTESTR",
-	OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP",
-	OARRAYRUNESTR:    "ARRAYRUNESTR",
-	OSTRARRAYBYTE:    "STRARRAYBYTE",
-	OSTRARRAYBYTETMP: "STRARRAYBYTETMP",
-	OSTRARRAYRUNE:    "STRARRAYRUNE",
-	OAS:              "AS",
-	OAS2:             "AS2",
-	OAS2FUNC:         "AS2FUNC",
-	OAS2RECV:         "AS2RECV",
-	OAS2MAPR:         "AS2MAPR",
-	OAS2DOTTYPE:      "AS2DOTTYPE",
-	OASOP:            "ASOP",
-	OASWB:            "ASWB",
-	OCALL:            "CALL",
-	OCALLFUNC:        "CALLFUNC",
-	OCALLMETH:        "CALLMETH",
-	OCALLINTER:       "CALLINTER",
-	OCALLPART:        "CALLPART",
-	OCAP:             "CAP",
-	OCLOSE:           "CLOSE",
-	OCLOSURE:         "CLOSURE",
-	OCMPIFACE:        "CMPIFACE",
-	OCMPSTR:          "CMPSTR",
-	OCOMPLIT:         "COMPLIT",
-	OMAPLIT:          "MAPLIT",
-	OSTRUCTLIT:       "STRUCTLIT",
-	OARRAYLIT:        "ARRAYLIT",
-	OSLICELIT:        "SLICELIT",
-	OPTRLIT:          "PTRLIT",
-	OCONV:            "CONV",
-	OCONVIFACE:       "CONVIFACE",
-	OCONVNOP:         "CONVNOP",
-	OCOPY:            "COPY",
-	ODCL:             "DCL",
-	ODCLFUNC:         "DCLFUNC",
-	ODCLFIELD:        "DCLFIELD",
-	ODCLCONST:        "DCLCONST",
-	ODCLTYPE:         "DCLTYPE",
-	ODELETE:          "DELETE",
-	ODOT:             "DOT",
-	ODOTPTR:          "DOTPTR",
-	ODOTMETH:         "DOTMETH",
-	ODOTINTER:        "DOTINTER",
-	OXDOT:            "XDOT",
-	ODOTTYPE:         "DOTTYPE",
-	ODOTTYPE2:        "DOTTYPE2",
-	OEQ:              "EQ",
-	ONE:              "NE",
-	OLT:              "LT",
-	OLE:              "LE",
-	OGE:              "GE",
-	OGT:              "GT",
-	OIND:             "IND",
-	OINDEX:           "INDEX",
-	OINDEXMAP:        "INDEXMAP",
-	OKEY:             "KEY",
-	OSTRUCTKEY:       "STRUCTKEY",
-	OLEN:             "LEN",
-	OMAKE:            "MAKE",
-	OMAKECHAN:        "MAKECHAN",
-	OMAKEMAP:         "MAKEMAP",
-	OMAKESLICE:       "MAKESLICE",
-	OMUL:             "MUL",
-	ODIV:             "DIV",
-	OMOD:             "MOD",
-	OLSH:             "LSH",
-	ORSH:             "RSH",
-	OAND:             "AND",
-	OANDNOT:          "ANDNOT",
-	ONEW:             "NEW",
-	ONOT:             "NOT",
-	OCOM:             "COM",
-	OPLUS:            "PLUS",
-	OMINUS:           "MINUS",
-	OOROR:            "OROR",
-	OPANIC:           "PANIC",
-	OPRINT:           "PRINT",
-	OPRINTN:          "PRINTN",
-	OPAREN:           "PAREN",
-	OSEND:            "SEND",
-	OSLICE:           "SLICE",
-	OSLICEARR:        "SLICEARR",
-	OSLICESTR:        "SLICESTR",
-	OSLICE3:          "SLICE3",
-	OSLICE3ARR:       "SLICE3ARR",
-	ORECOVER:         "RECOVER",
-	ORECV:            "RECV",
-	ORUNESTR:         "RUNESTR",
-	OSELRECV:         "SELRECV",
-	OSELRECV2:        "SELRECV2",
-	OIOTA:            "IOTA",
-	OREAL:            "REAL",
-	OIMAG:            "IMAG",
-	OCOMPLEX:         "COMPLEX",
-	OBLOCK:           "BLOCK",
-	OBREAK:           "BREAK",
-	OCASE:            "CASE",
-	OXCASE:           "XCASE",
-	OCONTINUE:        "CONTINUE",
-	ODEFER:           "DEFER",
-	OEMPTY:           "EMPTY",
-	OFALL:            "FALL",
-	OXFALL:           "XFALL",
-	OFOR:             "FOR",
-	OGOTO:            "GOTO",
-	OIF:              "IF",
-	OLABEL:           "LABEL",
-	OPROC:            "PROC",
-	ORANGE:           "RANGE",
-	ORETURN:          "RETURN",
-	OSELECT:          "SELECT",
-	OSWITCH:          "SWITCH",
-	OTYPESW:          "TYPESW",
-	OTCHAN:           "TCHAN",
-	OTMAP:            "TMAP",
-	OTSTRUCT:         "TSTRUCT",
-	OTINTER:          "TINTER",
-	OTFUNC:           "TFUNC",
-	OTARRAY:          "TARRAY",
-	ODDD:             "DDD",
-	ODDDARG:          "DDDARG",
-	OINLCALL:         "INLCALL",
-	OEFACE:           "EFACE",
-	OITAB:            "ITAB",
-	OIDATA:           "IDATA",
-	OSPTR:            "SPTR",
-	OCLOSUREVAR:      "CLOSUREVAR",
-	OCFUNC:           "CFUNC",
-	OCHECKNIL:        "CHECKNIL",
-	OVARKILL:         "VARKILL",
-	OVARLIVE:         "VARLIVE",
-	OINDREGSP:        "INDREGSP",
-	OCMP:             "CMP",
-	ODEC:             "DEC",
-	OINC:             "INC",
-	OEXTEND:          "EXTEND",
-	OHMUL:            "HMUL",
-	OLROT:            "LROT",
-	ORROTC:           "RROTC",
-	ORETJMP:          "RETJMP",
-	OPS:              "PS",
-	OPC:              "PC",
-	OSQRT:            "SQRT",
-	OGETG:            "GETG",
-	OEND:             "END",
-}
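
The deleted opnames.go above is a plain lookup table: Op constants defined with iota elsewhere in package gc index a sparse keyed [...]string literal of names. A minimal sketch of that idiom follows, with made-up ops and a String method added for demonstration; the real Op values and table live only in the compiler.

// Sketch only: same keyed-array-literal idiom as the deleted table,
// using invented ops.
package main

import "fmt"

type Op int

const (
	OXXX Op = iota
	OADD
	OSUB
	OCALL
)

// Sparse keyed literal; entries not listed default to "".
var opnames = [...]string{
	OXXX:  "XXX",
	OADD:  "ADD",
	OSUB:  "SUB",
	OCALL: "CALL",
}

func (o Op) String() string {
	if o >= 0 && int(o) < len(opnames) && opnames[o] != "" {
		return opnames[o]
	}
	return fmt.Sprintf("Op(%d)", int(o))
}

func main() {
	fmt.Println(OADD, OCALL) // prints: ADD CALL
}
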
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/order.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/order.go
deleted file mode 100644
index 6e2dca7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/order.go
+++ /dev/null
@@ -1,1221 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/order.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/order.go:1
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-)
-
-// Rewrite tree to use separate statements to enforce
-// order of evaluation. Makes walk easier, because it
-// can (after this runs) reorder at will within an expression.
-//
-// Rewrite x op= y into x = x op y.
-//
-// Introduce temporaries as needed by runtime routines.
-// For example, the map runtime routines take the map key
-// by reference, so make sure all map keys are addressable
-// by copying them to temporaries as needed.
-// The same is true for channel operations.
-//
-// Arrange that map index expressions only appear in direct
-// assignments x = m[k] or m[k] = x, never in larger expressions.
-//
-// Arrange that receive expressions only appear in direct assignments
-// x = <-c or as standalone statements <-c, never in larger expressions.
-
-// TODO(rsc): The temporary introduction during multiple assignments
-// should be moved into this file, so that the temporaries can be cleaned
-// and so that conversions implicit in the OAS2FUNC and OAS2RECV
-// nodes can be made explicit and then have their temporaries cleaned.
-
-// TODO(rsc): Goto and multilevel break/continue can jump over
-// inserted VARKILL annotations. Work out a way to handle these.
-// The current implementation is safe, in that it will execute correctly.
-// But it won't reuse temporaries as aggressively as it might, and
-// it can result in unnecessary zeroing of those variables in the function
-// prologue.
-
-// Order holds state during the ordering process.
-type Order struct {
-	out  []*Node // list of generated statements
-	temp []*Node // stack of temporary variables
-}
-
-// Order rewrites fn->nbody to apply the ordering constraints
-// described in the comment at the top of the file.
-func order(fn *Node) {
-	if Debug['W'] > 1 {
-		s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
-		dumplist(s, fn.Nbody)
-	}
-
-	orderblockNodes(&fn.Nbody)
-}
-
-// Ordertemp allocates a new temporary with the given type,
-// pushes it onto the temp stack, and returns it.
-// If clear is true, ordertemp emits code to zero the temporary.
-func ordertemp(t *Type, order *Order, clear bool) *Node {
-	var_ := temp(t)
-	if clear {
-		a := nod(OAS, var_, nil)
-		a = typecheck(a, Etop)
-		order.out = append(order.out, a)
-	}
-
-	order.temp = append(order.temp, var_)
-	return var_
-}
-
-// Ordercopyexpr behaves like ordertemp but also emits
-// code to initialize the temporary to the value n.
-//
-// The clear argument is provided for use when the evaluation
-// of tmp = n turns into a function call that is passed a pointer
-// to the temporary as the output space. If the call blocks before
-// tmp has been written, the garbage collector will still treat the
-// temporary as live, so we must zero it before entering that call.
-// Today, this only happens for channel receive operations.
-// (The other candidate would be map access, but map access
-// returns a pointer to the result data instead of taking a pointer
-// to be filled in.)
-func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
-	var_ := ordertemp(t, order, clear != 0)
-	a := nod(OAS, var_, n)
-	a = typecheck(a, Etop)
-	order.out = append(order.out, a)
-	return var_
-}
-
-// Ordercheapexpr returns a cheap version of n.
-// The definition of cheap is that n is a variable or constant.
-// If not, ordercheapexpr allocates a new tmp, emits tmp = n,
-// and then returns tmp.
-func ordercheapexpr(n *Node, order *Order) *Node {
-	if n == nil {
-		return nil
-	}
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-	case OLEN, OCAP:
-		l := ordercheapexpr(n.Left, order)
-		if l == n.Left {
-			return n
-		}
-		a := *n
-		a.Orig = &a
-		a.Left = l
-		return typecheck(&a, Erv)
-	}
-
-	return ordercopyexpr(n, n.Type, order, 0)
-}
-
-// Ordersafeexpr returns a safe version of n.
-// The definition of safe is that n can appear multiple times
-// without violating the semantics of the original program,
-// and that assigning to the safe version has the same effect
-// as assigning to the original n.
-//
-// The intended use is to apply to x when rewriting x += y into x = x + y.
-func ordersafeexpr(n *Node, order *Order) *Node {
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-
-	case ODOT, OLEN, OCAP:
-		l := ordersafeexpr(n.Left, order)
-		if l == n.Left {
-			return n
-		}
-		a := *n
-		a.Orig = &a
-		a.Left = l
-		return typecheck(&a, Erv)
-
-	case ODOTPTR, OIND:
-		l := ordercheapexpr(n.Left, order)
-		if l == n.Left {
-			return n
-		}
-		a := *n
-		a.Orig = &a
-		a.Left = l
-		return typecheck(&a, Erv)
-
-	case OINDEX, OINDEXMAP:
-		var l *Node
-		if n.Left.Type.IsArray() {
-			l = ordersafeexpr(n.Left, order)
-		} else {
-			l = ordercheapexpr(n.Left, order)
-		}
-		r := ordercheapexpr(n.Right, order)
-		if l == n.Left && r == n.Right {
-			return n
-		}
-		a := *n
-		a.Orig = &a
-		a.Left = l
-		a.Right = r
-		return typecheck(&a, Erv)
-	default:
-		Fatalf("ordersafeexpr %v", n.Op)
-		return nil // not reached
-	}
-}
-
-// Isaddrokay reports whether it is okay to pass n's address to runtime routines.
-// Taking the address of a variable makes the liveness and optimization analyses
-// lose track of where the variable's lifetime ends. To avoid hurting the analyses
-// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
-// because we emit explicit VARKILL instructions marking the end of those
-// temporaries' lifetimes.
-func isaddrokay(n *Node) bool {
-	return islvalue(n) && (n.Op != ONAME || n.Class == PEXTERN || n.IsAutoTmp())
-}
-
-// Orderaddrtemp ensures that n is okay to pass by address to runtime routines.
-// If the original argument n is not okay, orderaddrtemp creates a tmp, emits
-// tmp = n, and then returns tmp.
-func orderaddrtemp(n *Node, order *Order) *Node {
-	if isaddrokay(n) {
-		return n
-	}
-	return ordercopyexpr(n, n.Type, order, 0)
-}
-
-type ordermarker int
-
-// Marktemp returns the top of the temporary variable stack.
-func marktemp(order *Order) ordermarker {
-	return ordermarker(len(order.temp))
-}
-
-// Poptemp pops temporaries off the stack until reaching the mark,
-// which must have been returned by marktemp.
-func poptemp(mark ordermarker, order *Order) {
-	order.temp = order.temp[:mark]
-}
-
-// Cleantempnopop emits to *out VARKILL instructions for each temporary
-// above the mark on the temporary stack, but it does not pop them
-// from the stack.
-func cleantempnopop(mark ordermarker, order *Order, out *[]*Node) {
-	var kill *Node
-
-	for i := len(order.temp) - 1; i >= int(mark); i-- {
-		n := order.temp[i]
-		if n.Name.Keepalive {
-			n.Name.Keepalive = false
-			n.Addrtaken = true // ensure SSA keeps the n variable
-			kill = nod(OVARLIVE, n, nil)
-			kill = typecheck(kill, Etop)
-			*out = append(*out, kill)
-		}
-		kill = nod(OVARKILL, n, nil)
-		kill = typecheck(kill, Etop)
-		*out = append(*out, kill)
-	}
-}
-
-// Cleantemp emits VARKILL instructions for each temporary above the
-// mark on the temporary stack and removes them from the stack.
-func cleantemp(top ordermarker, order *Order) {
-	cleantempnopop(top, order, &order.out)
-	poptemp(top, order)
-}
-
-// Orderstmtlist orders each of the statements in the list.
-func orderstmtlist(l Nodes, order *Order) {
-	for _, n := range l.Slice() {
-		orderstmt(n, order)
-	}
-}
-
-// Orderblock orders the block of statements l onto a new list,
-// and returns the ordered list.
-func orderblock(l Nodes) []*Node {
-	var order Order
-	mark := marktemp(&order)
-	orderstmtlist(l, &order)
-	cleantemp(mark, &order)
-	return order.out
-}
-
-// OrderblockNodes orders the block of statements in n into a new slice,
-// and then replaces the old slice in n with the new slice.
-func orderblockNodes(n *Nodes) {
-	var order Order
-	mark := marktemp(&order)
-	orderstmtlist(*n, &order)
-	cleantemp(mark, &order)
-	n.Set(order.out)
-}
-
-// Orderexprinplace orders the side effects in *np and
-// leaves them as the init list of the final *np.
-// The result of orderexprinplace MUST be assigned back to n, e.g.
-// 	n.Left = orderexprinplace(n.Left, outer)
-func orderexprinplace(n *Node, outer *Order) *Node {
-	var order Order
-	n = orderexpr(n, &order, nil)
-	n = addinit(n, order.out)
-
-	// insert new temporaries from order
-	// at head of outer list.
-	outer.temp = append(outer.temp, order.temp...)
-	return n
-}
-
-// Orderstmtinplace orders the side effects of the single statement *np
-// and replaces it with the resulting statement list.
-// The result of orderstmtinplace MUST be assigned back to n, e.g.
-// 	n.Left = orderstmtinplace(n.Left)
-func orderstmtinplace(n *Node) *Node {
-	var order Order
-	mark := marktemp(&order)
-	orderstmt(n, &order)
-	cleantemp(mark, &order)
-	return liststmt(order.out)
-}
-
-// Orderinit moves n's init list to order->out.
-func orderinit(n *Node, order *Order) {
-	orderstmtlist(n.Ninit, order)
-	n.Ninit.Set(nil)
-}
-
-// Ismulticall reports whether the list l is f() for a multi-value function.
-// Such an f() could appear as the lone argument to a multi-arg function.
-func ismulticall(l Nodes) bool {
-	// one arg only
-	if l.Len() != 1 {
-		return false
-	}
-	n := l.First()
-
-	// must be call
-	switch n.Op {
-	default:
-		return false
-
-	case OCALLFUNC, OCALLMETH, OCALLINTER:
-		break
-	}
-
-	// call must return multiple values
-	return n.Left.Type.Results().NumFields() > 1
-}
-
-// Copyret emits t1, t2, ... = n, where n is a function call,
-// and then returns the list t1, t2, ....
-func copyret(n *Node, order *Order) []*Node {
-	if !n.Type.IsFuncArgStruct() {
-		Fatalf("copyret %v %d", n.Type, n.Left.Type.Results().NumFields())
-	}
-
-	var l1 []*Node
-	var l2 []*Node
-	for _, t := range n.Type.Fields().Slice() {
-		tmp := temp(t.Type)
-		l1 = append(l1, tmp)
-		l2 = append(l2, tmp)
-	}
-
-	as := nod(OAS2, nil, nil)
-	as.List.Set(l1)
-	as.Rlist.Set1(n)
-	as = typecheck(as, Etop)
-	orderstmt(as, order)
-
-	return l2
-}
-
-// Ordercallargs orders the list of call arguments *l.
-func ordercallargs(l *Nodes, order *Order) {
-	if ismulticall(*l) {
-		// return f() where f() is multiple values.
-		l.Set(copyret(l.First(), order))
-	} else {
-		orderexprlist(*l, order)
-	}
-}
-
-// Ordercall orders the call expression n.
-// n->op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
-func ordercall(n *Node, order *Order) {
-	n.Left = orderexpr(n.Left, order, nil)
-	n.Right = orderexpr(n.Right, order, nil) // ODDDARG temp
-	ordercallargs(&n.List, order)
-
-	if n.Op == OCALLFUNC {
-		t, it := iterFields(n.Left.Type.Params())
-		for i := range n.List.Slice() {
-			// Check for "unsafe-uintptr" tag provided by escape analysis.
-			// If present and the argument is really a pointer being converted
-			// to uintptr, arrange for the pointer to be kept alive until the call
-			// returns, by copying it into a temp and marking that temp
-			// still alive when we pop the temp stack.
-			if t == nil {
-				break
-			}
-			if t.Note == unsafeUintptrTag || t.Note == uintptrEscapesTag {
-				xp := n.List.Addr(i)
-				for (*xp).Op == OCONVNOP && !(*xp).Type.IsPtr() {
-					xp = &(*xp).Left
-				}
-				x := *xp
-				if x.Type.IsPtr() {
-					x = ordercopyexpr(x, x.Type, order, 0)
-					x.Name.Keepalive = true
-					*xp = x
-				}
-			}
-			next := it.Next()
-			if next == nil && t.Isddd && t.Note == uintptrEscapesTag {
-				next = t
-			}
-			t = next
-		}
-	}
-}
-
-// Ordermapassign appends n to order->out, introducing temporaries
-// to make sure that all map assignments have the form m[k] = x.
-// (Note: orderexpr has already been called on n, so we know k is addressable.)
-//
-// If n is the multiple assignment form ..., m[k], ... = ..., the rewrite is
-//	t1 = m
-//	t2 = k
-//	...., t3, ... = x
-//	t1[t2] = t3
-//
-// The temporaries t1, t2 are needed in case the ... being assigned
-// contain m or k. They are usually unnecessary, but in the unnecessary
-// cases they are also typically registerizable, so not much harm done.
-// And this only applies to the multiple-assignment form.
-// We could do a more precise analysis if needed, like in walk.go.
-//
-// Ordermapassign also inserts these temporaries if needed for
-// calling writebarrierfat with a pointer to n->right.
-func ordermapassign(n *Node, order *Order) {
-	switch n.Op {
-	default:
-		Fatalf("ordermapassign %v", n.Op)
-
-	case OAS:
-		order.out = append(order.out, n)
-
-		// We call writebarrierfat only for values > 4 pointers long. See walk.go.
-		// TODO(mdempsky): writebarrierfat doesn't exist anymore, but removing that
-		// logic causes net/http's tests to become flaky; see CL 21242.
-		if needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr) && n.Right != nil && !isaddrokay(n.Right) {
-			m := n.Left
-			n.Left = ordertemp(m.Type, order, false)
-			a := nod(OAS, m, n.Left)
-			a = typecheck(a, Etop)
-			order.out = append(order.out, a)
-		}
-
-	case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC:
-		var post []*Node
-		var m *Node
-		var a *Node
-		for i1, n1 := range n.List.Slice() {
-			if n1.Op == OINDEXMAP {
-				m = n1
-				if !m.Left.IsAutoTmp() {
-					m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
-				}
-				if !m.Right.IsAutoTmp() {
-					m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
-				}
-				n.List.SetIndex(i1, ordertemp(m.Type, order, false))
-				a = nod(OAS, m, n.List.Index(i1))
-				a = typecheck(a, Etop)
-				post = append(post, a)
-			} else if instrumenting && n.Op == OAS2FUNC && !isblank(n.List.Index(i1)) {
-				m = n.List.Index(i1)
-				t := ordertemp(m.Type, order, false)
-				n.List.SetIndex(i1, t)
-				a = nod(OAS, m, t)
-				a = typecheck(a, Etop)
-				post = append(post, a)
-			}
-		}
-
-		order.out = append(order.out, n)
-		order.out = append(order.out, post...)
-	}
-}
-
-// Orderstmt orders the statement n, appending to order->out.
-// Temporaries created during the statement are cleaned
-// up using VARKILL instructions as possible.
-func orderstmt(n *Node, order *Order) {
-	if n == nil {
-		return
-	}
-
-	lno := setlineno(n)
-
-	orderinit(n, order)
-
-	switch n.Op {
-	default:
-		Fatalf("orderstmt %v", n.Op)
-
-	case OVARKILL, OVARLIVE:
-		order.out = append(order.out, n)
-
-	case OAS:
-		t := marktemp(order)
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, n.Left)
-		ordermapassign(n, order)
-		cleantemp(t, order)
-
-	case OAS2,
-		OCLOSE,
-		OCOPY,
-		OPRINT,
-		OPRINTN,
-		ORECOVER,
-		ORECV:
-		t := marktemp(order)
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-		orderexprlist(n.List, order)
-		orderexprlist(n.Rlist, order)
-		switch n.Op {
-		case OAS2, OAS2DOTTYPE:
-			ordermapassign(n, order)
-		default:
-			order.out = append(order.out, n)
-		}
-		cleantemp(t, order)
-
-	case OASOP:
-		// Special: rewrite l op= r into l = l op r.
-		// This simplifies quite a few operations;
-		// most important is that it lets us separate
-		// out map read from map write when l is
-		// a map index expression.
-		t := marktemp(order)
-
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Left = ordersafeexpr(n.Left, order)
-		tmp1 := treecopy(n.Left, 0)
-		if tmp1.Op == OINDEXMAP {
-			tmp1.Etype = 0 // now an rvalue not an lvalue
-		}
-		tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
-		// TODO(marvin): Fix Node.EType type union.
-		n.Right = nod(Op(n.Etype), tmp1, n.Right)
-		n.Right = typecheck(n.Right, Erv)
-		n.Right = orderexpr(n.Right, order, nil)
-		n.Etype = 0
-		n.Op = OAS
-		ordermapassign(n, order)
-		cleantemp(t, order)
-
-	// Special: make sure key is addressable,
-	// and make sure OINDEXMAP is not copied out.
-	case OAS2MAPR:
-		t := marktemp(order)
-
-		orderexprlist(n.List, order)
-		r := n.Rlist.First()
-		r.Left = orderexpr(r.Left, order, nil)
-		r.Right = orderexpr(r.Right, order, nil)
-
-		// See case OINDEXMAP below.
-		if r.Right.Op == OARRAYBYTESTR {
-			r.Right.Op = OARRAYBYTESTRTMP
-		}
-		r.Right = orderaddrtemp(r.Right, order)
-		ordermapassign(n, order)
-		cleantemp(t, order)
-
-	// Special: avoid copy of func call n->rlist->n.
-	case OAS2FUNC:
-		t := marktemp(order)
-
-		orderexprlist(n.List, order)
-		ordercall(n.Rlist.First(), order)
-		ordermapassign(n, order)
-		cleantemp(t, order)
-
-	// Special: use temporary variables to hold result,
-	// so that assertI2Tetc can take address of temporary.
-	// No temporary for blank assignment.
-	case OAS2DOTTYPE:
-		t := marktemp(order)
-
-		orderexprlist(n.List, order)
-		n.Rlist.First().Left = orderexpr(n.Rlist.First().Left, order, nil) // i in i.(T)
-
-		var tmp1, tmp2 *Node
-		if !isblank(n.List.First()) {
-			typ := n.Rlist.First().Type
-			tmp1 = ordertemp(typ, order, haspointers(typ))
-		}
-		if !isblank(n.List.Second()) && !n.List.Second().Type.IsBoolean() {
-			tmp2 = ordertemp(Types[TBOOL], order, false)
-		}
-
-		order.out = append(order.out, n)
-
-		if tmp1 != nil {
-			r := nod(OAS, n.List.First(), tmp1)
-			r = typecheck(r, Etop)
-			ordermapassign(r, order)
-			n.List.SetIndex(0, tmp1)
-		}
-		if tmp2 != nil {
-			r := okas(n.List.Second(), tmp2)
-			r = typecheck(r, Etop)
-			ordermapassign(r, order)
-			n.List.SetIndex(1, tmp2)
-		}
-
-		cleantemp(t, order)
-
-	// Special: use temporary variables to hold result,
-	// so that chanrecv can take address of temporary.
-	case OAS2RECV:
-		t := marktemp(order)
-
-		orderexprlist(n.List, order)
-		n.Rlist.First().Left = orderexpr(n.Rlist.First().Left, order, nil) // arg to recv
-		ch := n.Rlist.First().Left.Type
-		tmp1 := ordertemp(ch.Elem(), order, haspointers(ch.Elem()))
-		tmp2 := ordertemp(Types[TBOOL], order, false)
-		order.out = append(order.out, n)
-		r := nod(OAS, n.List.First(), tmp1)
-		r = typecheck(r, Etop)
-		ordermapassign(r, order)
-		r = okas(n.List.Second(), tmp2)
-		r = typecheck(r, Etop)
-		ordermapassign(r, order)
-		n.List.Set([]*Node{tmp1, tmp2})
-		cleantemp(t, order)
-
-	// Special: does not save n onto out.
-	case OBLOCK, OEMPTY:
-		orderstmtlist(n.List, order)
-
-	// Special: n->left is not an expression; save as is.
-	case OBREAK,
-		OCONTINUE,
-		ODCL,
-		ODCLCONST,
-		ODCLTYPE,
-		OFALL,
-		OXFALL,
-		OGOTO,
-		OLABEL,
-		ORETJMP:
-		order.out = append(order.out, n)
-
-	// Special: handle call arguments.
-	case OCALLFUNC, OCALLINTER, OCALLMETH:
-		t := marktemp(order)
-
-		ordercall(n, order)
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	// Special: order arguments to inner call but not call itself.
-	case ODEFER, OPROC:
-		t := marktemp(order)
-
-		switch n.Left.Op {
-		// Delete will take the address of the key.
-		// Copy key into new temp and do not clean it
-		// (it persists beyond the statement).
-		case ODELETE:
-			orderexprlist(n.Left.List, order)
-
-			t1 := marktemp(order)
-			np := n.Left.List.Addr(1) // map key
-			*np = ordercopyexpr(*np, (*np).Type, order, 0)
-			poptemp(t1, order)
-
-		default:
-			ordercall(n.Left, order)
-		}
-
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	case ODELETE:
-		t := marktemp(order)
-		n.List.SetIndex(0, orderexpr(n.List.Index(0), order, nil))
-		n.List.SetIndex(1, orderexpr(n.List.Index(1), order, nil))
-		n.List.SetIndex(1, orderaddrtemp(n.List.Index(1), order)) // map key
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	// Clean temporaries from condition evaluation at
-	// beginning of loop body and after for statement.
-	case OFOR:
-		t := marktemp(order)
-
-		n.Left = orderexprinplace(n.Left, order)
-		var l []*Node
-		cleantempnopop(t, order, &l)
-		n.Nbody.Prepend(l...)
-		orderblockNodes(&n.Nbody)
-		n.Right = orderstmtinplace(n.Right)
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	// Clean temporaries from condition at
-	// beginning of both branches.
-	case OIF:
-		t := marktemp(order)
-
-		n.Left = orderexprinplace(n.Left, order)
-		var l []*Node
-		cleantempnopop(t, order, &l)
-		n.Nbody.Prepend(l...)
-		l = nil
-		cleantempnopop(t, order, &l)
-		n.Rlist.Prepend(l...)
-		poptemp(t, order)
-		orderblockNodes(&n.Nbody)
-		n.Rlist.Set(orderblock(n.Rlist))
-		order.out = append(order.out, n)
-
-	// Special: argument will be converted to interface using convT2E
-	// so make sure it is an addressable temporary.
-	case OPANIC:
-		t := marktemp(order)
-
-		n.Left = orderexpr(n.Left, order, nil)
-		if !n.Left.Type.IsInterface() {
-			n.Left = orderaddrtemp(n.Left, order)
-		}
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	case ORANGE:
-		// n.Right is the expression being ranged over.
-		// order it, and then make a copy if we need one.
-		// We almost always do, to ensure that we don't
-		// see any value changes made during the loop.
-		// Usually the copy is cheap (e.g., array pointer,
-		// chan, slice, string are all tiny).
-		// The exception is ranging over an array value
-		// (not a slice, not a pointer to array),
-		// which must make a copy to avoid seeing updates made during
-		// the range body. Ranging over an array value is uncommon though.
-
-		// Mark []byte(str) range expression to reuse string backing storage.
-		// It is safe because the storage cannot be mutated.
-		if n.Right.Op == OSTRARRAYBYTE {
-			n.Right.Op = OSTRARRAYBYTETMP
-		}
-
-		t := marktemp(order)
-		n.Right = orderexpr(n.Right, order, nil)
-		switch n.Type.Etype {
-		default:
-			Fatalf("orderstmt range %v", n.Type)
-
-		case TARRAY, TSLICE:
-			if n.List.Len() < 2 || isblank(n.List.Second()) {
-				// for i := range x will only use x once, to compute len(x).
-				// No need to copy it.
-				break
-			}
-			fallthrough
-
-		case TCHAN, TSTRING:
-			// chan, string, slice, array ranges use value multiple times.
-			// make copy.
-			r := n.Right
-
-			if r.Type.IsString() && r.Type != Types[TSTRING] {
-				r = nod(OCONV, r, nil)
-				r.Type = Types[TSTRING]
-				r = typecheck(r, Erv)
-			}
-
-			n.Right = ordercopyexpr(r, r.Type, order, 0)
-
-		case TMAP:
-			// copy the map value in case it is a map literal.
-			// TODO(rsc): Make tmp = literal expressions reuse tmp.
-			// For maps tmp is just one word so it hardly matters.
-			r := n.Right
-			n.Right = ordercopyexpr(r, r.Type, order, 0)
-
-			// n->alloc is the temp for the iterator.
-			prealloc[n] = ordertemp(Types[TUINT8], order, true)
-		}
-		for i := range n.List.Slice() {
-			n.List.SetIndex(i, orderexprinplace(n.List.Index(i), order))
-		}
-		orderblockNodes(&n.Nbody)
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	case ORETURN:
-		ordercallargs(&n.List, order)
-		order.out = append(order.out, n)
-
-	// Special: clean case temporaries in each block entry.
-	// Select must enter one of its blocks, so there is no
-	// need for a cleaning at the end.
-	// Doubly special: evaluation order for select is stricter
-	// than ordinary expressions. Even something like p.c
-	// has to be hoisted into a temporary, so that it cannot be
-	// reordered after the channel evaluation for a different
-	// case (if p were nil, then the timing of the fault would
-	// give this away).
-	case OSELECT:
-		t := marktemp(order)
-
-		var tmp1 *Node
-		var tmp2 *Node
-		var r *Node
-		for _, n2 := range n.List.Slice() {
-			if n2.Op != OXCASE {
-				Fatalf("order select case %v", n2.Op)
-			}
-			r = n2.Left
-			setlineno(n2)
-
-			// Append any new body prologue to ninit.
-			// The next loop will insert ninit into nbody.
-			if n2.Ninit.Len() != 0 {
-				Fatalf("order select ninit")
-			}
-			if r != nil {
-				switch r.Op {
-				default:
-					yyerror("unknown op in select %v", r.Op)
-					Dump("select case", r)
-
-				// If this is case x := <-ch or case x, y := <-ch, the case has
-				// the ODCL nodes to declare x and y. We want to delay that
-				// declaration (and possible allocation) until inside the case body.
-				// Delete the ODCL nodes here and recreate them inside the body below.
-				case OSELRECV, OSELRECV2:
-					if r.Colas {
-						i := 0
-						if r.Ninit.Len() != 0 && r.Ninit.First().Op == ODCL && r.Ninit.First().Left == r.Left {
-							i++
-						}
-						if i < r.Ninit.Len() && r.Ninit.Index(i).Op == ODCL && r.List.Len() != 0 && r.Ninit.Index(i).Left == r.List.First() {
-							i++
-						}
-						if i >= r.Ninit.Len() {
-							r.Ninit.Set(nil)
-						}
-					}
-
-					if r.Ninit.Len() != 0 {
-						yyerror("ninit on select recv")
-						dumplist("ninit", r.Ninit)
-					}
-
-					// case x = <-c
-					// case x, ok = <-c
-					// r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
-					// r->left == N means 'case <-c'.
-					// c is always evaluated; x and ok are only evaluated when assigned.
-					r.Right.Left = orderexpr(r.Right.Left, order, nil)
-
-					if r.Right.Left.Op != ONAME {
-						r.Right.Left = ordercopyexpr(r.Right.Left, r.Right.Left.Type, order, 0)
-					}
-
-					// Introduce temporary for receive and move actual copy into case body.
-					// avoids problems with target being addressed, as usual.
-					// NOTE: If we wanted to be clever, we could arrange for just one
-					// temporary per distinct type, sharing the temp among all receives
-					// with that temp. Similarly one ok bool could be shared among all
-					// the x,ok receives. Not worth doing until there's a clear need.
-					if r.Left != nil && isblank(r.Left) {
-						r.Left = nil
-					}
-					if r.Left != nil {
-						// use channel element type for temporary to avoid conversions,
-						// such as in case interfacevalue = <-intchan.
-						// the conversion happens in the OAS instead.
-						tmp1 = r.Left
-
-						if r.Colas {
-							tmp2 = nod(ODCL, tmp1, nil)
-							tmp2 = typecheck(tmp2, Etop)
-							n2.Ninit.Append(tmp2)
-						}
-
-						r.Left = ordertemp(r.Right.Left.Type.Elem(), order, haspointers(r.Right.Left.Type.Elem()))
-						tmp2 = nod(OAS, tmp1, r.Left)
-						tmp2 = typecheck(tmp2, Etop)
-						n2.Ninit.Append(tmp2)
-					}
-
-					if r.List.Len() != 0 && isblank(r.List.First()) {
-						r.List.Set(nil)
-					}
-					if r.List.Len() != 0 {
-						tmp1 = r.List.First()
-						if r.Colas {
-							tmp2 = nod(ODCL, tmp1, nil)
-							tmp2 = typecheck(tmp2, Etop)
-							n2.Ninit.Append(tmp2)
-						}
-
-						r.List.Set1(ordertemp(Types[TBOOL], order, false))
-						tmp2 = okas(tmp1, r.List.First())
-						tmp2 = typecheck(tmp2, Etop)
-						n2.Ninit.Append(tmp2)
-					}
-					n2.Ninit.Set(orderblock(n2.Ninit))
-
-				case OSEND:
-					if r.Ninit.Len() != 0 {
-						yyerror("ninit on select send")
-						dumplist("ninit", r.Ninit)
-					}
-
-					// case c <- x
-					// r->left is c, r->right is x, both are always evaluated.
-					r.Left = orderexpr(r.Left, order, nil)
-
-					if !r.Left.IsAutoTmp() {
-						r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
-					}
-					r.Right = orderexpr(r.Right, order, nil)
-					if !r.Right.IsAutoTmp() {
-						r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
-					}
-				}
-			}
-
-			orderblockNodes(&n2.Nbody)
-		}
-		// Now that we have accumulated all the temporaries, clean them.
-		// Also insert any ninit queued during the previous loop.
-		// (The temporary cleaning must follow that ninit work.)
-		for _, n3 := range n.List.Slice() {
-			s := n3.Ninit.Slice()
-			cleantempnopop(t, order, &s)
-			n3.Nbody.Prepend(s...)
-			n3.Ninit.Set(nil)
-		}
-
-		order.out = append(order.out, n)
-		poptemp(t, order)
-
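The stricter evaluation order described above is a guarantee of the language spec: the channel operands of receive cases and the channel and value expressions of send cases are evaluated exactly once, in source order, on entry to the select. A minimal stand-alone sketch (ordinary Go, invented names), showing why those expressions have to be hoisted into temporaries ahead of the select itself:

	package main

	import "fmt"

	func ch(name string) chan int {
		fmt.Println("evaluated", name)
		return make(chan int, 1) // buffered, so the send case below can proceed
	}

	func main() {
		// Both channel expressions print before any case body runs.
		select {
		case ch("send case") <- 1:
			fmt.Println("sent")
		case v := <-ch("recv case"):
			fmt.Println("received", v)
		}
	}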
-	// Special: value being sent is passed as a pointer; make it addressable.
-	case OSEND:
-		t := marktemp(order)
-
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-		n.Right = orderaddrtemp(n.Right, order)
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-
-	// TODO(rsc): Clean temporaries more aggressively.
-	// Note that because walkswitch will rewrite some of the
-	// switch into a binary search, this is not as easy as it looks.
-	// (If we ran that code here we could invoke orderstmt on
-	// the if-else chain instead.)
-	// For now just clean all the temporaries at the end.
-	// In practice that's fine.
-	case OSWITCH:
-		t := marktemp(order)
-
-		n.Left = orderexpr(n.Left, order, nil)
-		for _, n4 := range n.List.Slice() {
-			if n4.Op != OXCASE {
-				Fatalf("order switch case %v", n4.Op)
-			}
-			orderexprlistinplace(n4.List, order)
-			orderblockNodes(&n4.Nbody)
-		}
-
-		order.out = append(order.out, n)
-		cleantemp(t, order)
-	}
-
-	lineno = lno
-}
-
-// Orderexprlist orders the expression list l into order.
-func orderexprlist(l Nodes, order *Order) {
-	s := l.Slice()
-	for i := range s {
-		s[i] = orderexpr(s[i], order, nil)
-	}
-}
-
-// Orderexprlistinplace orders the expression list l but saves
-// the side effects on the individual expression ninit lists.
-func orderexprlistinplace(l Nodes, order *Order) {
-	s := l.Slice()
-	for i := range s {
-		s[i] = orderexprinplace(s[i], order)
-	}
-}
-
-// prealloc[x] records the allocation to use for x.
-var prealloc = map[*Node]*Node{}
-
-// Orderexpr orders a single expression, appending side
-// effects to order->out as needed.
-// If this is part of an assignment lhs = *np, lhs is given.
-// Otherwise lhs == nil. (When lhs != nil it may be possible
-// to avoid copying the result of the expression to a temporary.)
-// The result of orderexpr MUST be assigned back to n, e.g.
-// 	n.Left = orderexpr(n.Left, order, lhs)
-func orderexpr(n *Node, order *Order, lhs *Node) *Node {
-	if n == nil {
-		return n
-	}
-
-	lno := setlineno(n)
-	orderinit(n, order)
-
-	switch n.Op {
-	default:
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-		orderexprlist(n.List, order)
-		orderexprlist(n.Rlist, order)
-
-	// Addition of strings turns into a function call.
-	// Allocate a temporary to hold the strings.
-	// Fewer than 5 strings use direct runtime helpers.
-	case OADDSTR:
-		orderexprlist(n.List, order)
-
-		if n.List.Len() > 5 {
-			t := typArray(Types[TSTRING], int64(n.List.Len()))
-			prealloc[n] = ordertemp(t, order, false)
-		}
-
-		// Mark string(byteSlice) arguments to reuse byteSlice backing
-		// buffer during conversion. String concatenation does not
-		// memorize the strings for later use, so it is safe.
-		// However, we can do it only if there is at least one non-empty string literal.
-		// Otherwise if all other arguments are empty strings,
-		// concatstrings will return the reference to the temp string
-		// to the caller.
-		hasbyte := false
-
-		haslit := false
-		for _, n1 := range n.List.Slice() {
-			hasbyte = hasbyte || n1.Op == OARRAYBYTESTR
-			haslit = haslit || n1.Op == OLITERAL && len(n1.Val().U.(string)) != 0
-		}
-
-		if haslit && hasbyte {
-			for _, n2 := range n.List.Slice() {
-				if n2.Op == OARRAYBYTESTR {
-					n2.Op = OARRAYBYTESTRTMP
-				}
-			}
-		}
-
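For reference, the semantics this buffer reuse must preserve: converting a []byte to string normally copies the bytes, so later writes to the slice cannot show through any string built from it. A tiny stand-alone example (not compiler code):

	package main

	import "fmt"

	func main() {
		b := []byte("hello")
		s := string(b) + "!" // the conversion conceptually copies b's bytes
		b[0] = 'H'           // must not affect s; strings are immutable
		fmt.Println(s)       // "hello!"
		fmt.Println(string(b))
	}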
-	case OCMPSTR:
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-
-		// Mark string(byteSlice) arguments to reuse byteSlice backing
-		// buffer during conversion. String comparison does not
-		// memorize the strings for later use, so it is safe.
-		if n.Left.Op == OARRAYBYTESTR {
-			n.Left.Op = OARRAYBYTESTRTMP
-		}
-		if n.Right.Op == OARRAYBYTESTR {
-			n.Right.Op = OARRAYBYTESTRTMP
-		}
-
-		// key must be addressable
-	case OINDEXMAP:
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-		needCopy := false
-
-		if n.Etype == 0 && instrumenting {
-			// Race detector needs the copy so it can
-			// call treecopy on the result.
-			needCopy = true
-		}
-
-		// For x = m[string(k)] where k is []byte, the allocation of
-		// backing bytes for the string can be avoided by reusing
-		// the []byte backing array. This is a special case that it
-		// would be nice to handle more generally, but because
-		// there are no []byte-keyed maps, this specific case comes
-		// up in important cases in practice. See issue 3512.
-		// Nothing can change the []byte we are not copying before
-		// the map index, because the map access is going to
-		// be forced to happen immediately following this
-		// conversion (by the ordercopyexpr a few lines below).
-		if n.Etype == 0 && n.Right.Op == OARRAYBYTESTR {
-			n.Right.Op = OARRAYBYTESTRTMP
-			needCopy = true
-		}
-
-		// Map calls need to take the address of the key.
-		n.Right = orderaddrtemp(n.Right, order)
-
-		if needCopy {
-			n = ordercopyexpr(n, n.Type, order, 0)
-		}
-
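The user-level pattern behind issue 3512 looks like the lookup below (a stand-alone sketch with invented names; whether a particular form is optimized depends on the compiler version). The point of the marking above is that the converted key only needs to live for the duration of the map access, so its bytes can come straight from the slice.

	package main

	import "fmt"

	func main() {
		hits := map[string]int{"GET": 3, "POST": 1}
		k := []byte("GET") // e.g. a field sliced out of a network buffer

		// m[string(k)]: the temporary string is consumed immediately by the
		// map access and never retained, which is what makes buffer reuse safe.
		if n, ok := hits[string(k)]; ok {
			fmt.Println("hits:", n)
		}
	}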
-	// concrete type (not interface) argument must be addressable
-	// temporary to pass to runtime.
-	case OCONVIFACE:
-		n.Left = orderexpr(n.Left, order, nil)
-
-		if !n.Left.Type.IsInterface() {
-			n.Left = orderaddrtemp(n.Left, order)
-		}
-
-	case OCONVNOP:
-		if n.Type.IsKind(TUNSAFEPTR) && n.Left.Type.IsKind(TUINTPTR) && (n.Left.Op == OCALLFUNC || n.Left.Op == OCALLINTER || n.Left.Op == OCALLMETH) {
-			// When reordering unsafe.Pointer(f()) into a separate
-			// statement, the conversion and function call must stay
-			// together. See golang.org/issue/15329.
-			orderinit(n.Left, order)
-			ordercall(n.Left, order)
-			if lhs == nil || lhs.Op != ONAME || instrumenting {
-				n = ordercopyexpr(n, n.Type, order, 0)
-			}
-		} else {
-			n.Left = orderexpr(n.Left, order, nil)
-		}
-
-	case OANDAND, OOROR:
-		mark := marktemp(order)
-		n.Left = orderexpr(n.Left, order, nil)
-
-		// Clean temporaries from first branch at beginning of second.
-		// Leave them on the stack so that they can be killed in the outer
-		// context in case the short circuit is taken.
-		var s []*Node
-
-		cleantempnopop(mark, order, &s)
-		n.Right.Ninit.Prepend(s...)
-		n.Right = orderexprinplace(n.Right, order)
-
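The reason the second operand's temporaries go into n.Right.Ninit rather than straight into order.out is ordinary short-circuit evaluation, as in this stand-alone example (invented names):

	package main

	import "fmt"

	func report(name string, v bool) bool {
		fmt.Println("evaluated", name)
		return v
	}

	func main() {
		if report("left", false) && report("right", true) {
			fmt.Println("both true")
		}
		// Only "evaluated left" prints: the right operand, and any temporaries
		// it needs, must not run when the left operand already decides the result.
	}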
-	case OCALLFUNC,
-		OCALLINTER,
-		OCALLMETH,
-		OCAP,
-		OCOMPLEX,
-		OCOPY,
-		OIMAG,
-		OLEN,
-		OMAKECHAN,
-		OMAKEMAP,
-		OMAKESLICE,
-		ONEW,
-		OREAL,
-		ORECOVER,
-		OSTRARRAYBYTE,
-		OSTRARRAYBYTETMP,
-		OSTRARRAYRUNE:
-		ordercall(n, order)
-		if lhs == nil || lhs.Op != ONAME || instrumenting {
-			n = ordercopyexpr(n, n.Type, order, 0)
-		}
-
-	case OAPPEND:
-		ordercallargs(&n.List, order)
-		if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) {
-			n = ordercopyexpr(n, n.Type, order, 0)
-		}
-
-	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
-		n.Left = orderexpr(n.Left, order, nil)
-		low, high, max := n.SliceBounds()
-		low = orderexpr(low, order, nil)
-		low = ordercheapexpr(low, order)
-		high = orderexpr(high, order, nil)
-		high = ordercheapexpr(high, order)
-		max = orderexpr(max, order, nil)
-		max = ordercheapexpr(max, order)
-		n.SetSliceBounds(low, high, max)
-		if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) {
-			n = ordercopyexpr(n, n.Type, order, 0)
-		}
-
-	case OCLOSURE:
-		if n.Noescape && n.Func.Cvars.Len() > 0 {
-			prealloc[n] = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
-		}
-
-	case OARRAYLIT, OSLICELIT, OCALLPART:
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-		orderexprlist(n.List, order)
-		orderexprlist(n.Rlist, order)
-		if n.Noescape {
-			prealloc[n] = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
-		}
-
-	case ODDDARG:
-		if n.Noescape {
-			// The ddd argument does not live beyond the call it is created for.
-			// Allocate a temporary that will be cleaned up when this statement
-			// completes. We could be more aggressive and try to arrange for it
-			// to be cleaned up when the call completes.
-			prealloc[n] = ordertemp(n.Type.Elem(), order, false)
-		}
-
-	case ODOTTYPE, ODOTTYPE2:
-		n.Left = orderexpr(n.Left, order, nil)
-		// TODO(rsc): The isfat is for consistency with componentgen and walkexpr.
-		// It needs to be removed in all three places.
-		// That would allow inlining x.(struct{*int}) the same as x.(*int).
-		if !isdirectiface(n.Type) || isfat(n.Type) || instrumenting {
-			n = ordercopyexpr(n, n.Type, order, 1)
-		}
-
-	case ORECV:
-		n.Left = orderexpr(n.Left, order, nil)
-		n = ordercopyexpr(n, n.Type, order, 1)
-
-	case OEQ, ONE:
-		n.Left = orderexpr(n.Left, order, nil)
-		n.Right = orderexpr(n.Right, order, nil)
-		t := n.Left.Type
-		if t.IsStruct() || t.IsArray() {
-			// for complex comparisons, we need both args to be
-			// addressable so we can pass them to the runtime.
-			n.Left = orderaddrtemp(n.Left, order)
-			n.Right = orderaddrtemp(n.Right, order)
-		}
-	}
-
-	lineno = lno
-	return n
-}
-
-// okas creates and returns an assignment of val to ok,
-// including an explicit conversion if necessary.
-func okas(ok, val *Node) *Node {
-	if !isblank(ok) {
-		val = conv(val, ok.Type)
-	}
-	return nod(OAS, ok, val)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/pgen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/pgen.go
deleted file mode 100644
index 96e6405..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/pgen.go
+++ /dev/null
@@ -1,454 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/pgen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/pgen.go:1
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// "Portable" code generation.
-
-var makefuncdatasym_nsym int
-
-func makefuncdatasym(nameprefix string, funcdatakind int64) *Sym {
-	sym := lookupN(nameprefix, makefuncdatasym_nsym)
-	makefuncdatasym_nsym++
-	pnod := newname(sym)
-	pnod.Class = PEXTERN
-	p := Gins(obj.AFUNCDATA, nil, pnod)
-	Addrconst(&p.From, funcdatakind)
-	return sym
-}
-
-// gvardef inserts a VARDEF for n into the instruction stream.
-// VARDEF is an annotation for the liveness analysis, marking a place
-// where a complete initialization (definition) of a variable begins.
-// Since the liveness analysis can see initialization of single-word
-// variables quite easily, gvardef is usually only called for multi-word
-// or 'fat' variables, those satisfying isfat(n->type).
-// However, gvardef is also called when a non-fat variable is initialized
-// via a block move; the only time this happens is when you have
-//	return f()
-// for a function with multiple return values exactly matching the return
-// types of the current function.
-//
-// A 'VARDEF x' annotation in the instruction stream tells the liveness
-// analysis to behave as though the variable x is being initialized at that
-// point in the instruction stream. The VARDEF must appear before the
-// actual (multi-instruction) initialization, and it must also appear after
-// any uses of the previous value, if any. For example, if compiling:
-//
-//	x = x[1:]
-//
-// it is important to generate code like:
-//
-//	base, len, cap = pieces of x[1:]
-//	VARDEF x
-//	x = {base, len, cap}
-//
-// If instead the generated code looked like:
-//
-//	VARDEF x
-//	base, len, cap = pieces of x[1:]
-//	x = {base, len, cap}
-//
-// then the liveness analysis would decide the previous value of x was
-// unnecessary even though it is about to be used by the x[1:] computation.
-// Similarly, if the generated code looked like:
-//
-//	base, len, cap = pieces of x[1:]
-//	x = {base, len, cap}
-//	VARDEF x
-//
-// then the liveness analysis will not preserve the new value of x, because
-// the VARDEF appears to have "overwritten" it.
-//
-// VARDEF is a bit of a kludge to work around the fact that the instruction
-// stream is working on single-word values but the liveness analysis
-// wants to work on individual variables, which might be multi-word
-// aggregates. It might make sense at some point to look into letting
-// the liveness analysis work on single-word values as well, although
-// there are complications around interface values, slices, and strings,
-// all of which cannot be treated as individual words.
-//
-// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
-// even if its address has been taken. That is, a VARKILL annotation asserts
-// that its argument is certainly dead, for use when the liveness analysis
-// would not otherwise be able to deduce that fact.
-
-func gvardefx(n *Node, as obj.As) {
-	if n == nil {
-		Fatalf("gvardef nil")
-	}
-	if n.Op != ONAME {
-		yyerror("gvardef %#v; %v", n.Op, n)
-		return
-	}
-
-	switch n.Class {
-	case PAUTO, PPARAM, PPARAMOUT:
-		if !n.Used {
-			Prog(obj.ANOP)
-			return
-		}
-
-		if as == obj.AVARLIVE {
-			Gins(as, n, nil)
-		} else {
-			Gins(as, nil, n)
-		}
-	}
-}
-
-func Gvardef(n *Node) {
-	gvardefx(n, obj.AVARDEF)
-}
-
-func Gvarkill(n *Node) {
-	gvardefx(n, obj.AVARKILL)
-}
-
-func Gvarlive(n *Node) {
-	gvardefx(n, obj.AVARLIVE)
-}
-
-func removevardef(firstp *obj.Prog) {
-	for p := firstp; p != nil; p = p.Link {
-		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL || p.Link.As == obj.AVARLIVE) {
-			p.Link = p.Link.Link
-		}
-		if p.To.Type == obj.TYPE_BRANCH {
-			for p.To.Val.(*obj.Prog) != nil && (p.To.Val.(*obj.Prog).As == obj.AVARDEF || p.To.Val.(*obj.Prog).As == obj.AVARKILL || p.To.Val.(*obj.Prog).As == obj.AVARLIVE) {
-				p.To.Val = p.To.Val.(*obj.Prog).Link
-			}
-		}
-	}
-}
-
-func emitptrargsmap() {
-	if Curfn.Func.Nname.Sym.Name == "_" {
-		return
-	}
-	sym := lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Func.Nname.Sym.Name))
-
-	nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr))
-	bv := bvalloc(int32(nptr) * 2)
-	nbitmap := 1
-	if Curfn.Type.Results().NumFields() > 0 {
-		nbitmap = 2
-	}
-	off := duint32(sym, 0, uint32(nbitmap))
-	off = duint32(sym, off, uint32(bv.n))
-	var xoffset int64
-	if Curfn.IsMethod() {
-		xoffset = 0
-		onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv)
-	}
-
-	if Curfn.Type.Params().NumFields() > 0 {
-		xoffset = 0
-		onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
-	}
-
-	off = dbvec(sym, off, bv)
-	if Curfn.Type.Results().NumFields() > 0 {
-		xoffset = 0
-		onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
-		off = dbvec(sym, off, bv)
-	}
-
-	ggloblsym(sym, int32(off), obj.RODATA|obj.LOCAL)
-}
-
-// cmpstackvarlt reports whether the stack variable a sorts before b.
-//
-// Sort the list of stack variables. Autos after anything else,
-// within autos, unused after used, within used, things with
-// pointers first, zeroed things first, and then decreasing size.
-// Because autos are laid out in decreasing addresses
-// on the stack, pointers first, zeroed things first and decreasing size
-// really means, in memory, things with pointers needing zeroing at
-// the top of the stack and increasing in size.
-// Non-autos sort on offset.
-func cmpstackvarlt(a, b *Node) bool {
-	if (a.Class == PAUTO) != (b.Class == PAUTO) {
-		return b.Class == PAUTO
-	}
-
-	if a.Class != PAUTO {
-		return a.Xoffset < b.Xoffset
-	}
-
-	if a.Used != b.Used {
-		return a.Used
-	}
-
-	ap := haspointers(a.Type)
-	bp := haspointers(b.Type)
-	if ap != bp {
-		return ap
-	}
-
-	ap = a.Name.Needzero
-	bp = b.Name.Needzero
-	if ap != bp {
-		return ap
-	}
-
-	if a.Type.Width != b.Type.Width {
-		return a.Type.Width > b.Type.Width
-	}
-
-	return a.Sym.Name < b.Sym.Name
-}
-
-// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
-type byStackVar []*Node
-
-func (s byStackVar) Len() int           { return len(s) }
-func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
-func (s byStackVar) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
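cmpstackvarlt is a chain of tie-breaking keys wired into sort.Interface. A reduced stand-in with invented types (not the compiler's Node) shows the same shape:

	package main

	import (
		"fmt"
		"sort"
	)

	type slot struct {
		name        string
		size        int64
		hasPointers bool
	}

	type bySlot []slot

	func (s bySlot) Len() int      { return len(s) }
	func (s bySlot) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
	func (s bySlot) Less(i, j int) bool {
		a, b := s[i], s[j]
		if a.hasPointers != b.hasPointers {
			return a.hasPointers // pointered slots first
		}
		if a.size != b.size {
			return a.size > b.size // then decreasing size
		}
		return a.name < b.name // finally by name, for a deterministic layout
	}

	func main() {
		slots := []slot{{"x", 8, false}, {"p", 8, true}, {"buf", 64, false}}
		sort.Sort(bySlot(slots))
		fmt.Println(slots) // [{p 8 true} {buf 64 false} {x 8 false}]
	}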
-var scratchFpMem *Node
-
-func (s *ssaExport) AllocFrame(f *ssa.Func) {
-	Stksize = 0
-	stkptrsize = 0
-
-	// Mark the PAUTO's unused.
-	for _, ln := range Curfn.Func.Dcl {
-		if ln.Class == PAUTO {
-			ln.Used = false
-		}
-	}
-
-	for _, l := range f.RegAlloc {
-		if ls, ok := l.(ssa.LocalSlot); ok {
-			ls.N.(*Node).Used = true
-		}
-
-	}
-
-	scratchUsed := false
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			switch a := v.Aux.(type) {
-			case *ssa.ArgSymbol:
-				a.Node.(*Node).Used = true
-			case *ssa.AutoSymbol:
-				a.Node.(*Node).Used = true
-			}
-
-			if !scratchUsed {
-				scratchUsed = v.Op.UsesScratch()
-			}
-		}
-	}
-
-	if f.Config.NeedsFpScratch {
-		scratchFpMem = temp(Types[TUINT64])
-		scratchFpMem.Used = scratchUsed
-	}
-
-	sort.Sort(byStackVar(Curfn.Func.Dcl))
-
-	// Reassign stack offsets of the locals that are used.
-	for i, n := range Curfn.Func.Dcl {
-		if n.Op != ONAME || n.Class != PAUTO {
-			continue
-		}
-		if !n.Used {
-			Curfn.Func.Dcl = Curfn.Func.Dcl[:i]
-			break
-		}
-
-		dowidth(n.Type)
-		w := n.Type.Width
-		if w >= Thearch.MAXWIDTH || w < 0 {
-			Fatalf("bad width")
-		}
-		Stksize += w
-		Stksize = Rnd(Stksize, int64(n.Type.Align))
-		if haspointers(n.Type) {
-			stkptrsize = Stksize
-		}
-		if Thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
-			Stksize = Rnd(Stksize, int64(Widthptr))
-		}
-		if Stksize >= 1<<31 {
-			setlineno(Curfn)
-			yyerror("stack frame too large (>2GB)")
-		}
-
-		n.Xoffset = -Stksize
-	}
-
-	Stksize = Rnd(Stksize, int64(Widthreg))
-	stkptrsize = Rnd(stkptrsize, int64(Widthreg))
-}
-
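A toy version of the offset arithmetic in AllocFrame, assuming power-of-two alignments and invented slot names; the real code above additionally rounds for pointer alignment on some architectures and tracks stkptrsize:

	package main

	import "fmt"

	// rnd rounds o up to a multiple of r (r a power of two), like Rnd above.
	func rnd(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

	func main() {
		type slot struct {
			name  string
			size  int64
			align int64
		}
		// Already sorted the way byStackVar leaves Curfn.Func.Dcl.
		slots := []slot{{"p", 8, 8}, {"buf", 24, 8}, {"flag", 1, 1}}

		var frame int64
		for _, s := range slots {
			frame += s.size
			frame = rnd(frame, s.align)
			fmt.Printf("%-4s at SP offset %d\n", s.name, -frame) // autos grow downward
		}
		fmt.Println("frame size:", rnd(frame, 8)) // rounded to register width
	}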
-func compile(fn *Node) {
-	if Newproc == nil {
-		Newproc = Sysfunc("newproc")
-		Deferproc = Sysfunc("deferproc")
-		Deferreturn = Sysfunc("deferreturn")
-		panicindex = Sysfunc("panicindex")
-		panicslice = Sysfunc("panicslice")
-		panicdivide = Sysfunc("panicdivide")
-		growslice = Sysfunc("growslice")
-		panicdottype = Sysfunc("panicdottype")
-		panicnildottype = Sysfunc("panicnildottype")
-		assertE2I = Sysfunc("assertE2I")
-		assertE2I2 = Sysfunc("assertE2I2")
-		assertI2I = Sysfunc("assertI2I")
-		assertI2I2 = Sysfunc("assertI2I2")
-	}
-
-	defer func(lno int32) {
-		lineno = lno
-	}(setlineno(fn))
-
-	Curfn = fn
-	dowidth(Curfn.Type)
-
-	if fn.Nbody.Len() == 0 {
-		if pure_go || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") {
-			yyerror("missing function body for %q", fn.Func.Nname.Sym.Name)
-			return
-		}
-
-		emitptrargsmap()
-		return
-	}
-
-	saveerrors()
-
-	if Curfn.Type.FuncType().Outnamed {
-		// add clearing of the output parameters
-		for _, t := range Curfn.Type.Results().Fields().Slice() {
-			if t.Nname != nil {
-				n := nod(OAS, t.Nname, nil)
-				n = typecheck(n, Etop)
-				Curfn.Nbody.Prepend(n)
-			}
-		}
-	}
-
-	order(Curfn)
-	if nerrors != 0 {
-		return
-	}
-
-	hasdefer = false
-	walk(Curfn)
-	if nerrors != 0 {
-		return
-	}
-	if instrumenting {
-		instrument(Curfn)
-	}
-	if nerrors != 0 {
-		return
-	}
-
-	// Build an SSA backend function.
-	ssafn := buildssa(Curfn)
-	if nerrors != 0 {
-		return
-	}
-
-	newplist()
-
-	setlineno(Curfn)
-
-	nam := Curfn.Func.Nname
-	if isblank(nam) {
-		nam = nil
-	}
-	ptxt := Gins(obj.ATEXT, nam, nil)
-	ptxt.From3 = new(obj.Addr)
-	if fn.Func.Dupok {
-		ptxt.From3.Offset |= obj.DUPOK
-	}
-	if fn.Func.Wrapper {
-		ptxt.From3.Offset |= obj.WRAPPER
-	}
-	if fn.Func.NoFramePointer {
-		ptxt.From3.Offset |= obj.NOFRAME
-	}
-	if fn.Func.Needctxt {
-		ptxt.From3.Offset |= obj.NEEDCTXT
-	}
-	if fn.Func.Pragma&Nosplit != 0 {
-		ptxt.From3.Offset |= obj.NOSPLIT
-	}
-	if fn.Func.ReflectMethod {
-		ptxt.From3.Offset |= obj.REFLECTMETHOD
-	}
-	if fn.Func.Pragma&Systemstack != 0 {
-		ptxt.From.Sym.Set(obj.AttrCFunc, true)
-	}
-
-	// Clumsy but important.
-	// See test/recover.go for test cases and src/reflect/value.go
-	// for the actual functions being considered.
-	if myimportpath == "reflect" {
-		if Curfn.Func.Nname.Sym.Name == "callReflect" || Curfn.Func.Nname.Sym.Name == "callMethod" {
-			ptxt.From3.Offset |= obj.WRAPPER
-		}
-	}
-
-	gcargs := makefuncdatasym("gcargs·", obj.FUNCDATA_ArgsPointerMaps)
-	gclocals := makefuncdatasym("gclocals·", obj.FUNCDATA_LocalsPointerMaps)
-
-	if obj.Fieldtrack_enabled != 0 && len(Curfn.Func.FieldTrack) > 0 {
-		trackSyms := make([]*Sym, 0, len(Curfn.Func.FieldTrack))
-		for sym := range Curfn.Func.FieldTrack {
-			trackSyms = append(trackSyms, sym)
-		}
-		sort.Sort(symByName(trackSyms))
-		for _, sym := range trackSyms {
-			gtrack(sym)
-		}
-	}
-
-	for _, n := range fn.Func.Dcl {
-		if n.Op != ONAME { // might be OTYPE or OLITERAL
-			continue
-		}
-		switch n.Class {
-		case PAUTO:
-			if !n.Used {
-				continue
-			}
-			fallthrough
-		case PPARAM, PPARAMOUT:
-			// The symbol is excluded later from debugging info if its name begins with ".autotmp_", but the type is still necessary.
-			// See bugs #17644 and #17830 and cmd/internal/dwarf/dwarf.go
-			p := Gins(obj.ATYPE, n, nil)
-			p.From.Sym = obj.Linklookup(Ctxt, n.Sym.Name, 0)
-			p.To.Type = obj.TYPE_MEM
-			p.To.Name = obj.NAME_EXTERN
-			p.To.Sym = Linksym(ngotype(n))
-		}
-	}
-
-	genssa(ssafn, ptxt, gcargs, gclocals)
-	ssafn.Free()
-}
-
-type symByName []*Sym
-
-func (a symByName) Len() int           { return len(a) }
-func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
-func (a symByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/pgen_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/pgen_test.go
deleted file mode 100644
index ef80bca..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/pgen_test.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/pgen_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/pgen_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"reflect"
-	"sort"
-	"testing"
-)
-
-func typeWithoutPointers() *Type {
-	return &Type{Etype: TSTRUCT, Extra: &StructType{Haspointers: 1}} // haspointers -> false
-}
-
-func typeWithPointers() *Type {
-	return &Type{Etype: TSTRUCT, Extra: &StructType{Haspointers: 2}} // haspointers -> true
-}
-
-// Test all code paths for cmpstackvarlt.
-func TestCmpstackvar(t *testing.T) {
-	testdata := []struct {
-		a, b Node
-		lt   bool
-	}{
-		{
-			Node{Class: PAUTO},
-			Node{Class: PFUNC},
-			false,
-		},
-		{
-			Node{Class: PFUNC},
-			Node{Class: PAUTO},
-			true,
-		},
-		{
-			Node{Class: PFUNC, Xoffset: 0},
-			Node{Class: PFUNC, Xoffset: 10},
-			true,
-		},
-		{
-			Node{Class: PFUNC, Xoffset: 20},
-			Node{Class: PFUNC, Xoffset: 10},
-			false,
-		},
-		{
-			Node{Class: PFUNC, Xoffset: 10},
-			Node{Class: PFUNC, Xoffset: 10},
-			false,
-		},
-		{
-			Node{Class: PPARAM, Xoffset: 10},
-			Node{Class: PPARAMOUT, Xoffset: 20},
-			true,
-		},
-		{
-			Node{Class: PPARAMOUT, Xoffset: 10},
-			Node{Class: PPARAM, Xoffset: 20},
-			true,
-		},
-		{
-			Node{Class: PAUTO, Used: true},
-			Node{Class: PAUTO, Used: false},
-			true,
-		},
-		{
-			Node{Class: PAUTO, Used: false},
-			Node{Class: PAUTO, Used: true},
-			false,
-		},
-		{
-			Node{Class: PAUTO, Type: typeWithoutPointers()},
-			Node{Class: PAUTO, Type: typeWithPointers()},
-			false,
-		},
-		{
-			Node{Class: PAUTO, Type: typeWithPointers()},
-			Node{Class: PAUTO, Type: typeWithoutPointers()},
-			true,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}},
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: false}},
-			true,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: false}},
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}},
-			false,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
-			Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
-			false,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
-			Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
-			true,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
-			true,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
-			false,
-		},
-		{
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
-			Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
-			false,
-		},
-	}
-	for _, d := range testdata {
-		got := cmpstackvarlt(&d.a, &d.b)
-		if got != d.lt {
-			t.Errorf("want %#v < %#v", d.a, d.b)
-		}
-		// If we expect a < b to be true, check that b < a is false.
-		if d.lt && cmpstackvarlt(&d.b, &d.a) {
-			t.Errorf("unexpected %#v < %#v", d.b, d.a)
-		}
-	}
-}
-
-func TestStackvarSort(t *testing.T) {
-	inp := []*Node{
-		{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
-	}
-	want := []*Node{
-		{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
-		{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
-		{Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &Sym{}},
-	}
-	// haspointers updates Type.Haspointers as a side effect, so
-	// exercise this function on all inputs so that reflect.DeepEqual
-	// doesn't produce false positives.
-	for i := range want {
-		haspointers(want[i].Type)
-		haspointers(inp[i].Type)
-	}
-
-	sort.Sort(byStackVar(inp))
-	if !reflect.DeepEqual(want, inp) {
-		t.Error("sort failed")
-		for i := range inp {
-			g := inp[i]
-			w := want[i]
-			eq := reflect.DeepEqual(w, g)
-			if !eq {
-				t.Log(i, w, g)
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/phi.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/phi.go
deleted file mode 100644
index d754bc3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/phi.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/phi.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/phi.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/compile/internal/ssa"
-	"container/heap"
-	"fmt"
-)
-
-// This file contains the algorithm to place phi nodes in a function.
-// For small functions, we use Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau.
-// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
-// For large functions, we use Sreedhar & Gao: A Linear Time Algorithm for Placing Φ-Nodes.
-// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.1979&rep=rep1&type=pdf
-
-const smallBlocks = 500
-
-const debugPhi = false
-
-// insertPhis finds all the places in the function where a phi is
-// necessary and inserts them.
-// Uses FwdRef ops to find all uses of variables, and s.defvars to find
-// all definitions.
-// Phi values are inserted, and all FwdRefs are changed to a Copy
-// of the appropriate phi or definition.
-// TODO: make this part of cmd/compile/internal/ssa somehow?
-func (s *state) insertPhis() {
-	if len(s.f.Blocks) <= smallBlocks {
-		sps := simplePhiState{s: s, f: s.f, defvars: s.defvars}
-		sps.insertPhis()
-		return
-	}
-	ps := phiState{s: s, f: s.f, defvars: s.defvars}
-	ps.insertPhis()
-}
-
-type phiState struct {
-	s       *state                 // SSA state
-	f       *ssa.Func              // function to work on
-	defvars []map[*Node]*ssa.Value // defined variables at end of each block
-
-	varnum map[*Node]int32 // variable numbering
-
-	// properties of the dominator tree
-	idom  []*ssa.Block // dominator parents
-	tree  []domBlock   // dominator child+sibling
-	level []int32      // level in dominator tree (0 = root or unreachable, 1 = children of root, ...)
-
-	// scratch locations
-	priq   blockHeap    // priority queue of blocks, higher level (toward leaves) = higher priority
-	q      []*ssa.Block // inner loop queue
-	queued *sparseSet   // has been put in q
-	hasPhi *sparseSet   // has a phi
-	hasDef *sparseSet   // has a write of the variable we're processing
-
-	// miscellaneous
-	placeholder *ssa.Value // dummy value to use as a "not set yet" placeholder.
-}
-
-func (s *phiState) insertPhis() {
-	if debugPhi {
-		fmt.Println(s.f.String())
-	}
-
-	// Find all the variables for which we need to match up reads & writes.
-	// This step prunes any basic-block-only variables from consideration.
-	// Generate a numbering for these variables.
-	s.varnum = map[*Node]int32{}
-	var vars []*Node
-	var vartypes []ssa.Type
-	for _, b := range s.f.Blocks {
-		for _, v := range b.Values {
-			if v.Op != ssa.OpFwdRef {
-				continue
-			}
-			var_ := v.Aux.(*Node)
-
-			// Optimization: look back 1 block for the definition.
-			if len(b.Preds) == 1 {
-				c := b.Preds[0].Block()
-				if w := s.defvars[c.ID][var_]; w != nil {
-					v.Op = ssa.OpCopy
-					v.Aux = nil
-					v.AddArg(w)
-					continue
-				}
-			}
-
-			if _, ok := s.varnum[var_]; ok {
-				continue
-			}
-			s.varnum[var_] = int32(len(vartypes))
-			if debugPhi {
-				fmt.Printf("var%d = %v\n", len(vartypes), var_)
-			}
-			vars = append(vars, var_)
-			vartypes = append(vartypes, v.Type)
-		}
-	}
-
-	if len(vartypes) == 0 {
-		return
-	}
-
-	// Find all definitions of the variables we need to process.
-	// defs[n] contains all the blocks in which variable number n is assigned.
-	defs := make([][]*ssa.Block, len(vartypes))
-	for _, b := range s.f.Blocks {
-		for var_ := range s.defvars[b.ID] { // TODO: encode defvars some other way (explicit ops)? make defvars[n] a slice instead of a map.
-			if n, ok := s.varnum[var_]; ok {
-				defs[n] = append(defs[n], b)
-			}
-		}
-	}
-
-	// Make dominator tree.
-	s.idom = s.f.Idom()
-	s.tree = make([]domBlock, s.f.NumBlocks())
-	for _, b := range s.f.Blocks {
-		p := s.idom[b.ID]
-		if p != nil {
-			s.tree[b.ID].sibling = s.tree[p.ID].firstChild
-			s.tree[p.ID].firstChild = b
-		}
-	}
-	// Compute levels in dominator tree.
-	// With parent pointers we can do a depth-first walk without
-	// any auxiliary storage.
-	s.level = make([]int32, s.f.NumBlocks())
-	b := s.f.Entry
-levels:
-	for {
-		if p := s.idom[b.ID]; p != nil {
-			s.level[b.ID] = s.level[p.ID] + 1
-			if debugPhi {
-				fmt.Printf("level %s = %d\n", b, s.level[b.ID])
-			}
-		}
-		if c := s.tree[b.ID].firstChild; c != nil {
-			b = c
-			continue
-		}
-		for {
-			if c := s.tree[b.ID].sibling; c != nil {
-				b = c
-				continue levels
-			}
-			b = s.idom[b.ID]
-			if b == nil {
-				break levels
-			}
-		}
-	}
-
-	// Allocate scratch locations.
-	s.priq.level = s.level
-	s.q = make([]*ssa.Block, 0, s.f.NumBlocks())
-	s.queued = newSparseSet(s.f.NumBlocks())
-	s.hasPhi = newSparseSet(s.f.NumBlocks())
-	s.hasDef = newSparseSet(s.f.NumBlocks())
-	s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, ssa.TypeInvalid)
-
-	// Generate phi ops for each variable.
-	for n := range vartypes {
-		s.insertVarPhis(n, vars[n], defs[n], vartypes[n])
-	}
-
-	// Resolve FwdRefs to the correct write or phi.
-	s.resolveFwdRefs()
-
-	// Erase variable numbers stored in AuxInt fields of phi ops. They are no longer needed.
-	for _, b := range s.f.Blocks {
-		for _, v := range b.Values {
-			if v.Op == ssa.OpPhi {
-				v.AuxInt = 0
-			}
-		}
-	}
-}
-
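The level computation above walks the dominator tree depth-first using only parent, firstChild and sibling pointers, with no explicit stack. The same idea in a stand-alone form (invented node type):

	package main

	import "fmt"

	type node struct {
		name                        string
		parent, firstChild, sibling *node
	}

	func addChild(p, c *node) {
		c.parent = p
		c.sibling = p.firstChild
		p.firstChild = c
	}

	func main() {
		root := &node{name: "entry"}
		a, b, c := &node{name: "a"}, &node{name: "b"}, &node{name: "c"}
		addChild(root, a)
		addChild(root, b)
		addChild(a, c)

		level := map[*node]int{}
		n := root
	walk:
		for {
			if n.parent != nil {
				level[n] = level[n.parent] + 1
			}
			fmt.Println(n.name, "level", level[n])
			if n.firstChild != nil { // descend
				n = n.firstChild
				continue
			}
			for { // climb until a sibling is found
				if n.sibling != nil {
					n = n.sibling
					continue walk
				}
				n = n.parent
				if n == nil {
					break walk
				}
			}
		}
	}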
-func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ ssa.Type) {
-	priq := &s.priq
-	q := s.q
-	queued := s.queued
-	queued.clear()
-	hasPhi := s.hasPhi
-	hasPhi.clear()
-	hasDef := s.hasDef
-	hasDef.clear()
-
-	// Add defining blocks to priority queue.
-	for _, b := range defs {
-		priq.a = append(priq.a, b)
-		hasDef.add(b.ID)
-		if debugPhi {
-			fmt.Printf("def of var%d in %s\n", n, b)
-		}
-	}
-	heap.Init(priq)
-
-	// Visit blocks defining variable n, from deepest to shallowest.
-	for len(priq.a) > 0 {
-		currentRoot := heap.Pop(priq).(*ssa.Block)
-		if debugPhi {
-			fmt.Printf("currentRoot %s\n", currentRoot)
-		}
-		// Walk subtree below definition.
-		// Skip subtrees we've done in previous iterations.
-		// Find edges exiting tree dominated by definition (the dominance frontier).
-		// Insert phis at target blocks.
-		if queued.contains(currentRoot.ID) {
-			s.s.Fatalf("root already in queue")
-		}
-		q = append(q, currentRoot)
-		queued.add(currentRoot.ID)
-		for len(q) > 0 {
-			b := q[len(q)-1]
-			q = q[:len(q)-1]
-			if debugPhi {
-				fmt.Printf("  processing %s\n", b)
-			}
-
-			for _, e := range b.Succs {
-				c := e.Block()
-				// TODO: if the variable is dead at c, skip it.
-				if s.level[c.ID] > s.level[currentRoot.ID] {
-					// a D-edge, or an edge whose target is in currentRoot's subtree.
-					continue
-				}
-				if !hasPhi.contains(c.ID) {
-					// Add a phi to block c for variable n.
-					hasPhi.add(c.ID)
-					v := c.NewValue0I(currentRoot.Line, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
-					// Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
-					s.s.addNamedValue(var_, v)
-					for i := 0; i < len(c.Preds); i++ {
-						v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
-					}
-					if debugPhi {
-						fmt.Printf("new phi for var%d in %s: %s\n", n, c, v)
-					}
-					if !hasDef.contains(c.ID) {
-						// There's now a new definition of this variable in block c.
-						// Add it to the priority queue to explore.
-						heap.Push(priq, c)
-						hasDef.add(c.ID)
-					}
-				}
-			}
-
-			// Visit children if they have not been visited yet.
-			for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
-				if !queued.contains(c.ID) {
-					q = append(q, c)
-					queued.add(c.ID)
-				}
-			}
-		}
-	}
-}
-
-// resolveFwdRefs links all FwdRef uses up to their nearest dominating definition.
-func (s *phiState) resolveFwdRefs() {
-	// Do a depth-first walk of the dominator tree, keeping track
-	// of the most-recently-seen value for each variable.
-
-	// Map from variable ID to SSA value at the current point of the walk.
-	values := make([]*ssa.Value, len(s.varnum))
-	for i := range values {
-		values[i] = s.placeholder
-	}
-
-	// Stack of work to do.
-	type stackEntry struct {
-		b *ssa.Block // block to explore
-
-		// variable/value pair to reinstate on exit
-		n int32 // variable ID
-		v *ssa.Value
-
-		// Note: only one of b or n,v will be set.
-	}
-	var stk []stackEntry
-
-	stk = append(stk, stackEntry{b: s.f.Entry})
-	for len(stk) > 0 {
-		work := stk[len(stk)-1]
-		stk = stk[:len(stk)-1]
-
-		b := work.b
-		if b == nil {
-			// On exit from a block, this case will undo any assignments done below.
-			values[work.n] = work.v
-			continue
-		}
-
-		// Process phis as new defs. They come before FwdRefs in this block.
-		for _, v := range b.Values {
-			if v.Op != ssa.OpPhi {
-				continue
-			}
-			n := int32(v.AuxInt)
-			// Remember the old assignment so we can undo it when we exit b.
-			stk = append(stk, stackEntry{n: n, v: values[n]})
-			// Record the new assignment.
-			values[n] = v
-		}
-
-		// Replace a FwdRef op with the current incoming value for its variable.
-		for _, v := range b.Values {
-			if v.Op != ssa.OpFwdRef {
-				continue
-			}
-			n := s.varnum[v.Aux.(*Node)]
-			v.Op = ssa.OpCopy
-			v.Aux = nil
-			v.AddArg(values[n])
-		}
-
-		// Establish values for variables defined in b.
-		for var_, v := range s.defvars[b.ID] {
-			n, ok := s.varnum[var_]
-			if !ok {
-				// some variable not live across a basic block boundary.
-				continue
-			}
-			// Remember the old assignment so we can undo it when we exit b.
-			stk = append(stk, stackEntry{n: n, v: values[n]})
-			// Record the new assignment.
-			values[n] = v
-		}
-
-		// Replace phi args in successors with the current incoming value.
-		for _, e := range b.Succs {
-			c, i := e.Block(), e.Index()
-			for j := len(c.Values) - 1; j >= 0; j-- {
-				v := c.Values[j]
-				if v.Op != ssa.OpPhi {
-					break // All phis will be at the end of the block during phi building.
-				}
-				v.SetArg(i, values[v.AuxInt])
-			}
-		}
-
-		// Walk children in dominator tree.
-		for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
-			stk = append(stk, stackEntry{b: c})
-		}
-	}
-}
-
-// domBlock contains extra per-block information to record the dominator tree.
-type domBlock struct {
-	firstChild *ssa.Block // first child of block in dominator tree
-	sibling    *ssa.Block // next child of parent in dominator tree
-}
-
-// A block heap is used as a priority queue to implement the PiggyBank
-// from Sreedhar and Gao.  That paper uses an array which is better
-// asymptotically but worse in the common case when the PiggyBank
-// holds a sparse set of blocks.
-type blockHeap struct {
-	a     []*ssa.Block // block IDs in heap
-	level []int32      // depth in dominator tree (static, used for determining priority)
-}
-
-func (h *blockHeap) Len() int      { return len(h.a) }
-func (h *blockHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
-
-func (h *blockHeap) Push(x interface{}) {
-	v := x.(*ssa.Block)
-	h.a = append(h.a, v)
-}
-func (h *blockHeap) Pop() interface{} {
-	old := h.a
-	n := len(old)
-	x := old[n-1]
-	h.a = old[:n-1]
-	return x
-}
-func (h *blockHeap) Less(i, j int) bool {
-	return h.level[h.a[i].ID] > h.level[h.a[j].ID]
-}
-
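blockHeap plugs into container/heap to get the PiggyBank's deepest-level-first ordering. A stand-alone equivalent keyed by a plain level table (invented names, Go 1.8-style interface{} signatures):

	package main

	import (
		"container/heap"
		"fmt"
	)

	type levelHeap struct {
		ids   []int // block IDs currently in the heap
		level []int // level[id] = depth in the dominator tree
	}

	func (h *levelHeap) Len() int           { return len(h.ids) }
	func (h *levelHeap) Swap(i, j int)      { h.ids[i], h.ids[j] = h.ids[j], h.ids[i] }
	func (h *levelHeap) Less(i, j int) bool { return h.level[h.ids[i]] > h.level[h.ids[j]] }
	func (h *levelHeap) Push(x interface{}) { h.ids = append(h.ids, x.(int)) }
	func (h *levelHeap) Pop() interface{} {
		old := h.ids
		n := len(old)
		x := old[n-1]
		h.ids = old[:n-1]
		return x
	}

	func main() {
		h := &levelHeap{level: []int{0, 2, 1, 3}}
		for id := range h.level {
			heap.Push(h, id)
		}
		for h.Len() > 0 {
			fmt.Print(heap.Pop(h).(int), " ") // 3 1 2 0: deepest blocks come out first
		}
		fmt.Println()
	}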
-// TODO: stop walking the iterated dominance frontier when
-// the variable is dead. Maybe detect that by checking if the
-// node we're on is reverse dominated by all the reads?
-// Reverse dominated by the highest common successor of all the reads?
-
-// copy of ../ssa/sparseset.go
-// TODO: move this file to ../ssa, then use sparseSet there.
-type sparseSet struct {
-	dense  []ssa.ID
-	sparse []int32
-}
-
-// newSparseSet returns a sparseSet that can represent
-// integers between 0 and n-1
-func newSparseSet(n int) *sparseSet {
-	return &sparseSet{dense: nil, sparse: make([]int32, n)}
-}
-
-func (s *sparseSet) contains(x ssa.ID) bool {
-	i := s.sparse[x]
-	return i < int32(len(s.dense)) && s.dense[i] == x
-}
-
-func (s *sparseSet) add(x ssa.ID) {
-	i := s.sparse[x]
-	if i < int32(len(s.dense)) && s.dense[i] == x {
-		return
-	}
-	s.dense = append(s.dense, x)
-	s.sparse[x] = int32(len(s.dense)) - 1
-}
-
-func (s *sparseSet) clear() {
-	s.dense = s.dense[:0]
-}
-
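The sparse-set trick above gives O(1) add, contains and clear without ever zeroing the sparse array: stale sparse entries are harmless because they are only trusted when dense points back at them. The same structure over plain ints, as a stand-alone demo with invented names:

	package main

	import "fmt"

	type intSet struct {
		dense  []int
		sparse []int
	}

	func newIntSet(n int) *intSet { return &intSet{sparse: make([]int, n)} }

	func (s *intSet) contains(x int) bool {
		i := s.sparse[x]
		return i < len(s.dense) && s.dense[i] == x
	}

	func (s *intSet) add(x int) {
		if s.contains(x) {
			return
		}
		s.sparse[x] = len(s.dense)
		s.dense = append(s.dense, x)
	}

	func main() {
		s := newIntSet(10)
		s.add(3)
		s.add(7)
		fmt.Println(s.contains(3), s.contains(5)) // true false
		s.dense = s.dense[:0]                     // clear: O(1), sparse is left dirty on purpose
		fmt.Println(s.contains(3))                // false: sparse[3] is stale but dense is empty
	}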
-// Variant to use for small functions.
-type simplePhiState struct {
-	s       *state                 // SSA state
-	f       *ssa.Func              // function to work on
-	fwdrefs []*ssa.Value           // list of FwdRefs to be processed
-	defvars []map[*Node]*ssa.Value // defined variables at end of each block
-}
-
-func (s *simplePhiState) insertPhis() {
-	// Find FwdRef ops.
-	for _, b := range s.f.Blocks {
-		for _, v := range b.Values {
-			if v.Op != ssa.OpFwdRef {
-				continue
-			}
-			s.fwdrefs = append(s.fwdrefs, v)
-			var_ := v.Aux.(*Node)
-			if _, ok := s.defvars[b.ID][var_]; !ok {
-				s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
-			}
-		}
-	}
-
-	var args []*ssa.Value
-
-loop:
-	for len(s.fwdrefs) > 0 {
-		v := s.fwdrefs[len(s.fwdrefs)-1]
-		s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
-		b := v.Block
-		var_ := v.Aux.(*Node)
-		if len(b.Preds) == 0 {
-			if b == s.f.Entry {
-				// No variable should be live at entry.
-				s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
-			}
-			// This block is dead; it has no predecessors and it is not the entry block.
-			// It doesn't matter what we use here as long as it is well-formed.
-			v.Op = ssa.OpUnknown
-			v.Aux = nil
-			continue
-		}
-		// Find variable value on each predecessor.
-		args = args[:0]
-		for _, e := range b.Preds {
-			args = append(args, s.lookupVarOutgoing(e.Block(), v.Type, var_, v.Line))
-		}
-
-		// Decide if we need a phi or not. We need a phi if there
-		// are two different args (which are both not v).
-		var w *ssa.Value
-		for _, a := range args {
-			if a == v {
-				continue // self-reference
-			}
-			if a == w {
-				continue // already have this witness
-			}
-			if w != nil {
-				// two witnesses, need a phi value
-				v.Op = ssa.OpPhi
-				v.AddArgs(args...)
-				v.Aux = nil
-				continue loop
-			}
-			w = a // save witness
-		}
-		if w == nil {
-			s.s.Fatalf("no witness for reachable phi %s", v)
-		}
-		// One witness. Make v a copy of w.
-		v.Op = ssa.OpCopy
-		v.Aux = nil
-		v.AddArg(w)
-	}
-}
-
-// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t ssa.Type, var_ *Node, line int32) *ssa.Value {
-	for {
-		if v := s.defvars[b.ID][var_]; v != nil {
-			return v
-		}
-		// The variable is not defined by b and we haven't looked it up yet.
-		// If b has exactly one predecessor, loop to look it up there.
-		// Otherwise, give up and insert a new FwdRef and resolve it later.
-		if len(b.Preds) != 1 {
-			break
-		}
-		b = b.Preds[0].Block()
-	}
-	// Generate a FwdRef for the variable and return that.
-	v := b.NewValue0A(line, ssa.OpFwdRef, t, var_)
-	s.defvars[b.ID][var_] = v
-	s.s.addNamedValue(var_, v)
-	s.fwdrefs = append(s.fwdrefs, v)
-	return v
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/plive.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/plive.go
deleted file mode 100644
index 921c088..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/plive.go
+++ /dev/null
@@ -1,1780 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/plive.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/plive.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Garbage collector liveness bitmap generation.
-
-// The command line flag -live causes this code to print debug information.
-// The levels are:
-//
-//	-live (aka -live=1): print liveness lists as code warnings at safe points
-//	-live=2: print an assembly listing with liveness annotations
-//	-live=3: print information during each computation phase (much chattier)
-//
-// Each level includes the earlier output as well.
-
-package gc
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"crypto/md5"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-const (
-	UNVISITED = 0
-	VISITED   = 1
-)
-
-// An ordinary basic block.
-//
-// Instructions are threaded together in a doubly-linked list. To iterate in
-// program order, follow the link pointer from the first node and stop after
-// the last node has been visited:
-//
-//   for p = bb.first; ; p = p.link {
-//     ...
-//     if p == bb.last {
-//       break
-//     }
-//   }
-//
-// To iterate in reverse program order, follow the opt pointer from the
-// last node:
-//
-//   for p = bb.last; p != nil; p = p.opt {
-//     ...
-//   }
-type BasicBlock struct {
-	pred            []*BasicBlock // predecessors; if none, probably start of CFG
-	succ            []*BasicBlock // successors; if none, probably ends in return statement
-	first           *obj.Prog     // first instruction in block
-	last            *obj.Prog     // last instruction in block
-	rpo             int           // reverse post-order number (also index in cfg)
-	mark            int           // mark bit for traversals
-	lastbitmapindex int           // for livenessepilogue
-
-	// Summary sets of block effects.
-
-	// Computed during livenessprologue using only the content of
-	// individual blocks:
-	//
-	//	uevar: upward exposed variables (used before set in block)
-	//	varkill: killed variables (set in block)
-	//	avarinit: addrtaken variables set or used (proof of initialization)
-	uevar    bvec
-	varkill  bvec
-	avarinit bvec
-
-	// Computed during livenesssolve using control flow information:
-	//
-	//	livein: variables live at block entry
-	//	liveout: variables live at block exit
-	//	avarinitany: addrtaken variables possibly initialized at block exit
-	//		(initialized in block or at exit from any predecessor block)
-	//	avarinitall: addrtaken variables certainly initialized at block exit
-	//		(initialized in block or at exit from all predecessor blocks)
-	livein      bvec
-	liveout     bvec
-	avarinitany bvec
-	avarinitall bvec
-}
-
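For orientation, these summary sets feed the standard backward liveness equations (the solver itself, livenesssolve, is outside this hunk): liveout[b] is the union of livein over b's successors, and in bit-vector terms livein[b] = uevar[b] | (liveout[b] &^ varkill[b]). A two-block worked example with one bit per variable (invented names):

	package main

	import "fmt"

	func main() {
		const (
			X = 1 << 0 // variable x
			Y = 1 << 1 // variable y
		)

		// b1: x = 1            uevar = {}, varkill = {x}
		// b2: y = x; print(y)  uevar = {x}, varkill = {y}
		uevar := map[string]uint{"b1": 0, "b2": X}
		varkill := map[string]uint{"b1": X, "b2": Y}

		// b2 has no successors, so liveout(b2) = 0 and livein(b2) = uevar(b2).
		liveinB2 := uevar["b2"] | (0 &^ varkill["b2"])

		// b1's only successor is b2.
		liveoutB1 := liveinB2
		liveinB1 := uevar["b1"] | (liveoutB1 &^ varkill["b1"])

		fmt.Printf("livein(b2)=%02b liveout(b1)=%02b livein(b1)=%02b\n",
			liveinB2, liveoutB1, liveinB1) // 01 01 00: x is live into b2 but not into b1
	}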
-// A collection of global state used by liveness analysis.
-type Liveness struct {
-	fn   *Node
-	ptxt *obj.Prog
-	vars []*Node
-	cfg  []*BasicBlock
-
-	// An array with a bit vector for each safe point tracking live pointers
-	// in the arguments and locals area, indexed by bb.rpo.
-	argslivepointers []bvec
-	livepointers     []bvec
-}
-
-// ProgInfo holds information about the instruction for use
-// by clients such as the compiler. The exact meaning of this
-// data is up to the client and is not interpreted by the cmd/internal/obj/... packages.
-type ProgInfo struct {
-	_     struct{} // to prevent unkeyed literals. Trailing zero-sized field will take space.
-	Flags uint32   // flag bits
-}
-
-// Constructs a new basic block containing a single instruction.
-func newblock(prog *obj.Prog) *BasicBlock {
-	if prog == nil {
-		Fatalf("newblock: prog cannot be nil")
-	}
-	// type block allows us to allocate a BasicBlock
-	// and its pred/succ slice together.
-	type block struct {
-		result BasicBlock
-		pred   [2]*BasicBlock
-		succ   [2]*BasicBlock
-	}
-	b := new(block)
-
-	result := &b.result
-	result.rpo = -1
-	result.mark = UNVISITED
-	result.first = prog
-	result.last = prog
-	result.pred = b.pred[:0]
-	result.succ = b.succ[:0]
-	return result
-}
-
-// Adds an edge between two basic blocks by making from a predecessor of to and
-// to a successor of from.
-func addedge(from *BasicBlock, to *BasicBlock) {
-	if from == nil {
-		Fatalf("addedge: from is nil")
-	}
-	if to == nil {
-		Fatalf("addedge: to is nil")
-	}
-	from.succ = append(from.succ, to)
-	to.pred = append(to.pred, from)
-}
-
-// Inserts prev before curr in the instruction
-// stream. Any control flow, such as branches or fall-throughs, that targets the
-// existing instruction is adjusted to target the new instruction.
-func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
-	// There may be other instructions pointing at curr,
-	// and we want them to now point at prev. Instead of
-	// trying to find all such instructions, swap the contents
-	// so that the problem becomes inserting next after curr.
-	// The "opt" field is the backward link in the linked list.
-
-	// Overwrite curr's data with prev, but keep the list links.
-	tmp := *curr
-
-	*curr = *prev
-	curr.Opt = tmp.Opt
-	curr.Link = tmp.Link
-
-	// Overwrite prev (now next) with curr's old data.
-	next := prev
-
-	*next = tmp
-	next.Opt = nil
-	next.Link = nil
-
-	// Now insert next after curr.
-	next.Link = curr.Link
-
-	next.Opt = curr
-	curr.Link = next
-	if next.Link != nil && next.Link.Opt == curr {
-		next.Link.Opt = next
-	}
-
-	if bb.last == curr {
-		bb.last = next
-	}
-}
-
-// A pretty printer for basic blocks.
-func printblock(bb *BasicBlock) {
-	fmt.Printf("basic block %d\n", bb.rpo)
-	fmt.Printf("\tpred:")
-	for _, pred := range bb.pred {
-		fmt.Printf(" %d", pred.rpo)
-	}
-	fmt.Printf("\n")
-	fmt.Printf("\tsucc:")
-	for _, succ := range bb.succ {
-		fmt.Printf(" %d", succ.rpo)
-	}
-	fmt.Printf("\n")
-	fmt.Printf("\tprog:\n")
-	for prog := bb.first; ; prog = prog.Link {
-		fmt.Printf("\t\t%v\n", prog)
-		if prog == bb.last {
-			break
-		}
-	}
-}
-
-// Iterates over a basic block applying a callback to each instruction. There
-// are two criteria for termination. If the end of the basic block is reached,
-// false is returned. If the callback returns true, the iteration is stopped
-// and true is returned.
-func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
-	for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
-		if f(p) {
-			return true
-		}
-	}
-	return false
-}
-
-// livenessShouldTrack reports whether the liveness analysis
-// should track the variable n.
-// We don't care about variables that have no pointers,
-// nor do we care about non-local variables,
-// nor do we care about empty structs (handled by the pointer check),
-// nor do we care about the fake PAUTOHEAP variables.
-func livenessShouldTrack(n *Node) bool {
-	return n.Op == ONAME && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && haspointers(n.Type)
-}
-
-// getvariables returns the list of on-stack variables that we need to track.
-func getvariables(fn *Node) []*Node {
-	var vars []*Node
-	for _, n := range fn.Func.Dcl {
-		if n.Op == ONAME {
-			// The Node.opt field is available for use by optimization passes.
-			// We use it to hold the index of the node in the variables array
-			// (nil means the Node is not in the variables array).
-			// The Node.curfn field is supposed to be set to the current function
-			// already, but for some compiler-introduced names it seems not to be,
-			// so fix that here.
-			// Later, when we want to find the index of a node in the variables list,
-			// we will check that n.Curfn == Curfn and n.Opt() != nil. Then n.Opt().(int32)
-			// is the index in the variables list.
-			n.SetOpt(nil)
-			n.Name.Curfn = Curfn
-		}
-
-		if livenessShouldTrack(n) {
-			n.SetOpt(int32(len(vars)))
-			vars = append(vars, n)
-		}
-	}
-
-	return vars
-}
-
-// A pretty printer for control flow graphs. Takes a slice of *BasicBlocks.
-func printcfg(cfg []*BasicBlock) {
-	for _, bb := range cfg {
-		printblock(bb)
-	}
-}
-
-// Assigns a reverse post order number to each connected basic block using the
-// standard algorithm. Unconnected blocks will not be affected.
-func reversepostorder(root *BasicBlock, rpo *int32) {
-	root.mark = VISITED
-	for _, bb := range root.succ {
-		if bb.mark == UNVISITED {
-			reversepostorder(bb, rpo)
-		}
-	}
-	*rpo -= 1
-	root.rpo = int(*rpo)
-}
-
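A stand-alone version of the numbering reversepostorder produces, run on a small diamond CFG (invented names): the entry block gets the smallest number, the exit the largest, and, ignoring back edges, every block is numbered before its successors.

	package main

	import "fmt"

	func main() {
		// Diamond CFG: entry -> then, else; then, else -> exit.
		succ := map[string][]string{
			"entry": {"then", "else"},
			"then":  {"exit"},
			"else":  {"exit"},
			"exit":  nil,
		}

		rpo := map[string]int{}
		visited := map[string]bool{}
		next := len(succ)
		var visit func(string)
		visit = func(n string) {
			visited[n] = true
			for _, s := range succ[n] {
				if !visited[s] {
					visit(s)
				}
			}
			next-- // number on the way back out, highest numbers assigned first
			rpo[n] = next
		}
		visit("entry")

		for _, b := range []string{"entry", "then", "else", "exit"} {
			fmt.Println(b, rpo[b]) // entry 0, then 2, else 1, exit 3
		}
	}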
-// Comparison predicate used for sorting basic blocks by their rpo in ascending
-// order.
-type blockrpocmp []*BasicBlock
-
-func (x blockrpocmp) Len() int           { return len(x) }
-func (x blockrpocmp) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo }
-
-// A pattern matcher for call instructions. Returns true when the instruction
-// is a call to a specific package qualified function name.
-func iscall(prog *obj.Prog, name *obj.LSym) bool {
-	if prog == nil {
-		Fatalf("iscall: prog is nil")
-	}
-	if name == nil {
-		Fatalf("iscall: function name is nil")
-	}
-	if prog.As != obj.ACALL {
-		return false
-	}
-	return name == prog.To.Sym
-}
-
-// Returns true for instructions that call a runtime function implementing a
-// select communication clause.
-
-var selectNames [4]*obj.LSym
-
-func isselectcommcasecall(prog *obj.Prog) bool {
-	if selectNames[0] == nil {
-		selectNames[0] = Linksym(Pkglookup("selectsend", Runtimepkg))
-		selectNames[1] = Linksym(Pkglookup("selectrecv", Runtimepkg))
-		selectNames[2] = Linksym(Pkglookup("selectrecv2", Runtimepkg))
-		selectNames[3] = Linksym(Pkglookup("selectdefault", Runtimepkg))
-	}
-
-	for _, name := range selectNames {
-		if iscall(prog, name) {
-			return true
-		}
-	}
-	return false
-}
-
-// Returns true for call instructions that target runtime·newselect.
-
-var isnewselect_sym *obj.LSym
-
-func isnewselect(prog *obj.Prog) bool {
-	if isnewselect_sym == nil {
-		isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
-	}
-	return iscall(prog, isnewselect_sym)
-}
-
-// Returns true for call instructions that target runtime·selectgo.
-
-var isselectgocall_sym *obj.LSym
-
-func isselectgocall(prog *obj.Prog) bool {
-	if isselectgocall_sym == nil {
-		isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
-	}
-	return iscall(prog, isselectgocall_sym)
-}
-
-var isdeferreturn_sym *obj.LSym
-
-func isdeferreturn(prog *obj.Prog) bool {
-	if isdeferreturn_sym == nil {
-		isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
-	}
-	return iscall(prog, isdeferreturn_sym)
-}
-
-// Walk backwards from a runtime·selectgo call up to its immediately dominating
-// runtime·newselect call. Any successor nodes of communication clause nodes
-// are implicit successors of the runtime·selectgo call node. The goal of this
-// analysis is to add these missing edges to complete the control flow graph.
-func addselectgosucc(selectgo *BasicBlock) {
-	pred := selectgo
-	for {
-		if len(pred.pred) == 0 {
-			Fatalf("selectgo does not have a newselect")
-		}
-		pred = pred.pred[0]
-		if blockany(pred, isselectcommcasecall) {
-			// A select comm case block should have exactly one
-			// successor.
-			if len(pred.succ) != 1 {
-				Fatalf("select comm case has too many successors")
-			}
-			succ := pred.succ[0]
-
-			// Its successor should have exactly two successors.
-			// The drop through should flow to the selectgo block
-			// and the branch should lead to the select case
-			// statements block.
-			if len(succ.succ) != 2 {
-				Fatalf("select comm case successor has too many successors")
-			}
-
-			// Add the block as a successor of the selectgo block.
-			addedge(selectgo, succ)
-		}
-
-		if blockany(pred, isnewselect) {
-			// Reached the matching newselect.
-			break
-		}
-	}
-}
-
-// The entry point for the missing selectgo control flow algorithm. Takes a
-// slice of *BasicBlocks containing selectgo calls.
-func fixselectgo(selectgo []*BasicBlock) {
-	for _, bb := range selectgo {
-		addselectgosucc(bb)
-	}
-}
-
-// Constructs a control flow graph from a sequence of instructions. This
-// procedure is complicated by various sources of implicit control flow that are
-// not accounted for using the standard cfg construction algorithm. Returns a
-// slice of *BasicBlocks in control flow graph form (basic blocks ordered by
-// their RPO number).
-func newcfg(firstp *obj.Prog) []*BasicBlock {
-	// Reset the opt field of each prog to nil. In the first and second
-	// passes, instructions that are labels temporarily use the opt field to
-	// point to their basic block. In the third pass, the opt field is reset
-	// to point to the predecessor of an instruction in its basic block.
-	for p := firstp; p != nil; p = p.Link {
-		p.Opt = nil
-	}
-
-	// Allocate a slice to remember where we have seen selectgo calls.
-	// These blocks will be revisited to add successor control flow edges.
-	var selectgo []*BasicBlock
-
-	// Loop through all instructions identifying branch targets
-	// and fall-throughs and allocate basic blocks.
-	var cfg []*BasicBlock
-
-	bb := newblock(firstp)
-	cfg = append(cfg, bb)
-	for p := firstp; p != nil && p.As != obj.AEND; p = p.Link {
-		if p.To.Type == obj.TYPE_BRANCH {
-			if p.To.Val == nil {
-				Fatalf("prog branch to nil")
-			}
-			if p.To.Val.(*obj.Prog).Opt == nil {
-				p.To.Val.(*obj.Prog).Opt = newblock(p.To.Val.(*obj.Prog))
-				cfg = append(cfg, p.To.Val.(*obj.Prog).Opt.(*BasicBlock))
-			}
-
-			if p.As != obj.AJMP && p.Link != nil && p.Link.Opt == nil {
-				p.Link.Opt = newblock(p.Link)
-				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
-			}
-		} else if isselectcommcasecall(p) || isselectgocall(p) {
-			// Accommodate implicit selectgo control flow.
-			if p.Link.Opt == nil {
-				p.Link.Opt = newblock(p.Link)
-				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
-			}
-		}
-	}
-
-	// Loop through all basic blocks maximally growing the list of
-	// contained instructions until a label is reached. Add edges
-	// for branches and fall-through instructions.
-	for _, bb := range cfg {
-		for p := bb.last; p != nil && p.As != obj.AEND; p = p.Link {
-			if p.Opt != nil && p != bb.last {
-				break
-			}
-			bb.last = p
-
-			// Stop before an unreachable RET, to avoid creating
-			// unreachable control flow nodes.
-			if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 {
-				// TODO: remove after SSA is done. SSA does not
-				// generate any unreachable RET instructions.
-				break
-			}
-
-			// Collect basic blocks with selectgo calls.
-			if isselectgocall(p) {
-				selectgo = append(selectgo, bb)
-			}
-		}
-
-		if bb.last.To.Type == obj.TYPE_BRANCH {
-			addedge(bb, bb.last.To.Val.(*obj.Prog).Opt.(*BasicBlock))
-		}
-		if bb.last.Link != nil {
-			// Add a fall-through when the instruction is
-			// not an unconditional control transfer.
-			if bb.last.As != obj.AJMP && bb.last.As != obj.ARET && bb.last.As != obj.AUNDEF {
-				addedge(bb, bb.last.Link.Opt.(*BasicBlock))
-			}
-		}
-	}
-
-	// Add back links so the instructions in a basic block can be traversed
-	// backward. This is the final state of the instruction opt field.
-	for _, bb := range cfg {
-		p := bb.first
-		var prev *obj.Prog
-		for {
-			p.Opt = prev
-			if p == bb.last {
-				break
-			}
-			prev = p
-			p = p.Link
-		}
-	}
-
-	// Add missing successor edges to the selectgo blocks.
-	if len(selectgo) != 0 {
-		fixselectgo(selectgo)
-	}
-
-	// Find a depth-first order and assign a depth-first number to
-	// all basic blocks.
-	for _, bb := range cfg {
-		bb.mark = UNVISITED
-	}
-	bb = cfg[0]
-	rpo := int32(len(cfg))
-	reversepostorder(bb, &rpo)
-
-	// Sort the basic blocks by their depth first number. The
-	// slice is now a depth-first spanning tree with the first
-	// node being the root.
-	sort.Sort(blockrpocmp(cfg))
-
-	// Unreachable control flow nodes are indicated by a -1 in the rpo
-	// field. If we see these nodes something must have gone wrong in an
-	// upstream compilation phase.
-	bb = cfg[0]
-	if bb.rpo == -1 {
-		fmt.Printf("newcfg: unreachable basic block for %v\n", bb.last)
-		printcfg(cfg)
-		Fatalf("newcfg: invalid control flow graph")
-	}
-
-	return cfg
-}
-
-// Frees a control flow graph (a slice of *BasicBlocks) and all of its leaf
-// data structures.
-func freecfg(cfg []*BasicBlock) {
-	if len(cfg) > 0 {
-		bb0 := cfg[0]
-		for p := bb0.first; p != nil; p = p.Link {
-			p.Opt = nil
-		}
-	}
-}
-
-// Returns true if the node names a variable that is otherwise uninteresting to
-// the liveness computation.
-func isfunny(n *Node) bool {
-	return n.Sym != nil && (n.Sym.Name == ".fp" || n.Sym.Name == ".args")
-}
-
-// Computes the effects of an instruction on a set of
-// variables. The vars argument is a slice of *Nodes.
-//
-// The output vectors give bits for variables:
-//	uevar - used by this instruction
-//	varkill - killed by this instruction
-//		for variables without address taken, means variable was set
-//		for variables with address taken, means variable was marked dead
-//	avarinit - initialized or referred to by this instruction,
-//		only for variables with address taken but not escaping to heap
-//
-// The avarinit output serves as a signal that the data has been
-// initialized, because any use of a variable must come after its
-// initialization.
-func progeffects(prog *obj.Prog, vars []*Node, uevar bvec, varkill bvec, avarinit bvec) {
-	uevar.Clear()
-	varkill.Clear()
-	avarinit.Clear()
-
-	// A return instruction with a p.to is a tail return, which brings
-	// the stack pointer back up (if it ever went down) and then jumps
-	// to a new function entirely. That form of instruction must read
-	// all the parameters for correctness, and similarly it must not
-	// read the out arguments - they won't be set until the new
-	// function runs.
-	if (prog.As == obj.AJMP || prog.As == obj.ARET) && prog.To.Type == obj.TYPE_MEM && prog.To.Name == obj.NAME_EXTERN {
-		// This is a tail call. Ensure the arguments are still alive.
-		// See issue 16016.
-		for i, node := range vars {
-			if node.Class == PPARAM {
-				uevar.Set(int32(i))
-			}
-		}
-	}
-
-	if prog.As == obj.ARET {
-		// Return instructions read all of the out arguments.
-		for i, node := range vars {
-			switch node.Class {
-			// If the result had its address taken, it is being tracked
-			// by the avarinit code, which does not use uevar.
-			// If we added it to uevar too, we'd not see any kill
-			// and decide that the variable was live on entry, which it is not.
-			// So only use uevar in the non-addrtaken case.
-			// The p.to.type == obj.TYPE_NONE limits the bvset to
-			// non-tail-call return instructions; see the note above for details.
-			case PPARAMOUT:
-				if !node.Addrtaken && prog.To.Type == obj.TYPE_NONE {
-					uevar.Set(int32(i))
-				}
-			}
-		}
-
-		return
-	}
-
-	if prog.As == obj.ATEXT {
-		// A text instruction marks the entry point to a function and
-		// the definition point of all in arguments.
-		for i, node := range vars {
-			switch node.Class {
-			case PPARAM:
-				if node.Addrtaken {
-					avarinit.Set(int32(i))
-				}
-				varkill.Set(int32(i))
-			}
-		}
-
-		return
-	}
-
-	info := Thearch.Proginfo(prog)
-
-	if info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
-		from := &prog.From
-		if from.Node != nil && from.Sym != nil {
-			n := from.Node.(*Node)
-			if pos := liveIndex(n, vars); pos >= 0 {
-				if n.Addrtaken {
-					avarinit.Set(pos)
-				} else {
-					if info.Flags&(LeftRead|LeftAddr) != 0 {
-						uevar.Set(pos)
-					}
-					if info.Flags&LeftWrite != 0 {
-						if !isfat(n.Type) {
-							varkill.Set(pos)
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if info.Flags&From3Read != 0 {
-		from := prog.From3
-		if from.Node != nil && from.Sym != nil {
-			n := from.Node.(*Node)
-			if pos := liveIndex(n, vars); pos >= 0 {
-				if n.Addrtaken {
-					avarinit.Set(pos)
-				} else {
-					uevar.Set(pos)
-				}
-			}
-		}
-	}
-
-	if info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
-		to := &prog.To
-		if to.Node != nil && to.Sym != nil {
-			n := to.Node.(*Node)
-			if pos := liveIndex(n, vars); pos >= 0 {
-				if n.Addrtaken {
-					if prog.As != obj.AVARKILL {
-						avarinit.Set(pos)
-					}
-					if prog.As == obj.AVARDEF || prog.As == obj.AVARKILL {
-						varkill.Set(pos)
-					}
-				} else {
-					// RightRead is a read, obviously.
-					// RightAddr by itself is also implicitly a read.
-					//
-					// RightAddr|RightWrite means that the address is being taken
-					// but only so that the instruction can write to the value.
-					// It is not a read. It is equivalent to RightWrite except that
-					// having the RightAddr bit set keeps the registerizer from
-					// trying to substitute a register for the memory location.
-					if (info.Flags&RightRead != 0) || info.Flags&(RightAddr|RightWrite) == RightAddr {
-						uevar.Set(pos)
-					}
-					if info.Flags&RightWrite != 0 {
-						if !isfat(n.Type) || prog.As == obj.AVARDEF {
-							varkill.Set(pos)
-						}
-					}
-				}
-			}
-		}
-	}
-}
-
-// liveIndex returns the index of n in the set of tracked vars.
-// If n is not a tracked var, liveIndex returns -1.
-// If n is not a tracked var but should be tracked, liveIndex crashes.
-func liveIndex(n *Node, vars []*Node) int32 {
-	if n.Name.Curfn != Curfn || !livenessShouldTrack(n) {
-		return -1
-	}
-
-	pos, ok := n.Opt().(int32) // index in vars
-	if !ok {
-		Fatalf("lost track of variable in liveness: %v (%p, %p)", n, n, n.Orig)
-	}
-	if pos >= int32(len(vars)) || vars[pos] != n {
-		Fatalf("bad bookkeeping in liveness: %v (%p, %p)", n, n, n.Orig)
-	}
-	return pos
-}
-
-// Constructs a new liveness structure used to hold the global state of the
-// liveness computation. The cfg argument is a slice of *BasicBlocks and the
-// vars argument is a slice of *Nodes.
-func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
-	result := Liveness{
-		fn:   fn,
-		ptxt: ptxt,
-		cfg:  cfg,
-		vars: vars,
-	}
-
-	nblocks := int32(len(cfg))
-	nvars := int32(len(vars))
-	bulk := bvbulkalloc(nvars, nblocks*7)
-	for _, bb := range cfg {
-		bb.uevar = bulk.next()
-		bb.varkill = bulk.next()
-		bb.livein = bulk.next()
-		bb.liveout = bulk.next()
-		bb.avarinit = bulk.next()
-		bb.avarinitany = bulk.next()
-		bb.avarinitall = bulk.next()
-	}
-	return &result
-}
-
-func printeffects(p *obj.Prog, uevar bvec, varkill bvec, avarinit bvec) {
-	fmt.Printf("effects of %v\n", p)
-	fmt.Println("uevar:", uevar)
-	fmt.Println("varkill:", varkill)
-	fmt.Println("avarinit:", avarinit)
-}
-
-// Pretty print a variable node. Uses Pascal-like conventions for pointers and
-// addresses to avoid confusion with the C-like conventions used in the node
-// variable names.
-func printnode(node *Node) {
-	p := ""
-	if haspointers(node.Type) {
-		p = "^"
-	}
-	a := ""
-	if node.Addrtaken {
-		a = "@"
-	}
-	fmt.Printf(" %v%s%s", node, p, a)
-}
-
-// Pretty print a list of variables. The vars argument is a slice of *Nodes.
-func printvars(name string, bv bvec, vars []*Node) {
-	fmt.Printf("%s:", name)
-	for i, node := range vars {
-		if bv.Get(int32(i)) {
-			printnode(node)
-		}
-	}
-	fmt.Printf("\n")
-}
-
-// Prints a basic block annotated with the information computed by liveness
-// analysis.
-func livenessprintblock(lv *Liveness, bb *BasicBlock) {
-	fmt.Printf("basic block %d\n", bb.rpo)
-
-	fmt.Printf("\tpred:")
-	for _, pred := range bb.pred {
-		fmt.Printf(" %d", pred.rpo)
-	}
-	fmt.Printf("\n")
-
-	fmt.Printf("\tsucc:")
-	for _, succ := range bb.succ {
-		fmt.Printf(" %d", succ.rpo)
-	}
-	fmt.Printf("\n")
-
-	printvars("\tuevar", bb.uevar, lv.vars)
-	printvars("\tvarkill", bb.varkill, lv.vars)
-	printvars("\tlivein", bb.livein, lv.vars)
-	printvars("\tliveout", bb.liveout, lv.vars)
-	printvars("\tavarinit", bb.avarinit, lv.vars)
-	printvars("\tavarinitany", bb.avarinitany, lv.vars)
-	printvars("\tavarinitall", bb.avarinitall, lv.vars)
-
-	fmt.Printf("\tprog:\n")
-	for prog := bb.first; ; prog = prog.Link {
-		fmt.Printf("\t\t%v", prog)
-		if prog.As == obj.APCDATA && prog.From.Offset == obj.PCDATA_StackMapIndex {
-			pos := int32(prog.To.Offset)
-			live := lv.livepointers[pos]
-			fmt.Printf(" %s", live.String())
-		}
-
-		fmt.Printf("\n")
-		if prog == bb.last {
-			break
-		}
-	}
-}
-
-// Prints a control flow graph annotated with any information computed by
-// liveness analysis.
-func livenessprintcfg(lv *Liveness) {
-	for _, bb := range lv.cfg {
-		livenessprintblock(lv, bb)
-	}
-}
-
-func checkauto(fn *Node, p *obj.Prog, n *Node) {
-	for _, ln := range fn.Func.Dcl {
-		if ln.Op == ONAME && ln.Class == PAUTO && ln == n {
-			return
-		}
-	}
-
-	if n == nil {
-		fmt.Printf("%v: checkauto %v: nil node in %v\n", p.Line(), Curfn, p)
-		return
-	}
-
-	fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %p %v\n", funcSym(Curfn), n, n, n.Class, p, p)
-	for _, ln := range fn.Func.Dcl {
-		fmt.Printf("\t%v (%p; class=%d)\n", ln, ln, ln.Class)
-	}
-	yyerror("checkauto: invariant lost")
-}
-
-func checkparam(fn *Node, p *obj.Prog, n *Node) {
-	if isfunny(n) {
-		return
-	}
-	for _, a := range fn.Func.Dcl {
-		if a.Op == ONAME && (a.Class == PPARAM || a.Class == PPARAMOUT) && a == n {
-			return
-		}
-	}
-
-	fmt.Printf("checkparam %v: %v (%p; class=%d) not found in %v\n", Curfn, n, n, n.Class, p)
-	for _, ln := range fn.Func.Dcl {
-		fmt.Printf("\t%v (%p; class=%d)\n", ln, ln, ln.Class)
-	}
-	yyerror("checkparam: invariant lost")
-}
-
-func checkprog(fn *Node, p *obj.Prog) {
-	if p.From.Name == obj.NAME_AUTO {
-		checkauto(fn, p, p.From.Node.(*Node))
-	}
-	if p.From.Name == obj.NAME_PARAM {
-		checkparam(fn, p, p.From.Node.(*Node))
-	}
-	if p.To.Name == obj.NAME_AUTO {
-		checkauto(fn, p, p.To.Node.(*Node))
-	}
-	if p.To.Name == obj.NAME_PARAM {
-		checkparam(fn, p, p.To.Node.(*Node))
-	}
-}
-
-// Check instruction invariants. We assume that the nodes corresponding to the
-// sources and destinations of memory operations will be declared in the
-// function. This is not strictly true: the so-called funny nodes are an
-// exception, and there are special cases to skip over them. The analysis will
-// fail if this invariant is changed without updating these checks.
-func checkptxt(fn *Node, firstp *obj.Prog) {
-	if debuglive == 0 {
-		return
-	}
-
-	for p := firstp; p != nil; p = p.Link {
-		if false {
-			fmt.Printf("analyzing '%v'\n", p)
-		}
-		if p.As != obj.ATYPE {
-			checkprog(fn, p)
-		}
-	}
-}
-
-// NOTE: The bitmap for a specific type t should be cached in t after the first run
-// and then simply copied into bv at the correct offset on future calls with
-// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1
-// accounts for 40% of the 6g execution time.
-func onebitwalktype1(t *Type, xoffset *int64, bv bvec) {
-	if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
-		Fatalf("onebitwalktype1: invalid initial alignment, %v", t)
-	}
-
-	switch t.Etype {
-	case TINT8,
-		TUINT8,
-		TINT16,
-		TUINT16,
-		TINT32,
-		TUINT32,
-		TINT64,
-		TUINT64,
-		TINT,
-		TUINT,
-		TUINTPTR,
-		TBOOL,
-		TFLOAT32,
-		TFLOAT64,
-		TCOMPLEX64,
-		TCOMPLEX128:
-		*xoffset += t.Width
-
-	case TPTR32,
-		TPTR64,
-		TUNSAFEPTR,
-		TFUNC,
-		TCHAN,
-		TMAP:
-		if *xoffset&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid alignment, %v", t)
-		}
-		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer
-		*xoffset += t.Width
-
-	case TSTRING:
-		// struct { byte *str; intgo len; }
-		if *xoffset&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid alignment, %v", t)
-		}
-		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot
-		*xoffset += t.Width
-
-	case TINTER:
-		// struct { Itab *tab;	void *data; }
-		// or, when isnilinter(t)==true:
-		// struct { Type *type; void *data; }
-		if *xoffset&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid alignment, %v", t)
-		}
-		bv.Set(int32(*xoffset / int64(Widthptr)))   // pointer in first slot
-		bv.Set(int32(*xoffset/int64(Widthptr) + 1)) // pointer in second slot
-		*xoffset += t.Width
-
-	case TSLICE:
-		// struct { byte *array; uintgo len; uintgo cap; }
-		if *xoffset&int64(Widthptr-1) != 0 {
-			Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
-		}
-		bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot (BitsPointer)
-		*xoffset += t.Width
-
-	case TARRAY:
-		for i := int64(0); i < t.NumElem(); i++ {
-			onebitwalktype1(t.Elem(), xoffset, bv)
-		}
-
-	case TSTRUCT:
-		var o int64
-		for _, t1 := range t.Fields().Slice() {
-			fieldoffset := t1.Offset
-			*xoffset += fieldoffset - o
-			onebitwalktype1(t1.Type, xoffset, bv)
-			o = fieldoffset + t1.Type.Width
-		}
-
-		*xoffset += t.Width - o
-
-	default:
-		Fatalf("onebitwalktype1: unexpected type, %v", t)
-	}
-}
-
-// Returns the number of words of local variables.
-func localswords() int32 {
-	return int32(stkptrsize / int64(Widthptr))
-}
-
-// Returns the number of words of in and out arguments.
-func argswords() int32 {
-	return int32(Curfn.Type.ArgWidth() / int64(Widthptr))
-}
-
-// Generates live pointer value maps for arguments and local variables. The
-// this argument and the in arguments are always assumed live. The vars
-// argument is a slice of *Nodes.
-func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, locals bvec) {
-	var xoffset int64
-
-	for i := int32(0); ; i++ {
-		i = liveout.Next(i)
-		if i < 0 {
-			break
-		}
-		node := vars[i]
-		switch node.Class {
-		case PAUTO:
-			xoffset = node.Xoffset + stkptrsize
-			onebitwalktype1(node.Type, &xoffset, locals)
-
-		case PPARAM, PPARAMOUT:
-			xoffset = node.Xoffset
-			onebitwalktype1(node.Type, &xoffset, args)
-		}
-	}
-}
-
-// Construct a disembodied instruction.
-func unlinkedprog(as obj.As) *obj.Prog {
-	p := Ctxt.NewProg()
-	Clearp(p)
-	p.As = as
-	return p
-}
-
-// Construct a new PCDATA instruction associated with, and covering, an
-// existing instruction.
-func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
-	pcdata := unlinkedprog(obj.APCDATA)
-	pcdata.Lineno = prog.Lineno
-	pcdata.From.Type = obj.TYPE_CONST
-	pcdata.From.Offset = obj.PCDATA_StackMapIndex
-	pcdata.To.Type = obj.TYPE_CONST
-	pcdata.To.Offset = int64(index)
-	return pcdata
-}
-
-// Returns true for instructions that are safe points that must be annotated
-// with liveness information.
-func issafepoint(prog *obj.Prog) bool {
-	return prog.As == obj.ATEXT || prog.As == obj.ACALL
-}
-
-// Initializes the sets for solving the live variables. Visits all the
-// instructions in each basic block to summarize the information at each basic
-// block.
-func livenessprologue(lv *Liveness) {
-	nvars := int32(len(lv.vars))
-	uevar := bvalloc(nvars)
-	varkill := bvalloc(nvars)
-	avarinit := bvalloc(nvars)
-	for _, bb := range lv.cfg {
-		// Walk the block instructions backward and update the block
-		// effects with the effects of each prog.
-		for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
-			progeffects(p, lv.vars, uevar, varkill, avarinit)
-			if debuglive >= 3 {
-				printeffects(p, uevar, varkill, avarinit)
-			}
-			bb.varkill.Or(bb.varkill, varkill)
-			bb.uevar.AndNot(bb.uevar, varkill)
-			bb.uevar.Or(bb.uevar, uevar)
-		}
-
-		// Walk the block instructions forward to update avarinit bits.
-		// avarinit describes the effect at the end of the block, not the beginning.
-		varkill.Clear()
-
-		for p := bb.first; ; p = p.Link {
-			progeffects(p, lv.vars, uevar, varkill, avarinit)
-			if debuglive >= 3 {
-				printeffects(p, uevar, varkill, avarinit)
-			}
-			bb.avarinit.AndNot(bb.avarinit, varkill)
-			bb.avarinit.Or(bb.avarinit, avarinit)
-			if p == bb.last {
-				break
-			}
-		}
-	}
-}
-
-// Solve the liveness dataflow equations.
-func livenesssolve(lv *Liveness) {
-	// These temporary bitvectors exist to avoid successive allocations and
-	// frees within the loop.
-	newlivein := bvalloc(int32(len(lv.vars)))
-
-	newliveout := bvalloc(int32(len(lv.vars)))
-	any := bvalloc(int32(len(lv.vars)))
-	all := bvalloc(int32(len(lv.vars)))
-
-	// Push avarinitall, avarinitany forward.
-	// avarinitall says the addressed var is initialized along all paths reaching the block exit.
-	// avarinitany says the addressed var is initialized along some path reaching the block exit.
-	for i, bb := range lv.cfg {
-		if i == 0 {
-			bb.avarinitall.Copy(bb.avarinit)
-		} else {
-			bb.avarinitall.Clear()
-			bb.avarinitall.Not()
-		}
-		bb.avarinitany.Copy(bb.avarinit)
-	}
-
-	for change := true; change; {
-		change = false
-		for _, bb := range lv.cfg {
-			any.Clear()
-			all.Clear()
-			for j, pred := range bb.pred {
-				if j == 0 {
-					any.Copy(pred.avarinitany)
-					all.Copy(pred.avarinitall)
-				} else {
-					any.Or(any, pred.avarinitany)
-					all.And(all, pred.avarinitall)
-				}
-			}
-
-			any.AndNot(any, bb.varkill)
-			all.AndNot(all, bb.varkill)
-			any.Or(any, bb.avarinit)
-			all.Or(all, bb.avarinit)
-			if !any.Eq(bb.avarinitany) {
-				change = true
-				bb.avarinitany.Copy(any)
-			}
-
-			if !all.Eq(bb.avarinitall) {
-				change = true
-				bb.avarinitall.Copy(all)
-			}
-		}
-	}
-
-	// Iterate through the blocks in reverse round-robin fashion. A work
-	// queue might be slightly faster. As is, the number of iterations is
-	// so low that it hardly seems to be worth the complexity.
-
-	for change := true; change; {
-		change = false
-
-		// Walk blocks in the general direction of propagation. This
-		// improves convergence.
-		for i := len(lv.cfg) - 1; i >= 0; i-- {
-			bb := lv.cfg[i]
-
-			// A variable is live on output from this block
-			// if it is live on input to some successor.
-			//
-			// out[b] = \bigcup_{s \in succ[b]} in[s]
-			newliveout.Clear()
-			for _, succ := range bb.succ {
-				newliveout.Or(newliveout, succ.livein)
-			}
-
-			if !bb.liveout.Eq(newliveout) {
-				change = true
-				bb.liveout.Copy(newliveout)
-			}
-
-			// A variable is live on input to this block
-			// if it is live on output from this block and
-			// not set by the code in this block.
-			//
-			// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
-			newlivein.AndNot(bb.liveout, bb.varkill)
-
-			bb.livein.Or(newlivein, bb.uevar)
-		}
-	}
-}
-
-// This function is slow but it is only used for generating debug prints.
-// Check whether n is marked live in args/locals.
-func islive(n *Node, args bvec, locals bvec) bool {
-	switch n.Class {
-	case PPARAM, PPARAMOUT:
-		for i := 0; int64(i) < n.Type.Width/int64(Widthptr); i++ {
-			if args.Get(int32(n.Xoffset/int64(Widthptr) + int64(i))) {
-				return true
-			}
-		}
-
-	case PAUTO:
-		for i := 0; int64(i) < n.Type.Width/int64(Widthptr); i++ {
-			if locals.Get(int32((n.Xoffset+stkptrsize)/int64(Widthptr) + int64(i))) {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-// Visits all instructions in a basic block and computes a bit vector of live
-// variables at each safe point location.
-func livenessepilogue(lv *Liveness) {
-	nvars := int32(len(lv.vars))
-	livein := bvalloc(nvars)
-	liveout := bvalloc(nvars)
-	uevar := bvalloc(nvars)
-	varkill := bvalloc(nvars)
-	avarinit := bvalloc(nvars)
-	any := bvalloc(nvars)
-	all := bvalloc(nvars)
-	pparamout := bvalloc(localswords())
-
-	// Record pointers to heap-allocated pparamout variables.  These
-	// are implicitly read by post-deferreturn code and thus must be
-	// kept live throughout the function (if there is any defer that
-	// recovers).
-	if hasdefer {
-		for _, n := range lv.vars {
-			if n.IsOutputParamHeapAddr() {
-				n.Name.Needzero = true
-				xoffset := n.Xoffset + stkptrsize
-				onebitwalktype1(n.Type, &xoffset, pparamout)
-			}
-		}
-	}
-
-	for _, bb := range lv.cfg {
-		// Compute avarinitany and avarinitall for entry to block.
-		// This duplicates information known during livenesssolve
-		// but avoids storing two more vectors for each block.
-		any.Clear()
-
-		all.Clear()
-		for j := 0; j < len(bb.pred); j++ {
-			pred := bb.pred[j]
-			if j == 0 {
-				any.Copy(pred.avarinitany)
-				all.Copy(pred.avarinitall)
-			} else {
-				any.Or(any, pred.avarinitany)
-				all.And(all, pred.avarinitall)
-			}
-		}
-
-		// Walk forward through the basic block instructions and
-		// allocate liveness maps for those instructions that need them.
-		// Seed the maps with information about the addrtaken variables.
-		for p := bb.first; ; p = p.Link {
-			progeffects(p, lv.vars, uevar, varkill, avarinit)
-			any.AndNot(any, varkill)
-			all.AndNot(all, varkill)
-			any.Or(any, avarinit)
-			all.Or(all, avarinit)
-
-			if issafepoint(p) {
-				// Annotate ambiguously live variables so that they can
-				// be zeroed at function entry.
-				// livein and liveout are dead here and used as temporaries.
-				livein.Clear()
-
-				liveout.AndNot(any, all)
-				if !liveout.IsEmpty() {
-					for pos := int32(0); pos < liveout.n; pos++ {
-						if !liveout.Get(pos) {
-							continue
-						}
-						all.Set(pos) // silence future warnings in this block
-						n := lv.vars[pos]
-						if !n.Name.Needzero {
-							n.Name.Needzero = true
-							if debuglive >= 1 {
-								Warnl(p.Lineno, "%v: %L is ambiguously live", Curfn.Func.Nname, n)
-							}
-						}
-					}
-				}
-
-				// Allocate a bit vector for each class and facet of
-				// value we are tracking.
-
-				// Live stuff first.
-				args := bvalloc(argswords())
-
-				lv.argslivepointers = append(lv.argslivepointers, args)
-				locals := bvalloc(localswords())
-				lv.livepointers = append(lv.livepointers, locals)
-
-				if debuglive >= 3 {
-					fmt.Printf("%v\n", p)
-					printvars("avarinitany", any, lv.vars)
-				}
-
-				// Record any values with an "address taken" reaching
-				// this code position as live. Must do now instead of below
-				// because the any/all calculation requires walking forward
-				// over the block (as this loop does), while the liveout
-				// requires walking backward (as the next loop does).
-				onebitlivepointermap(lv, any, lv.vars, args, locals)
-			}
-
-			if p == bb.last {
-				break
-			}
-		}
-
-		bb.lastbitmapindex = len(lv.livepointers) - 1
-	}
-
-	var msg []string
-	var nmsg, startmsg int
-	for _, bb := range lv.cfg {
-		if debuglive >= 1 && Curfn.Func.Nname.Sym.Name != "init" && Curfn.Func.Nname.Sym.Name[0] != '.' {
-			nmsg = len(lv.livepointers)
-			startmsg = nmsg
-			msg = make([]string, nmsg)
-			for j := 0; j < nmsg; j++ {
-				msg[j] = ""
-			}
-		}
-
-		// walk backward, emit pcdata and populate the maps
-		pos := int32(bb.lastbitmapindex)
-
-		if pos < 0 {
-			// the first block we encounter should have the ATEXT so
-			// at no point should pos ever be less than zero.
-			Fatalf("livenessepilogue")
-		}
-
-		livein.Copy(bb.liveout)
-		var next *obj.Prog
-		for p := bb.last; p != nil; p = next {
-			next = p.Opt.(*obj.Prog) // splicebefore modifies p.opt
-
-			// Propagate liveness information
-			progeffects(p, lv.vars, uevar, varkill, avarinit)
-
-			liveout.Copy(livein)
-			livein.AndNot(liveout, varkill)
-			livein.Or(livein, uevar)
-			if debuglive >= 3 && issafepoint(p) {
-				fmt.Printf("%v\n", p)
-				printvars("uevar", uevar, lv.vars)
-				printvars("varkill", varkill, lv.vars)
-				printvars("livein", livein, lv.vars)
-				printvars("liveout", liveout, lv.vars)
-			}
-
-			if issafepoint(p) {
-				// Found an interesting instruction, record the
-				// corresponding liveness information.
-
-				// Useful sanity check: on entry to the function,
-				// the only things that can possibly be live are the
-				// input parameters.
-				if p.As == obj.ATEXT {
-					for j := int32(0); j < liveout.n; j++ {
-						if !liveout.Get(j) {
-							continue
-						}
-						n := lv.vars[j]
-						if n.Class != PPARAM {
-							yyerrorl(p.Lineno, "internal error: %v %L recorded as live on entry, p.Pc=%v", Curfn.Func.Nname, n, p.Pc)
-						}
-					}
-				}
-
-				// Record live pointers.
-				args := lv.argslivepointers[pos]
-
-				locals := lv.livepointers[pos]
-				onebitlivepointermap(lv, liveout, lv.vars, args, locals)
-
-				// Mark pparamout variables (as described above)
-				if p.As == obj.ACALL {
-					locals.Or(locals, pparamout)
-				}
-
-				// Show live pointer bitmaps.
-				// We're interpreting the args and locals bitmap instead of liveout so that we
-				// include the bits added by the avarinit logic in the
-				// previous loop.
-				if msg != nil {
-					fmt_ := fmt.Sprintf("%v: live at ", p.Line())
-					if p.As == obj.ACALL && p.To.Sym != nil {
-						name := p.To.Sym.Name
-						i := strings.Index(name, ".")
-						if i >= 0 {
-							name = name[i+1:]
-						}
-						fmt_ += fmt.Sprintf("call to %s:", name)
-					} else if p.As == obj.ACALL {
-						fmt_ += "indirect call:"
-					} else {
-						fmt_ += fmt.Sprintf("entry to %s:", ((p.From.Node).(*Node)).Sym.Name)
-					}
-					numlive := 0
-					for j := 0; j < len(lv.vars); j++ {
-						n := lv.vars[j]
-						if islive(n, args, locals) {
-							fmt_ += fmt.Sprintf(" %v", n)
-							numlive++
-						}
-					}
-
-					fmt_ += "\n"
-					if numlive == 0 { // squelch message
-
-					} else {
-						startmsg--
-						msg[startmsg] = fmt_
-					}
-				}
-
-				// Only CALL instructions need a PCDATA annotation.
-				// The TEXT instruction annotation is implicit.
-				if p.As == obj.ACALL {
-					if isdeferreturn(p) {
-						// runtime.deferreturn modifies its return address to return
-						// back to the CALL, not to the subsequent instruction.
-						// Because the return comes back one instruction early,
-						// the PCDATA must begin one instruction early too.
-						// The instruction before a call to deferreturn is always a
-						// no-op, to keep PC-specific data unambiguous.
-						prev := p.Opt.(*obj.Prog)
-						if Ctxt.Arch.Family == sys.PPC64 {
-							// On ppc64 there is an additional instruction
-							// (another no-op or reload of toc pointer) before
-							// the call.
-							prev = prev.Opt.(*obj.Prog)
-						}
-						splicebefore(lv, bb, newpcdataprog(prev, pos), prev)
-					} else {
-						splicebefore(lv, bb, newpcdataprog(p, pos), p)
-					}
-				}
-
-				pos--
-			}
-		}
-
-		if msg != nil {
-			for j := startmsg; j < nmsg; j++ {
-				if msg[j] != "" {
-					fmt.Printf("%s", msg[j])
-				}
-			}
-
-			msg = nil
-			nmsg = 0
-			startmsg = 0
-		}
-	}
-
-	flusherrors()
-}
-
-// FNV-1 hash function constants.
-const (
-	H0 = 2166136261
-	Hp = 16777619
-)
-
-func hashbitmap(h uint32, bv bvec) uint32 {
-	n := int((bv.n + 31) / 32)
-	for i := 0; i < n; i++ {
-		w := bv.b[i]
-		h = (h * Hp) ^ (w & 0xff)
-		h = (h * Hp) ^ ((w >> 8) & 0xff)
-		h = (h * Hp) ^ ((w >> 16) & 0xff)
-		h = (h * Hp) ^ ((w >> 24) & 0xff)
-	}
-
-	return h
-}
-
-// Compact liveness information by coalescing identical per-call-site bitmaps.
-// The merging only happens for a single function, not across the entire binary.
-//
-// There are actually two lists of bitmaps, one list for the local variables and one
-// list for the function arguments. Both lists are indexed by the same PCDATA
-// index, so the corresponding pairs must be considered together when
-// merging duplicates. The argument bitmaps change much less often during
-// function execution than the local variable bitmaps, so it is possible that
-// we could introduce a separate PCDATA index for arguments vs locals and
-// then compact the set of argument bitmaps separately from the set of
-// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
-// is actually a net loss: we save about 50k of argument bitmaps but the new
-// PCDATA tables cost about 100k. So for now we keep using a single index for
-// both bitmap lists.
-func livenesscompact(lv *Liveness) {
-	// Linear probing hash table of bitmaps seen so far.
-	// The hash table has 4n entries to keep the linear
-	// scan short. An entry of -1 indicates an empty slot.
-	n := len(lv.livepointers)
-
-	tablesize := 4 * n
-	table := make([]int, tablesize)
-	for i := range table {
-		table[i] = -1
-	}
-
-	// remap[i] = the new index of the old bit vector #i.
-	remap := make([]int, n)
-
-	for i := range remap {
-		remap[i] = -1
-	}
-	uniq := 0 // unique tables found so far
-
-	// Consider bit vectors in turn.
-	// If new, assign next number using uniq,
-	// record in remap, record in lv.livepointers and lv.argslivepointers
-	// under the new index, and add entry to hash table.
-	// If already seen, record earlier index in remap and free bitmaps.
-	for i := 0; i < n; i++ {
-		local := lv.livepointers[i]
-		arg := lv.argslivepointers[i]
-		h := hashbitmap(hashbitmap(H0, local), arg) % uint32(tablesize)
-
-		for {
-			j := table[h]
-			if j < 0 {
-				break
-			}
-			jlocal := lv.livepointers[j]
-			jarg := lv.argslivepointers[j]
-			if local.Eq(jlocal) && arg.Eq(jarg) {
-				remap[i] = j
-				goto Next
-			}
-
-			h++
-			if h == uint32(tablesize) {
-				h = 0
-			}
-		}
-
-		table[h] = uniq
-		remap[i] = uniq
-		lv.livepointers[uniq] = local
-		lv.argslivepointers[uniq] = arg
-		uniq++
-	Next:
-	}
-
-	// We've already reordered lv.livepointers[0:uniq]
-	// and lv.argslivepointers[0:uniq] and freed the bitmaps
-	// we don't need anymore. Clear the pointers later in the
-	// array so that we can tell where the coalesced bitmaps stop
-	// and so that we don't double-free when cleaning up.
-	for j := uniq; j < n; j++ {
-		lv.livepointers[j] = bvec{}
-		lv.argslivepointers[j] = bvec{}
-	}
-
-	// Rewrite PCDATA instructions to use new numbering.
-	for p := lv.ptxt; p != nil; p = p.Link {
-		if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
-			i := p.To.Offset
-			if i >= 0 {
-				p.To.Offset = int64(remap[i])
-			}
-		}
-	}
-}
-
-func printbitset(printed bool, name string, vars []*Node, bits bvec) bool {
-	started := false
-	for i, n := range vars {
-		if !bits.Get(int32(i)) {
-			continue
-		}
-		if !started {
-			if !printed {
-				fmt.Printf("\t")
-			} else {
-				fmt.Printf(" ")
-			}
-			started = true
-			printed = true
-			fmt.Printf("%s=", name)
-		} else {
-			fmt.Printf(",")
-		}
-
-		fmt.Printf("%s", n.Sym.Name)
-	}
-
-	return printed
-}
-
-// Prints the computed liveness information and inputs, for debugging.
-// This format synthesizes the information used during the multiple passes
-// into a single presentation.
-func livenessprintdebug(lv *Liveness) {
-	fmt.Printf("liveness: %s\n", Curfn.Func.Nname.Sym.Name)
-
-	uevar := bvalloc(int32(len(lv.vars)))
-	varkill := bvalloc(int32(len(lv.vars)))
-	avarinit := bvalloc(int32(len(lv.vars)))
-
-	pcdata := 0
-	for i, bb := range lv.cfg {
-		if i > 0 {
-			fmt.Printf("\n")
-		}
-
-		// bb#0 pred=1,2 succ=3,4
-		fmt.Printf("bb#%d pred=", i)
-
-		for j := 0; j < len(bb.pred); j++ {
-			if j > 0 {
-				fmt.Printf(",")
-			}
-			fmt.Printf("%d", (bb.pred[j]).rpo)
-		}
-
-		fmt.Printf(" succ=")
-		for j := 0; j < len(bb.succ); j++ {
-			if j > 0 {
-				fmt.Printf(",")
-			}
-			fmt.Printf("%d", (bb.succ[j]).rpo)
-		}
-
-		fmt.Printf("\n")
-
-		// initial settings
-		var printed bool
-
-		printed = printbitset(printed, "uevar", lv.vars, bb.uevar)
-		printed = printbitset(printed, "livein", lv.vars, bb.livein)
-		if printed {
-			fmt.Printf("\n")
-		}
-
-		// program listing, with individual effects listed
-		for p := bb.first; ; p = p.Link {
-			fmt.Printf("%v\n", p)
-			if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
-				pcdata = int(p.To.Offset)
-			}
-			progeffects(p, lv.vars, uevar, varkill, avarinit)
-			printed = false
-			printed = printbitset(printed, "uevar", lv.vars, uevar)
-			printed = printbitset(printed, "varkill", lv.vars, varkill)
-			printed = printbitset(printed, "avarinit", lv.vars, avarinit)
-			if printed {
-				fmt.Printf("\n")
-			}
-			if issafepoint(p) {
-				args := lv.argslivepointers[pcdata]
-				locals := lv.livepointers[pcdata]
-				fmt.Printf("\tlive=")
-				printed = false
-				for j := 0; j < len(lv.vars); j++ {
-					n := lv.vars[j]
-					if islive(n, args, locals) {
-						if printed {
-							fmt.Printf(",")
-						}
-						fmt.Printf("%v", n)
-						printed = true
-					}
-				}
-				fmt.Printf("\n")
-			}
-
-			if p == bb.last {
-				break
-			}
-		}
-
-		// bb bitsets
-		fmt.Printf("end\n")
-
-		printed = printbitset(printed, "varkill", lv.vars, bb.varkill)
-		printed = printbitset(printed, "liveout", lv.vars, bb.liveout)
-		printed = printbitset(printed, "avarinit", lv.vars, bb.avarinit)
-		printed = printbitset(printed, "avarinitany", lv.vars, bb.avarinitany)
-		printed = printbitset(printed, "avarinitall", lv.vars, bb.avarinitall)
-		if printed {
-			fmt.Printf("\n")
-		}
-	}
-
-	fmt.Printf("\n")
-}
-
-// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
-// first word dumped is the total number of bitmaps. The second word is the
-// length of the bitmaps. All bitmaps are assumed to be of equal length. The
-// remaining bytes are the raw bitmaps.
-func onebitwritesymbol(arr []bvec, sym *Sym) {
-	off := 4                                  // number of bitmaps, to fill in later
-	off = duint32(sym, off, uint32(arr[0].n)) // number of bits in each bitmap
-	var i int
-	for i = 0; i < len(arr); i++ {
-		// bitmap words
-		bv := arr[i]
-
-		if bv.b == nil {
-			break
-		}
-		off = dbvec(sym, off, bv)
-	}
-
-	duint32(sym, 0, uint32(i)) // number of bitmaps
-	ls := Linksym(sym)
-	ls.Name = fmt.Sprintf("gclocals·%x", md5.Sum(ls.P))
-	ls.Set(obj.AttrDuplicateOK, true)
-	sv := obj.SymVer{Name: ls.Name, Version: 0}
-	ls2, ok := Ctxt.Hash[sv]
-	if ok {
-		sym.Lsym = ls2
-	} else {
-		Ctxt.Hash[sv] = ls
-		ggloblsym(sym, int32(off), obj.RODATA)
-	}
-}
-
-func printprog(p *obj.Prog) {
-	for p != nil {
-		fmt.Printf("%v\n", p)
-		p = p.Link
-	}
-}
-
-// Entry point for liveness analysis. Constructs a complete CFG, solves for
-// the liveness of pointer variables in the function, and emits a runtime data
-// structure read by the garbage collector.
-func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
-	// Change name to dump debugging information only for a specific function.
-	debugdelta := 0
-
-	if Curfn.Func.Nname.Sym.Name == "!" {
-		debugdelta = 2
-	}
-
-	debuglive += debugdelta
-	if debuglive >= 3 {
-		fmt.Printf("liveness: %s\n", Curfn.Func.Nname.Sym.Name)
-		printprog(firstp)
-	}
-
-	checkptxt(fn, firstp)
-
-	// Construct the global liveness state.
-	cfg := newcfg(firstp)
-
-	if debuglive >= 3 {
-		printcfg(cfg)
-	}
-	vars := getvariables(fn)
-	lv := newliveness(fn, firstp, cfg, vars)
-
-	// Run the dataflow framework.
-	livenessprologue(lv)
-
-	if debuglive >= 3 {
-		livenessprintcfg(lv)
-	}
-	livenesssolve(lv)
-	if debuglive >= 3 {
-		livenessprintcfg(lv)
-	}
-	livenessepilogue(lv)
-	if debuglive >= 3 {
-		livenessprintcfg(lv)
-	}
-	livenesscompact(lv)
-
-	if debuglive >= 2 {
-		livenessprintdebug(lv)
-	}
-
-	// Emit the live pointer map data structures
-	onebitwritesymbol(lv.livepointers, livesym)
-
-	onebitwritesymbol(lv.argslivepointers, argssym)
-
-	// Free everything.
-	for _, ln := range fn.Func.Dcl {
-		if ln != nil {
-			ln.SetOpt(nil)
-		}
-	}
-
-	freecfg(cfg)
-
-	debuglive -= debugdelta
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/racewalk.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/racewalk.go
deleted file mode 100644
index 1f41595..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/racewalk.go
+++ /dev/null
@@ -1,644 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/racewalk.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/racewalk.go:1
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"strings"
-)
-
-// The instrument pass modifies the code tree for instrumentation.
-//
-// For flag_race it modifies the function as follows:
-//
-// 1. It inserts a call to racefuncenterfp at the beginning of each function.
-// 2. It inserts a call to racefuncexit at the end of each function.
-// 3. It inserts a call to raceread before each memory read.
-// 4. It inserts a call to racewrite before each memory write.
-//
-// For flag_msan:
-//
-// 1. It inserts a call to msanread before each memory read.
-// 2. It inserts a call to msanwrite before each memory write.
-//
-// The rewriting is not yet complete. Certain nodes are not rewritten
-// but should be.
-
-// TODO(dvyukov): do not instrument initialization as writes:
-// a := make([]int, 10)
-
-// Do not instrument the following packages at all,
-// at best instrumentation would cause infinite recursion.
-var omit_pkgs = []string{"runtime/internal/atomic", "runtime/internal/sys", "runtime", "runtime/race", "runtime/msan"}
-
-// Only insert racefuncenterfp/racefuncexit into the following packages.
-// Memory accesses in the packages are either uninteresting or will cause false positives.
-var norace_inst_pkgs = []string{"sync", "sync/atomic"}
-
-func ispkgin(pkgs []string) bool {
-	if myimportpath != "" {
-		for _, p := range pkgs {
-			if myimportpath == p {
-				return true
-			}
-		}
-	}
-
-	return false
-}
-
-func instrument(fn *Node) {
-	if ispkgin(omit_pkgs) || fn.Func.Pragma&Norace != 0 {
-		return
-	}
-
-	if !flag_race || !ispkgin(norace_inst_pkgs) {
-		instrumentlist(fn.Nbody, nil)
-
-		// nothing interesting for race detector in fn->enter
-		instrumentlist(fn.Func.Exit, nil)
-	}
-
-	if flag_race {
-		// nodpc is the PC of the caller as extracted by
-		// getcallerpc. We use -widthptr(FP) for x86.
-		// BUG: this will not work on arm.
-		nodpc := *nodfp
-		nodpc.Type = Types[TUINTPTR]
-		nodpc.Xoffset = int64(-Widthptr)
-		nd := mkcall("racefuncenter", nil, nil, &nodpc)
-		fn.Func.Enter.Prepend(nd)
-		nd = mkcall("racefuncexit", nil, nil)
-		fn.Func.Exit.Append(nd)
-		fn.Func.Dcl = append(fn.Func.Dcl, &nodpc)
-	}
-
-	if Debug['W'] != 0 {
-		s := fmt.Sprintf("after instrument %v", fn.Func.Nname.Sym)
-		dumplist(s, fn.Nbody)
-		s = fmt.Sprintf("enter %v", fn.Func.Nname.Sym)
-		dumplist(s, fn.Func.Enter)
-		s = fmt.Sprintf("exit %v", fn.Func.Nname.Sym)
-		dumplist(s, fn.Func.Exit)
-	}
-}
-
-func instrumentlist(l Nodes, init *Nodes) {
-	s := l.Slice()
-	for i := range s {
-		var instr Nodes
-		instrumentnode(&s[i], &instr, 0, 0)
-		if init == nil {
-			s[i].Ninit.AppendNodes(&instr)
-		} else {
-			init.AppendNodes(&instr)
-		}
-	}
-}
-
-// instrumentnode is walkexpr and walkstmt combined: it walks the tree and adds
-// calls to the instrumentation code to top-level (statement) nodes' init lists.
-func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
-	n := *np
-
-	if n == nil {
-		return
-	}
-
-	if Debug['w'] > 1 {
-		Dump("instrument-before", n)
-	}
-	setlineno(n)
-	if init == nil {
-		Fatalf("instrument: bad init list")
-	}
-	if init == &n.Ninit {
-		// If init == &n->ninit and n->ninit is non-nil,
-		// instrumentnode might append it to itself.
-		// nil it out and handle it separately before putting it back.
-		l := n.Ninit
-
-		n.Ninit.Set(nil)
-		instrumentlist(l, nil)
-		instrumentnode(&n, &l, wr, skip) // recurse with nil n->ninit
-		appendinit(&n, l)
-		*np = n
-		return
-	}
-
-	instrumentlist(n.Ninit, nil)
-
-	switch n.Op {
-	default:
-		Fatalf("instrument: unknown node type %v", n.Op)
-
-	case OAS, OASWB, OAS2FUNC:
-		instrumentnode(&n.Left, init, 1, 0)
-		instrumentnode(&n.Right, init, 0, 0)
-		goto ret
-
-		// can't matter
-	case OCFUNC, OVARKILL, OVARLIVE:
-		goto ret
-
-	case OBLOCK:
-		ls := n.List.Slice()
-		afterCall := false
-		for i := range ls {
-			op := ls[i].Op
-			// Scan past OAS nodes copying results off stack.
-			// Those must not be instrumented, because the
-			// instrumentation calls will smash the results.
-			// The assignments are to temporaries, so they cannot
-			// be involved in races and need not be instrumented.
-			if afterCall && op == OAS && iscallret(ls[i].Right) {
-				continue
-			}
-			instrumentnode(&ls[i], &ls[i].Ninit, 0, 0)
-			afterCall = (op == OCALLFUNC || op == OCALLMETH || op == OCALLINTER)
-		}
-		goto ret
-
-	case ODEFER:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	case OPROC:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	case OCALLINTER:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	// Instrument dst argument of runtime.writebarrier* calls
-	// as we do not instrument runtime code.
-	// typedslicecopy is instrumented in runtime.
-	case OCALLFUNC:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	case ONOT,
-		OMINUS,
-		OPLUS,
-		OREAL,
-		OIMAG,
-		OCOM,
-		OSQRT:
-		instrumentnode(&n.Left, init, wr, 0)
-		goto ret
-
-	case ODOTINTER:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	case ODOT:
-		instrumentnode(&n.Left, init, 0, 1)
-		callinstr(&n, init, wr, skip)
-		goto ret
-
-	case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
-		instrumentnode(&n.Left, init, 0, 0)
-
-		callinstr(&n, init, wr, skip)
-		goto ret
-
-	case OIND: // *p
-		instrumentnode(&n.Left, init, 0, 0)
-
-		callinstr(&n, init, wr, skip)
-		goto ret
-
-	case OSPTR, OLEN, OCAP:
-		instrumentnode(&n.Left, init, 0, 0)
-		if n.Left.Type.IsMap() {
-			n1 := nod(OCONVNOP, n.Left, nil)
-			n1.Type = ptrto(Types[TUINT8])
-			n1 = nod(OIND, n1, nil)
-			n1 = typecheck(n1, Erv)
-			callinstr(&n1, init, 0, skip)
-		}
-
-		goto ret
-
-	case OLSH,
-		ORSH,
-		OLROT,
-		OAND,
-		OANDNOT,
-		OOR,
-		OXOR,
-		OSUB,
-		OMUL,
-		OHMUL,
-		OEQ,
-		ONE,
-		OLT,
-		OLE,
-		OGE,
-		OGT,
-		OADD,
-		OCOMPLEX:
-		instrumentnode(&n.Left, init, wr, 0)
-		instrumentnode(&n.Right, init, wr, 0)
-		goto ret
-
-	case OANDAND, OOROR:
-		instrumentnode(&n.Left, init, wr, 0)
-
-		// walk has ensured the node has moved to a location where
-		// side effects are safe.
-		// n->right may not be executed,
-		// so instrumentation goes to n->right->ninit, not init.
-		instrumentnode(&n.Right, &n.Right.Ninit, wr, 0)
-
-		goto ret
-
-	case ONAME:
-		callinstr(&n, init, wr, skip)
-		goto ret
-
-	case OCONV:
-		instrumentnode(&n.Left, init, wr, 0)
-		goto ret
-
-	case OCONVNOP:
-		instrumentnode(&n.Left, init, wr, 0)
-		goto ret
-
-	case ODIV, OMOD:
-		instrumentnode(&n.Left, init, wr, 0)
-		instrumentnode(&n.Right, init, wr, 0)
-		goto ret
-
-	case OINDEX:
-		if !n.Left.Type.IsArray() {
-			instrumentnode(&n.Left, init, 0, 0)
-		} else if !islvalue(n.Left) {
-			// index of unaddressable array, like Map[k][i].
-			instrumentnode(&n.Left, init, wr, 0)
-
-			instrumentnode(&n.Right, init, 0, 0)
-			goto ret
-		}
-
-		instrumentnode(&n.Right, init, 0, 0)
-		if !n.Left.Type.IsString() {
-			callinstr(&n, init, wr, skip)
-		}
-		goto ret
-
-	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
-		instrumentnode(&n.Left, init, 0, 0)
-		low, high, max := n.SliceBounds()
-		instrumentnode(&low, init, 0, 0)
-		instrumentnode(&high, init, 0, 0)
-		instrumentnode(&max, init, 0, 0)
-		n.SetSliceBounds(low, high, max)
-		goto ret
-
-	case OADDR:
-		instrumentnode(&n.Left, init, 0, 1)
-		goto ret
-
-		// n->left is Type* which is not interesting.
-	case OEFACE:
-		instrumentnode(&n.Right, init, 0, 0)
-
-		goto ret
-
-	case OITAB, OIDATA:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	case OSTRARRAYBYTETMP:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-	case OAS2DOTTYPE:
-		instrumentnode(&n.Left, init, 1, 0)
-		instrumentnode(&n.Right, init, 0, 0)
-		goto ret
-
-	case ODOTTYPE, ODOTTYPE2:
-		instrumentnode(&n.Left, init, 0, 0)
-		goto ret
-
-		// should not appear in AST by now
-	case OSEND,
-		ORECV,
-		OCLOSE,
-		ONEW,
-		OXCASE,
-		OXFALL,
-		OCASE,
-		OPANIC,
-		ORECOVER,
-		OCONVIFACE,
-		OCMPIFACE,
-		OMAKECHAN,
-		OMAKEMAP,
-		OMAKESLICE,
-		OCALL,
-		OCOPY,
-		OAPPEND,
-		ORUNESTR,
-		OARRAYBYTESTR,
-		OARRAYRUNESTR,
-		OSTRARRAYBYTE,
-		OSTRARRAYRUNE,
-		OINDEXMAP,
-		// lowered to call
-		OCMPSTR,
-		OADDSTR,
-		OCALLPART,
-		// lowered to PTRLIT
-		OCLOSURE,  // lowered to PTRLIT
-		ORANGE,    // lowered to ordinary for loop
-		OARRAYLIT, // lowered to assignments
-		OSLICELIT,
-		OMAPLIT,
-		OSTRUCTLIT,
-		OAS2,
-		OAS2RECV,
-		OAS2MAPR,
-		OASOP:
-		yyerror("instrument: %v must be lowered by now", n.Op)
-
-		goto ret
-
-		// impossible nodes: only appear in backend.
-	case ORROTC, OEXTEND:
-		yyerror("instrument: %v cannot exist now", n.Op)
-		goto ret
-
-	case OGETG:
-		yyerror("instrument: OGETG can happen only in runtime which we don't instrument")
-		goto ret
-
-	case OFOR:
-		if n.Left != nil {
-			instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
-		}
-		if n.Right != nil {
-			instrumentnode(&n.Right, &n.Right.Ninit, 0, 0)
-		}
-		goto ret
-
-	case OIF, OSWITCH:
-		if n.Left != nil {
-			instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
-		}
-		goto ret
-
-		// just do generic traversal
-	case OCALLMETH,
-		ORETURN,
-		ORETJMP,
-		OSELECT,
-		OEMPTY,
-		OBREAK,
-		OCONTINUE,
-		OFALL,
-		OGOTO,
-		OLABEL:
-		goto ret
-
-		// does not require instrumentation
-	case OPRINT, // don't bother instrumenting it
-		OPRINTN,     // don't bother instrumenting it
-		OCHECKNIL,   // always followed by a read.
-		OCLOSUREVAR, // immutable pointer to captured variable
-		ODOTMETH,    // either part of CALLMETH or CALLPART (lowered to PTRLIT)
-		OINDREGSP,   // at this stage, only n(SP) nodes from nodarg
-		ODCL,        // declarations (without value) cannot be races
-		ODCLCONST,
-		ODCLTYPE,
-		OTYPE,
-		ONONAME,
-		OLITERAL,
-		OTYPESW: // ignored by code generation, do not instrument.
-		goto ret
-	}
-
-ret:
-	if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
-		instrumentlist(n.List, init)
-	}
-	instrumentlist(n.Nbody, nil)
-	instrumentlist(n.Rlist, nil)
-	*np = n
-}
-
-func isartificial(n *Node) bool {
-	// Compiler-emitted artificial things that we do not want to instrument:
-	// they can't possibly participate in a data race, and they can't be seen
-	// by C/C++, so they are irrelevant for msan.
-	if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
-		if n.Sym.Name == "_" {
-			return true
-		}
-
-		// autotmp's are always local
-		if n.IsAutoTmp() {
-			return true
-		}
-
-		// statictmp's are read-only
-		if strings.HasPrefix(n.Sym.Name, "statictmp_") {
-			return true
-		}
-
-		// go.itab is accessed only by the compiler and runtime (assume safe)
-		if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
-			return true
-		}
-	}
-
-	return false
-}
-
-func callinstr(np **Node, init *Nodes, wr int, skip int) bool {
-	n := *np
-
-	//fmt.Printf("callinstr for %v [ %v ] etype=%v class=%v\n",
-	//	n, n.Op, n.Type.Etype, n.Class)
-
-	if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
-		return false
-	}
-	t := n.Type
-	if isartificial(n) {
-		return false
-	}
-
-	b := outervalue(n)
-
-	// This skips, for example, stores to the ... parameter array.
-	if isartificial(b) {
-		return false
-	}
-	class := b.Class
-
-	// BUG: we _may_ want to instrument PAUTO sometimes,
-	// e.g. if we have a local variable/method receiver
-	// that has a pointer inside. Whether it points to
-	// the heap or not is impossible to know at compile time.
-	if class == PAUTOHEAP || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
-		hascalls := 0
-		foreach(n, hascallspred, &hascalls)
-		if hascalls != 0 {
-			n = detachexpr(n, init)
-			*np = n
-		}
-
-		n = treecopy(n, 0)
-		makeaddable(n)
-		var f *Node
-		if flag_msan {
-			name := "msanread"
-			if wr != 0 {
-				name = "msanwrite"
-			}
-			// dowidth may not have been called for PEXTERN.
-			dowidth(t)
-			w := t.Width
-			if w == BADWIDTH {
-				Fatalf("instrument: %v badwidth", t)
-			}
-			f = mkcall(name, nil, init, uintptraddr(n), nodintconst(w))
-		} else if flag_race && (t.IsStruct() || t.IsArray()) {
-			name := "racereadrange"
-			if wr != 0 {
-				name = "racewriterange"
-			}
-			// dowidth may not have been called for PEXTERN.
-			dowidth(t)
-			w := t.Width
-			if w == BADWIDTH {
-				Fatalf("instrument: %v badwidth", t)
-			}
-			f = mkcall(name, nil, init, uintptraddr(n), nodintconst(w))
-		} else if flag_race {
-			name := "raceread"
-			if wr != 0 {
-				name = "racewrite"
-			}
-			f = mkcall(name, nil, init, uintptraddr(n))
-		}
-
-		init.Append(f)
-		return true
-	}
-
-	return false
-}
-
-// makeaddable returns a node whose memory location is the
-// same as n, but which is addressable in the Go language
-// sense.
-// This is different from functions like cheapexpr that may make
-// a copy of their argument.
-func makeaddable(n *Node) {
-	// The arguments to uintptraddr technically have an address but
-	// may not be addressable in the Go sense: for example, in the case
-	// of T(v).Field where T is a struct type and v is
-	// an addressable value.
-	switch n.Op {
-	case OINDEX:
-		if n.Left.Type.IsArray() {
-			makeaddable(n.Left)
-		}
-
-		// Turn T(v).Field into v.Field
-	case ODOT, OXDOT:
-		if n.Left.Op == OCONVNOP {
-			n.Left = n.Left.Left
-		}
-		makeaddable(n.Left)
-
-		// nothing to do
-	case ODOTPTR:
-		fallthrough
-	default:
-		break
-	}
-}
-
-func uintptraddr(n *Node) *Node {
-	r := nod(OADDR, n, nil)
-	r.Bounded = true
-	r = conv(r, Types[TUNSAFEPTR])
-	r = conv(r, Types[TUINTPTR])
-	return r
-}
-
-func detachexpr(n *Node, init *Nodes) *Node {
-	addr := nod(OADDR, n, nil)
-	l := temp(ptrto(n.Type))
-	as := nod(OAS, l, addr)
-	as = typecheck(as, Etop)
-	as = walkexpr(as, init)
-	init.Append(as)
-	ind := nod(OIND, l, nil)
-	ind = typecheck(ind, Erv)
-	ind = walkexpr(ind, init)
-	return ind
-}
-
-func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) {
-	if n != nil {
-		f(n, c)
-	}
-}
-
-func foreachlist(l Nodes, f func(*Node, interface{}), c interface{}) {
-	for _, n := range l.Slice() {
-		foreachnode(n, f, c)
-	}
-}
-
-func foreach(n *Node, f func(*Node, interface{}), c interface{}) {
-	foreachlist(n.Ninit, f, c)
-	foreachnode(n.Left, f, c)
-	foreachnode(n.Right, f, c)
-	foreachlist(n.List, f, c)
-	foreachlist(n.Nbody, f, c)
-	foreachlist(n.Rlist, f, c)
-}
-
-func hascallspred(n *Node, c interface{}) {
-	switch n.Op {
-	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
-		(*c.(*int))++
-	}
-}
-
-// appendinit is like addinit in subr.go
-// but appends rather than prepends.
-func appendinit(np **Node, init Nodes) {
-	if init.Len() == 0 {
-		return
-	}
-
-	n := *np
-	switch n.Op {
-	// There may be multiple refs to this node;
-	// introduce OCONVNOP to hold init list.
-	case ONAME, OLITERAL:
-		n = nod(OCONVNOP, n, nil)
-
-		n.Type = n.Left.Type
-		n.Typecheck = 1
-		*np = n
-	}
-
-	n.Ninit.AppendNodes(&init)
-	n.Ullman = UINF
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/range.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/range.go
deleted file mode 100644
index f9d7492..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/range.go
+++ /dev/null
@@ -1,449 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/range.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/range.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "unicode/utf8"
-
-// range
-func typecheckrange(n *Node) {
-	var toomany int
-	var why string
-	var t1 *Type
-	var t2 *Type
-	var v1 *Node
-	var v2 *Node
-	var ls []*Node
-
-	// Typechecking order is important here:
-	// 0. first typecheck range expression (slice/map/chan),
-	//	it is evaluated only once and so logically it is not part of the loop.
-	// 1. typecheck produced values,
-	//	this part can declare new vars and so it must be typechecked before body,
-	//	because body can contain a closure that captures the vars.
-	// 2. decldepth++ to denote loop body.
-	// 3. typecheck body.
-	// 4. decldepth--.
-
-	n.Right = typecheck(n.Right, Erv)
-
-	t := n.Right.Type
-	if t == nil {
-		goto out
-	}
-	// delicate little dance.  see typecheckas2
-	ls = n.List.Slice()
-	for i1, n1 := range ls {
-		if n1.Name == nil || n1.Name.Defn != n {
-			ls[i1] = typecheck(ls[i1], Erv|Easgn)
-		}
-	}
-
-	if t.IsPtr() && t.Elem().IsArray() {
-		t = t.Elem()
-	}
-	n.Type = t
-
-	toomany = 0
-	switch t.Etype {
-	default:
-		yyerror("cannot range over %L", n.Right)
-		goto out
-
-	case TARRAY, TSLICE:
-		t1 = Types[TINT]
-		t2 = t.Elem()
-
-	case TMAP:
-		t1 = t.Key()
-		t2 = t.Val()
-
-	case TCHAN:
-		if !t.ChanDir().CanRecv() {
-			yyerror("invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
-			goto out
-		}
-
-		t1 = t.Elem()
-		t2 = nil
-		if n.List.Len() == 2 {
-			toomany = 1
-		}
-
-	case TSTRING:
-		t1 = Types[TINT]
-		t2 = runetype
-	}
-
-	if n.List.Len() > 2 || toomany != 0 {
-		yyerror("too many variables in range")
-	}
-
-	v1 = nil
-	if n.List.Len() != 0 {
-		v1 = n.List.First()
-	}
-	v2 = nil
-	if n.List.Len() > 1 {
-		v2 = n.List.Second()
-	}
-
-	// this is not only an optimization but also a requirement in the spec.
-	// "if the second iteration variable is the blank identifier, the range
-	// clause is equivalent to the same clause with only the first variable
-	// present."
-	if isblank(v2) {
-		if v1 != nil {
-			n.List.Set1(v1)
-		}
-		v2 = nil
-	}
-
-	if v1 != nil {
-		if v1.Name != nil && v1.Name.Defn == n {
-			v1.Type = t1
-		} else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
-			yyerror("cannot assign type %v to %L in range%s", t1, v1, why)
-		}
-		checkassign(n, v1)
-	}
-
-	if v2 != nil {
-		if v2.Name != nil && v2.Name.Defn == n {
-			v2.Type = t2
-		} else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
-			yyerror("cannot assign type %v to %L in range%s", t2, v2, why)
-		}
-		checkassign(n, v2)
-	}
-
-	// second half of dance
-out:
-	n.Typecheck = 1
-	ls = n.List.Slice()
-	for i1, n1 := range ls {
-		if n1.Typecheck == 0 {
-			ls[i1] = typecheck(ls[i1], Erv|Easgn)
-		}
-	}
-
-	decldepth++
-	typecheckslice(n.Nbody.Slice(), Etop)
-	decldepth--
-}
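// Editor's illustrative sketch, not part of the original file: the (t1, t2)
// pairs chosen in the switch above correspond to these source-level range
// forms (the function and variable names here are hypothetical).
func rangeForms() {
	for i, b := range []byte("go") { // TSLICE: t1 = int, t2 = elem
		_, _ = i, b
	}
	for k, v := range map[string]int{"a": 1} { // TMAP: t1 = key, t2 = val
		_, _ = k, v
	}
	ch := make(chan bool, 1)
	ch <- true
	close(ch)
	for e := range ch { // TCHAN: t1 = elem; a second variable is an error
		_ = e
	}
	for i, r := range "héllo" { // TSTRING: t1 = int, t2 = rune
		_, _ = i, r
	}
}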
-
-func walkrange(n *Node) {
-	// variable name conventions:
-	//	ohv1, hv1, hv2: hidden (old) val 1, 2
-	//	ha, hit: hidden aggregate, iterator
-	//	hn, hp: hidden len, pointer
-	//	hb: hidden bool
-	//	a, v1, v2: not hidden aggregate, val 1, 2
-
-	t := n.Type
-
-	a := n.Right
-	lno := setlineno(a)
-	n.Right = nil
-
-	var v1 *Node
-	if n.List.Len() != 0 {
-		v1 = n.List.First()
-	}
-	var v2 *Node
-	if n.List.Len() > 1 && !isblank(n.List.Second()) {
-		v2 = n.List.Second()
-	}
-
-	// n.List has no meaning anymore, clear it
-	// to avoid erroneous processing by racewalk.
-	n.List.Set(nil)
-
-	var body []*Node
-	var init []*Node
-	switch t.Etype {
-	default:
-		Fatalf("walkrange")
-
-	case TARRAY, TSLICE:
-		if memclrrange(n, v1, v2, a) {
-			lineno = lno
-			return
-		}
-
-		// orderstmt arranged for a copy of the array/slice variable if needed.
-		ha := a
-
-		hv1 := temp(Types[TINT])
-		hn := temp(Types[TINT])
-		var hp *Node
-
-		init = append(init, nod(OAS, hv1, nil))
-		init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))
-		if v2 != nil {
-			hp = temp(ptrto(n.Type.Elem()))
-			tmp := nod(OINDEX, ha, nodintconst(0))
-			tmp.Bounded = true
-			init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))
-		}
-
-		n.Left = nod(OLT, hv1, hn)
-		n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))
-		if v1 == nil {
-			body = nil
-		} else if v2 == nil {
-			body = []*Node{nod(OAS, v1, hv1)}
-		} else {
-			a := nod(OAS2, nil, nil)
-			a.List.Set([]*Node{v1, v2})
-			a.Rlist.Set([]*Node{hv1, nod(OIND, hp, nil)})
-			body = []*Node{a}
-
-			// Advance pointer as part of increment.
-			// We used to advance the pointer before executing the loop body,
-			// but doing so would make the pointer point past the end of the
-			// array during the final iteration, possibly causing another unrelated
-			// piece of memory not to be garbage collected until the loop finished.
-			// Advancing during the increment ensures that the pointer p only points
-			// past the end of the array during the final "p++; i++; if(i >= len(x)) break;",
-			// after which p is dead, so it cannot confuse the collector.
-			tmp := nod(OADD, hp, nodintconst(t.Elem().Width))
-
-			tmp.Type = hp.Type
-			tmp.Typecheck = 1
-			tmp.Right.Type = Types[Tptr]
-			tmp.Right.Typecheck = 1
-			a = nod(OAS, hp, tmp)
-			a = typecheck(a, Etop)
-			n.Right.Ninit.Set1(a)
-		}
-
-	case TMAP:
-		// orderstmt allocated the iterator for us.
-		// we only use a once, so no copy needed.
-		ha := a
-
-		th := hiter(t)
-		hit := prealloc[n]
-		hit.Type = th
-		n.Left = nil
-		keysym := th.Field(0).Sym // depends on layout of iterator struct.  See reflect.go:hiter
-		valsym := th.Field(1).Sym // ditto
-
-		fn := syslook("mapiterinit")
-
-		fn = substArgTypes(fn, t.Key(), t.Val(), th)
-		init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
-		n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())
-
-		fn = syslook("mapiternext")
-		fn = substArgTypes(fn, th)
-		n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))
-
-		key := nodSym(ODOT, hit, keysym)
-		key = nod(OIND, key, nil)
-		if v1 == nil {
-			body = nil
-		} else if v2 == nil {
-			body = []*Node{nod(OAS, v1, key)}
-		} else {
-			val := nodSym(ODOT, hit, valsym)
-			val = nod(OIND, val, nil)
-			a := nod(OAS2, nil, nil)
-			a.List.Set([]*Node{v1, v2})
-			a.Rlist.Set([]*Node{key, val})
-			body = []*Node{a}
-		}
-
-	case TCHAN:
-		// orderstmt arranged for a copy of the channel variable.
-		ha := a
-
-		n.Left = nil
-
-		hv1 := temp(t.Elem())
-		hv1.Typecheck = 1
-		if haspointers(t.Elem()) {
-			init = append(init, nod(OAS, hv1, nil))
-		}
-		hb := temp(Types[TBOOL])
-
-		n.Left = nod(ONE, hb, nodbool(false))
-		a := nod(OAS2RECV, nil, nil)
-		a.Typecheck = 1
-		a.List.Set([]*Node{hv1, hb})
-		a.Rlist.Set1(nod(ORECV, ha, nil))
-		n.Left.Ninit.Set1(a)
-		if v1 == nil {
-			body = nil
-		} else {
-			body = []*Node{nod(OAS, v1, hv1)}
-		}
-		// Zero hv1. This prevents hv1 from being the sole, inaccessible
-		// reference to an otherwise GC-able value during the next channel receive.
-		// See issue 15281.
-		body = append(body, nod(OAS, hv1, nil))
-
-	case TSTRING:
-		// Transform string range statements like "for v1, v2 = range a" into
-		//
-		// ha := a
-		// for hv1 := 0; hv1 < len(ha); {
-		//   v1 = hv1
-		//   hv2 := rune(ha[hv1])
-		//   if hv2 < utf8.RuneSelf {
-		//      hv1++
-		//   } else {
-		//      hv2, hv1 = decoderune(ha, hv1)
-		//   }
-		//   v2 = hv2
-		//   // original body
-		// }
-
-		// orderstmt arranged for a copy of the string variable.
-		ha := a
-
-		hv1 := temp(Types[TINT])
-		hv2 := temp(runetype)
-
-		// hv1 := 0
-		init = append(init, nod(OAS, hv1, nil))
-
-		// hv1 < len(ha)
-		n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))
-
-		if v1 != nil {
-			// v1 = hv1
-			body = append(body, nod(OAS, v1, hv1))
-		}
-
-		// hv2 := ha[hv1]
-		nind := nod(OINDEX, ha, hv1)
-		nind.Bounded = true
-		body = append(body, nod(OAS, hv2, conv(nind, runetype)))
-
-		// if hv2 < utf8.RuneSelf
-		nif := nod(OIF, nil, nil)
-		nif.Left = nod(OLT, nind, nodintconst(utf8.RuneSelf))
-
-		// hv1++
-		nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))
-
-		// } else {
-		eif := nod(OAS2, nil, nil)
-		nif.Rlist.Set1(eif)
-
-		// hv2, hv1 = decoderune(ha, hv1)
-		eif.List.Set2(hv2, hv1)
-		fn := syslook("decoderune")
-		eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))
-
-		body = append(body, nif)
-
-		if v2 != nil {
-			// v2 = hv2
-			body = append(body, nod(OAS, v2, hv2))
-		}
-	}
-
-	n.Op = OFOR
-	typecheckslice(init, Etop)
-	n.Ninit.Append(init...)
-	typecheckslice(n.Left.Ninit.Slice(), Etop)
-	n.Left = typecheck(n.Left, Erv)
-	n.Right = typecheck(n.Right, Etop)
-	typecheckslice(body, Etop)
-	n.Nbody.Prepend(body...)
-	n = walkstmt(n)
-
-	lineno = lno
-}
-
-// Lower n into runtime·memclr if possible, for
-// fast zeroing of slices and arrays (issue 5373).
-// Look for instances of
-//
-// for i := range a {
-// 	a[i] = zero
-// }
-//
-// in which the evaluation of a is side-effect-free.
-//
-// Parameters are as in walkrange: "for v1, v2 = range a".
-func memclrrange(n, v1, v2, a *Node) bool {
-	if Debug['N'] != 0 || instrumenting {
-		return false
-	}
-	if v1 == nil || v2 != nil {
-		return false
-	}
-	if n.Nbody.Len() == 0 || n.Nbody.First() == nil || n.Nbody.Len() > 1 {
-		return false
-	}
-	stmt := n.Nbody.First() // only stmt in body
-	if stmt.Op != OAS || stmt.Left.Op != OINDEX {
-		return false
-	}
-	if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
-		return false
-	}
-	elemsize := n.Type.Elem().Width
-	if elemsize <= 0 || !iszero(stmt.Right) {
-		return false
-	}
-
-	// Convert to
-	// if len(a) != 0 {
-	// 	hp = &a[0]
-	// 	hn = len(a)*sizeof(elem(a))
-	// 	memclr{NoHeap,Has}Pointers(hp, hn)
-	// 	i = len(a) - 1
-	// }
-	n.Op = OIF
-
-	n.Nbody.Set(nil)
-	n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))
-
-	// hp = &a[0]
-	hp := temp(ptrto(Types[TUINT8]))
-
-	tmp := nod(OINDEX, a, nodintconst(0))
-	tmp.Bounded = true
-	tmp = nod(OADDR, tmp, nil)
-	tmp = nod(OCONVNOP, tmp, nil)
-	tmp.Type = ptrto(Types[TUINT8])
-	n.Nbody.Append(nod(OAS, hp, tmp))
-
-	// hn = len(a) * sizeof(elem(a))
-	hn := temp(Types[TUINTPTR])
-
-	tmp = nod(OLEN, a, nil)
-	tmp = nod(OMUL, tmp, nodintconst(elemsize))
-	tmp = conv(tmp, Types[TUINTPTR])
-	n.Nbody.Append(nod(OAS, hn, tmp))
-
-	var fn *Node
-	if haspointers(a.Type.Elem()) {
-		// memclrHasPointers(hp, hn)
-		fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
-	} else {
-		// memclrNoHeapPointers(hp, hn)
-		fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
-	}
-
-	n.Nbody.Append(fn)
-
-	// i = len(a) - 1
-	v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))
-
-	n.Nbody.Append(v1)
-
-	n.Left = typecheck(n.Left, Erv)
-	typecheckslice(n.Nbody.Slice(), Etop)
-	n = walkstmt(n)
-	return true
-}
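// Editor's illustrative note, not part of the original file: this is the
// source-level shape that memclrrange recognizes. A range loop whose only
// statement zeroes a[i], over a pointer-free element type, is lowered to a
// single runtime memclr call instead of a per-element store loop.
func zeroAll(buf []byte) {
	for i := range buf {
		buf[i] = 0
	}
}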
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/reflect.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/reflect.go
deleted file mode 100644
index 47f5871..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/reflect.go
+++ /dev/null
@@ -1,1788 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/reflect.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/reflect.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/internal/gcprog"
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"os"
-	"sort"
-	"strings"
-)
-
-type itabEntry struct {
-	t, itype *Type
-	sym      *Sym
-}
-
-type ptabEntry struct {
-	s *Sym
-	t *Type
-}
-
-// runtime interface and reflection data structures
-var signatlist []*Node
-var itabs []itabEntry
-var ptabs []ptabEntry
-
-type Sig struct {
-	name   string
-	pkg    *Pkg
-	isym   *Sym
-	tsym   *Sym
-	type_  *Type
-	mtype  *Type
-	offset int32
-}
-
-// byMethodNameAndPackagePath sorts method signatures by name, then package path.
-type byMethodNameAndPackagePath []*Sig
-
-func (x byMethodNameAndPackagePath) Len() int      { return len(x) }
-func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x byMethodNameAndPackagePath) Less(i, j int) bool {
-	return siglt(x[i], x[j])
-}
-
-// siglt reports whether a < b
-func siglt(a, b *Sig) bool {
-	if a.name != b.name {
-		return a.name < b.name
-	}
-	if a.pkg == b.pkg {
-		return false
-	}
-	if a.pkg == nil {
-		return true
-	}
-	if b.pkg == nil {
-		return false
-	}
-	return a.pkg.Path < b.pkg.Path
-}
-
-// Builds a type representing a Bucket structure for
-// the given map type. This type is not visible to users -
-// we include only enough information to generate a correct GC
-// program for it.
-// Make sure this stays in sync with ../../../../runtime/hashmap.go!
-const (
-	BUCKETSIZE = 8
-	MAXKEYSIZE = 128
-	MAXVALSIZE = 128
-)
-
-func structfieldSize() int       { return 3 * Widthptr } // Sizeof(runtime.structfield{})
-func imethodSize() int           { return 4 + 4 }        // Sizeof(runtime.imethod{})
-func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
-	if t.Sym == nil && len(methods(t)) == 0 {
-		return 0
-	}
-	return 4 + 2 + 2 + 4 + 4
-}
-
-func makefield(name string, t *Type) *Field {
-	f := newField()
-	f.Type = t
-	f.Sym = nopkg.Lookup(name)
-	return f
-}
-
-func mapbucket(t *Type) *Type {
-	if t.MapType().Bucket != nil {
-		return t.MapType().Bucket
-	}
-
-	bucket := typ(TSTRUCT)
-	keytype := t.Key()
-	valtype := t.Val()
-	dowidth(keytype)
-	dowidth(valtype)
-	if keytype.Width > MAXKEYSIZE {
-		keytype = ptrto(keytype)
-	}
-	if valtype.Width > MAXVALSIZE {
-		valtype = ptrto(valtype)
-	}
-
-	field := make([]*Field, 0, 5)
-
-	// The first field is: uint8 topbits[BUCKETSIZE].
-	arr := typArray(Types[TUINT8], BUCKETSIZE)
-	field = append(field, makefield("topbits", arr))
-
-	arr = typArray(keytype, BUCKETSIZE)
-	arr.Noalg = true
-	field = append(field, makefield("keys", arr))
-
-	arr = typArray(valtype, BUCKETSIZE)
-	arr.Noalg = true
-	field = append(field, makefield("values", arr))
-
-	// Make sure the overflow pointer is the last memory in the struct,
-	// because the runtime assumes it can use size-ptrSize as the
-	// offset of the overflow pointer. We double-check that property
-	// below once the offsets and size are computed.
-	//
-	// BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
-	// On 32-bit systems, the max alignment is 32-bit, and the
-	// overflow pointer will add another 32-bit field, and the struct
-	// will end with no padding.
-	// On 64-bit systems, the max alignment is 64-bit, and the
-	// overflow pointer will add another 64-bit field, and the struct
-	// will end with no padding.
-	// On nacl/amd64p32, however, the max alignment is 64-bit,
-	// but the overflow pointer will add only a 32-bit field,
-	// so if the struct needs 64-bit padding (because a key or value does)
-	// then it would end with an extra 32-bit padding field.
-	// Preempt that by emitting the padding here.
-	if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr {
-		field = append(field, makefield("pad", Types[TUINTPTR]))
-	}
-
-	// If keys and values have no pointers, the map implementation
-	// can keep a list of overflow pointers on the side so that
-	// buckets can be marked as having no pointers.
-	// Arrange for the bucket to have no pointers by changing
-	// the type of the overflow field to uintptr in this case.
-	// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
-	otyp := ptrto(bucket)
-	if !haspointers(t.Val()) && !haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
-		otyp = Types[TUINTPTR]
-	}
-	ovf := makefield("overflow", otyp)
-	field = append(field, ovf)
-
-	// link up fields
-	bucket.Noalg = true
-	bucket.Local = t.Local
-	bucket.SetFields(field[:])
-	dowidth(bucket)
-
-	// Double-check that overflow field is final memory in struct,
-	// with no padding at end. See comment above.
-	if ovf.Offset != bucket.Width-int64(Widthptr) {
-		yyerror("bad math in mapbucket for %v", t)
-	}
-
-	t.MapType().Bucket = bucket
-
-	bucket.StructType().Map = t
-	return bucket
-}
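// Editor's illustrative sketch, not part of the original file: for a
// map[uint64]uint32 on a 64-bit system, mapbucket effectively builds a
// struct of this shape. BUCKETSIZE is 8, both key and value fit under the
// size limits, neither contains pointers (so overflow degrades to uintptr),
// and neither is aligned more strictly than a pointer (so no pad field).
type exampleBucket struct {
	topbits  [8]uint8  // top hash bits, offset 0
	keys     [8]uint64 // offset 8
	values   [8]uint32 // offset 72
	overflow uintptr   // offset 104 == Width-Widthptr, as the check above requires
}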
-
-// Builds a type representing a Hmap structure for the given map type.
-// Make sure this stays in sync with ../../../../runtime/hashmap.go!
-func hmap(t *Type) *Type {
-	if t.MapType().Hmap != nil {
-		return t.MapType().Hmap
-	}
-
-	bucket := mapbucket(t)
-	fields := []*Field{
-		makefield("count", Types[TINT]),
-		makefield("flags", Types[TUINT8]),
-		makefield("B", Types[TUINT8]),
-		makefield("noverflow", Types[TUINT16]),
-		makefield("hash0", Types[TUINT32]),
-		makefield("buckets", ptrto(bucket)),
-		makefield("oldbuckets", ptrto(bucket)),
-		makefield("nevacuate", Types[TUINTPTR]),
-		makefield("overflow", Types[TUNSAFEPTR]),
-	}
-
-	h := typ(TSTRUCT)
-	h.Noalg = true
-	h.Local = t.Local
-	h.SetFields(fields)
-	dowidth(h)
-	t.MapType().Hmap = h
-	h.StructType().Map = t
-	return h
-}
-
-func hiter(t *Type) *Type {
-	if t.MapType().Hiter != nil {
-		return t.MapType().Hiter
-	}
-
-	// build a struct:
-	// hiter {
-	//    key *Key
-	//    val *Value
-	//    t *MapType
-	//    h *Hmap
-	//    buckets *Bucket
-	//    bptr *Bucket
-	//    overflow0 unsafe.Pointer
-	//    overflow1 unsafe.Pointer
-	//    startBucket uintptr
-	//    stuff uintptr
-	//    bucket uintptr
-	//    checkBucket uintptr
-	// }
-	// must match ../../../../runtime/hashmap.go:hiter.
-	var field [12]*Field
-	field[0] = makefield("key", ptrto(t.Key()))
-	field[1] = makefield("val", ptrto(t.Val()))
-	field[2] = makefield("t", ptrto(Types[TUINT8]))
-	field[3] = makefield("h", ptrto(hmap(t)))
-	field[4] = makefield("buckets", ptrto(mapbucket(t)))
-	field[5] = makefield("bptr", ptrto(mapbucket(t)))
-	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
-	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
-	field[8] = makefield("startBucket", Types[TUINTPTR])
-	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
-	field[10] = makefield("bucket", Types[TUINTPTR])
-	field[11] = makefield("checkBucket", Types[TUINTPTR])
-
-	// build iterator struct holding the above fields
-	i := typ(TSTRUCT)
-	i.Noalg = true
-	i.SetFields(field[:])
-	dowidth(i)
-	if i.Width != int64(12*Widthptr) {
-		yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
-	}
-	t.MapType().Hiter = i
-	i.StructType().Map = t
-	return i
-}
-
-// f is method type, with receiver.
-// return function type, receiver as first argument (or not).
-func methodfunc(f *Type, receiver *Type) *Type {
-	var in []*Node
-	if receiver != nil {
-		d := nod(ODCLFIELD, nil, nil)
-		d.Type = receiver
-		in = append(in, d)
-	}
-
-	var d *Node
-	for _, t := range f.Params().Fields().Slice() {
-		d = nod(ODCLFIELD, nil, nil)
-		d.Type = t.Type
-		d.Isddd = t.Isddd
-		in = append(in, d)
-	}
-
-	var out []*Node
-	for _, t := range f.Results().Fields().Slice() {
-		d = nod(ODCLFIELD, nil, nil)
-		d.Type = t.Type
-		out = append(out, d)
-	}
-
-	t := functype(nil, in, out)
-	if f.Nname() != nil {
-		// Link to name of original method function.
-		t.SetNname(f.Nname())
-	}
-
-	return t
-}
-
-// methods returns the methods of the non-interface type t, sorted by name.
-// Generates stub functions as needed.
-func methods(t *Type) []*Sig {
-	// method type
-	mt := methtype(t)
-
-	if mt == nil {
-		return nil
-	}
-	expandmeth(mt)
-
-	// type stored in interface word
-	it := t
-
-	if !isdirectiface(it) {
-		it = ptrto(t)
-	}
-
-	// make list of methods for t,
-	// generating code if necessary.
-	var ms []*Sig
-	for _, f := range mt.AllMethods().Slice() {
-		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
-			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
-		}
-		if f.Type.Recv() == nil {
-			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
-		}
-		if f.Nointerface {
-			continue
-		}
-
-		method := f.Sym
-		if method == nil {
-			continue
-		}
-
-		// get receiver type for this particular method.
-		// if pointer receiver but non-pointer t and
-		// this is not an embedded pointer inside a struct,
-		// method does not apply.
-		this := f.Type.Recv().Type
-
-		if this.IsPtr() && this.Elem() == t {
-			continue
-		}
-		if this.IsPtr() && !t.IsPtr() && f.Embedded != 2 && !isifacemethod(f.Type) {
-			continue
-		}
-
-		var sig Sig
-		ms = append(ms, &sig)
-
-		sig.name = method.Name
-		if !exportname(method.Name) {
-			if method.Pkg == nil {
-				Fatalf("methods: missing package")
-			}
-			sig.pkg = method.Pkg
-		}
-
-		sig.isym = methodsym(method, it, 1)
-		sig.tsym = methodsym(method, t, 0)
-		sig.type_ = methodfunc(f.Type, t)
-		sig.mtype = methodfunc(f.Type, nil)
-
-		if sig.isym.Flags&SymSiggen == 0 {
-			sig.isym.Flags |= SymSiggen
-			if !eqtype(this, it) || this.Width < Types[Tptr].Width {
-				compiling_wrappers = 1
-				genwrapper(it, f, sig.isym, 1)
-				compiling_wrappers = 0
-			}
-		}
-
-		if sig.tsym.Flags&SymSiggen == 0 {
-			sig.tsym.Flags |= SymSiggen
-			if !eqtype(this, t) {
-				compiling_wrappers = 1
-				genwrapper(t, f, sig.tsym, 0)
-				compiling_wrappers = 0
-			}
-		}
-	}
-
-	sort.Sort(byMethodNameAndPackagePath(ms))
-	return ms
-}
-
-// imethods returns the methods of the interface type t, sorted by name.
-func imethods(t *Type) []*Sig {
-	var methods []*Sig
-	for _, f := range t.Fields().Slice() {
-		if f.Type.Etype != TFUNC || f.Sym == nil {
-			continue
-		}
-		method := f.Sym
-		var sig = Sig{
-			name: method.Name,
-		}
-		if !exportname(method.Name) {
-			if method.Pkg == nil {
-				Fatalf("imethods: missing package")
-			}
-			sig.pkg = method.Pkg
-		}
-
-		sig.mtype = f.Type
-		sig.offset = 0
-		sig.type_ = methodfunc(f.Type, nil)
-
-		if n := len(methods); n > 0 {
-			last := methods[n-1]
-			if !(siglt(last, &sig)) {
-				Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
-			}
-		}
-		methods = append(methods, &sig)
-
-		// Compiler can only refer to wrappers for non-blank methods.
-		if isblanksym(method) {
-			continue
-		}
-
-		// NOTE(rsc): Perhaps an oversight that
-		// IfaceType.Method is not in the reflect data.
-		// Generate the method body, so that compiled
-		// code can refer to it.
-		isym := methodsym(method, t, 0)
-
-		if isym.Flags&SymSiggen == 0 {
-			isym.Flags |= SymSiggen
-			genwrapper(t, f, isym, 0)
-		}
-	}
-
-	return methods
-}
-
-func dimportpath(p *Pkg) {
-	if p.Pathsym != nil {
-		return
-	}
-
-	// If we are compiling the runtime package, there are two runtime packages around
-	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
-	// both of them, so just produce one for localpkg.
-	if myimportpath == "runtime" && p == Runtimepkg {
-		return
-	}
-
-	var str string
-	if p == localpkg {
-		// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
-		str = myimportpath
-	} else {
-		str = p.Path
-	}
-
-	s := obj.Linklookup(Ctxt, "type..importpath."+p.Prefix+".", 0)
-	ot := dnameData(s, 0, str, "", nil, false)
-	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
-	p.Pathsym = s
-}
-
-func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
-	return dgopkgpathLSym(Linksym(s), ot, pkg)
-}
-
-func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int {
-	if pkg == nil {
-		return duintxxLSym(s, ot, 0, Widthptr)
-	}
-
-	if pkg == localpkg && myimportpath == "" {
-		// If we don't know the full import path of the package being compiled
-		// (i.e. -p was not passed on the compiler command line), emit a reference to
-		// type..importpath.""., which the linker will rewrite using the correct import path.
-		// Every package that imports this one directly defines the symbol.
-		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
-		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
-		return dsymptrLSym(s, ot, ns, 0)
-	}
-
-	dimportpath(pkg)
-	return dsymptrLSym(s, ot, pkg.Pathsym, 0)
-}
-
-// dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol.
-func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int {
-	if pkg == nil {
-		return duintxxLSym(s, ot, 0, 4)
-	}
-	if pkg == localpkg && myimportpath == "" {
-		// If we don't know the full import path of the package being compiled
-		// (i.e. -p was not passed on the compiler command line), emit a reference to
-		// type..importpath.""., which the linker will rewrite using the correct import path.
-		// Every package that imports this one directly defines the symbol.
-		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
-		ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0)
-		return dsymptrOffLSym(s, ot, ns, 0)
-	}
-
-	dimportpath(pkg)
-	return dsymptrOffLSym(s, ot, pkg.Pathsym, 0)
-}
-
-// isExportedField reports whether a struct field is exported.
-// It also returns the package to use for PkgPath for an unexported field.
-func isExportedField(ft *Field) (bool, *Pkg) {
-	if ft.Sym != nil && ft.Embedded == 0 {
-		return exportname(ft.Sym.Name), ft.Sym.Pkg
-	} else {
-		if ft.Type.Sym != nil &&
-			(ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) {
-			return false, ft.Type.Sym.Pkg
-		} else {
-			return true, nil
-		}
-	}
-}
-
-// dnameField dumps a reflect.name for a struct field.
-func dnameField(s *Sym, ot int, spkg *Pkg, ft *Field) int {
-	var name string
-	if ft.Sym != nil && ft.Embedded == 0 {
-		name = ft.Sym.Name
-	}
-	isExported, fpkg := isExportedField(ft)
-	if isExported || fpkg == spkg {
-		fpkg = nil
-	}
-	nsym := dname(name, ft.Note, fpkg, isExported)
-	return dsymptrLSym(Linksym(s), ot, nsym, 0)
-}
-
-// dnameData writes the contents of a reflect.name into s at offset ot.
-func dnameData(s *obj.LSym, ot int, name, tag string, pkg *Pkg, exported bool) int {
-	if len(name) > 1<<16-1 {
-		Fatalf("name too long: %s", name)
-	}
-	if len(tag) > 1<<16-1 {
-		Fatalf("tag too long: %s", tag)
-	}
-
-	// Encode name and tag. See reflect/type.go for details.
-	var bits byte
-	l := 1 + 2 + len(name)
-	if exported {
-		bits |= 1 << 0
-	}
-	if len(tag) > 0 {
-		l += 2 + len(tag)
-		bits |= 1 << 1
-	}
-	if pkg != nil {
-		bits |= 1 << 2
-	}
-	b := make([]byte, l)
-	b[0] = bits
-	b[1] = uint8(len(name) >> 8)
-	b[2] = uint8(len(name))
-	copy(b[3:], name)
-	if len(tag) > 0 {
-		tb := b[3+len(name):]
-		tb[0] = uint8(len(tag) >> 8)
-		tb[1] = uint8(len(tag))
-		copy(tb[2:], tag)
-	}
-
-	ot = int(s.WriteBytes(Ctxt, int64(ot), b))
-
-	if pkg != nil {
-		ot = dgopkgpathOffLSym(s, ot, pkg)
-	}
-
-	return ot
-}
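// Editor's illustrative sketch, not part of the original file: reading back
// the layout written by dnameData. One flag byte, a 2-byte big-endian name
// length, the name bytes, then, if bit 1 of the flags is set, a 2-byte
// big-endian tag length and the tag. If bit 2 is set, a 4-byte package path
// offset follows; this sketch ignores it.
func decodeName(b []byte) (name, tag string, exported bool) {
	exported = b[0]&(1<<0) != 0
	nlen := int(b[1])<<8 | int(b[2])
	name = string(b[3 : 3+nlen])
	if b[0]&(1<<1) != 0 {
		tb := b[3+nlen:]
		tlen := int(tb[0])<<8 | int(tb[1])
		tag = string(tb[2 : 2+tlen])
	}
	return name, tag, exported
}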
-
-var dnameCount int
-
-// dname creates a reflect.name for a struct field or method.
-func dname(name, tag string, pkg *Pkg, exported bool) *obj.LSym {
-	// Write out data as "type.." to signal two things to the
-	// linker, first that when dynamically linking, the symbol
-	// should be moved to a relro section, and second that the
-	// contents should not be decoded as a type.
-	sname := "type..namedata."
-	if pkg == nil {
-		// In the common case, share data with other packages.
-		if name == "" {
-			if exported {
-				sname += "-noname-exported." + tag
-			} else {
-				sname += "-noname-unexported." + tag
-			}
-		} else {
-			sname += name + "." + tag
-		}
-	} else {
-		sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
-		dnameCount++
-	}
-	s := obj.Linklookup(Ctxt, sname, 0)
-	if len(s.P) > 0 {
-		return s
-	}
-	ot := dnameData(s, 0, name, tag, pkg, exported)
-	ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA)
-	return s
-}
-
-// dextratype dumps the fields of a runtime.uncommontype.
-// dataAdd is the offset in bytes after the header where the
-// backing array of the []method field is written (by dextratypeData).
-func dextratype(s *Sym, ot int, t *Type, dataAdd int) int {
-	m := methods(t)
-	if t.Sym == nil && len(m) == 0 {
-		return ot
-	}
-	noff := int(Rnd(int64(ot), int64(Widthptr)))
-	if noff != ot {
-		Fatalf("unexpected alignment in dextratype for %v", t)
-	}
-
-	for _, a := range m {
-		dtypesym(a.type_)
-	}
-
-	ot = dgopkgpathOffLSym(Linksym(s), ot, typePkg(t))
-
-	dataAdd += uncommonSize(t)
-	mcount := len(m)
-	if mcount != int(uint16(mcount)) {
-		Fatalf("too many methods on %v: %d", t, mcount)
-	}
-	if dataAdd != int(uint32(dataAdd)) {
-		Fatalf("methods are too far away on %v: %d", t, dataAdd)
-	}
-
-	ot = duint16(s, ot, uint16(mcount))
-	ot = duint16(s, ot, 0)
-	ot = duint32(s, ot, uint32(dataAdd))
-	ot = duint32(s, ot, 0)
-	return ot
-}
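// Editor's illustrative sketch, not part of the original file: the 16 header
// bytes emitted by dextratype (uncommonSize returns 4+2+2+4+4) line up with
// a runtime-side layout along these lines; the field names are assumptions.
type exampleUncommonType struct {
	pkgpath int32  // offset written by dgopkgpathOffLSym
	mcount  uint16 // number of methods
	_       uint16
	moff    uint32 // dataAdd: offset from this header to the [mcount]method array
	_       uint32
}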
-
-func typePkg(t *Type) *Pkg {
-	tsym := t.Sym
-	if tsym == nil {
-		switch t.Etype {
-		case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
-			if t.Elem() != nil {
-				tsym = t.Elem().Sym
-			}
-		}
-	}
-	if tsym != nil && t != Types[t.Etype] && t != errortype {
-		return tsym.Pkg
-	}
-	return nil
-}
-
-// dextratypeData dumps the backing array for the []method field of
-// runtime.uncommontype.
-func dextratypeData(s *Sym, ot int, t *Type) int {
-	lsym := Linksym(s)
-	for _, a := range methods(t) {
-		// ../../../../runtime/type.go:/method
-		exported := exportname(a.name)
-		var pkg *Pkg
-		if !exported && a.pkg != typePkg(t) {
-			pkg = a.pkg
-		}
-		nsym := dname(a.name, "", pkg, exported)
-
-		ot = dsymptrOffLSym(lsym, ot, nsym, 0)
-		ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype)))
-		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym))
-		ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym))
-	}
-	return ot
-}
-
-func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
-	duintxxLSym(s, ot, 0, 4)
-	r := obj.Addrel(s)
-	r.Off = int32(ot)
-	r.Siz = 4
-	r.Sym = x
-	r.Type = obj.R_METHODOFF
-	return ot + 4
-}
-
-var kinds = []int{
-	TINT:        obj.KindInt,
-	TUINT:       obj.KindUint,
-	TINT8:       obj.KindInt8,
-	TUINT8:      obj.KindUint8,
-	TINT16:      obj.KindInt16,
-	TUINT16:     obj.KindUint16,
-	TINT32:      obj.KindInt32,
-	TUINT32:     obj.KindUint32,
-	TINT64:      obj.KindInt64,
-	TUINT64:     obj.KindUint64,
-	TUINTPTR:    obj.KindUintptr,
-	TFLOAT32:    obj.KindFloat32,
-	TFLOAT64:    obj.KindFloat64,
-	TBOOL:       obj.KindBool,
-	TSTRING:     obj.KindString,
-	TPTR32:      obj.KindPtr,
-	TPTR64:      obj.KindPtr,
-	TSTRUCT:     obj.KindStruct,
-	TINTER:      obj.KindInterface,
-	TCHAN:       obj.KindChan,
-	TMAP:        obj.KindMap,
-	TARRAY:      obj.KindArray,
-	TSLICE:      obj.KindSlice,
-	TFUNC:       obj.KindFunc,
-	TCOMPLEX64:  obj.KindComplex64,
-	TCOMPLEX128: obj.KindComplex128,
-	TUNSAFEPTR:  obj.KindUnsafePointer,
-}
-
-func haspointers(t *Type) bool {
-	switch t.Etype {
-	case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
-		TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL:
-		return false
-
-	case TSLICE:
-		return true
-
-	case TARRAY:
-		at := t.Extra.(*ArrayType)
-		if at.Haspointers != 0 {
-			return at.Haspointers-1 != 0
-		}
-
-		ret := false
-		if t.NumElem() != 0 { // non-empty array
-			ret = haspointers(t.Elem())
-		}
-
-		at.Haspointers = 1 + uint8(obj.Bool2int(ret))
-		return ret
-
-	case TSTRUCT:
-		st := t.StructType()
-		if st.Haspointers != 0 {
-			return st.Haspointers-1 != 0
-		}
-
-		ret := false
-		for _, t1 := range t.Fields().Slice() {
-			if haspointers(t1.Type) {
-				ret = true
-				break
-			}
-		}
-		st.Haspointers = 1 + uint8(obj.Bool2int(ret))
-		return ret
-	}
-
-	return true
-}
-
-// typeptrdata returns the length in bytes of the prefix of t
-// containing pointer data. Anything after this offset is scalar data.
-func typeptrdata(t *Type) int64 {
-	if !haspointers(t) {
-		return 0
-	}
-
-	switch t.Etype {
-	case TPTR32,
-		TPTR64,
-		TUNSAFEPTR,
-		TFUNC,
-		TCHAN,
-		TMAP:
-		return int64(Widthptr)
-
-	case TSTRING:
-		// struct { byte *str; intgo len; }
-		return int64(Widthptr)
-
-	case TINTER:
-		// struct { Itab *tab;	void *data; } or
-		// struct { Type *type; void *data; }
-		return 2 * int64(Widthptr)
-
-	case TSLICE:
-		// struct { byte *array; uintgo len; uintgo cap; }
-		return int64(Widthptr)
-
-	case TARRAY:
-		// haspointers already eliminated t.NumElem() == 0.
-		return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())
-
-	case TSTRUCT:
-		// Find the last field that has pointers.
-		var lastPtrField *Field
-		for _, t1 := range t.Fields().Slice() {
-			if haspointers(t1.Type) {
-				lastPtrField = t1
-			}
-		}
-		return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
-
-	default:
-		Fatalf("typeptrdata: unexpected type, %v", t)
-		return 0
-	}
-}
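// Editor's worked example, not part of the original file: for the struct
// below on a 64-bit system, the last field containing pointers is p at
// offset 8, so typeptrdata would be 8 + typeptrdata(*int) = 16, even though
// the struct is 24 bytes wide; the trailing bytes are scalar data the GC
// never needs to scan.
type exampleS struct {
	a int64   // offset 0, no pointers
	p *int    // offset 8, last pointer-bearing field
	b [4]byte // offset 16, no pointers (struct padded to 24 bytes)
}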
-
-// tflag is documented in reflect/type.go.
-//
-// tflag values must be kept in sync with copies in:
-//	cmd/compile/internal/gc/reflect.go
-//	cmd/link/internal/ld/decodesym.go
-//	reflect/type.go
-//	runtime/type.go
-const (
-	tflagUncommon  = 1 << 0
-	tflagExtraStar = 1 << 1
-	tflagNamed     = 1 << 2
-)
-
-var dcommontype_algarray *Sym
-
-// dcommontype dumps the contents of a reflect.rtype (runtime._type).
-func dcommontype(s *Sym, ot int, t *Type) int {
-	if ot != 0 {
-		Fatalf("dcommontype %d", ot)
-	}
-
-	sizeofAlg := 2 * Widthptr
-	if dcommontype_algarray == nil {
-		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
-	}
-	dowidth(t)
-	alg := algtype(t)
-	var algsym *Sym
-	if alg == ASPECIAL || alg == AMEM {
-		algsym = dalgsym(t)
-	}
-
-	sptrWeak := true
-	var sptr *Sym
-	if !t.IsPtr() || t.ptrTo != nil {
-		tptr := ptrto(t)
-		if t.Sym != nil || methods(tptr) != nil {
-			sptrWeak = false
-		}
-		sptr = dtypesym(tptr)
-	}
-
-	gcsym, useGCProg, ptrdata := dgcsym(t)
-
-	// ../../../../reflect/type.go:/^type.rtype
-	// actual type structure
-	//	type rtype struct {
-	//		size          uintptr
-	//		ptrdata       uintptr
-	//		hash          uint32
-	//		tflag         tflag
-	//		align         uint8
-	//		fieldAlign    uint8
-	//		kind          uint8
-	//		alg           *typeAlg
-	//		gcdata        *byte
-	//		str           nameOff
-	//		ptrToThis     typeOff
-	//	}
-	ot = duintptr(s, ot, uint64(t.Width))
-	ot = duintptr(s, ot, uint64(ptrdata))
-
-	ot = duint32(s, ot, typehash(t))
-
-	var tflag uint8
-	if uncommonSize(t) != 0 {
-		tflag |= tflagUncommon
-	}
-	if t.Sym != nil && t.Sym.Name != "" {
-		tflag |= tflagNamed
-	}
-
-	exported := false
-	p := t.tconv(FmtLeft | FmtUnsigned)
-	// If we're writing out type T,
-	// we are very likely to write out type *T as well.
-	// Use the string "*T"[1:] for "T", so that the two
-	// share storage. This is a cheap way to reduce the
-	// amount of space taken up by reflect strings.
-	if !strings.HasPrefix(p, "*") {
-		p = "*" + p
-		tflag |= tflagExtraStar
-		if t.Sym != nil {
-			exported = exportname(t.Sym.Name)
-		}
-	} else {
-		if t.Elem() != nil && t.Elem().Sym != nil {
-			exported = exportname(t.Elem().Sym.Name)
-		}
-	}
-
-	ot = duint8(s, ot, tflag)
-
-	// runtime (and common sense) expects alignment to be a power of two.
-	i := int(t.Align)
-
-	if i == 0 {
-		i = 1
-	}
-	if i&(i-1) != 0 {
-		Fatalf("invalid alignment %d for %v", t.Align, t)
-	}
-	ot = duint8(s, ot, t.Align) // align
-	ot = duint8(s, ot, t.Align) // fieldAlign
-
-	i = kinds[t.Etype]
-	if !haspointers(t) {
-		i |= obj.KindNoPointers
-	}
-	if isdirectiface(t) {
-		i |= obj.KindDirectIface
-	}
-	if useGCProg {
-		i |= obj.KindGCProg
-	}
-	ot = duint8(s, ot, uint8(i)) // kind
-	if algsym == nil {
-		ot = dsymptr(s, ot, dcommontype_algarray, int(alg)*sizeofAlg)
-	} else {
-		ot = dsymptr(s, ot, algsym, 0)
-	}
-	ot = dsymptr(s, ot, gcsym, 0) // gcdata
-
-	nsym := dname(p, "", nil, exported)
-	ot = dsymptrOffLSym(Linksym(s), ot, nsym, 0) // str
-	// ptrToThis
-	if sptr == nil {
-		ot = duint32(s, ot, 0)
-	} else if sptrWeak {
-		ot = dsymptrWeakOffLSym(Linksym(s), ot, Linksym(sptr))
-	} else {
-		ot = dsymptrOffLSym(Linksym(s), ot, Linksym(sptr), 0)
-	}
-
-	return ot
-}
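// Editor's illustrative note, not part of the original file: the
// tflagExtraStar trick above stores the name of T as the string "*T", so T
// and *T share one name and readers simply drop the leading '*'.
func typeNameFromStored(stored string, extraStar bool) string {
	if extraStar {
		return stored[1:] // stored is "*T"; the name of T is the tail
	}
	return stored
}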
-
-func typesym(t *Type) *Sym {
-	name := t.tconv(FmtLeft)
-
-	// Use a separate symbol name for Noalg types for #17752.
-	if a, bad := algtype1(t); a == ANOEQ && bad.Noalg {
-		name = "noalg." + name
-	}
-
-	return Pkglookup(name, typepkg)
-}
-
-// tracksym returns the symbol for tracking use of field/method f, assumed
-// to be a member of struct/interface type t.
-func tracksym(t *Type, f *Field) *Sym {
-	return Pkglookup(t.tconv(FmtLeft)+"."+f.Sym.Name, trackpkg)
-}
-
-func typesymprefix(prefix string, t *Type) *Sym {
-	p := prefix + "." + t.tconv(FmtLeft)
-	s := Pkglookup(p, typepkg)
-
-	//print("algsym: %s -> %+S\n", p, s);
-
-	return s
-}
-
-func typenamesym(t *Type) *Sym {
-	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
-		Fatalf("typename %v", t)
-	}
-	s := typesym(t)
-	if s.Def == nil {
-		n := newname(s)
-		n.Type = Types[TUINT8]
-		n.Class = PEXTERN
-		n.Typecheck = 1
-		s.Def = n
-
-		signatlist = append(signatlist, typenod(t))
-	}
-
-	return s.Def.Sym
-}
-
-func typename(t *Type) *Node {
-	s := typenamesym(t)
-	n := nod(OADDR, s.Def, nil)
-	n.Type = ptrto(s.Def.Type)
-	n.Addable = true
-	n.Ullman = 2
-	n.Typecheck = 1
-	return n
-}
-
-func itabname(t, itype *Type) *Node {
-	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
-		Fatalf("itabname(%v, %v)", t, itype)
-	}
-	s := Pkglookup(t.tconv(FmtLeft)+","+itype.tconv(FmtLeft), itabpkg)
-	if s.Def == nil {
-		n := newname(s)
-		n.Type = Types[TUINT8]
-		n.Class = PEXTERN
-		n.Typecheck = 1
-		s.Def = n
-
-		itabs = append(itabs, itabEntry{t: t, itype: itype, sym: s})
-	}
-
-	n := nod(OADDR, s.Def, nil)
-	n.Type = ptrto(s.Def.Type)
-	n.Addable = true
-	n.Ullman = 2
-	n.Typecheck = 1
-	return n
-}
-
-// isreflexive reports whether t has a reflexive equality operator.
-// That is, if x==x for all x of type t.
-func isreflexive(t *Type) bool {
-	switch t.Etype {
-	case TBOOL,
-		TINT,
-		TUINT,
-		TINT8,
-		TUINT8,
-		TINT16,
-		TUINT16,
-		TINT32,
-		TUINT32,
-		TINT64,
-		TUINT64,
-		TUINTPTR,
-		TPTR32,
-		TPTR64,
-		TUNSAFEPTR,
-		TSTRING,
-		TCHAN:
-		return true
-
-	case TFLOAT32,
-		TFLOAT64,
-		TCOMPLEX64,
-		TCOMPLEX128,
-		TINTER:
-		return false
-
-	case TARRAY:
-		return isreflexive(t.Elem())
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			if !isreflexive(t1.Type) {
-				return false
-			}
-		}
-		return true
-
-	default:
-		Fatalf("bad type for map key: %v", t)
-		return false
-	}
-}
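// Editor's illustrative note, not part of the original file: floating-point
// keys are the canonical non-reflexive case, because NaN != NaN, so every
// insert under a NaN key creates a fresh map entry.
func nanKeysAreNotReflexive() int {
	zero := 0.0
	nan := zero / zero // runtime NaN (a constant 0.0/0.0 would not compile)
	m := map[float64]int{}
	m[nan] = 1
	m[nan] = 2
	return len(m) // 2: the key is never equal to itself
}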
-
-// needkeyupdate reports whether map updates with t as a key
-// need the key to be updated.
-func needkeyupdate(t *Type) bool {
-	switch t.Etype {
-	case TBOOL,
-		TINT,
-		TUINT,
-		TINT8,
-		TUINT8,
-		TINT16,
-		TUINT16,
-		TINT32,
-		TUINT32,
-		TINT64,
-		TUINT64,
-		TUINTPTR,
-		TPTR32,
-		TPTR64,
-		TUNSAFEPTR,
-		TCHAN:
-		return false
-
-	case TFLOAT32, // floats can be +0/-0
-		TFLOAT64,
-		TCOMPLEX64,
-		TCOMPLEX128,
-		TINTER,
-		TSTRING: // strings might have smaller backing stores
-		return true
-
-	case TARRAY:
-		return needkeyupdate(t.Elem())
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			if needkeyupdate(t1.Type) {
-				return true
-			}
-		}
-		return false
-
-	default:
-		Fatalf("bad type for map key: %v", t)
-		return true
-	}
-}
-
-func dtypesym(t *Type) *Sym {
-	// Replace byte, rune aliases with real type.
-	// They've been separate internally to make error messages
-	// better, but we have to merge them in the reflect tables.
-	if t == bytetype || t == runetype {
-		t = Types[t.Etype]
-	}
-
-	if t.IsUntyped() {
-		Fatalf("dtypesym %v", t)
-	}
-
-	s := typesym(t)
-	if s.Flags&SymSiggen != 0 {
-		return s
-	}
-	s.Flags |= SymSiggen
-
-	// special case (look for runtime below):
-	// when compiling package runtime,
-	// emit the type structures for int, float, etc.
-	tbase := t
-
-	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
-		tbase = t.Elem()
-	}
-	dupok := 0
-	if tbase.Sym == nil {
-		dupok = obj.DUPOK
-	}
-
-	if myimportpath == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
-		goto ok
-	}
-
-	// named types from other files are defined only by those files
-	if tbase.Sym != nil && !tbase.Local {
-		return s
-	}
-	if isforw[tbase.Etype] {
-		return s
-	}
-
-ok:
-	ot := 0
-	switch t.Etype {
-	default:
-		ot = dcommontype(s, ot, t)
-		ot = dextratype(s, ot, t, 0)
-
-	case TARRAY:
-		// ../../../../runtime/type.go:/arrayType
-		s1 := dtypesym(t.Elem())
-		t2 := typSlice(t.Elem())
-		s2 := dtypesym(t2)
-		ot = dcommontype(s, ot, t)
-		ot = dsymptr(s, ot, s1, 0)
-		ot = dsymptr(s, ot, s2, 0)
-		ot = duintptr(s, ot, uint64(t.NumElem()))
-		ot = dextratype(s, ot, t, 0)
-
-	case TSLICE:
-		// ../../../../runtime/type.go:/sliceType
-		s1 := dtypesym(t.Elem())
-		ot = dcommontype(s, ot, t)
-		ot = dsymptr(s, ot, s1, 0)
-		ot = dextratype(s, ot, t, 0)
-
-	case TCHAN:
-		// ../../../../runtime/type.go:/chanType
-		s1 := dtypesym(t.Elem())
-		ot = dcommontype(s, ot, t)
-		ot = dsymptr(s, ot, s1, 0)
-		ot = duintptr(s, ot, uint64(t.ChanDir()))
-		ot = dextratype(s, ot, t, 0)
-
-	case TFUNC:
-		for _, t1 := range t.Recvs().Fields().Slice() {
-			dtypesym(t1.Type)
-		}
-		isddd := false
-		for _, t1 := range t.Params().Fields().Slice() {
-			isddd = t1.Isddd
-			dtypesym(t1.Type)
-		}
-		for _, t1 := range t.Results().Fields().Slice() {
-			dtypesym(t1.Type)
-		}
-
-		ot = dcommontype(s, ot, t)
-		inCount := t.Recvs().NumFields() + t.Params().NumFields()
-		outCount := t.Results().NumFields()
-		if isddd {
-			outCount |= 1 << 15
-		}
-		ot = duint16(s, ot, uint16(inCount))
-		ot = duint16(s, ot, uint16(outCount))
-		if Widthptr == 8 {
-			ot += 4 // align for *rtype
-		}
-
-		dataAdd := (inCount + t.Results().NumFields()) * Widthptr
-		ot = dextratype(s, ot, t, dataAdd)
-
-		// Array of rtype pointers follows funcType.
-		for _, t1 := range t.Recvs().Fields().Slice() {
-			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
-		}
-		for _, t1 := range t.Params().Fields().Slice() {
-			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
-		}
-		for _, t1 := range t.Results().Fields().Slice() {
-			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
-		}
-
-	case TINTER:
-		m := imethods(t)
-		n := len(m)
-		for _, a := range m {
-			dtypesym(a.type_)
-		}
-
-		// ../../../../runtime/type.go:/interfaceType
-		ot = dcommontype(s, ot, t)
-
-		var tpkg *Pkg
-		if t.Sym != nil && t != Types[t.Etype] && t != errortype {
-			tpkg = t.Sym.Pkg
-		}
-		ot = dgopkgpath(s, ot, tpkg)
-
-		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
-		ot = duintxx(s, ot, uint64(n), Widthint)
-		ot = duintxx(s, ot, uint64(n), Widthint)
-		dataAdd := imethodSize() * n
-		ot = dextratype(s, ot, t, dataAdd)
-
-		lsym := Linksym(s)
-		for _, a := range m {
-			// ../../../../runtime/type.go:/imethod
-			exported := exportname(a.name)
-			var pkg *Pkg
-			if !exported && a.pkg != tpkg {
-				pkg = a.pkg
-			}
-			nsym := dname(a.name, "", pkg, exported)
-
-			ot = dsymptrOffLSym(lsym, ot, nsym, 0)
-			ot = dsymptrOffLSym(lsym, ot, Linksym(dtypesym(a.type_)), 0)
-		}
-
-	// ../../../../runtime/type.go:/mapType
-	case TMAP:
-		s1 := dtypesym(t.Key())
-		s2 := dtypesym(t.Val())
-		s3 := dtypesym(mapbucket(t))
-		s4 := dtypesym(hmap(t))
-		ot = dcommontype(s, ot, t)
-		ot = dsymptr(s, ot, s1, 0)
-		ot = dsymptr(s, ot, s2, 0)
-		ot = dsymptr(s, ot, s3, 0)
-		ot = dsymptr(s, ot, s4, 0)
-		if t.Key().Width > MAXKEYSIZE {
-			ot = duint8(s, ot, uint8(Widthptr))
-			ot = duint8(s, ot, 1) // indirect
-		} else {
-			ot = duint8(s, ot, uint8(t.Key().Width))
-			ot = duint8(s, ot, 0) // not indirect
-		}
-
-		if t.Val().Width > MAXVALSIZE {
-			ot = duint8(s, ot, uint8(Widthptr))
-			ot = duint8(s, ot, 1) // indirect
-		} else {
-			ot = duint8(s, ot, uint8(t.Val().Width))
-			ot = duint8(s, ot, 0) // not indirect
-		}
-
-		ot = duint16(s, ot, uint16(mapbucket(t).Width))
-		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
-		ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
-		ot = dextratype(s, ot, t, 0)
-
-	case TPTR32, TPTR64:
-		if t.Elem().Etype == TANY {
-			// ../../../../runtime/type.go:/UnsafePointerType
-			ot = dcommontype(s, ot, t)
-			ot = dextratype(s, ot, t, 0)
-
-			break
-		}
-
-		// ../../../../runtime/type.go:/ptrType
-		s1 := dtypesym(t.Elem())
-
-		ot = dcommontype(s, ot, t)
-		ot = dsymptr(s, ot, s1, 0)
-		ot = dextratype(s, ot, t, 0)
-
-	// ../../../../runtime/type.go:/structType
-	// for security, only the exported fields.
-	case TSTRUCT:
-		n := 0
-
-		for _, t1 := range t.Fields().Slice() {
-			dtypesym(t1.Type)
-			n++
-		}
-
-		ot = dcommontype(s, ot, t)
-		pkg := localpkg
-		if t.Sym != nil {
-			pkg = t.Sym.Pkg
-		} else {
-			// Unnamed type. Grab the package from the first field, if any.
-			for _, f := range t.Fields().Slice() {
-				if f.Embedded != 0 {
-					continue
-				}
-				pkg = f.Sym.Pkg
-				break
-			}
-		}
-		ot = dgopkgpath(s, ot, pkg)
-		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
-		ot = duintxx(s, ot, uint64(n), Widthint)
-		ot = duintxx(s, ot, uint64(n), Widthint)
-
-		dataAdd := n * structfieldSize()
-		ot = dextratype(s, ot, t, dataAdd)
-
-		for _, f := range t.Fields().Slice() {
-			// ../../../../runtime/type.go:/structField
-			ot = dnameField(s, ot, pkg, f)
-			ot = dsymptr(s, ot, dtypesym(f.Type), 0)
-			ot = duintptr(s, ot, uint64(f.Offset))
-		}
-	}
-
-	ot = dextratypeData(s, ot, t)
-	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))
-
-	// The linker will leave a table of all the typelinks for
-	// types in the binary, so the runtime can find them.
-	//
-	// When buildmode=shared, all types are in typelinks so the
-	// runtime can deduplicate type pointers.
-	keep := Ctxt.Flag_dynlink
-	if !keep && t.Sym == nil {
-		// For an unnamed type, we only need the link if the type can
-		// be created at run time by reflect.PtrTo and similar
-		// functions. If the type exists in the program, those
-		// functions must return the existing type structure rather
-		// than creating a new one.
-		switch t.Etype {
-		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:
-			keep = true
-		}
-	}
-	s.Lsym.Set(obj.AttrMakeTypelink, keep)
-
-	return s
-}
-
-func dumptypestructs() {
-	// copy types from externdcl list to signatlist
-	for _, n := range externdcl {
-		if n.Op != OTYPE {
-			continue
-		}
-		signatlist = append(signatlist, n)
-	}
-
-	// Process signatlist.  This can't use range, as entries are
-	// added to the list while it is being processed.
-	for i := 0; i < len(signatlist); i++ {
-		n := signatlist[i]
-		if n.Op != OTYPE {
-			continue
-		}
-		t := n.Type
-		dtypesym(t)
-		if t.Sym != nil {
-			dtypesym(ptrto(t))
-		}
-	}
-
-	// process itabs
-	for _, i := range itabs {
-		// dump empty itab symbol into i.sym
-		// type itab struct {
-		//   inter  *interfacetype
-		//   _type  *_type
-		//   link   *itab
-		//   bad    int32
-		//   unused int32
-		//   fun    [1]uintptr // variable sized
-		// }
-		o := dsymptr(i.sym, 0, dtypesym(i.itype), 0)
-		o = dsymptr(i.sym, o, dtypesym(i.t), 0)
-		o += Widthptr + 8                      // skip link/bad/inhash fields
-		o += len(imethods(i.itype)) * Widthptr // skip fun method pointers
-		// at runtime the itab will contain pointers to types, other itabs and
-		// method functions. None are allocated on heap, so we can use obj.NOPTR.
-		ggloblsym(i.sym, int32(o), int16(obj.DUPOK|obj.NOPTR))
-
-		ilink := Pkglookup(i.t.tconv(FmtLeft)+","+i.itype.tconv(FmtLeft), itablinkpkg)
-		dsymptr(ilink, 0, i.sym, 0)
-		ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
-	}
-
-	// process ptabs
-	if localpkg.Name == "main" && len(ptabs) > 0 {
-		ot := 0
-		s := obj.Linklookup(Ctxt, "go.plugin.tabs", 0)
-		for _, p := range ptabs {
-			// Dump ptab symbol into go.pluginsym package.
-			//
-			// type ptab struct {
-			//	name nameOff
-			//	typ  typeOff // pointer to symbol
-			// }
-			nsym := dname(p.s.Name, "", nil, true)
-			ot = dsymptrOffLSym(s, ot, nsym, 0)
-			ot = dsymptrOffLSym(s, ot, Linksym(dtypesym(p.t)), 0)
-		}
-		ggloblLSym(s, int32(ot), int16(obj.RODATA))
-
-		ot = 0
-		s = obj.Linklookup(Ctxt, "go.plugin.exports", 0)
-		for _, p := range ptabs {
-			ot = dsymptrLSym(s, ot, Linksym(p.s), 0)
-		}
-		ggloblLSym(s, int32(ot), int16(obj.RODATA))
-	}
-
-	// generate import strings for imported packages
-	if forceObjFileStability {
-		// Sorting the packages is not necessary but to compare binaries created
-		// using textual and binary format we sort by path to reduce differences.
-		sort.Sort(pkgByPath(pkgs))
-	}
-	for _, p := range pkgs {
-		if p.Direct {
-			dimportpath(p)
-		}
-	}
-
-	// do basic types if compiling package runtime.
-	// they have to be in at least one package,
-	// and runtime is always loaded implicitly,
-	// so this is as good as any.
-	// another possible choice would be package main,
-	// but using runtime means fewer copies in .6 files.
-	if myimportpath == "runtime" {
-		for i := EType(1); i <= TBOOL; i++ {
-			dtypesym(ptrto(Types[i]))
-		}
-		dtypesym(ptrto(Types[TSTRING]))
-		dtypesym(ptrto(Types[TUNSAFEPTR]))
-
-		// emit type structs for error and func(error) string.
-		// The latter is the type of an auto-generated wrapper.
-		dtypesym(ptrto(errortype))
-
-		dtypesym(functype(nil, []*Node{nod(ODCLFIELD, nil, typenod(errortype))}, []*Node{nod(ODCLFIELD, nil, typenod(Types[TSTRING]))}))
-
-		// add paths for runtime and main, which 6l imports implicitly.
-		dimportpath(Runtimepkg)
-
-		if flag_race {
-			dimportpath(racepkg)
-		}
-		if flag_msan {
-			dimportpath(msanpkg)
-		}
-		dimportpath(mkpkg("main"))
-	}
-}
-
-type pkgByPath []*Pkg
-
-func (a pkgByPath) Len() int           { return len(a) }
-func (a pkgByPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
-func (a pkgByPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-func dalgsym(t *Type) *Sym {
-	var s *Sym
-	var hashfunc *Sym
-	var eqfunc *Sym
-
-	// dalgsym is only called for a type that needs an algorithm table,
-	// which implies that the type is comparable (or else it would use ANOEQ).
-
-	if algtype(t) == AMEM {
-		// we use one algorithm table for all AMEM types of a given size
-		p := fmt.Sprintf(".alg%d", t.Width)
-
-		s = Pkglookup(p, typepkg)
-
-		if s.Flags&SymAlgGen != 0 {
-			return s
-		}
-		s.Flags |= SymAlgGen
-
-		// make hash closure
-		p = fmt.Sprintf(".hashfunc%d", t.Width)
-
-		hashfunc = Pkglookup(p, typepkg)
-
-		ot := 0
-		ot = dsymptr(hashfunc, ot, Pkglookup("memhash_varlen", Runtimepkg), 0)
-		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
-		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
-
-		// make equality closure
-		p = fmt.Sprintf(".eqfunc%d", t.Width)
-
-		eqfunc = Pkglookup(p, typepkg)
-
-		ot = 0
-		ot = dsymptr(eqfunc, ot, Pkglookup("memequal_varlen", Runtimepkg), 0)
-		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
-		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
-	} else {
-		// generate an alg table specific to this type
-		s = typesymprefix(".alg", t)
-
-		hash := typesymprefix(".hash", t)
-		eq := typesymprefix(".eq", t)
-		hashfunc = typesymprefix(".hashfunc", t)
-		eqfunc = typesymprefix(".eqfunc", t)
-
-		genhash(hash, t)
-		geneq(eq, t)
-
-		// make Go funcs (closures) for calling hash and equal from Go
-		dsymptr(hashfunc, 0, hash, 0)
-
-		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
-		dsymptr(eqfunc, 0, eq, 0)
-		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
-	}
-
-	// ../../../../runtime/alg.go:/typeAlg
-	ot := 0
-
-	ot = dsymptr(s, ot, hashfunc, 0)
-	ot = dsymptr(s, ot, eqfunc, 0)
-	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
-	return s
-}
-
-// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
-// which holds 1-bit entries describing where pointers are in a given type.
-// 16 bytes is enough to describe 128 pointer-sized words, 512 or 1024 bytes
-// depending on the system. Above this length, the GC information is
-// recorded as a GC program, which can express repetition compactly.
-// In either form, the information is used by the runtime to initialize the
-// heap bitmap, and for large types (like 128 or more words), they are
-// roughly the same speed. GC programs are never much larger and often
-// more compact. (If large arrays are involved, they can be arbitrarily more
-// compact.)
-//
-// The cutoff must be large enough that any allocation large enough to
-// use a GC program is large enough that it does not share heap bitmap
-// bytes with any other objects, allowing the GC program execution to
-// assume an aligned start and not use atomic operations. In the current
-// runtime, this means all malloc size classes larger than the cutoff must
-// be multiples of four words. On 32-bit systems that's 16 bytes, and
-// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
-// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
-// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
-// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
-// must be >= 4.
-//
-// We used to use 16 because the GC programs do have some constant overhead
-// to get started, and processing 128 pointers seems to be enough to
-// amortize that overhead well.
-//
-// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
-// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
-// use bitmaps for objects up to 64 kB in size.
-//
-// Also known to reflect/type.go.
-//
-const maxPtrmaskBytes = 2048
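// Editor's worked example, not part of the original file: 2048 bytes of
// ptrmask is 2048*8 = 16384 one-bit entries, one per pointer-sized word, so
// the bitmap form covers pointer data up to 16384*4 bytes = 64 kB on 32-bit
// systems and 16384*8 bytes = 128 kB on 64-bit systems; anything larger is
// emitted as a GC program instead.
const (
	examplePtrmaskBits   = maxPtrmaskBytes * 8    // 16384 entries
	exampleBitmapLimit32 = examplePtrmaskBits * 4 // 65536 bytes (64 kB)
	exampleBitmapLimit64 = examplePtrmaskBits * 8 // 131072 bytes (128 kB)
)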
-
-// dgcsym emits and returns a data symbol containing GC information for type t,
-// along with a boolean reporting whether the UseGCProg bit should be set in
-// the type kind, and the ptrdata field to record in the reflect type information.
-func dgcsym(t *Type) (sym *Sym, useGCProg bool, ptrdata int64) {
-	ptrdata = typeptrdata(t)
-	if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
-		sym = dgcptrmask(t)
-		return
-	}
-
-	useGCProg = true
-	sym, ptrdata = dgcprog(t)
-	return
-}
-
-// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
-func dgcptrmask(t *Type) *Sym {
-	ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
-	fillptrmask(t, ptrmask)
-	p := fmt.Sprintf("gcbits.%x", ptrmask)
-
-	sym := Pkglookup(p, Runtimepkg)
-	if sym.Flags&SymUniq == 0 {
-		sym.Flags |= SymUniq
-		for i, x := range ptrmask {
-			duint8(sym, i, x)
-		}
-		ggloblsym(sym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
-	}
-	return sym
-}
-
-// fillptrmask fills in ptrmask with 1s corresponding to the
-// word offsets in t that hold pointers.
-// ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
-func fillptrmask(t *Type, ptrmask []byte) {
-	for i := range ptrmask {
-		ptrmask[i] = 0
-	}
-	if !haspointers(t) {
-		return
-	}
-
-	vec := bvalloc(8 * int32(len(ptrmask)))
-	xoffset := int64(0)
-	onebitwalktype1(t, &xoffset, vec)
-
-	nptr := typeptrdata(t) / int64(Widthptr)
-	for i := int64(0); i < nptr; i++ {
-		if vec.Get(int32(i)) {
-			ptrmask[i/8] |= 1 << (uint(i) % 8)
-		}
-	}
-}
-
-// dgcprog emits and returns the symbol containing a GC program for type t
-// along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
-// In practice, the size is typeptrdata(t) except for non-trivial arrays.
-// For non-trivial arrays, the program describes the full t.Width size.
-func dgcprog(t *Type) (*Sym, int64) {
-	dowidth(t)
-	if t.Width == BADWIDTH {
-		Fatalf("dgcprog: %v badwidth", t)
-	}
-	sym := typesymprefix(".gcprog", t)
-	var p GCProg
-	p.init(sym)
-	p.emit(t, 0)
-	offset := p.w.BitIndex() * int64(Widthptr)
-	p.end()
-	if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
-		Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
-	}
-	return sym, offset
-}
-
-type GCProg struct {
-	sym    *Sym
-	symoff int
-	w      gcprog.Writer
-}
-
-var Debug_gcprog int // set by -d gcprog
-
-func (p *GCProg) init(sym *Sym) {
-	p.sym = sym
-	p.symoff = 4 // first 4 bytes hold program length
-	p.w.Init(p.writeByte)
-	if Debug_gcprog > 0 {
-		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", sym)
-		p.w.Debug(os.Stderr)
-	}
-}
-
-func (p *GCProg) writeByte(x byte) {
-	p.symoff = duint8(p.sym, p.symoff, x)
-}
-
-func (p *GCProg) end() {
-	p.w.End()
-	duint32(p.sym, 0, uint32(p.symoff-4))
-	ggloblsym(p.sym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
-	if Debug_gcprog > 0 {
-		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.sym)
-	}
-}
-
-func (p *GCProg) emit(t *Type, offset int64) {
-	dowidth(t)
-	if !haspointers(t) {
-		return
-	}
-	if t.Width == int64(Widthptr) {
-		p.w.Ptr(offset / int64(Widthptr))
-		return
-	}
-	switch t.Etype {
-	default:
-		Fatalf("GCProg.emit: unexpected type %v", t)
-
-	case TSTRING:
-		p.w.Ptr(offset / int64(Widthptr))
-
-	case TINTER:
-		p.w.Ptr(offset / int64(Widthptr))
-		p.w.Ptr(offset/int64(Widthptr) + 1)
-
-	case TSLICE:
-		p.w.Ptr(offset / int64(Widthptr))
-
-	case TARRAY:
-		if t.NumElem() == 0 {
-			// should have been handled by haspointers check above
-			Fatalf("GCProg.emit: empty array")
-		}
-
-		// Flatten array-of-array-of-array to just a big array by multiplying counts.
-		count := t.NumElem()
-		elem := t.Elem()
-		for elem.IsArray() {
-			count *= elem.NumElem()
-			elem = elem.Elem()
-		}
-
-		if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
-			// Cheaper to just emit the bits.
-			for i := int64(0); i < count; i++ {
-				p.emit(elem, offset+i*elem.Width)
-			}
-			return
-		}
-		p.emit(elem, offset)
-		p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
-		p.w.Repeat(elem.Width/int64(Widthptr), count-1)
-
-	case TSTRUCT:
-		for _, t1 := range t.Fields().Slice() {
-			p.emit(t1.Type, offset+t1.Offset)
-		}
-	}
-}
-
-// zeroaddr returns the address of a symbol with at least
-// size bytes of zeros.
-func zeroaddr(size int64) *Node {
-	if size >= 1<<31 {
-		Fatalf("map value too big %d", size)
-	}
-	if zerosize < size {
-		zerosize = size
-	}
-	s := Pkglookup("zero", mappkg)
-	if s.Def == nil {
-		x := newname(s)
-		x.Type = Types[TUINT8]
-		x.Class = PEXTERN
-		x.Typecheck = 1
-		s.Def = x
-	}
-	z := nod(OADDR, s.Def, nil)
-	z.Type = ptrto(Types[TUINT8])
-	z.Addable = true
-	z.Typecheck = 1
-	return z
-}
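Editorial note on the GC-metadata code removed above: fillptrmask packs one bit per pointer-sized word, least-significant bit first within each byte, and dgcsym falls back to a GC program only when the mask would exceed maxPtrmaskBytes. The following is an illustrative, standalone sketch of that bit-packing scheme only; packPtrmask and the sample word indices are hypothetical and not part of the compiler.

package main

import "fmt"

// packPtrmask builds a bitmap with one bit per pointer-sized word of an
// object: bit i is set when word i holds a pointer. The packing matches
// the loop in fillptrmask above: ptrmask[i/8] |= 1 << (uint(i) % 8).
func packPtrmask(nWords int, ptrWords []int) []byte {
	mask := make([]byte, (nWords+7)/8)
	for _, i := range ptrWords {
		mask[i/8] |= 1 << (uint(i) % 8)
	}
	return mask
}

func main() {
	// Hypothetical 10-word object with pointers in words 0, 3 and 9.
	mask := packPtrmask(10, []int{0, 3, 9})
	fmt.Printf("%08b %08b\n", mask[0], mask[1]) // prints: 00001001 00000010
}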
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/reflect_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/reflect_test.go
deleted file mode 100644
index 67b23c7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/reflect_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/reflect_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/reflect_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"reflect"
-	"sort"
-	"testing"
-)
-
-func TestSortingByMethodNameAndPackagePath(t *testing.T) {
-	data := []*Sig{
-		&Sig{name: "b", pkg: &Pkg{Path: "abc"}},
-		&Sig{name: "b", pkg: nil},
-		&Sig{name: "c", pkg: nil},
-		&Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
-		&Sig{name: "c", pkg: nil},
-		&Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
-		&Sig{name: "a", pkg: &Pkg{Path: "abc"}},
-		&Sig{name: "b", pkg: nil},
-	}
-	want := []*Sig{
-		&Sig{name: "a", pkg: &Pkg{Path: "abc"}},
-		&Sig{name: "b", pkg: nil},
-		&Sig{name: "b", pkg: nil},
-		&Sig{name: "b", pkg: &Pkg{Path: "abc"}},
-		&Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
-		&Sig{name: "c", pkg: nil},
-		&Sig{name: "c", pkg: nil},
-		&Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
-	}
-	if len(data) != len(want) {
-		t.Fatal("want and data must match")
-	}
-	if reflect.DeepEqual(data, want) {
-		t.Fatal("data must be shuffled")
-	}
-	sort.Sort(byMethodNameAndPackagePath(data))
-	if !reflect.DeepEqual(data, want) {
-		t.Logf("want: %#v", want)
-		t.Logf("data: %#v", data)
-		t.Errorf("sorting failed")
-	}
-
-}
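The deleted test above checks the byMethodNameAndPackagePath ordering: methods sort by name first, then by package path, with a nil package (an exported method) sorting before any named path. A minimal standalone sketch of the same two-key ordering, written with sort.Slice against a stand-in struct; the method type and sample data are hypothetical, not the compiler's Sig.

package main

import (
	"fmt"
	"sort"
)

// method stands in for the compiler's Sig; an empty path plays the role
// of pkg == nil in the test above and sorts before any named path.
type method struct {
	name string
	path string
}

func main() {
	ms := []method{
		{"b", "abc"}, {"b", ""}, {"c", ""}, {"c", "uvw"},
		{"b", "xyz"}, {"a", "abc"},
	}
	// Order by name first, then by package path.
	sort.Slice(ms, func(i, j int) bool {
		if ms[i].name != ms[j].name {
			return ms[i].name < ms[j].name
		}
		return ms[i].path < ms[j].path
	})
	fmt.Println(ms) // [{a abc} {b } {b abc} {b xyz} {c } {c uvw}]
}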
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/select.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/select.go
deleted file mode 100644
index dedd22f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/select.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/select.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/select.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// select
-func typecheckselect(sel *Node) {
-	var ncase *Node
-	var n *Node
-
-	var def *Node
-	lno := setlineno(sel)
-	count := 0
-	typecheckslice(sel.Ninit.Slice(), Etop)
-	for _, n1 := range sel.List.Slice() {
-		count++
-		ncase = n1
-		setlineno(ncase)
-		if ncase.Op != OXCASE {
-			Fatalf("typecheckselect %v", ncase.Op)
-		}
-
-		if ncase.List.Len() == 0 {
-			// default
-			if def != nil {
-				yyerror("multiple defaults in select (first at %v)", def.Line())
-			} else {
-				def = ncase
-			}
-		} else if ncase.List.Len() > 1 {
-			yyerror("select cases cannot be lists")
-		} else {
-			ncase.List.SetIndex(0, typecheck(ncase.List.Index(0), Etop))
-			n = ncase.List.Index(0)
-			ncase.Left = n
-			ncase.List.Set(nil)
-			setlineno(n)
-			switch n.Op {
-			default:
-				yyerror("select case must be receive, send or assign recv")
-
-			// convert x = <-c into OSELRECV(x, <-c).
-			// remove implicit conversions; the eventual assignment
-			// will reintroduce them.
-			case OAS:
-				if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit {
-					n.Right = n.Right.Left
-				}
-
-				if n.Right.Op != ORECV {
-					yyerror("select assignment must have receive on right hand side")
-					break
-				}
-
-				n.Op = OSELRECV
-
-				// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
-			case OAS2RECV:
-				if n.Rlist.First().Op != ORECV {
-					yyerror("select assignment must have receive on right hand side")
-					break
-				}
-
-				n.Op = OSELRECV2
-				n.Left = n.List.First()
-				n.List.Set1(n.List.Second())
-				n.Right = n.Rlist.First()
-				n.Rlist.Set(nil)
-
-				// convert <-c into OSELRECV(N, <-c)
-			case ORECV:
-				n = nod(OSELRECV, nil, n)
-
-				n.Typecheck = 1
-				ncase.Left = n
-
-			case OSEND:
-				break
-			}
-		}
-
-		typecheckslice(ncase.Nbody.Slice(), Etop)
-	}
-
-	sel.Xoffset = int64(count)
-	lineno = lno
-}
-
-func walkselect(sel *Node) {
-	if sel.List.Len() == 0 && sel.Xoffset != 0 {
-		Fatalf("double walkselect") // already rewrote
-	}
-
-	lno := setlineno(sel)
-	i := sel.List.Len()
-
-	// optimization: zero-case select
-	var init []*Node
-	var r *Node
-	var n *Node
-	var var_ *Node
-	var selv *Node
-	if i == 0 {
-		sel.Nbody.Set1(mkcall("block", nil, nil))
-		goto out
-	}
-
-	// optimization: one-case select: single op.
-	// TODO(rsc): Reenable optimization once order.go can handle it.
-	// golang.org/issue/7672.
-	if i == 1 {
-		cas := sel.List.First()
-		setlineno(cas)
-		l := cas.Ninit.Slice()
-		if cas.Left != nil { // not default:
-			n := cas.Left
-			l = append(l, n.Ninit.Slice()...)
-			n.Ninit.Set(nil)
-			var ch *Node
-			switch n.Op {
-			default:
-				Fatalf("select %v", n.Op)
-
-				// ok already
-			case OSEND:
-				ch = n.Left
-
-			case OSELRECV, OSELRECV2:
-				ch = n.Right.Left
-				if n.Op == OSELRECV || n.List.Len() == 0 {
-					if n.Left == nil {
-						n = n.Right
-					} else {
-						n.Op = OAS
-					}
-					break
-				}
-
-				if n.Left == nil {
-					nblank = typecheck(nblank, Erv|Easgn)
-					n.Left = nblank
-				}
-
-				n.Op = OAS2
-				n.List.Prepend(n.Left)
-				n.Rlist.Set1(n.Right)
-				n.Right = nil
-				n.Left = nil
-				n.Typecheck = 0
-				n = typecheck(n, Etop)
-			}
-
-			// if ch == nil { block() }; n;
-			a := nod(OIF, nil, nil)
-
-			a.Left = nod(OEQ, ch, nodnil())
-			var ln Nodes
-			ln.Set(l)
-			a.Nbody.Set1(mkcall("block", nil, &ln))
-			l = ln.Slice()
-			a = typecheck(a, Etop)
-			l = append(l, a)
-			l = append(l, n)
-		}
-
-		l = append(l, cas.Nbody.Slice()...)
-		sel.Nbody.Set(l)
-		goto out
-	}
-
-	// convert case value arguments to addresses.
-	// this rewrite is used by both the general code and the next optimization.
-	for _, cas := range sel.List.Slice() {
-		setlineno(cas)
-		n = cas.Left
-		if n == nil {
-			continue
-		}
-		switch n.Op {
-		case OSEND:
-			n.Right = nod(OADDR, n.Right, nil)
-			n.Right = typecheck(n.Right, Erv)
-
-		case OSELRECV, OSELRECV2:
-			if n.Op == OSELRECV2 && n.List.Len() == 0 {
-				n.Op = OSELRECV
-			}
-			if n.Op == OSELRECV2 {
-				n.List.SetIndex(0, nod(OADDR, n.List.First(), nil))
-				n.List.SetIndex(0, typecheck(n.List.Index(0), Erv))
-			}
-
-			if n.Left == nil {
-				n.Left = nodnil()
-			} else {
-				n.Left = nod(OADDR, n.Left, nil)
-				n.Left = typecheck(n.Left, Erv)
-			}
-		}
-	}
-
-	// optimization: two-case select but one is default: single non-blocking op.
-	if i == 2 && (sel.List.First().Left == nil || sel.List.Second().Left == nil) {
-		var cas *Node
-		var dflt *Node
-		if sel.List.First().Left == nil {
-			cas = sel.List.Second()
-			dflt = sel.List.First()
-		} else {
-			dflt = sel.List.Second()
-			cas = sel.List.First()
-		}
-
-		n := cas.Left
-		setlineno(n)
-		r := nod(OIF, nil, nil)
-		r.Ninit.Set(cas.Ninit.Slice())
-		switch n.Op {
-		default:
-			Fatalf("select %v", n.Op)
-
-			// if selectnbsend(c, v) { body } else { default body }
-		case OSEND:
-			ch := n.Left
-
-			r.Left = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), ch, n.Right)
-
-			// if c != nil && selectnbrecv(&v, c) { body } else { default body }
-		case OSELRECV:
-			r = nod(OIF, nil, nil)
-
-			r.Ninit.Set(cas.Ninit.Slice())
-			ch := n.Right.Left
-			r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
-
-			// if c != nil && selectnbrecv2(&v, c) { body } else { default body }
-		case OSELRECV2:
-			r = nod(OIF, nil, nil)
-
-			r.Ninit.Set(cas.Ninit.Slice())
-			ch := n.Right.Left
-			r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.List.First(), ch)
-		}
-
-		r.Left = typecheck(r.Left, Erv)
-		r.Nbody.Set(cas.Nbody.Slice())
-		r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
-		sel.Nbody.Set1(r)
-		goto out
-	}
-
-	init = sel.Ninit.Slice()
-	sel.Ninit.Set(nil)
-
-	// generate sel-struct
-	setlineno(sel)
-
-	selv = temp(selecttype(int32(sel.Xoffset)))
-	r = nod(OAS, selv, nil)
-	r = typecheck(r, Etop)
-	init = append(init, r)
-	var_ = conv(conv(nod(OADDR, selv, nil), Types[TUNSAFEPTR]), ptrto(Types[TUINT8]))
-	r = mkcall("newselect", nil, nil, var_, nodintconst(selv.Type.Width), nodintconst(sel.Xoffset))
-	r = typecheck(r, Etop)
-	init = append(init, r)
-	// register cases
-	for _, cas := range sel.List.Slice() {
-		setlineno(cas)
-		n = cas.Left
-		r = nod(OIF, nil, nil)
-		r.Ninit.Set(cas.Ninit.Slice())
-		cas.Ninit.Set(nil)
-		if n != nil {
-			r.Ninit.AppendNodes(&n.Ninit)
-			n.Ninit.Set(nil)
-		}
-
-		if n == nil {
-			// selectdefault(sel *byte);
-			r.Left = mkcall("selectdefault", Types[TBOOL], &r.Ninit, var_)
-		} else {
-			switch n.Op {
-			default:
-				Fatalf("select %v", n.Op)
-
-				// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
-			case OSEND:
-				r.Left = mkcall1(chanfn("selectsend", 2, n.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Left, n.Right)
-
-				// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
-			case OSELRECV:
-				r.Left = mkcall1(chanfn("selectrecv", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left)
-
-				// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
-			case OSELRECV2:
-				r.Left = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.List.First())
-			}
-		}
-
-		// selv is no longer alive after use.
-		r.Nbody.Append(nod(OVARKILL, selv, nil))
-
-		r.Nbody.AppendNodes(&cas.Nbody)
-		r.Nbody.Append(nod(OBREAK, nil, nil))
-		init = append(init, r)
-	}
-
-	// run the select
-	setlineno(sel)
-
-	init = append(init, mkcall("selectgo", nil, nil, var_))
-	sel.Nbody.Set(init)
-
-out:
-	sel.List.Set(nil)
-	walkstmtlist(sel.Nbody.Slice())
-	lineno = lno
-}
-
-// Keep in sync with src/runtime/select.go.
-func selecttype(size int32) *Type {
-	// TODO(dvyukov): it's possible to generate Scase only once
-	// and then cache; and also cache Select per size.
-
-	scase := nod(OTSTRUCT, nil, nil)
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("elem")), typenod(ptrto(Types[TUINT8]))))
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("chan")), typenod(ptrto(Types[TUINT8]))))
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("pc")), typenod(Types[TUINTPTR])))
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("kind")), typenod(Types[TUINT16])))
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("so")), typenod(Types[TUINT16])))
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("receivedp")), typenod(ptrto(Types[TUINT8]))))
-	scase.List.Append(nod(ODCLFIELD, newname(lookup("releasetime")), typenod(Types[TUINT64])))
-	scase = typecheck(scase, Etype)
-	scase.Type.Noalg = true
-	scase.Type.Local = true
-
-	sel := nod(OTSTRUCT, nil, nil)
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("tcase")), typenod(Types[TUINT16])))
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("ncase")), typenod(Types[TUINT16])))
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("pollorder")), typenod(ptrto(Types[TUINT8]))))
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("lockorder")), typenod(ptrto(Types[TUINT8]))))
-	arr := nod(OTARRAY, nodintconst(int64(size)), scase)
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("scase")), arr))
-	arr = nod(OTARRAY, nodintconst(int64(size)), typenod(Types[TUINT16]))
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("lockorderarr")), arr))
-	arr = nod(OTARRAY, nodintconst(int64(size)), typenod(Types[TUINT16]))
-	sel.List.Append(nod(ODCLFIELD, newname(lookup("pollorderarr")), arr))
-	sel = typecheck(sel, Etype)
-	sel.Type.Noalg = true
-	sel.Type.Local = true
-
-	return sel.Type
-}
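The walkselect code removed above special-cases a select with exactly one communication case plus a default clause, lowering it to a single non-blocking channel operation (selectnbsend / selectnbrecv) instead of the general selectgo path. At the source level that pattern looks like the standalone sketch below; the channel and values are hypothetical.

package main

import "fmt"

func main() {
	ch := make(chan int, 1)

	// One send case + default: compiled to a single non-blocking send.
	select {
	case ch <- 42:
		fmt.Println("sent")
	default:
		fmt.Println("channel full, not blocking")
	}

	// One receive case + default: compiled to a single non-blocking receive.
	select {
	case v := <-ch:
		fmt.Println("received", v)
	default:
		fmt.Println("nothing to receive")
	}
}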
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/shift_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/shift_test.go
deleted file mode 100644
index 57a2b5b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/shift_test.go
+++ /dev/null
@@ -1,1034 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/shift_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/shift_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"reflect"
-	"testing"
-)
-
-// Tests shifts of zero.
-
-//go:noinline
-func ofz64l64(n uint64) int64 {
-	var x int64
-	return x << n
-}
-
-//go:noinline
-func ofz64l32(n uint32) int64 {
-	var x int64
-	return x << n
-}
-
-//go:noinline
-func ofz64l16(n uint16) int64 {
-	var x int64
-	return x << n
-}
-
-//go:noinline
-func ofz64l8(n uint8) int64 {
-	var x int64
-	return x << n
-}
-
-//go:noinline
-func ofz64r64(n uint64) int64 {
-	var x int64
-	return x >> n
-}
-
-//go:noinline
-func ofz64r32(n uint32) int64 {
-	var x int64
-	return x >> n
-}
-
-//go:noinline
-func ofz64r16(n uint16) int64 {
-	var x int64
-	return x >> n
-}
-
-//go:noinline
-func ofz64r8(n uint8) int64 {
-	var x int64
-	return x >> n
-}
-
-//go:noinline
-func ofz64ur64(n uint64) uint64 {
-	var x uint64
-	return x >> n
-}
-
-//go:noinline
-func ofz64ur32(n uint32) uint64 {
-	var x uint64
-	return x >> n
-}
-
-//go:noinline
-func ofz64ur16(n uint16) uint64 {
-	var x uint64
-	return x >> n
-}
-
-//go:noinline
-func ofz64ur8(n uint8) uint64 {
-	var x uint64
-	return x >> n
-}
-
-//go:noinline
-func ofz32l64(n uint64) int32 {
-	var x int32
-	return x << n
-}
-
-//go:noinline
-func ofz32l32(n uint32) int32 {
-	var x int32
-	return x << n
-}
-
-//go:noinline
-func ofz32l16(n uint16) int32 {
-	var x int32
-	return x << n
-}
-
-//go:noinline
-func ofz32l8(n uint8) int32 {
-	var x int32
-	return x << n
-}
-
-//go:noinline
-func ofz32r64(n uint64) int32 {
-	var x int32
-	return x >> n
-}
-
-//go:noinline
-func ofz32r32(n uint32) int32 {
-	var x int32
-	return x >> n
-}
-
-//go:noinline
-func ofz32r16(n uint16) int32 {
-	var x int32
-	return x >> n
-}
-
-//go:noinline
-func ofz32r8(n uint8) int32 {
-	var x int32
-	return x >> n
-}
-
-//go:noinline
-func ofz32ur64(n uint64) uint32 {
-	var x uint32
-	return x >> n
-}
-
-//go:noinline
-func ofz32ur32(n uint32) uint32 {
-	var x uint32
-	return x >> n
-}
-
-//go:noinline
-func ofz32ur16(n uint16) uint32 {
-	var x uint32
-	return x >> n
-}
-
-//go:noinline
-func ofz32ur8(n uint8) uint32 {
-	var x uint32
-	return x >> n
-}
-
-//go:noinline
-func ofz16l64(n uint64) int16 {
-	var x int16
-	return x << n
-}
-
-//go:noinline
-func ofz16l32(n uint32) int16 {
-	var x int16
-	return x << n
-}
-
-//go:noinline
-func ofz16l16(n uint16) int16 {
-	var x int16
-	return x << n
-}
-
-//go:noinline
-func ofz16l8(n uint8) int16 {
-	var x int16
-	return x << n
-}
-
-//go:noinline
-func ofz16r64(n uint64) int16 {
-	var x int16
-	return x >> n
-}
-
-//go:noinline
-func ofz16r32(n uint32) int16 {
-	var x int16
-	return x >> n
-}
-
-//go:noinline
-func ofz16r16(n uint16) int16 {
-	var x int16
-	return x >> n
-}
-
-//go:noinline
-func ofz16r8(n uint8) int16 {
-	var x int16
-	return x >> n
-}
-
-//go:noinline
-func ofz16ur64(n uint64) uint16 {
-	var x uint16
-	return x >> n
-}
-
-//go:noinline
-func ofz16ur32(n uint32) uint16 {
-	var x uint16
-	return x >> n
-}
-
-//go:noinline
-func ofz16ur16(n uint16) uint16 {
-	var x uint16
-	return x >> n
-}
-
-//go:noinline
-func ofz16ur8(n uint8) uint16 {
-	var x uint16
-	return x >> n
-}
-
-//go:noinline
-func ofz8l64(n uint64) int8 {
-	var x int8
-	return x << n
-}
-
-//go:noinline
-func ofz8l32(n uint32) int8 {
-	var x int8
-	return x << n
-}
-
-//go:noinline
-func ofz8l16(n uint16) int8 {
-	var x int8
-	return x << n
-}
-
-//go:noinline
-func ofz8l8(n uint8) int8 {
-	var x int8
-	return x << n
-}
-
-//go:noinline
-func ofz8r64(n uint64) int8 {
-	var x int8
-	return x >> n
-}
-
-//go:noinline
-func ofz8r32(n uint32) int8 {
-	var x int8
-	return x >> n
-}
-
-//go:noinline
-func ofz8r16(n uint16) int8 {
-	var x int8
-	return x >> n
-}
-
-//go:noinline
-func ofz8r8(n uint8) int8 {
-	var x int8
-	return x >> n
-}
-
-//go:noinline
-func ofz8ur64(n uint64) uint8 {
-	var x uint8
-	return x >> n
-}
-
-//go:noinline
-func ofz8ur32(n uint32) uint8 {
-	var x uint8
-	return x >> n
-}
-
-//go:noinline
-func ofz8ur16(n uint16) uint8 {
-	var x uint8
-	return x >> n
-}
-
-//go:noinline
-func ofz8ur8(n uint8) uint8 {
-	var x uint8
-	return x >> n
-}
-
-func TestShiftOfZero(t *testing.T) {
-	if got := ofz64l64(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz64l32(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz64l16(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz64l8(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz64r64(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz64r32(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz64r16(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz64r8(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz64ur64(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz64ur32(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz64ur16(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz64ur8(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-
-	if got := ofz32l64(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz32l32(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz32l16(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz32l8(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz32r64(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz32r32(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz32r16(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz32r8(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz32ur64(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz32ur32(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz32ur16(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz32ur8(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-
-	if got := ofz16l64(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz16l32(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz16l16(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz16l8(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz16r64(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz16r32(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz16r16(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz16r8(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz16ur64(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz16ur32(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz16ur16(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz16ur8(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-
-	if got := ofz8l64(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz8l32(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz8l16(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz8l8(5); got != 0 {
-		t.Errorf("0<<5 == %d, want 0", got)
-	}
-	if got := ofz8r64(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz8r32(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz8r16(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz8r8(5); got != 0 {
-		t.Errorf("0>>5 == %d, want 0", got)
-	}
-	if got := ofz8ur64(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz8ur32(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz8ur16(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-	if got := ofz8ur8(5); got != 0 {
-		t.Errorf("0>>>5 == %d, want 0", got)
-	}
-}
-
-//go:noinline
-func byz64l(n int64) int64 {
-	return n << 0
-}
-
-//go:noinline
-func byz64r(n int64) int64 {
-	return n >> 0
-}
-
-//go:noinline
-func byz64ur(n uint64) uint64 {
-	return n >> 0
-}
-
-//go:noinline
-func byz32l(n int32) int32 {
-	return n << 0
-}
-
-//go:noinline
-func byz32r(n int32) int32 {
-	return n >> 0
-}
-
-//go:noinline
-func byz32ur(n uint32) uint32 {
-	return n >> 0
-}
-
-//go:noinline
-func byz16l(n int16) int16 {
-	return n << 0
-}
-
-//go:noinline
-func byz16r(n int16) int16 {
-	return n >> 0
-}
-
-//go:noinline
-func byz16ur(n uint16) uint16 {
-	return n >> 0
-}
-
-//go:noinline
-func byz8l(n int8) int8 {
-	return n << 0
-}
-
-//go:noinline
-func byz8r(n int8) int8 {
-	return n >> 0
-}
-
-//go:noinline
-func byz8ur(n uint8) uint8 {
-	return n >> 0
-}
-
-func TestShiftByZero(t *testing.T) {
-	{
-		var n int64 = 0x5555555555555555
-		if got := byz64l(n); got != n {
-			t.Errorf("%x<<0 == %x, want %x", n, got, n)
-		}
-		if got := byz64r(n); got != n {
-			t.Errorf("%x>>0 == %x, want %x", n, got, n)
-		}
-	}
-	{
-		var n uint64 = 0xaaaaaaaaaaaaaaaa
-		if got := byz64ur(n); got != n {
-			t.Errorf("%x>>>0 == %x, want %x", n, got, n)
-		}
-	}
-
-	{
-		var n int32 = 0x55555555
-		if got := byz32l(n); got != n {
-			t.Errorf("%x<<0 == %x, want %x", n, got, n)
-		}
-		if got := byz32r(n); got != n {
-			t.Errorf("%x>>0 == %x, want %x", n, got, n)
-		}
-	}
-	{
-		var n uint32 = 0xaaaaaaaa
-		if got := byz32ur(n); got != n {
-			t.Errorf("%x>>>0 == %x, want %x", n, got, n)
-		}
-	}
-
-	{
-		var n int16 = 0x5555
-		if got := byz16l(n); got != n {
-			t.Errorf("%x<<0 == %x, want %x", n, got, n)
-		}
-		if got := byz16r(n); got != n {
-			t.Errorf("%x>>0 == %x, want %x", n, got, n)
-		}
-	}
-	{
-		var n uint16 = 0xaaaa
-		if got := byz16ur(n); got != n {
-			t.Errorf("%x>>>0 == %x, want %x", n, got, n)
-		}
-	}
-
-	{
-		var n int8 = 0x55
-		if got := byz8l(n); got != n {
-			t.Errorf("%x<<0 == %x, want %x", n, got, n)
-		}
-		if got := byz8r(n); got != n {
-			t.Errorf("%x>>0 == %x, want %x", n, got, n)
-		}
-	}
-	{
-		var n uint8 = 0x55
-		if got := byz8ur(n); got != n {
-			t.Errorf("%x>>>0 == %x, want %x", n, got, n)
-		}
-	}
-}
-
-//go:noinline
-func two64l(x int64) int64 {
-	return x << 1 << 1
-}
-
-//go:noinline
-func two64r(x int64) int64 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two64ur(x uint64) uint64 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two32l(x int32) int32 {
-	return x << 1 << 1
-}
-
-//go:noinline
-func two32r(x int32) int32 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two32ur(x uint32) uint32 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two16l(x int16) int16 {
-	return x << 1 << 1
-}
-
-//go:noinline
-func two16r(x int16) int16 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two16ur(x uint16) uint16 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two8l(x int8) int8 {
-	return x << 1 << 1
-}
-
-//go:noinline
-func two8r(x int8) int8 {
-	return x >> 1 >> 1
-}
-
-//go:noinline
-func two8ur(x uint8) uint8 {
-	return x >> 1 >> 1
-}
-
-func TestShiftCombine(t *testing.T) {
-	if got, want := two64l(4), int64(16); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := two64r(64), int64(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two64ur(64), uint64(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two32l(4), int32(16); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := two32r(64), int32(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two32ur(64), uint32(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two16l(4), int16(16); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := two16r(64), int16(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two16ur(64), uint16(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two8l(4), int8(16); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := two8r(64), int8(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := two8ur(64), uint8(16); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-
-}
-
-//go:noinline
-func three64l(x int64) int64 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three64ul(x uint64) uint64 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three64r(x int64) int64 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three64ur(x uint64) uint64 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three32l(x int32) int32 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three32ul(x uint32) uint32 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three32r(x int32) int32 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three32ur(x uint32) uint32 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three16l(x int16) int16 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three16ul(x uint16) uint16 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three16r(x int16) int16 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three16ur(x uint16) uint16 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three8l(x int8) int8 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three8ul(x uint8) uint8 {
-	return x << 3 >> 1 << 2
-}
-
-//go:noinline
-func three8r(x int8) int8 {
-	return x >> 3 << 1 >> 2
-}
-
-//go:noinline
-func three8ur(x uint8) uint8 {
-	return x >> 3 << 1 >> 2
-}
-
-func TestShiftCombine3(t *testing.T) {
-	if got, want := three64l(4), int64(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three64ul(4), uint64(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three64r(64), int64(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three64ur(64), uint64(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three32l(4), int32(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three32ul(4), uint32(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three32r(64), int32(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three32ur(64), uint32(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three16l(4), int16(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three16ul(4), uint16(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three16r(64), int16(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three16ur(64), uint16(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three8l(4), int8(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three8ul(4), uint8(64); want != got {
-		t.Errorf("4<<1<<1 == %d, want %d", got, want)
-	}
-	if got, want := three8r(64), int8(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-	if got, want := three8ur(64), uint8(4); want != got {
-		t.Errorf("64>>1>>1 == %d, want %d", got, want)
-	}
-}
-
-var (
-	one64  int64  = 1
-	one64u uint64 = 1
-	one32  int32  = 1
-	one32u uint32 = 1
-	one16  int16  = 1
-	one16u uint16 = 1
-	one8   int8   = 1
-	one8u  uint8  = 1
-)
-
-func TestShiftLargeCombine(t *testing.T) {
-	var N uint64 = 0x8000000000000000
-	if one64<<N<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one64>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one64u>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32<<N<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32u>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16<<N<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16u>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8<<N<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8u>>N>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-}
-
-func TestShiftLargeCombine3(t *testing.T) {
-	var N uint64 = 0x8000000000000001
-	if one64<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one64u<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one64>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one64u>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32u<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one32u>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16u<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one16u>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8u<<N>>2<<N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-	if one8u>>N<<2>>N == 1 {
-		t.Errorf("shift overflow mishandled")
-	}
-}
-
-func TestShiftGeneric(t *testing.T) {
-	for _, test := range [...]struct {
-		valueWidth int
-		signed     bool
-		shiftWidth int
-		left       bool
-		f          interface{}
-	}{
-		{64, true, 64, true, func(n int64, s uint64) int64 { return n << s }},
-		{64, true, 64, false, func(n int64, s uint64) int64 { return n >> s }},
-		{64, false, 64, false, func(n uint64, s uint64) uint64 { return n >> s }},
-		{64, true, 32, true, func(n int64, s uint32) int64 { return n << s }},
-		{64, true, 32, false, func(n int64, s uint32) int64 { return n >> s }},
-		{64, false, 32, false, func(n uint64, s uint32) uint64 { return n >> s }},
-		{64, true, 16, true, func(n int64, s uint16) int64 { return n << s }},
-		{64, true, 16, false, func(n int64, s uint16) int64 { return n >> s }},
-		{64, false, 16, false, func(n uint64, s uint16) uint64 { return n >> s }},
-		{64, true, 8, true, func(n int64, s uint8) int64 { return n << s }},
-		{64, true, 8, false, func(n int64, s uint8) int64 { return n >> s }},
-		{64, false, 8, false, func(n uint64, s uint8) uint64 { return n >> s }},
-
-		{32, true, 64, true, func(n int32, s uint64) int32 { return n << s }},
-		{32, true, 64, false, func(n int32, s uint64) int32 { return n >> s }},
-		{32, false, 64, false, func(n uint32, s uint64) uint32 { return n >> s }},
-		{32, true, 32, true, func(n int32, s uint32) int32 { return n << s }},
-		{32, true, 32, false, func(n int32, s uint32) int32 { return n >> s }},
-		{32, false, 32, false, func(n uint32, s uint32) uint32 { return n >> s }},
-		{32, true, 16, true, func(n int32, s uint16) int32 { return n << s }},
-		{32, true, 16, false, func(n int32, s uint16) int32 { return n >> s }},
-		{32, false, 16, false, func(n uint32, s uint16) uint32 { return n >> s }},
-		{32, true, 8, true, func(n int32, s uint8) int32 { return n << s }},
-		{32, true, 8, false, func(n int32, s uint8) int32 { return n >> s }},
-		{32, false, 8, false, func(n uint32, s uint8) uint32 { return n >> s }},
-
-		{16, true, 64, true, func(n int16, s uint64) int16 { return n << s }},
-		{16, true, 64, false, func(n int16, s uint64) int16 { return n >> s }},
-		{16, false, 64, false, func(n uint16, s uint64) uint16 { return n >> s }},
-		{16, true, 32, true, func(n int16, s uint32) int16 { return n << s }},
-		{16, true, 32, false, func(n int16, s uint32) int16 { return n >> s }},
-		{16, false, 32, false, func(n uint16, s uint32) uint16 { return n >> s }},
-		{16, true, 16, true, func(n int16, s uint16) int16 { return n << s }},
-		{16, true, 16, false, func(n int16, s uint16) int16 { return n >> s }},
-		{16, false, 16, false, func(n uint16, s uint16) uint16 { return n >> s }},
-		{16, true, 8, true, func(n int16, s uint8) int16 { return n << s }},
-		{16, true, 8, false, func(n int16, s uint8) int16 { return n >> s }},
-		{16, false, 8, false, func(n uint16, s uint8) uint16 { return n >> s }},
-
-		{8, true, 64, true, func(n int8, s uint64) int8 { return n << s }},
-		{8, true, 64, false, func(n int8, s uint64) int8 { return n >> s }},
-		{8, false, 64, false, func(n uint8, s uint64) uint8 { return n >> s }},
-		{8, true, 32, true, func(n int8, s uint32) int8 { return n << s }},
-		{8, true, 32, false, func(n int8, s uint32) int8 { return n >> s }},
-		{8, false, 32, false, func(n uint8, s uint32) uint8 { return n >> s }},
-		{8, true, 16, true, func(n int8, s uint16) int8 { return n << s }},
-		{8, true, 16, false, func(n int8, s uint16) int8 { return n >> s }},
-		{8, false, 16, false, func(n uint8, s uint16) uint8 { return n >> s }},
-		{8, true, 8, true, func(n int8, s uint8) int8 { return n << s }},
-		{8, true, 8, false, func(n int8, s uint8) int8 { return n >> s }},
-		{8, false, 8, false, func(n uint8, s uint8) uint8 { return n >> s }},
-	} {
-		fv := reflect.ValueOf(test.f)
-		var args [2]reflect.Value
-		for i := 0; i < test.valueWidth; i++ {
-			// Build value to be shifted.
-			var n int64 = 1
-			for j := 0; j < i; j++ {
-				n <<= 1
-			}
-			args[0] = reflect.ValueOf(n).Convert(fv.Type().In(0))
-			for s := 0; s <= test.shiftWidth; s++ {
-				args[1] = reflect.ValueOf(s).Convert(fv.Type().In(1))
-
-				// Compute desired result. We're testing variable shifts
-				// assuming constant shifts are correct.
-				r := n
-				var op string
-				switch {
-				case test.left:
-					op = "<<"
-					for j := 0; j < s; j++ {
-						r <<= 1
-					}
-					switch test.valueWidth {
-					case 32:
-						r = int64(int32(r))
-					case 16:
-						r = int64(int16(r))
-					case 8:
-						r = int64(int8(r))
-					}
-				case test.signed:
-					op = ">>"
-					switch test.valueWidth {
-					case 32:
-						r = int64(int32(r))
-					case 16:
-						r = int64(int16(r))
-					case 8:
-						r = int64(int8(r))
-					}
-					for j := 0; j < s; j++ {
-						r >>= 1
-					}
-				default:
-					op = ">>>"
-					for j := 0; j < s; j++ {
-						r = int64(uint64(r) >> 1)
-					}
-				}
-
-				// Call function.
-				res := fv.Call(args[:])[0].Convert(reflect.ValueOf(r).Type())
-
-				if res.Int() != r {
-					t.Errorf("%s%dx%d(%x,%x)=%x, want %x", op, test.valueWidth, test.shiftWidth, n, s, res.Int(), r)
-				}
-			}
-		}
-	}
-}
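The deleted shift tests above exercise Go's defined behaviour for oversized shift counts, which differs from C: there is no upper limit on the count, and shifting by at least the operand width yields 0 (or all sign bits for a signed right shift). A small standalone illustration, not part of the test file:

package main

import "fmt"

func main() {
	var s uint = 70 // larger than any operand width below

	var a int64 = 1
	var b uint64 = 1
	var c int64 = -1

	// Left shift and unsigned right shift by >= width give 0;
	// signed right shift fills with the sign bit.
	fmt.Println(a<<s, b>>s, c>>s) // prints: 0 0 -1
}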
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/sinit.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/sinit.go
deleted file mode 100644
index bc4a5c0..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/sinit.go
+++ /dev/null
@@ -1,1439 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/sinit.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/sinit.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "fmt"
-
-// static initialization
-const (
-	InitNotStarted = 0
-	InitDone       = 1
-	InitPending    = 2
-)
-
-type InitEntry struct {
-	Xoffset int64 // struct, array only
-	Expr    *Node // bytes of run-time computed expressions
-}
-
-type InitPlan struct {
-	E []InitEntry
-}
-
-var (
-	initlist  []*Node
-	initplans map[*Node]*InitPlan
-	inittemps = make(map[*Node]*Node)
-)
-
-// init1 walks the AST starting at n, and accumulates in out
-// the list of definitions needing init code in dependency order.
-func init1(n *Node, out *[]*Node) {
-	if n == nil {
-		return
-	}
-	init1(n.Left, out)
-	init1(n.Right, out)
-	for _, n1 := range n.List.Slice() {
-		init1(n1, out)
-	}
-
-	if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class == PFUNC {
-		// Methods called as Type.Method(receiver, ...).
-		// Definitions for method expressions are stored in type->nname.
-		init1(n.Type.Nname(), out)
-	}
-
-	if n.Op != ONAME {
-		return
-	}
-	switch n.Class {
-	case PEXTERN, PFUNC:
-	default:
-		if isblank(n) && n.Name.Curfn == nil && n.Name.Defn != nil && n.Name.Defn.Initorder == InitNotStarted {
-			// blank names initialization is part of init() but not
-			// when they are inside a function.
-			break
-		}
-		return
-	}
-
-	if n.Initorder == InitDone {
-		return
-	}
-	if n.Initorder == InitPending {
-		// Since mutually recursive sets of functions are allowed,
-		// we don't necessarily raise an error if n depends on a node
-		// which is already waiting for its dependencies to be visited.
-		//
-		// initlist contains a cycle of identifiers referring to each other.
-		// If this cycle contains a variable, then this variable refers to itself.
-		// Conversely, if there exists an initialization cycle involving
-		// a variable in the program, the tree walk will reach a cycle
-		// involving that variable.
-		if n.Class != PFUNC {
-			foundinitloop(n, n)
-		}
-
-		for i := len(initlist) - 1; i >= 0; i-- {
-			x := initlist[i]
-			if x == n {
-				break
-			}
-			if x.Class != PFUNC {
-				foundinitloop(n, x)
-			}
-		}
-
-		// The loop involves only functions, ok.
-		return
-	}
-
-	// reached a new unvisited node.
-	n.Initorder = InitPending
-	initlist = append(initlist, n)
-
-	// make sure that everything n depends on is initialized.
-	// n->defn is an assignment to n
-	if defn := n.Name.Defn; defn != nil {
-		switch defn.Op {
-		default:
-			Dump("defn", defn)
-			Fatalf("init1: bad defn")
-
-		case ODCLFUNC:
-			init2list(defn.Nbody, out)
-
-		case OAS:
-			if defn.Left != n {
-				Dump("defn", defn)
-				Fatalf("init1: bad defn")
-			}
-			if isblank(defn.Left) && candiscard(defn.Right) {
-				defn.Op = OEMPTY
-				defn.Left = nil
-				defn.Right = nil
-				break
-			}
-
-			init2(defn.Right, out)
-			if Debug['j'] != 0 {
-				fmt.Printf("%v\n", n.Sym)
-			}
-			if isblank(n) || !staticinit(n, out) {
-				if Debug['%'] != 0 {
-					Dump("nonstatic", defn)
-				}
-				*out = append(*out, defn)
-			}
-
-		case OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV:
-			if defn.Initorder == InitDone {
-				break
-			}
-			defn.Initorder = InitPending
-			for _, n2 := range defn.Rlist.Slice() {
-				init1(n2, out)
-			}
-			if Debug['%'] != 0 {
-				Dump("nonstatic", defn)
-			}
-			*out = append(*out, defn)
-			defn.Initorder = InitDone
-		}
-	}
-
-	last := len(initlist) - 1
-	if initlist[last] != n {
-		Fatalf("bad initlist %v", initlist)
-	}
-	initlist[last] = nil // allow GC
-	initlist = initlist[:last]
-
-	n.Initorder = InitDone
-	return
-}
-
-// foundinitloop prints an init loop error and exits.
-func foundinitloop(node, visited *Node) {
-	// If there have already been errors printed,
-	// those errors probably confused us and
-	// there might not be a loop. Let the user
-	// fix those first.
-	flusherrors()
-	if nerrors > 0 {
-		errorexit()
-	}
-
-	// Find the index of node and visited in the initlist.
-	var nodeindex, visitedindex int
-	for ; initlist[nodeindex] != node; nodeindex++ {
-	}
-	for ; initlist[visitedindex] != visited; visitedindex++ {
-	}
-
-	// There is a loop involving visited. We know about node and
-	// initlist = n1 <- ... <- visited <- ... <- node <- ...
-	fmt.Printf("%v: initialization loop:\n", visited.Line())
-
-	// Print visited -> ... -> n1 -> node.
-	for _, n := range initlist[visitedindex:] {
-		fmt.Printf("\t%v %v refers to\n", n.Line(), n.Sym)
-	}
-
-	// Print node -> ... -> visited.
-	for _, n := range initlist[nodeindex:visitedindex] {
-		fmt.Printf("\t%v %v refers to\n", n.Line(), n.Sym)
-	}
-
-	fmt.Printf("\t%v %v\n", visited.Line(), visited.Sym)
-	errorexit()
-}
-
-// recurse over n, doing init1 everywhere.
-func init2(n *Node, out *[]*Node) {
-	if n == nil || n.Initorder == InitDone {
-		return
-	}
-
-	if n.Op == ONAME && n.Ninit.Len() != 0 {
-		Fatalf("name %v with ninit: %+v\n", n.Sym, n)
-	}
-
-	init1(n, out)
-	init2(n.Left, out)
-	init2(n.Right, out)
-	init2list(n.Ninit, out)
-	init2list(n.List, out)
-	init2list(n.Rlist, out)
-	init2list(n.Nbody, out)
-
-	if n.Op == OCLOSURE {
-		init2list(n.Func.Closure.Nbody, out)
-	}
-	if n.Op == ODOTMETH || n.Op == OCALLPART {
-		init2(n.Type.Nname(), out)
-	}
-}
-
-func init2list(l Nodes, out *[]*Node) {
-	for _, n := range l.Slice() {
-		init2(n, out)
-	}
-}
-
-func initreorder(l []*Node, out *[]*Node) {
-	var n *Node
-	for _, n = range l {
-		switch n.Op {
-		case ODCLFUNC, ODCLCONST, ODCLTYPE:
-			continue
-		}
-
-		initreorder(n.Ninit.Slice(), out)
-		n.Ninit.Set(nil)
-		init1(n, out)
-	}
-}
-
-// initfix computes initialization order for a list l of top-level
-// declarations and outputs the corresponding list of statements
-// to include in the init() function body.
-func initfix(l []*Node) []*Node {
-	var lout []*Node
-	initplans = make(map[*Node]*InitPlan)
-	lno := lineno
-	initreorder(l, &lout)
-	lineno = lno
-	initplans = nil
-	return lout
-}
-
-// compilation of top-level (static) assignments
-// into DATA statements if at all possible.
-func staticinit(n *Node, out *[]*Node) bool {
-	if n.Op != ONAME || n.Class != PEXTERN || n.Name.Defn == nil || n.Name.Defn.Op != OAS {
-		Fatalf("staticinit")
-	}
-
-	lineno = n.Lineno
-	l := n.Name.Defn.Left
-	r := n.Name.Defn.Right
-	return staticassign(l, r, out)
-}
-
-// like staticassign but we are copying an already
-// initialized value r.
-func staticcopy(l *Node, r *Node, out *[]*Node) bool {
-	if r.Op != ONAME {
-		return false
-	}
-	if r.Class == PFUNC {
-		gdata(l, r, Widthptr)
-		return true
-	}
-	if r.Class != PEXTERN || r.Sym.Pkg != localpkg {
-		return false
-	}
-	if r.Name.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
-		return false
-	}
-	if r.Name.Defn.Op != OAS {
-		return false
-	}
-	orig := r
-	r = r.Name.Defn.Right
-
-	for r.Op == OCONVNOP && !eqtype(r.Type, l.Type) {
-		r = r.Left
-	}
-
-	switch r.Op {
-	case ONAME:
-		if staticcopy(l, r, out) {
-			return true
-		}
-		// We may have skipped past one or more OCONVNOPs, so
-		// use conv to ensure r is assignable to l (#13263).
-		*out = append(*out, nod(OAS, l, conv(r, l.Type)))
-		return true
-
-	case OLITERAL:
-		if iszero(r) {
-			return true
-		}
-		gdata(l, r, int(l.Type.Width))
-		return true
-
-	case OADDR:
-		switch r.Left.Op {
-		case ONAME:
-			gdata(l, r, int(l.Type.Width))
-			return true
-		}
-
-	case OPTRLIT:
-		switch r.Left.Op {
-		case OARRAYLIT, OSLICELIT, OSTRUCTLIT, OMAPLIT:
-			// copy pointer
-			gdata(l, nod(OADDR, inittemps[r], nil), int(l.Type.Width))
-			return true
-		}
-
-	case OSLICELIT:
-		// copy slice
-		a := inittemps[r]
-
-		n := *l
-		n.Xoffset = l.Xoffset + int64(array_array)
-		gdata(&n, nod(OADDR, a, nil), Widthptr)
-		n.Xoffset = l.Xoffset + int64(array_nel)
-		gdata(&n, r.Right, Widthint)
-		n.Xoffset = l.Xoffset + int64(array_cap)
-		gdata(&n, r.Right, Widthint)
-		return true
-
-	case OARRAYLIT, OSTRUCTLIT:
-		p := initplans[r]
-
-		n := *l
-		for i := range p.E {
-			e := &p.E[i]
-			n.Xoffset = l.Xoffset + e.Xoffset
-			n.Type = e.Expr.Type
-			if e.Expr.Op == OLITERAL {
-				gdata(&n, e.Expr, int(n.Type.Width))
-			} else {
-				ll := nod(OXXX, nil, nil)
-				*ll = n
-				ll.Orig = ll // completely separate copy
-				if !staticassign(ll, e.Expr, out) {
-					// Requires computation, but we're
-					// copying someone else's computation.
-					rr := nod(OXXX, nil, nil)
-
-					*rr = *orig
-					rr.Orig = rr // completely separate copy
-					rr.Type = ll.Type
-					rr.Xoffset += e.Xoffset
-					setlineno(rr)
-					*out = append(*out, nod(OAS, ll, rr))
-				}
-			}
-		}
-
-		return true
-	}
-
-	return false
-}
-
-func staticassign(l *Node, r *Node, out *[]*Node) bool {
-	for r.Op == OCONVNOP {
-		r = r.Left
-	}
-
-	switch r.Op {
-	case ONAME:
-		return staticcopy(l, r, out)
-
-	case OLITERAL:
-		if iszero(r) {
-			return true
-		}
-		gdata(l, r, int(l.Type.Width))
-		return true
-
-	case OADDR:
-		var nam Node
-		if stataddr(&nam, r.Left) {
-			n := *r
-			n.Left = &nam
-			gdata(l, &n, int(l.Type.Width))
-			return true
-		}
-		fallthrough
-
-	case OPTRLIT:
-		switch r.Left.Op {
-		case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT:
-			// Init pointer.
-			a := staticname(r.Left.Type)
-
-			inittemps[r] = a
-			gdata(l, nod(OADDR, a, nil), int(l.Type.Width))
-
-			// Init underlying literal.
-			if !staticassign(a, r.Left, out) {
-				*out = append(*out, nod(OAS, a, r.Left))
-			}
-			return true
-		}
-		//dump("not static ptrlit", r);
-
-	case OSTRARRAYBYTE:
-		if l.Class == PEXTERN && r.Left.Op == OLITERAL {
-			sval := r.Left.Val().U.(string)
-			slicebytes(l, sval, len(sval))
-			return true
-		}
-
-	case OSLICELIT:
-		initplan(r)
-		// Init slice.
-		bound := r.Right.Int64()
-		ta := typArray(r.Type.Elem(), bound)
-		a := staticname(ta)
-		inittemps[r] = a
-		n := *l
-		n.Xoffset = l.Xoffset + int64(array_array)
-		gdata(&n, nod(OADDR, a, nil), Widthptr)
-		n.Xoffset = l.Xoffset + int64(array_nel)
-		gdata(&n, r.Right, Widthint)
-		n.Xoffset = l.Xoffset + int64(array_cap)
-		gdata(&n, r.Right, Widthint)
-
-		// Fall through to init underlying array.
-		l = a
-		fallthrough
-
-	case OARRAYLIT, OSTRUCTLIT:
-		initplan(r)
-
-		p := initplans[r]
-		n := *l
-		for i := range p.E {
-			e := &p.E[i]
-			n.Xoffset = l.Xoffset + e.Xoffset
-			n.Type = e.Expr.Type
-			if e.Expr.Op == OLITERAL {
-				gdata(&n, e.Expr, int(n.Type.Width))
-			} else {
-				setlineno(e.Expr)
-				a := nod(OXXX, nil, nil)
-				*a = n
-				a.Orig = a // completely separate copy
-				if !staticassign(a, e.Expr, out) {
-					*out = append(*out, nod(OAS, a, e.Expr))
-				}
-			}
-		}
-
-		return true
-
-	case OMAPLIT:
-		break
-
-	case OCLOSURE:
-		if hasemptycvars(r) {
-			if Debug_closure > 0 {
-				Warnl(r.Lineno, "closure converted to global")
-			}
-			// Closures with no captured variables are globals,
-			// so the assignment can be done at link time.
-			n := *l
-			gdata(&n, r.Func.Closure.Func.Nname, Widthptr)
-			return true
-		} else {
-			closuredebugruntimecheck(r)
-		}
-
-	case OCONVIFACE:
-		// This logic is mirrored in isStaticCompositeLiteral.
-		// If you change something here, change it there, and vice versa.
-
-		// Determine the underlying concrete type and value we are converting from.
-		val := r
-		for val.Op == OCONVIFACE {
-			val = val.Left
-		}
-		if val.Type.IsInterface() {
-			// val is an interface type.
-			// If val is nil, we can statically initialize l;
-			// both words are zero and so there is no work to do, so report success.
-			// If val is non-nil, we have no concrete type to record,
-			// and we won't be able to statically initialize its value, so report failure.
-			return Isconst(val, CTNIL)
-		}
-
-		var itab *Node
-		if l.Type.IsEmptyInterface() {
-			itab = typename(val.Type)
-		} else {
-			itab = itabname(val.Type, l.Type)
-		}
-
-		// Create a copy of l to modify while we emit data.
-		n := *l
-
-		// Emit itab, advance offset.
-		gdata(&n, itab, Widthptr)
-		n.Xoffset += int64(Widthptr)
-
-		// Emit data.
-		if isdirectiface(val.Type) {
-			if Isconst(val, CTNIL) {
-				// Nil is zero, nothing to do.
-				return true
-			}
-			// Copy val directly into n.
-			n.Type = val.Type
-			setlineno(val)
-			a := nod(OXXX, nil, nil)
-			*a = n
-			a.Orig = a
-			if !staticassign(a, val, out) {
-				*out = append(*out, nod(OAS, a, val))
-			}
-		} else {
-			// Construct temp to hold val, write pointer to temp into n.
-			a := staticname(val.Type)
-			inittemps[val] = a
-			if !staticassign(a, val, out) {
-				*out = append(*out, nod(OAS, a, val))
-			}
-			ptr := nod(OADDR, a, nil)
-			n.Type = ptrto(val.Type)
-			gdata(&n, ptr, Widthptr)
-		}
-
-		return true
-	}
-
-	//dump("not static", r);
-	return false
-}
-
-// initContext is the context in which static data is populated.
-// It is either in an init function or in any other function.
-// Static data populated in an init function will be written either
-// zero times (as a readonly, static data symbol) or
-// one time (during init function execution).
-// Either way, there is no opportunity for races or further modification,
-// so the data can be written to a (possibly readonly) data symbol.
-// Static data populated in any other function needs to be local to
-// that function to allow multiple instances of that function
-// to execute concurrently without clobbering each others' data.
-type initContext uint8
-
-const (
-	inInitFunction initContext = iota
-	inNonInitFunction
-)
-
-// from here down is the walk analysis
-// of composite literals.
-// most of the work is to generate
-// data statements for the constant
-// part of the composite literal.
-
-// staticname returns a name backed by a static data symbol.
-// Callers should set n.Name.Readonly = true on the
-// returned node for readonly nodes.
-func staticname(t *Type) *Node {
-	n := newname(lookupN("statictmp_", statuniqgen))
-	statuniqgen++
-	addvar(n, t, PEXTERN)
-	return n
-}
-
-func isliteral(n *Node) bool {
-	// Treat nils as zeros rather than literals.
-	return n.Op == OLITERAL && n.Val().Ctype() != CTNIL
-}
-
-func (n *Node) isSimpleName() bool {
-	return n.Op == ONAME && n.Addable && n.Class != PAUTOHEAP
-}
-
-func litas(l *Node, r *Node, init *Nodes) {
-	a := nod(OAS, l, r)
-	a = typecheck(a, Etop)
-	a = walkexpr(a, init)
-	init.Append(a)
-}
-
-// initGenType is a bitmap indicating the types of generation that will occur for a static value.
-type initGenType uint8
-
-const (
-	initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
-	initConst                           // contains some constant values, which may be written into data symbols
-)
-
-// getdyn calculates the initGenType for n.
-// If top is false, getdyn is recursing.
-func getdyn(n *Node, top bool) initGenType {
-	switch n.Op {
-	default:
-		if isliteral(n) {
-			return initConst
-		}
-		return initDynamic
-
-	case OSLICELIT:
-		if !top {
-			return initDynamic
-		}
-
-	case OARRAYLIT, OSTRUCTLIT:
-	}
-
-	var mode initGenType
-	for _, n1 := range n.List.Slice() {
-		switch n1.Op {
-		case OKEY:
-			n1 = n1.Right
-		case OSTRUCTKEY:
-			n1 = n1.Left
-		}
-		mode |= getdyn(n1, false)
-		if mode == initDynamic|initConst {
-			break
-		}
-	}
-	return mode
-}
-
-// isStaticCompositeLiteral reports whether n is a compile-time constant.
-func isStaticCompositeLiteral(n *Node) bool {
-	switch n.Op {
-	case OSLICELIT:
-		return false
-	case OARRAYLIT:
-		for _, r := range n.List.Slice() {
-			if r.Op == OKEY {
-				r = r.Right
-			}
-			if !isStaticCompositeLiteral(r) {
-				return false
-			}
-		}
-		return true
-	case OSTRUCTLIT:
-		for _, r := range n.List.Slice() {
-			if r.Op != OSTRUCTKEY {
-				Fatalf("isStaticCompositeLiteral: rhs not OSTRUCTKEY: %v", r)
-			}
-			if !isStaticCompositeLiteral(r.Left) {
-				return false
-			}
-		}
-		return true
-	case OLITERAL:
-		return true
-	case OCONVIFACE:
-		// See staticassign's OCONVIFACE case for comments.
-		val := n
-		for val.Op == OCONVIFACE {
-			val = val.Left
-		}
-		if val.Type.IsInterface() {
-			return Isconst(val, CTNIL)
-		}
-		if isdirectiface(val.Type) && Isconst(val, CTNIL) {
-			return true
-		}
-		return isStaticCompositeLiteral(val)
-	}
-	return false
-}
-
-// initKind is a kind of static initialization: static, dynamic, or local.
-// Static initialization represents literals and
-// literal components of composite literals.
-// Dynamic initialization represents non-literals and
-// non-literal components of composite literals.
-// LocalCode initialization represents initialization
-// that occurs purely in generated code local to the function of use.
-// Initialization code is sometimes generated in passes,
-// first static then dynamic.
-type initKind uint8
-
-const (
-	initKindStatic initKind = iota + 1
-	initKindDynamic
-	initKindLocalCode
-)
-
-// fixedlit handles struct, array, and slice literals.
-// TODO: expand documentation.
-func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
-	var splitnode func(*Node) (a *Node, value *Node)
-	switch n.Op {
-	case OARRAYLIT, OSLICELIT:
-		var k int64
-		splitnode = func(r *Node) (*Node, *Node) {
-			if r.Op == OKEY {
-				k = nonnegintconst(r.Left)
-				r = r.Right
-			}
-			a := nod(OINDEX, var_, nodintconst(k))
-			k++
-			return a, r
-		}
-	case OSTRUCTLIT:
-		splitnode = func(r *Node) (*Node, *Node) {
-			if r.Op != OSTRUCTKEY {
-				Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
-			}
-			return nodSym(ODOT, var_, r.Sym), r.Left
-		}
-	default:
-		Fatalf("fixedlit bad op: %v", n.Op)
-	}
-
-	for _, r := range n.List.Slice() {
-		a, value := splitnode(r)
-
-		switch value.Op {
-		case OSLICELIT:
-			if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
-				slicelit(ctxt, value, a, init)
-				continue
-			}
-
-		case OARRAYLIT, OSTRUCTLIT:
-			fixedlit(ctxt, kind, value, a, init)
-			continue
-		}
-
-		islit := isliteral(value)
-		if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
-			continue
-		}
-
-		// build list of assignments: var[index] = expr
-		setlineno(value)
-		a = nod(OAS, a, value)
-		a = typecheck(a, Etop)
-		switch kind {
-		case initKindStatic:
-			a = walkexpr(a, init) // add any assignments in r to top
-			if a.Op == OASWB {
-				// Static initialization never needs
-				// write barriers.
-				a.Op = OAS
-			}
-			if a.Op != OAS {
-				Fatalf("fixedlit: not as, is %v", a)
-			}
-			a.IsStatic = true
-		case initKindDynamic, initKindLocalCode:
-			a = orderstmtinplace(a)
-			a = walkstmt(a)
-		default:
-			Fatalf("fixedlit: bad kind %d", kind)
-		}
-
-		init.Append(a)
-	}
-}
-
-func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
-	// make an array type corresponding to the number of elements we have
-	t := typArray(n.Type.Elem(), n.Right.Int64())
-	dowidth(t)
-
-	if ctxt == inNonInitFunction {
-		// put everything into static array
-		vstat := staticname(t)
-
-		fixedlit(ctxt, initKindStatic, n, vstat, init)
-		fixedlit(ctxt, initKindDynamic, n, vstat, init)
-
-		// copy static to slice
-		a := nod(OSLICE, vstat, nil)
-
-		a = nod(OAS, var_, a)
-		a = typecheck(a, Etop)
-		a.IsStatic = true
-		init.Append(a)
-		return
-	}
-
-	// recipe for var = []t{...}
-	// 1. make a static array
-	//	var vstat [...]t
-	// 2. assign (data statements) the constant part
-	//	vstat = constpart{}
-	// 3. make an auto pointer to array and allocate heap to it
-	//	var vauto *[...]t = new([...]t)
-	// 4. copy the static array to the auto array
-	//	*vauto = vstat
-	// 5. for each dynamic part assign to the array
-	//	vauto[i] = dynamic part
-	// 6. assign slice of allocated heap to var
-	//	var = vauto[:]
-	//
-	// an optimization is done if there is no constant part
-	//	3. var vauto *[...]t = new([...]t)
-	//	5. vauto[i] = dynamic part
-	//	6. var = vauto[:]
-
-	// if the literal contains constants,
-	// make static initialized array (1),(2)
-	var vstat *Node
-
-	mode := getdyn(n, true)
-	if mode&initConst != 0 {
-		vstat = staticname(t)
-		if ctxt == inInitFunction {
-			vstat.Name.Readonly = true
-		}
-		fixedlit(ctxt, initKindStatic, n, vstat, init)
-	}
-
-	// make new auto *array (3 declare)
-	vauto := temp(ptrto(t))
-
-	// set auto to point at new temp or heap (3 assign)
-	var a *Node
-	if x := prealloc[n]; x != nil {
-		// temp allocated during order.go for dddarg
-		x.Type = t
-
-		if vstat == nil {
-			a = nod(OAS, x, nil)
-			a = typecheck(a, Etop)
-			init.Append(a) // zero new temp
-		}
-
-		a = nod(OADDR, x, nil)
-	} else if n.Esc == EscNone {
-		a = temp(t)
-		if vstat == nil {
-			a = nod(OAS, temp(t), nil)
-			a = typecheck(a, Etop)
-			init.Append(a) // zero new temp
-			a = a.Left
-		}
-
-		a = nod(OADDR, a, nil)
-	} else {
-		a = nod(ONEW, nil, nil)
-		a.List.Set1(typenod(t))
-	}
-
-	a = nod(OAS, vauto, a)
-	a = typecheck(a, Etop)
-	a = walkexpr(a, init)
-	init.Append(a)
-
-	if vstat != nil {
-		// copy static to heap (4)
-		a = nod(OIND, vauto, nil)
-
-		a = nod(OAS, a, vstat)
-		a = typecheck(a, Etop)
-		a = walkexpr(a, init)
-		init.Append(a)
-	}
-
-	// put dynamics into array (5)
-	var index int64
-	for _, r := range n.List.Slice() {
-		value := r
-		if r.Op == OKEY {
-			index = nonnegintconst(r.Left)
-			value = r.Right
-		}
-		a := nod(OINDEX, vauto, nodintconst(index))
-		a.Bounded = true
-		index++
-
-		// TODO need to check bounds?
-
-		switch value.Op {
-		case OSLICELIT:
-			break
-
-		case OARRAYLIT, OSTRUCTLIT:
-			fixedlit(ctxt, initKindDynamic, value, a, init)
-			continue
-		}
-
-		if isliteral(value) {
-			continue
-		}
-
-		// build list of vauto[c] = expr
-		setlineno(value)
-		a = nod(OAS, a, value)
-
-		a = typecheck(a, Etop)
-		a = orderstmtinplace(a)
-		a = walkstmt(a)
-		init.Append(a)
-	}
-
-	// make slice out of heap (6)
-	a = nod(OAS, var_, nod(OSLICE, vauto, nil))
-
-	a = typecheck(a, Etop)
-	a = orderstmtinplace(a)
-	a = walkstmt(a)
-	init.Append(a)
-}
-
-func maplit(n *Node, m *Node, init *Nodes) {
-	// make the map var
-	nerr := nerrors
-
-	a := nod(OMAKE, nil, nil)
-	a.List.Set2(typenod(n.Type), nodintconst(int64(len(n.List.Slice()))))
-	litas(m, a, init)
-
-	// count the initializers
-	b := 0
-	for _, r := range n.List.Slice() {
-		if r.Op != OKEY {
-			Fatalf("maplit: rhs not OKEY: %v", r)
-		}
-		index := r.Left
-		value := r.Right
-
-		if isliteral(index) && isliteral(value) {
-			b++
-		}
-	}
-
-	if b != 0 {
-		// build types [count]Tindex and [count]Tvalue
-		tk := typArray(n.Type.Key(), int64(b))
-		tv := typArray(n.Type.Val(), int64(b))
-
-		// TODO(josharian): suppress alg generation for these types?
-		dowidth(tk)
-		dowidth(tv)
-
-		// make and initialize static arrays
-		vstatk := staticname(tk)
-		vstatk.Name.Readonly = true
-		vstatv := staticname(tv)
-		vstatv.Name.Readonly = true
-
-		b := int64(0)
-		for _, r := range n.List.Slice() {
-			if r.Op != OKEY {
-				Fatalf("maplit: rhs not OKEY: %v", r)
-			}
-			index := r.Left
-			value := r.Right
-
-			if isliteral(index) && isliteral(value) {
-				// build vstatk[b] = index
-				setlineno(index)
-				lhs := nod(OINDEX, vstatk, nodintconst(b))
-				as := nod(OAS, lhs, index)
-				as = typecheck(as, Etop)
-				as = walkexpr(as, init)
-				as.IsStatic = true
-				init.Append(as)
-
-				// build vstatv[b] = value
-				setlineno(value)
-				lhs = nod(OINDEX, vstatv, nodintconst(b))
-				as = nod(OAS, lhs, value)
-				as = typecheck(as, Etop)
-				as = walkexpr(as, init)
-				as.IsStatic = true
-				init.Append(as)
-
-				b++
-			}
-		}
-
-		// loop adding structure elements to map
-		// for i = 0; i < len(vstatk); i++ {
-		//	map[vstatk[i]] = vstatv[i]
-		// }
-		i := temp(Types[TINT])
-		rhs := nod(OINDEX, vstatv, i)
-		rhs.Bounded = true
-
-		kidx := nod(OINDEX, vstatk, i)
-		kidx.Bounded = true
-		lhs := nod(OINDEX, m, kidx)
-
-		zero := nod(OAS, i, nodintconst(0))
-		cond := nod(OLT, i, nodintconst(tk.NumElem()))
-		incr := nod(OAS, i, nod(OADD, i, nodintconst(1)))
-		body := nod(OAS, lhs, rhs)
-
-		loop := nod(OFOR, cond, incr)
-		loop.Nbody.Set1(body)
-		loop.Ninit.Set1(zero)
-
-		loop = typecheck(loop, Etop)
-		loop = walkstmt(loop)
-		init.Append(loop)
-	}
-
-	// put in dynamic entries one at a time
-	var key, val *Node
-	for _, r := range n.List.Slice() {
-		if r.Op != OKEY {
-			Fatalf("maplit: rhs not OKEY: %v", r)
-		}
-		index := r.Left
-		value := r.Right
-
-		if isliteral(index) && isliteral(value) {
-			continue
-		}
-
-		// build list of var[c] = expr.
-		// use temporary so that mapassign1 can have addressable key, val.
-		if key == nil {
-			key = temp(m.Type.Key())
-			val = temp(m.Type.Val())
-		}
-
-		setlineno(index)
-		a = nod(OAS, key, index)
-		a = typecheck(a, Etop)
-		a = walkstmt(a)
-		init.Append(a)
-
-		setlineno(value)
-		a = nod(OAS, val, value)
-		a = typecheck(a, Etop)
-		a = walkstmt(a)
-		init.Append(a)
-
-		setlineno(val)
-		a = nod(OAS, nod(OINDEX, m, key), val)
-		a = typecheck(a, Etop)
-		a = walkstmt(a)
-		init.Append(a)
-
-		if nerr != nerrors {
-			break
-		}
-	}
-
-	if key != nil {
-		a = nod(OVARKILL, key, nil)
-		a = typecheck(a, Etop)
-		init.Append(a)
-		a = nod(OVARKILL, val, nil)
-		a = typecheck(a, Etop)
-		init.Append(a)
-	}
-}
-
-func anylit(n *Node, var_ *Node, init *Nodes) {
-	t := n.Type
-	switch n.Op {
-	default:
-		Fatalf("anylit: not lit, op=%v node=%v", n.Op, n)
-
-	case OPTRLIT:
-		if !t.IsPtr() {
-			Fatalf("anylit: not ptr")
-		}
-
-		var r *Node
-		if n.Right != nil {
-			// n.Right is stack temporary used as backing store.
-			init.Append(nod(OAS, n.Right, nil)) // zero backing store, just in case (#18410)
-			r = nod(OADDR, n.Right, nil)
-			r = typecheck(r, Erv)
-		} else {
-			r = nod(ONEW, nil, nil)
-			r.Typecheck = 1
-			r.Type = t
-			r.Esc = n.Esc
-		}
-
-		r = walkexpr(r, init)
-		a := nod(OAS, var_, r)
-
-		a = typecheck(a, Etop)
-		init.Append(a)
-
-		var_ = nod(OIND, var_, nil)
-		var_ = typecheck(var_, Erv|Easgn)
-		anylit(n.Left, var_, init)
-
-	case OSTRUCTLIT, OARRAYLIT:
-		if !t.IsStruct() && !t.IsArray() {
-			Fatalf("anylit: not struct/array")
-		}
-
-		if var_.isSimpleName() && n.List.Len() > 4 {
-			// lay out static data
-			vstat := staticname(t)
-			vstat.Name.Readonly = true
-
-			ctxt := inInitFunction
-			if n.Op == OARRAYLIT {
-				ctxt = inNonInitFunction
-			}
-			fixedlit(ctxt, initKindStatic, n, vstat, init)
-
-			// copy static to var
-			a := nod(OAS, var_, vstat)
-
-			a = typecheck(a, Etop)
-			a = walkexpr(a, init)
-			init.Append(a)
-
-			// add expressions to automatic
-			fixedlit(inInitFunction, initKindDynamic, n, var_, init)
-			break
-		}
-
-		var components int64
-		if n.Op == OARRAYLIT {
-			components = t.NumElem()
-		} else {
-			components = int64(t.NumFields())
-		}
-		// initialization of an array or struct with unspecified components (missing fields or arrays)
-		if var_.isSimpleName() || int64(n.List.Len()) < components {
-			a := nod(OAS, var_, nil)
-			a = typecheck(a, Etop)
-			a = walkexpr(a, init)
-			init.Append(a)
-		}
-
-		fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
-
-	case OSLICELIT:
-		slicelit(inInitFunction, n, var_, init)
-
-	case OMAPLIT:
-		if !t.IsMap() {
-			Fatalf("anylit: not map")
-		}
-		maplit(n, var_, init)
-	}
-}
-
-func oaslit(n *Node, init *Nodes) bool {
-	if n.Left == nil || n.Right == nil {
-		// not a special composite literal assignment
-		return false
-	}
-	if n.Left.Type == nil || n.Right.Type == nil {
-		// not a special composite literal assignment
-		return false
-	}
-	if !n.Left.isSimpleName() {
-		// not a special composite literal assignment
-		return false
-	}
-	if !eqtype(n.Left.Type, n.Right.Type) {
-		// not a special composite literal assignment
-		return false
-	}
-
-	switch n.Right.Op {
-	default:
-		// not a special composite literal assignment
-		return false
-
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
-		if vmatch1(n.Left, n.Right) {
-			// not a special composite literal assignment
-			return false
-		}
-		anylit(n.Right, n.Left, init)
-	}
-
-	n.Op = OEMPTY
-	n.Right = nil
-	return true
-}
-
-func getlit(lit *Node) int {
-	if smallintconst(lit) {
-		return int(lit.Int64())
-	}
-	return -1
-}
-
-// stataddr sets nam to the static address of n and reports whether it succeeded.
-func stataddr(nam *Node, n *Node) bool {
-	if n == nil {
-		return false
-	}
-
-	switch n.Op {
-	case ONAME:
-		*nam = *n
-		return n.Addable
-
-	case ODOT:
-		if !stataddr(nam, n.Left) {
-			break
-		}
-		nam.Xoffset += n.Xoffset
-		nam.Type = n.Type
-		return true
-
-	case OINDEX:
-		if n.Left.Type.IsSlice() {
-			break
-		}
-		if !stataddr(nam, n.Left) {
-			break
-		}
-		l := getlit(n.Right)
-		if l < 0 {
-			break
-		}
-
-		// Check for overflow.
-		if n.Type.Width != 0 && Thearch.MAXWIDTH/n.Type.Width <= int64(l) {
-			break
-		}
-		nam.Xoffset += int64(l) * n.Type.Width
-		nam.Type = n.Type
-		return true
-	}
-
-	return false
-}
-
-func initplan(n *Node) {
-	if initplans[n] != nil {
-		return
-	}
-	p := new(InitPlan)
-	initplans[n] = p
-	switch n.Op {
-	default:
-		Fatalf("initplan")
-
-	case OARRAYLIT, OSLICELIT:
-		var k int64
-		for _, a := range n.List.Slice() {
-			if a.Op == OKEY {
-				k = nonnegintconst(a.Left)
-				a = a.Right
-			}
-			addvalue(p, k*n.Type.Elem().Width, a)
-			k++
-		}
-
-	case OSTRUCTLIT:
-		for _, a := range n.List.Slice() {
-			if a.Op != OSTRUCTKEY {
-				Fatalf("initplan fixedlit")
-			}
-			addvalue(p, a.Xoffset, a.Left)
-		}
-
-	case OMAPLIT:
-		for _, a := range n.List.Slice() {
-			if a.Op != OKEY {
-				Fatalf("initplan maplit")
-			}
-			addvalue(p, -1, a.Right)
-		}
-	}
-}
-
-func addvalue(p *InitPlan, xoffset int64, n *Node) {
-	// special case: zero can be dropped entirely
-	if iszero(n) {
-		return
-	}
-
-	// special case: inline struct and array (not slice) literals
-	if isvaluelit(n) {
-		initplan(n)
-		q := initplans[n]
-		for _, qe := range q.E {
-			// qe is a copy; we are not modifying entries in q.E
-			qe.Xoffset += xoffset
-			p.E = append(p.E, qe)
-		}
-		return
-	}
-
-	// add to plan
-	p.E = append(p.E, InitEntry{Xoffset: xoffset, Expr: n})
-}
-
-func iszero(n *Node) bool {
-	switch n.Op {
-	case OLITERAL:
-		switch u := n.Val().U.(type) {
-		default:
-			Dump("unexpected literal", n)
-			Fatalf("iszero")
-		case *NilVal:
-			return true
-		case string:
-			return u == ""
-		case bool:
-			return !u
-		case *Mpint:
-			return u.CmpInt64(0) == 0
-		case *Mpflt:
-			return u.CmpFloat64(0) == 0
-		case *Mpcplx:
-			return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0
-		}
-
-	case OARRAYLIT:
-		for _, n1 := range n.List.Slice() {
-			if n1.Op == OKEY {
-				n1 = n1.Right
-			}
-			if !iszero(n1) {
-				return false
-			}
-		}
-		return true
-
-	case OSTRUCTLIT:
-		for _, n1 := range n.List.Slice() {
-			if !iszero(n1.Left) {
-				return false
-			}
-		}
-		return true
-	}
-
-	return false
-}
-
-func isvaluelit(n *Node) bool {
-	return n.Op == OARRAYLIT || n.Op == OSTRUCTLIT
-}
-
-// gen_as_init attempts to emit static data for n and reports whether it succeeded.
-// If reportOnly is true, it does not emit static data and does not modify the AST.
-func gen_as_init(n *Node, reportOnly bool) bool {
-	success := genAsInitNoCheck(n, reportOnly)
-	if !success && n.IsStatic {
-		Dump("\ngen_as_init", n)
-		Fatalf("gen_as_init couldn't generate static data")
-	}
-	return success
-}
-
-func genAsInitNoCheck(n *Node, reportOnly bool) bool {
-	if !n.IsStatic {
-		return false
-	}
-
-	nr := n.Right
-	nl := n.Left
-	if nr == nil {
-		var nam Node
-		return stataddr(&nam, nl) && nam.Class == PEXTERN
-	}
-
-	if nr.Type == nil || !eqtype(nl.Type, nr.Type) {
-		return false
-	}
-
-	var nam Node
-	if !stataddr(&nam, nl) || nam.Class != PEXTERN {
-		return false
-	}
-
-	switch nr.Op {
-	default:
-		return false
-
-	case OCONVNOP:
-		nr = nr.Left
-		if nr == nil || nr.Op != OSLICEARR {
-			return false
-		}
-		fallthrough
-
-	case OSLICEARR:
-		low, high, _ := nr.SliceBounds()
-		if low != nil || high != nil {
-			return false
-		}
-		nr = nr.Left
-		if nr == nil || nr.Op != OADDR {
-			return false
-		}
-		ptr := nr
-		nr = nr.Left
-		if nr == nil || nr.Op != ONAME {
-			return false
-		}
-
-		// nr is the array being converted to a slice
-		if nr.Type == nil || !nr.Type.IsArray() {
-			return false
-		}
-
-		if !reportOnly {
-			nam.Xoffset += int64(array_array)
-			gdata(&nam, ptr, Widthptr)
-
-			nam.Xoffset += int64(array_nel) - int64(array_array)
-			var nod1 Node
-			Nodconst(&nod1, Types[TINT], nr.Type.NumElem())
-			gdata(&nam, &nod1, Widthint)
-
-			nam.Xoffset += int64(array_cap) - int64(array_nel)
-			gdata(&nam, &nod1, Widthint)
-		}
-
-		return true
-
-	case OLITERAL:
-		if !reportOnly {
-			gdata(&nam, nr, int(nr.Type.Width))
-		}
-		return true
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/sizeof_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/sizeof_test.go
deleted file mode 100644
index f45fdf0..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/sizeof_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/sizeof_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/sizeof_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !nacl
-
-package gc
-
-import (
-	"reflect"
-	"testing"
-	"unsafe"
-)
-
-// Assert that the sizes of important structures do not change unexpectedly.
-
-func TestSizeof(t *testing.T) {
-	const _64bit = unsafe.Sizeof(uintptr(0)) == 8
-
-	var tests = []struct {
-		val    interface{} // type as a value
-		_32bit uintptr     // size on 32bit platforms
-		_64bit uintptr     // size on 64bit platforms
-	}{
-		{Func{}, 92, 160},
-		{Name{}, 44, 72},
-		{Param{}, 24, 48},
-		{Node{}, 92, 144},
-		{Sym{}, 60, 112},
-		{Type{}, 60, 96},
-		{MapType{}, 20, 40},
-		{ForwardType{}, 16, 32},
-		{FuncType{}, 28, 48},
-		{StructType{}, 12, 24},
-		{InterType{}, 4, 8},
-		{ChanType{}, 8, 16},
-		{ArrayType{}, 16, 24},
-		{InterMethType{}, 4, 8},
-		{DDDFieldType{}, 4, 8},
-		{FuncArgsType{}, 4, 8},
-		{ChanArgsType{}, 4, 8},
-		{PtrType{}, 4, 8},
-		{SliceType{}, 4, 8},
-	}
-
-	for _, tt := range tests {
-		want := tt._32bit
-		if _64bit {
-			want = tt._64bit
-		}
-		got := reflect.TypeOf(tt.val).Size()
-		if want != got {
-			t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/ssa.go
deleted file mode 100644
index 649de4f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/ssa.go
+++ /dev/null
@@ -1,5005 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/ssa.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"html"
-	"os"
-	"sort"
-
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-)
-
-var ssaConfig *ssa.Config
-var ssaExp ssaExport
-
-func initssa() *ssa.Config {
-	if ssaConfig == nil {
-		ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0)
-		if Thearch.LinkArch.Name == "386" {
-			ssaConfig.Set387(Thearch.Use387)
-		}
-	}
-	ssaConfig.HTML = nil
-	return ssaConfig
-}
-
-// buildssa builds an SSA function.
-func buildssa(fn *Node) *ssa.Func {
-	name := fn.Func.Nname.Sym.Name
-	printssa := name == os.Getenv("GOSSAFUNC")
-	if printssa {
-		fmt.Println("generating SSA for", name)
-		dumplist("buildssa-enter", fn.Func.Enter)
-		dumplist("buildssa-body", fn.Nbody)
-		dumplist("buildssa-exit", fn.Func.Exit)
-	}
-
-	var s state
-	s.pushLine(fn.Lineno)
-	defer s.popLine()
-
-	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
-		s.cgoUnsafeArgs = true
-	}
-	if fn.Func.Pragma&Nowritebarrier != 0 {
-		s.noWB = true
-	}
-	defer func() {
-		if s.WBLineno != 0 {
-			fn.Func.WBLineno = s.WBLineno
-		}
-	}()
-	// TODO(khr): build config just once at the start of the compiler binary
-
-	ssaExp.log = printssa
-
-	s.config = initssa()
-	s.f = s.config.NewFunc()
-	s.f.Name = name
-	if fn.Func.Pragma&Nosplit != 0 {
-		s.f.NoSplit = true
-	}
-	s.exitCode = fn.Func.Exit
-	s.panics = map[funcLine]*ssa.Block{}
-	s.config.DebugTest = s.config.DebugHashMatch("GOSSAHASH", name)
-
-	if name == os.Getenv("GOSSAFUNC") {
-		// TODO: tempfile? it is handy to have the location
-		// of this file be stable, so you can just reload in the browser.
-		s.config.HTML = ssa.NewHTMLWriter("ssa.html", s.config, name)
-		// TODO: generate and print a mapping from nodes to values and blocks
-	}
-
-	// Allocate starting block
-	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
-
-	// Allocate starting values
-	s.labels = map[string]*ssaLabel{}
-	s.labeledNodes = map[*Node]*ssaLabel{}
-	s.fwdVars = map[*Node]*ssa.Value{}
-	s.startmem = s.entryNewValue0(ssa.OpInitMem, ssa.TypeMem)
-	s.sp = s.entryNewValue0(ssa.OpSP, Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
-	s.sb = s.entryNewValue0(ssa.OpSB, Types[TUINTPTR])
-
-	s.startBlock(s.f.Entry)
-	s.vars[&memVar] = s.startmem
-
-	s.varsyms = map[*Node]interface{}{}
-
-	// Generate addresses of local declarations
-	s.decladdrs = map[*Node]*ssa.Value{}
-	for _, n := range fn.Func.Dcl {
-		switch n.Class {
-		case PPARAM, PPARAMOUT:
-			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
-			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sp)
-			if n.Class == PPARAMOUT && s.canSSA(n) {
-				// Save ssa-able PPARAMOUT variables so we can
-				// store them back to the stack at the end of
-				// the function.
-				s.returns = append(s.returns, n)
-			}
-		case PAUTO:
-			// processed at each use, to prevent Addr coming
-			// before the decl.
-		case PAUTOHEAP:
-			// moved to heap - already handled by frontend
-		case PFUNC:
-			// local function - already handled by frontend
-		default:
-			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class])
-		}
-	}
-
-	// Populate arguments.
-	for _, n := range fn.Func.Dcl {
-		if n.Class != PPARAM {
-			continue
-		}
-		var v *ssa.Value
-		if s.canSSA(n) {
-			v = s.newValue0A(ssa.OpArg, n.Type, n)
-		} else {
-			// Not SSAable. Load it.
-			v = s.newValue2(ssa.OpLoad, n.Type, s.decladdrs[n], s.startmem)
-		}
-		s.vars[n] = v
-	}
-
-	// Convert the AST-based IR to the SSA-based IR
-	s.stmtList(fn.Func.Enter)
-	s.stmtList(fn.Nbody)
-
-	// fallthrough to exit
-	if s.curBlock != nil {
-		s.pushLine(fn.Func.Endlineno)
-		s.exit()
-		s.popLine()
-	}
-
-	// Check that we used all labels
-	for name, lab := range s.labels {
-		if !lab.used() && !lab.reported && !lab.defNode.Used {
-			yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
-			lab.reported = true
-		}
-		if lab.used() && !lab.defined() && !lab.reported {
-			yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
-			lab.reported = true
-		}
-	}
-
-	// Check any forward gotos. Non-forward gotos have already been checked.
-	for _, n := range s.fwdGotos {
-		lab := s.labels[n.Left.Sym.Name]
-		// If the label is undefined, we have already printed an error.
-		if lab.defined() {
-			s.checkgoto(n, lab.defNode)
-		}
-	}
-
-	if nerrors > 0 {
-		s.f.Free()
-		return nil
-	}
-
-	s.insertPhis()
-
-	// Don't carry a reference to this around longer than necessary
-	s.exitCode = Nodes{}
-
-	// Main call to ssa package to compile function
-	ssa.Compile(s.f)
-
-	return s.f
-}
-
-type state struct {
-	// configuration (arch) information
-	config *ssa.Config
-
-	// function we're building
-	f *ssa.Func
-
-	// labels and labeled control flow nodes (OFOR, OSWITCH, OSELECT) in f
-	labels       map[string]*ssaLabel
-	labeledNodes map[*Node]*ssaLabel
-
-	// gotos that jump forward; required for deferred checkgoto calls
-	fwdGotos []*Node
-	// Code that must precede any return
-	// (e.g., copying value of heap-escaped paramout back to true paramout)
-	exitCode Nodes
-
-	// unlabeled break and continue statement tracking
-	breakTo    *ssa.Block // current target for plain break statement
-	continueTo *ssa.Block // current target for plain continue statement
-
-	// current location where we're interpreting the AST
-	curBlock *ssa.Block
-
-	// variable assignments in the current block (map from variable symbol to ssa value)
-	// *Node is the unique identifier (an ONAME Node) for the variable.
-	// TODO: keep a single varnum map, then make all of these maps slices instead?
-	vars map[*Node]*ssa.Value
-
-	// fwdVars are variables that are used before they are defined in the current block.
-	// This map exists just to coalesce multiple references into a single FwdRef op.
-	// *Node is the unique identifier (an ONAME Node) for the variable.
-	fwdVars map[*Node]*ssa.Value
-
-	// all defined variables at the end of each block. Indexed by block ID.
-	defvars []map[*Node]*ssa.Value
-
-	// addresses of PPARAM and PPARAMOUT variables.
-	decladdrs map[*Node]*ssa.Value
-
-	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
-	varsyms map[*Node]interface{}
-
-	// starting values. Memory, stack pointer, and globals pointer
-	startmem *ssa.Value
-	sp       *ssa.Value
-	sb       *ssa.Value
-
-	// line number stack. The current line number is the top of the stack.
-	line []int32
-
-	// list of panic calls by function name and line number.
-	// Used to deduplicate panic calls.
-	panics map[funcLine]*ssa.Block
-
-	// list of PPARAMOUT (return) variables.
-	returns []*Node
-
-	// A dummy value used during phi construction.
-	placeholder *ssa.Value
-
-	cgoUnsafeArgs bool
-	noWB          bool
-	WBLineno      int32 // line number of first write barrier. 0=no write barriers
-}
-
-type funcLine struct {
-	f    *Node
-	line int32
-}
-
-type ssaLabel struct {
-	target         *ssa.Block // block identified by this label
-	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
-	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
-	defNode        *Node      // label definition Node (OLABEL)
-	// Label use Node (OGOTO, OBREAK, OCONTINUE).
-	// Used only for error detection and reporting.
-	// There might be multiple uses, but we only need to track one.
-	useNode  *Node
-	reported bool // reported indicates whether an error has already been reported for this label
-}
-
-// defined reports whether the label has a definition (OLABEL node).
-func (l *ssaLabel) defined() bool { return l.defNode != nil }
-
-// used reports whether the label has a use (OGOTO, OBREAK, or OCONTINUE node).
-func (l *ssaLabel) used() bool { return l.useNode != nil }
-
-// label returns the label associated with sym, creating it if necessary.
-func (s *state) label(sym *Sym) *ssaLabel {
-	lab := s.labels[sym.Name]
-	if lab == nil {
-		lab = new(ssaLabel)
-		s.labels[sym.Name] = lab
-	}
-	return lab
-}
-
-func (s *state) Logf(msg string, args ...interface{})              { s.config.Logf(msg, args...) }
-func (s *state) Log() bool                                         { return s.config.Log() }
-func (s *state) Fatalf(msg string, args ...interface{})            { s.config.Fatalf(s.peekLine(), msg, args...) }
-func (s *state) Warnl(line int32, msg string, args ...interface{}) { s.config.Warnl(line, msg, args...) }
-func (s *state) Debug_checknil() bool                              { return s.config.Debug_checknil() }
-
-var (
-	// dummy node for the memory variable
-	memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}}
-
-	// dummy nodes for temporary variables
-	ptrVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}}
-	lenVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}}
-	newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}}
-	capVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}}
-	typVar    = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}}
-	okVar     = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}}
-)
-
-// startBlock sets the current block we're generating code in to b.
-func (s *state) startBlock(b *ssa.Block) {
-	if s.curBlock != nil {
-		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
-	}
-	s.curBlock = b
-	s.vars = map[*Node]*ssa.Value{}
-	for n := range s.fwdVars {
-		delete(s.fwdVars, n)
-	}
-}
-
-// endBlock marks the end of generating code for the current block.
-// Returns the (former) current block. Returns nil if there is no current
-// block, i.e. if no code flows to the current execution point.
-func (s *state) endBlock() *ssa.Block {
-	b := s.curBlock
-	if b == nil {
-		return nil
-	}
-	for len(s.defvars) <= int(b.ID) {
-		s.defvars = append(s.defvars, nil)
-	}
-	s.defvars[b.ID] = s.vars
-	s.curBlock = nil
-	s.vars = nil
-	b.Line = s.peekLine()
-	return b
-}
-
-// pushLine pushes a line number on the line number stack.
-func (s *state) pushLine(line int32) {
-	if line == 0 {
-		// the frontend may emit a node with a missing line number;
-		// use the parent line number in this case.
-		line = s.peekLine()
-		if Debug['K'] != 0 {
-			Warn("buildssa: line 0")
-		}
-	}
-	s.line = append(s.line, line)
-}
-
-// popLine pops the top of the line number stack.
-func (s *state) popLine() {
-	s.line = s.line[:len(s.line)-1]
-}
-
-// peekLine peeks at the top of the line number stack.
-func (s *state) peekLine() int32 {
-	return s.line[len(s.line)-1]
-}
-
-func (s *state) Error(msg string, args ...interface{}) {
-	yyerrorl(s.peekLine(), msg, args...)
-}
-
-// newValue0 adds a new value with no arguments to the current block.
-func (s *state) newValue0(op ssa.Op, t ssa.Type) *ssa.Value {
-	return s.curBlock.NewValue0(s.peekLine(), op, t)
-}
-
-// newValue0A adds a new value with no arguments and an aux value to the current block.
-func (s *state) newValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
-	return s.curBlock.NewValue0A(s.peekLine(), op, t, aux)
-}
-
-// newValue0I adds a new value with no arguments and an auxint value to the current block.
-func (s *state) newValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
-	return s.curBlock.NewValue0I(s.peekLine(), op, t, auxint)
-}
-
-// newValue1 adds a new value with one argument to the current block.
-func (s *state) newValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue1(s.peekLine(), op, t, arg)
-}
-
-// newValue1A adds a new value with one argument and an aux value to the current block.
-func (s *state) newValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue1A(s.peekLine(), op, t, aux, arg)
-}
-
-// newValue1I adds a new value with one argument and an auxint value to the current block.
-func (s *state) newValue1I(op ssa.Op, t ssa.Type, aux int64, arg *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue1I(s.peekLine(), op, t, aux, arg)
-}
-
-// newValue2 adds a new value with two arguments to the current block.
-func (s *state) newValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue2(s.peekLine(), op, t, arg0, arg1)
-}
-
-// newValue2I adds a new value with two arguments and an auxint value to the current block.
-func (s *state) newValue2I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue2I(s.peekLine(), op, t, aux, arg0, arg1)
-}
-
-// newValue3 adds a new value with three arguments to the current block.
-func (s *state) newValue3(op ssa.Op, t ssa.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue3(s.peekLine(), op, t, arg0, arg1, arg2)
-}
-
-// newValue3I adds a new value with three arguments and an auxint value to the current block.
-func (s *state) newValue3I(op ssa.Op, t ssa.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue3I(s.peekLine(), op, t, aux, arg0, arg1, arg2)
-}
-
-// newValue4 adds a new value with four arguments to the current block.
-func (s *state) newValue4(op ssa.Op, t ssa.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
-	return s.curBlock.NewValue4(s.peekLine(), op, t, arg0, arg1, arg2, arg3)
-}
-
-// entryNewValue0 adds a new value with no arguments to the entry block.
-func (s *state) entryNewValue0(op ssa.Op, t ssa.Type) *ssa.Value {
-	return s.f.Entry.NewValue0(s.peekLine(), op, t)
-}
-
-// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
-func (s *state) entryNewValue0A(op ssa.Op, t ssa.Type, aux interface{}) *ssa.Value {
-	return s.f.Entry.NewValue0A(s.peekLine(), op, t, aux)
-}
-
-// entryNewValue0I adds a new value with no arguments and an auxint value to the entry block.
-func (s *state) entryNewValue0I(op ssa.Op, t ssa.Type, auxint int64) *ssa.Value {
-	return s.f.Entry.NewValue0I(s.peekLine(), op, t, auxint)
-}
-
-// entryNewValue1 adds a new value with one argument to the entry block.
-func (s *state) entryNewValue1(op ssa.Op, t ssa.Type, arg *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue1(s.peekLine(), op, t, arg)
-}
-
-// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
-func (s *state) entryNewValue1I(op ssa.Op, t ssa.Type, auxint int64, arg *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
-}
-
-// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
-func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
-}
-
-// entryNewValue2 adds a new value with two arguments to the entry block.
-func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
-	return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
-}
-
-// const* routines add a new const value to the entry block.
-func (s *state) constSlice(t ssa.Type) *ssa.Value       { return s.f.ConstSlice(s.peekLine(), t) }
-func (s *state) constInterface(t ssa.Type) *ssa.Value   { return s.f.ConstInterface(s.peekLine(), t) }
-func (s *state) constNil(t ssa.Type) *ssa.Value         { return s.f.ConstNil(s.peekLine(), t) }
-func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
-func (s *state) constBool(c bool) *ssa.Value {
-	return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
-}
-func (s *state) constInt8(t ssa.Type, c int8) *ssa.Value {
-	return s.f.ConstInt8(s.peekLine(), t, c)
-}
-func (s *state) constInt16(t ssa.Type, c int16) *ssa.Value {
-	return s.f.ConstInt16(s.peekLine(), t, c)
-}
-func (s *state) constInt32(t ssa.Type, c int32) *ssa.Value {
-	return s.f.ConstInt32(s.peekLine(), t, c)
-}
-func (s *state) constInt64(t ssa.Type, c int64) *ssa.Value {
-	return s.f.ConstInt64(s.peekLine(), t, c)
-}
-func (s *state) constFloat32(t ssa.Type, c float64) *ssa.Value {
-	return s.f.ConstFloat32(s.peekLine(), t, c)
-}
-func (s *state) constFloat64(t ssa.Type, c float64) *ssa.Value {
-	return s.f.ConstFloat64(s.peekLine(), t, c)
-}
-func (s *state) constInt(t ssa.Type, c int64) *ssa.Value {
-	if s.config.IntSize == 8 {
-		return s.constInt64(t, c)
-	}
-	if int64(int32(c)) != c {
-		s.Fatalf("integer constant too big %d", c)
-	}
-	return s.constInt32(t, int32(c))
-}
-
-// stmtList converts the statement list n to SSA and adds it to s.
-func (s *state) stmtList(l Nodes) {
-	for _, n := range l.Slice() {
-		s.stmt(n)
-	}
-}
-
-// stmt converts the statement n to SSA and adds it to s.
-func (s *state) stmt(n *Node) {
-	s.pushLine(n.Lineno)
-	defer s.popLine()
-
-	// If s.curBlock is nil, then we're about to generate dead code.
-	// We can't just short-circuit here, though,
-	// because we check labels and gotos as part of SSA generation.
-	// Provide a block for the dead code so that we don't have
-	// to add special cases everywhere else.
-	if s.curBlock == nil {
-		dead := s.f.NewBlock(ssa.BlockPlain)
-		s.startBlock(dead)
-	}
-
-	s.stmtList(n.Ninit)
-	switch n.Op {
-
-	case OBLOCK:
-		s.stmtList(n.List)
-
-	// No-ops
-	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
-
-	// Expression statements
-	case OCALLFUNC:
-		if isIntrinsicCall(n) {
-			s.intrinsicCall(n)
-			return
-		}
-		fallthrough
-
-	case OCALLMETH, OCALLINTER:
-		s.call(n, callNormal)
-		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC {
-			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
-				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "selectgo" || fn == "block") {
-				m := s.mem()
-				b := s.endBlock()
-				b.Kind = ssa.BlockExit
-				b.SetControl(m)
-				// TODO: never rewrite OPANIC to OCALLFUNC in the
-				// first place. Need to wait until all backends
-				// go through SSA.
-			}
-		}
-	case ODEFER:
-		s.call(n.Left, callDefer)
-	case OPROC:
-		s.call(n.Left, callGo)
-
-	case OAS2DOTTYPE:
-		res, resok := s.dottype(n.Rlist.First(), true)
-		deref := false
-		if !canSSAType(n.Rlist.First().Type) {
-			if res.Op != ssa.OpLoad {
-				s.Fatalf("dottype of non-load")
-			}
-			mem := s.mem()
-			if mem.Op == ssa.OpVarKill {
-				mem = mem.Args[0]
-			}
-			if res.Args[1] != mem {
-				s.Fatalf("memory no longer live from 2-result dottype load")
-			}
-			deref = true
-			res = res.Args[0]
-		}
-		s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), deref, n.Lineno, 0, false)
-		s.assign(n.List.Second(), resok, false, false, n.Lineno, 0, false)
-		return
-
-	case OAS2FUNC:
-		// We come here only when it is an intrinsic call returning two values.
-		if !isIntrinsicCall(n.Rlist.First()) {
-			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
-		}
-		v := s.intrinsicCall(n.Rlist.First())
-		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
-		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
-		// Make a fake node to mimic loading return value, ONLY for write barrier test.
-		// This is future-proofing against non-scalar 2-result intrinsics.
-		// Currently we only have scalar ones, which result in no write barrier.
-		fakeret := &Node{Op: OINDREGSP}
-		s.assign(n.List.First(), v1, needwritebarrier(n.List.First(), fakeret), false, n.Lineno, 0, false)
-		s.assign(n.List.Second(), v2, needwritebarrier(n.List.Second(), fakeret), false, n.Lineno, 0, false)
-		return
-
-	case ODCL:
-		if n.Left.Class == PAUTOHEAP {
-			Fatalf("DCL %v", n)
-		}
-
-	case OLABEL:
-		sym := n.Left.Sym
-
-		if isblanksym(sym) {
-			// Empty identifier is valid but useless.
-			// See issues 11589, 11593.
-			return
-		}
-
-		lab := s.label(sym)
-
-		// Associate label with its control flow node, if any
-		if ctl := n.Name.Defn; ctl != nil {
-			switch ctl.Op {
-			case OFOR, OSWITCH, OSELECT:
-				s.labeledNodes[ctl] = lab
-			}
-		}
-
-		if !lab.defined() {
-			lab.defNode = n
-		} else {
-			s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
-			lab.reported = true
-		}
-		// The label might already have a target block via a goto.
-		if lab.target == nil {
-			lab.target = s.f.NewBlock(ssa.BlockPlain)
-		}
-
-		// go to that label (we pretend "label:" is preceded by "goto label")
-		b := s.endBlock()
-		b.AddEdgeTo(lab.target)
-		s.startBlock(lab.target)
-
-	case OGOTO:
-		sym := n.Left.Sym
-
-		lab := s.label(sym)
-		if lab.target == nil {
-			lab.target = s.f.NewBlock(ssa.BlockPlain)
-		}
-		if !lab.used() {
-			lab.useNode = n
-		}
-
-		if lab.defined() {
-			s.checkgoto(n, lab.defNode)
-		} else {
-			s.fwdGotos = append(s.fwdGotos, n)
-		}
-
-		b := s.endBlock()
-		b.AddEdgeTo(lab.target)
-
-	case OAS, OASWB:
-		// Check whether we can generate static data rather than code.
-		// If so, ignore n and defer data generation until codegen.
-		// Failure to do this causes writes to readonly symbols.
-		if gen_as_init(n, true) {
-			var data []*Node
-			if s.f.StaticData != nil {
-				data = s.f.StaticData.([]*Node)
-			}
-			s.f.StaticData = append(data, n)
-			return
-		}
-
-		if n.Left == n.Right && n.Left.Op == ONAME {
-			// An x=x assignment. No point in doing anything
-			// here. In addition, skipping this assignment
-			// prevents generating:
-			//   VARDEF x
-			//   COPY x -> x
-			// which is bad because x is incorrectly considered
-			// dead before the vardef. See issue #14904.
-			return
-		}
-
-		var t *Type
-		if n.Right != nil {
-			t = n.Right.Type
-		} else {
-			t = n.Left.Type
-		}
-
-		// Evaluate RHS.
-		rhs := n.Right
-		if rhs != nil {
-			switch rhs.Op {
-			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
-				// All literals with nonzero fields have already been
-				// rewritten during walk. Any that remain are just T{}
-				// or equivalents. Use the zero value.
-				if !iszero(rhs) {
-					Fatalf("literal with nonzero value in SSA: %v", rhs)
-				}
-				rhs = nil
-			case OAPPEND:
-				// If we're writing the result of an append back to the same slice,
-				// handle it specially to avoid write barriers on the fast (non-growth) path.
-				// If the slice can be SSA'd, it'll be on the stack,
-				// so there will be no write barriers,
-				// so there's no need to attempt to prevent them.
-				if samesafeexpr(n.Left, rhs.List.First()) {
-					if !s.canSSA(n.Left) {
-						if Debug_append > 0 {
-							Warnl(n.Lineno, "append: len-only update")
-						}
-						s.append(rhs, true)
-						return
-					} else {
-						if Debug_append > 0 { // replicating old diagnostic message
-							Warnl(n.Lineno, "append: len-only update (in local slice)")
-						}
-					}
-				}
-			}
-		}
-		var r *ssa.Value
-		var isVolatile bool
-		needwb := n.Op == OASWB
-		deref := !canSSAType(t)
-		if deref {
-			if rhs == nil {
-				r = nil // Signal assign to use OpZero.
-			} else {
-				r, isVolatile = s.addr(rhs, false)
-			}
-		} else {
-			if rhs == nil {
-				r = s.zeroVal(t)
-			} else {
-				r = s.expr(rhs)
-			}
-		}
-		if rhs != nil && rhs.Op == OAPPEND && needwritebarrier(n.Left, rhs) {
-			// The frontend gets rid of the write barrier to enable the special OAPPEND
-			// handling above, but since this is not a special case, we need it.
-			// TODO: just add a ptr graying to the end of growslice?
-			// TODO: check whether we need to provide special handling and a write barrier
-			// for ODOTTYPE and ORECV also.
-			// They get similar wb-removal treatment in walk.go:OAS.
-			needwb = true
-		}
-
-		var skip skipMask
-		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
-			// We're assigning a slicing operation back to its source.
-			// Don't write back fields we aren't changing. See issue #14855.
-			i, j, k := rhs.SliceBounds()
-			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
-				// [0:...] is the same as [:...]
-				i = nil
-			}
-			// TODO: detect defaults for len/cap also.
-			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
-			//    tmp = len(*p)
-			//    (*p)[:tmp]
-			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
-			//      j = nil
-			//}
-			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
-			//      k = nil
-			//}
-			if i == nil {
-				skip |= skipPtr
-				if j == nil {
-					skip |= skipLen
-				}
-				if k == nil {
-					skip |= skipCap
-				}
-			}
-		}
-
-		s.assign(n.Left, r, needwb, deref, n.Lineno, skip, isVolatile)
-
-	case OIF:
-		bThen := s.f.NewBlock(ssa.BlockPlain)
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-		var bElse *ssa.Block
-		if n.Rlist.Len() != 0 {
-			bElse = s.f.NewBlock(ssa.BlockPlain)
-			s.condBranch(n.Left, bThen, bElse, n.Likely)
-		} else {
-			s.condBranch(n.Left, bThen, bEnd, n.Likely)
-		}
-
-		s.startBlock(bThen)
-		s.stmtList(n.Nbody)
-		if b := s.endBlock(); b != nil {
-			b.AddEdgeTo(bEnd)
-		}
-
-		if n.Rlist.Len() != 0 {
-			s.startBlock(bElse)
-			s.stmtList(n.Rlist)
-			if b := s.endBlock(); b != nil {
-				b.AddEdgeTo(bEnd)
-			}
-		}
-		s.startBlock(bEnd)
-
-	case ORETURN:
-		s.stmtList(n.List)
-		s.exit()
-	case ORETJMP:
-		s.stmtList(n.List)
-		b := s.exit()
-		b.Kind = ssa.BlockRetJmp // override BlockRet
-		b.Aux = n.Left.Sym
-
-	case OCONTINUE, OBREAK:
-		var op string
-		var to *ssa.Block
-		switch n.Op {
-		case OCONTINUE:
-			op = "continue"
-			to = s.continueTo
-		case OBREAK:
-			op = "break"
-			to = s.breakTo
-		}
-		if n.Left == nil {
-			// plain break/continue
-			if to == nil {
-				s.Error("%s is not in a loop", op)
-				return
-			}
-			// nothing to do; "to" is already the correct target
-		} else {
-			// labeled break/continue; look up the target
-			sym := n.Left.Sym
-			lab := s.label(sym)
-			if !lab.used() {
-				lab.useNode = n.Left
-			}
-			if !lab.defined() {
-				s.Error("%s label not defined: %v", op, sym)
-				lab.reported = true
-				return
-			}
-			switch n.Op {
-			case OCONTINUE:
-				to = lab.continueTarget
-			case OBREAK:
-				to = lab.breakTarget
-			}
-			if to == nil {
-				// Valid label but not usable with a break/continue here, e.g.:
-				// for {
-				// 	continue abc
-				// }
-				// abc:
-				// for {}
-				s.Error("invalid %s label %v", op, sym)
-				lab.reported = true
-				return
-			}
-		}
-
-		b := s.endBlock()
-		b.AddEdgeTo(to)
-
-	case OFOR:
-		// OFOR: for Ninit; Left; Right { Nbody }
-		bCond := s.f.NewBlock(ssa.BlockPlain)
-		bBody := s.f.NewBlock(ssa.BlockPlain)
-		bIncr := s.f.NewBlock(ssa.BlockPlain)
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-
-		// first, jump to condition test
-		b := s.endBlock()
-		b.AddEdgeTo(bCond)
-
-		// generate code to test condition
-		s.startBlock(bCond)
-		if n.Left != nil {
-			s.condBranch(n.Left, bBody, bEnd, 1)
-		} else {
-			b := s.endBlock()
-			b.Kind = ssa.BlockPlain
-			b.AddEdgeTo(bBody)
-		}
-
-		// set up for continue/break in body
-		prevContinue := s.continueTo
-		prevBreak := s.breakTo
-		s.continueTo = bIncr
-		s.breakTo = bEnd
-		lab := s.labeledNodes[n]
-		if lab != nil {
-			// labeled for loop
-			lab.continueTarget = bIncr
-			lab.breakTarget = bEnd
-		}
-
-		// generate body
-		s.startBlock(bBody)
-		s.stmtList(n.Nbody)
-
-		// tear down continue/break
-		s.continueTo = prevContinue
-		s.breakTo = prevBreak
-		if lab != nil {
-			lab.continueTarget = nil
-			lab.breakTarget = nil
-		}
-
-		// done with body, goto incr
-		if b := s.endBlock(); b != nil {
-			b.AddEdgeTo(bIncr)
-		}
-
-		// generate incr
-		s.startBlock(bIncr)
-		if n.Right != nil {
-			s.stmt(n.Right)
-		}
-		if b := s.endBlock(); b != nil {
-			b.AddEdgeTo(bCond)
-		}
-		s.startBlock(bEnd)
-
-	case OSWITCH, OSELECT:
-		// These have been mostly rewritten by the front end into their Nbody fields.
-		// Our main task is to correctly hook up any break statements.
-		bEnd := s.f.NewBlock(ssa.BlockPlain)
-
-		prevBreak := s.breakTo
-		s.breakTo = bEnd
-		lab := s.labeledNodes[n]
-		if lab != nil {
-			// labeled
-			lab.breakTarget = bEnd
-		}
-
-		// generate body code
-		s.stmtList(n.Nbody)
-
-		s.breakTo = prevBreak
-		if lab != nil {
-			lab.breakTarget = nil
-		}
-
-		// OSWITCH never falls through (s.curBlock == nil here).
-		// OSELECT does not fall through if we're calling selectgo.
-		// OSELECT does fall through if we're calling selectnb{send,recv}[2].
-		// In those latter cases, go to the code after the select.
-		if b := s.endBlock(); b != nil {
-			b.AddEdgeTo(bEnd)
-		}
-		s.startBlock(bEnd)
-
-	case OVARKILL:
-		// Insert a varkill op to record that a variable is no longer live.
-		// We only care about liveness info at call sites, so putting the
-		// varkill in the store chain is enough to keep it correctly ordered
-		// with respect to call ops.
-		if !s.canSSA(n.Left) {
-			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
-		}
-
-	case OVARLIVE:
-		// Insert a varlive op to record that a variable is still live.
-		if !n.Left.Addrtaken {
-			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
-		}
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
-
-	case OCHECKNIL:
-		p := s.expr(n.Left)
-		s.nilCheck(p)
-
-	case OSQRT:
-		s.expr(n.Left)
-
-	default:
-		s.Fatalf("unhandled stmt %v", n.Op)
-	}
-}
-
-// exit processes any code that needs to be generated just before returning.
-// It returns a BlockRet block that ends the control flow. Its control value
-// will be set to the final memory state.
-func (s *state) exit() *ssa.Block {
-	if hasdefer {
-		s.rtcall(Deferreturn, true, nil)
-	}
-
-	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
-	// variables back to the stack.
-	s.stmtList(s.exitCode)
-
-	// Store SSAable PPARAMOUT variables back to stack locations.
-	for _, n := range s.returns {
-		addr := s.decladdrs[n]
-		val := s.variable(n, n.Type)
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
-		// TODO: if val is ever spilled, we'd like to use the
-		// PPARAMOUT slot for spilling it. That won't happen
-		// currently.
-	}
-
-	// Do actual return.
-	m := s.mem()
-	b := s.endBlock()
-	b.Kind = ssa.BlockRet
-	b.SetControl(m)
-	return b
-}
-
-type opAndType struct {
-	op    Op
-	etype EType
-}
-
-var opToSSA = map[opAndType]ssa.Op{
-	opAndType{OADD, TINT8}:    ssa.OpAdd8,
-	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
-	opAndType{OADD, TINT16}:   ssa.OpAdd16,
-	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
-	opAndType{OADD, TINT32}:   ssa.OpAdd32,
-	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
-	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
-	opAndType{OADD, TINT64}:   ssa.OpAdd64,
-	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
-	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
-	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
-	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
-
-	opAndType{OSUB, TINT8}:    ssa.OpSub8,
-	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
-	opAndType{OSUB, TINT16}:   ssa.OpSub16,
-	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
-	opAndType{OSUB, TINT32}:   ssa.OpSub32,
-	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
-	opAndType{OSUB, TINT64}:   ssa.OpSub64,
-	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
-	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
-	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
-
-	opAndType{ONOT, TBOOL}: ssa.OpNot,
-
-	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
-	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
-	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
-	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
-	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
-	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
-	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
-	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
-	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
-	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
-
-	opAndType{OCOM, TINT8}:   ssa.OpCom8,
-	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
-	opAndType{OCOM, TINT16}:  ssa.OpCom16,
-	opAndType{OCOM, TUINT16}: ssa.OpCom16,
-	opAndType{OCOM, TINT32}:  ssa.OpCom32,
-	opAndType{OCOM, TUINT32}: ssa.OpCom32,
-	opAndType{OCOM, TINT64}:  ssa.OpCom64,
-	opAndType{OCOM, TUINT64}: ssa.OpCom64,
-
-	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
-	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
-	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
-	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
-
-	opAndType{OMUL, TINT8}:    ssa.OpMul8,
-	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
-	opAndType{OMUL, TINT16}:   ssa.OpMul16,
-	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
-	opAndType{OMUL, TINT32}:   ssa.OpMul32,
-	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
-	opAndType{OMUL, TINT64}:   ssa.OpMul64,
-	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
-	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
-	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
-
-	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
-	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
-
-	opAndType{OHMUL, TINT8}:   ssa.OpHmul8,
-	opAndType{OHMUL, TUINT8}:  ssa.OpHmul8u,
-	opAndType{OHMUL, TINT16}:  ssa.OpHmul16,
-	opAndType{OHMUL, TUINT16}: ssa.OpHmul16u,
-	opAndType{OHMUL, TINT32}:  ssa.OpHmul32,
-	opAndType{OHMUL, TUINT32}: ssa.OpHmul32u,
-
-	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
-	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
-	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
-	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
-	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
-	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
-	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
-	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
-
-	opAndType{OMOD, TINT8}:   ssa.OpMod8,
-	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
-	opAndType{OMOD, TINT16}:  ssa.OpMod16,
-	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
-	opAndType{OMOD, TINT32}:  ssa.OpMod32,
-	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
-	opAndType{OMOD, TINT64}:  ssa.OpMod64,
-	opAndType{OMOD, TUINT64}: ssa.OpMod64u,
-
-	opAndType{OAND, TINT8}:   ssa.OpAnd8,
-	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
-	opAndType{OAND, TINT16}:  ssa.OpAnd16,
-	opAndType{OAND, TUINT16}: ssa.OpAnd16,
-	opAndType{OAND, TINT32}:  ssa.OpAnd32,
-	opAndType{OAND, TUINT32}: ssa.OpAnd32,
-	opAndType{OAND, TINT64}:  ssa.OpAnd64,
-	opAndType{OAND, TUINT64}: ssa.OpAnd64,
-
-	opAndType{OOR, TINT8}:   ssa.OpOr8,
-	opAndType{OOR, TUINT8}:  ssa.OpOr8,
-	opAndType{OOR, TINT16}:  ssa.OpOr16,
-	opAndType{OOR, TUINT16}: ssa.OpOr16,
-	opAndType{OOR, TINT32}:  ssa.OpOr32,
-	opAndType{OOR, TUINT32}: ssa.OpOr32,
-	opAndType{OOR, TINT64}:  ssa.OpOr64,
-	opAndType{OOR, TUINT64}: ssa.OpOr64,
-
-	opAndType{OXOR, TINT8}:   ssa.OpXor8,
-	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
-	opAndType{OXOR, TINT16}:  ssa.OpXor16,
-	opAndType{OXOR, TUINT16}: ssa.OpXor16,
-	opAndType{OXOR, TINT32}:  ssa.OpXor32,
-	opAndType{OXOR, TUINT32}: ssa.OpXor32,
-	opAndType{OXOR, TINT64}:  ssa.OpXor64,
-	opAndType{OXOR, TUINT64}: ssa.OpXor64,
-
-	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
-	opAndType{OEQ, TINT8}:      ssa.OpEq8,
-	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
-	opAndType{OEQ, TINT16}:     ssa.OpEq16,
-	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
-	opAndType{OEQ, TINT32}:     ssa.OpEq32,
-	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
-	opAndType{OEQ, TINT64}:     ssa.OpEq64,
-	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
-	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
-	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
-	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
-	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
-	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
-	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
-	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
-	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
-	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
-	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
-	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,
-
-	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
-	opAndType{ONE, TINT8}:      ssa.OpNeq8,
-	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
-	opAndType{ONE, TINT16}:     ssa.OpNeq16,
-	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
-	opAndType{ONE, TINT32}:     ssa.OpNeq32,
-	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
-	opAndType{ONE, TINT64}:     ssa.OpNeq64,
-	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
-	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
-	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
-	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
-	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
-	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
-	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
-	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
-	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
-	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
-	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
-	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,
-
-	opAndType{OLT, TINT8}:    ssa.OpLess8,
-	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
-	opAndType{OLT, TINT16}:   ssa.OpLess16,
-	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
-	opAndType{OLT, TINT32}:   ssa.OpLess32,
-	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
-	opAndType{OLT, TINT64}:   ssa.OpLess64,
-	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
-	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
-	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
-
-	opAndType{OGT, TINT8}:    ssa.OpGreater8,
-	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
-	opAndType{OGT, TINT16}:   ssa.OpGreater16,
-	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
-	opAndType{OGT, TINT32}:   ssa.OpGreater32,
-	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
-	opAndType{OGT, TINT64}:   ssa.OpGreater64,
-	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
-	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
-	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
-
-	opAndType{OLE, TINT8}:    ssa.OpLeq8,
-	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
-	opAndType{OLE, TINT16}:   ssa.OpLeq16,
-	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
-	opAndType{OLE, TINT32}:   ssa.OpLeq32,
-	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
-	opAndType{OLE, TINT64}:   ssa.OpLeq64,
-	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
-	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
-	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
-
-	opAndType{OGE, TINT8}:    ssa.OpGeq8,
-	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
-	opAndType{OGE, TINT16}:   ssa.OpGeq16,
-	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
-	opAndType{OGE, TINT32}:   ssa.OpGeq32,
-	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
-	opAndType{OGE, TINT64}:   ssa.OpGeq64,
-	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
-	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
-	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
-
-	opAndType{OLROT, TUINT8}:  ssa.OpLrot8,
-	opAndType{OLROT, TUINT16}: ssa.OpLrot16,
-	opAndType{OLROT, TUINT32}: ssa.OpLrot32,
-	opAndType{OLROT, TUINT64}: ssa.OpLrot64,
-
-	opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
-}
-
-func (s *state) concreteEtype(t *Type) EType {
-	e := t.Etype
-	switch e {
-	default:
-		return e
-	case TINT:
-		if s.config.IntSize == 8 {
-			return TINT64
-		}
-		return TINT32
-	case TUINT:
-		if s.config.IntSize == 8 {
-			return TUINT64
-		}
-		return TUINT32
-	case TUINTPTR:
-		if s.config.PtrSize == 8 {
-			return TUINT64
-		}
-		return TUINT32
-	}
-}
-
-func (s *state) ssaOp(op Op, t *Type) ssa.Op {
-	etype := s.concreteEtype(t)
-	x, ok := opToSSA[opAndType{op, etype}]
-	if !ok {
-		s.Fatalf("unhandled binary op %v %s", op, etype)
-	}
-	return x
-}
-
-func floatForComplex(t *Type) *Type {
-	if t.Size() == 8 {
-		return Types[TFLOAT32]
-	} else {
-		return Types[TFLOAT64]
-	}
-}
-
-type opAndTwoTypes struct {
-	op     Op
-	etype1 EType
-	etype2 EType
-}
-
-type twoTypes struct {
-	etype1 EType
-	etype2 EType
-}
-
-type twoOpsAndType struct {
-	op1              ssa.Op
-	op2              ssa.Op
-	intermediateType EType
-}
-
-var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
-
-	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
-
-	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
-
-	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
-	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
-
-	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
-	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
-	// unsigned
-	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
-	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
-	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead
-
-	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
-	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
-	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead
-
-	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
-	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead
-
-	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
-	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
-	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
-	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
-
-	// float
-	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
-	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT64},
-	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCopy, TFLOAT32},
-	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
-}
-
-// fpConvOpToSSA32 is used only on 32-bit architectures and contains only the
-// entries that differ from fpConvOpToSSA: on a 32-bit target, uint32<->float
-// conversions use the unsigned 32-bit conversion ops directly rather than
-// going through int64.
-var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
-	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
-	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
-	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
-	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
-}
-
-// uint64<->float conversions, only on machines that have instructions for that
-var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
-	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
-	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
-	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
-	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
-}
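
The three tables above encode int<->float conversions as a widen/convert pair, with uint64<->float singled out: it gets a native instruction where available (uint64fpConvOpToSSA) and a branchy expansion elsewhere. A standalone, illustrative Go snippet, plain user code rather than compiler code, showing the source-level behavior these lowerings have to preserve:

package main

import "fmt"

func main() {
	// Small signed integers are widened before the float convert
	// (SignExt8to32 followed by Cvt32to64F in the table above).
	var i8 int8 = -1
	fmt.Println(float64(i8)) // -1

	// A uint64 with the high bit set cannot simply be reinterpreted as a
	// signed int64 for the conversion; this is the case that needs either
	// a native unsigned convert or the branchy expansion.
	var u uint64 = 1 << 63
	f := float64(u)
	fmt.Println(f)         // 9.223372036854776e+18
	fmt.Println(uint64(f)) // 9223372036854775808
}
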
-
-var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
-	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
-	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
-	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
-	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
-	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
-	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
-	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
-	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
-
-	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
-	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
-	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
-	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
-	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
-	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
-	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
-	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
-
-	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
-	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
-	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
-	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
-	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
-	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
-	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
-	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
-
-	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
-	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
-	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
-	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
-	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
-	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
-	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
-	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
-
-	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
-	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
-	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
-	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
-	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
-	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
-	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
-	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
-
-	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
-	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
-	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
-	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
-	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
-	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
-	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
-	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
-
-	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
-	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
-	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
-	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
-	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
-	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
-	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
-	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
-
-	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
-	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
-	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
-	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
-	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
-	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
-	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
-	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
-}
-
-func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
-	etype1 := s.concreteEtype(t)
-	etype2 := s.concreteEtype(u)
-	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
-	if !ok {
-		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
-	}
-	return x
-}
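
shiftOpToSSA is keyed on both the operand type and the shift-count type, since Go permits the count to be any unsigned integer type here. A short standalone example, ordinary user code, of source forms that land in different table entries:

package main

import "fmt"

func main() {
	var x int32 = -8
	var small uint8 = 2
	var big uint64 = 40

	// Same operand type, different count types: these use different
	// entries of the table (for example Lsh32x8 versus Lsh32x64).
	fmt.Println(x << small) // -32
	fmt.Println(x >> small) // -2 (arithmetic shift keeps the sign)

	// Counts at or beyond the operand width are well defined in Go; the
	// lowering rules deal with the oversized-count case separately.
	fmt.Println(x << big) // 0
	fmt.Println(x >> big) // -1
	var ux uint32 = 8
	fmt.Println(ux >> big) // 0
}
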
-
-func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
-	etype1 := s.concreteEtype(t)
-	x, ok := opToSSA[opAndType{op, etype1}]
-	if !ok {
-		s.Fatalf("unhandled rotate op %v etype=%s", op, etype1)
-	}
-	return x
-}
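
ssaRotateOp serves the OLROT case of expr below, which requires a constant rotate distance between 1 and the operand width minus one. A standalone sketch, assuming the usual shift/or idiom is what reaches the backend as OLROT, of the source pattern involved:

package main

import "fmt"

func main() {
	var x uint32 = 0x80000001

	// The shift/or idiom below is the source form that can be lowered to a
	// single rotate when the distance is a constant in the range the check
	// in the OLROT case enforces (1 through width-1).
	r := x<<1 | x>>(32-1)
	fmt.Printf("%#x\n", r) // 0x3
}
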
-
-// expr converts the expression n to ssa, adds it to s and returns the ssa result.
-func (s *state) expr(n *Node) *ssa.Value {
-	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
-		// ONAMEs and named OLITERALs have the line number
-		// of the decl, not the use. See issue 14742.
-		s.pushLine(n.Lineno)
-		defer s.popLine()
-	}
-
-	s.stmtList(n.Ninit)
-	switch n.Op {
-	case OARRAYBYTESTRTMP:
-		slice := s.expr(n.Left)
-		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
-		len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
-		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
-	case OSTRARRAYBYTETMP:
-		str := s.expr(n.Left)
-		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), str)
-		len := s.newValue1(ssa.OpStringLen, Types[TINT], str)
-		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
-	case OCFUNC:
-		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Left.Sym})
-		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
-	case ONAME:
-		if n.Class == PFUNC {
-			// "value" of a function is the address of the function's closure
-			sym := funcsym(n.Sym)
-			aux := &ssa.ExternSymbol{Typ: n.Type, Sym: sym}
-			return s.entryNewValue1A(ssa.OpAddr, ptrto(n.Type), aux, s.sb)
-		}
-		if s.canSSA(n) {
-			return s.variable(n, n.Type)
-		}
-		addr, _ := s.addr(n, false)
-		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
-	case OCLOSUREVAR:
-		addr, _ := s.addr(n, false)
-		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
-	case OLITERAL:
-		switch u := n.Val().U.(type) {
-		case *Mpint:
-			i := u.Int64()
-			switch n.Type.Size() {
-			case 1:
-				return s.constInt8(n.Type, int8(i))
-			case 2:
-				return s.constInt16(n.Type, int16(i))
-			case 4:
-				return s.constInt32(n.Type, int32(i))
-			case 8:
-				return s.constInt64(n.Type, i)
-			default:
-				s.Fatalf("bad integer size %d", n.Type.Size())
-				return nil
-			}
-		case string:
-			if u == "" {
-				return s.constEmptyString(n.Type)
-			}
-			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
-		case bool:
-			return s.constBool(u)
-		case *NilVal:
-			t := n.Type
-			switch {
-			case t.IsSlice():
-				return s.constSlice(t)
-			case t.IsInterface():
-				return s.constInterface(t)
-			default:
-				return s.constNil(t)
-			}
-		case *Mpflt:
-			switch n.Type.Size() {
-			case 4:
-				return s.constFloat32(n.Type, u.Float32())
-			case 8:
-				return s.constFloat64(n.Type, u.Float64())
-			default:
-				s.Fatalf("bad float size %d", n.Type.Size())
-				return nil
-			}
-		case *Mpcplx:
-			r := &u.Real
-			i := &u.Imag
-			switch n.Type.Size() {
-			case 8:
-				pt := Types[TFLOAT32]
-				return s.newValue2(ssa.OpComplexMake, n.Type,
-					s.constFloat32(pt, r.Float32()),
-					s.constFloat32(pt, i.Float32()))
-			case 16:
-				pt := Types[TFLOAT64]
-				return s.newValue2(ssa.OpComplexMake, n.Type,
-					s.constFloat64(pt, r.Float64()),
-					s.constFloat64(pt, i.Float64()))
-			default:
-				s.Fatalf("bad float size %d", n.Type.Size())
-				return nil
-			}
-
-		default:
-			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
-			return nil
-		}
-	case OCONVNOP:
-		to := n.Type
-		from := n.Left.Type
-
-		// Assume everything will work out, so set up our return value.
-		// Anything interesting that happens from here is a fatal.
-		x := s.expr(n.Left)
-
-		// Special case for not confusing GC and liveness.
-		// We don't want pointers accidentally classified
-		// as not-pointers or vice-versa because of copy
-		// elision.
-		if to.IsPtrShaped() != from.IsPtrShaped() {
-			return s.newValue2(ssa.OpConvert, to, x, s.mem())
-		}
-
-		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
-
-		// CONVNOP closure
-		if to.Etype == TFUNC && from.IsPtrShaped() {
-			return v
-		}
-
-		// named <--> unnamed type or typed <--> untyped const
-		if from.Etype == to.Etype {
-			return v
-		}
-
-		// unsafe.Pointer <--> *T
-		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
-			return v
-		}
-
-		dowidth(from)
-		dowidth(to)
-		if from.Width != to.Width {
-			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
-			return nil
-		}
-		if etypesign(from.Etype) != etypesign(to.Etype) {
-			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
-			return nil
-		}
-
-		if instrumenting {
-			// These appear to be fine, but they fail the
-			// integer constraint below, so okay them here.
-			// Sample non-integer conversion: map[string]string -> *uint8
-			return v
-		}
-
-		if etypesign(from.Etype) == 0 {
-			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
-			return nil
-		}
-
-		// integer, same width, same sign
-		return v
-
-	case OCONV:
-		x := s.expr(n.Left)
-		ft := n.Left.Type // from type
-		tt := n.Type      // to type
-		if ft.IsInteger() && tt.IsInteger() {
-			var op ssa.Op
-			if tt.Size() == ft.Size() {
-				op = ssa.OpCopy
-			} else if tt.Size() < ft.Size() {
-				// truncation
-				switch 10*ft.Size() + tt.Size() {
-				case 21:
-					op = ssa.OpTrunc16to8
-				case 41:
-					op = ssa.OpTrunc32to8
-				case 42:
-					op = ssa.OpTrunc32to16
-				case 81:
-					op = ssa.OpTrunc64to8
-				case 82:
-					op = ssa.OpTrunc64to16
-				case 84:
-					op = ssa.OpTrunc64to32
-				default:
-					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
-				}
-			} else if ft.IsSigned() {
-				// sign extension
-				switch 10*ft.Size() + tt.Size() {
-				case 12:
-					op = ssa.OpSignExt8to16
-				case 14:
-					op = ssa.OpSignExt8to32
-				case 18:
-					op = ssa.OpSignExt8to64
-				case 24:
-					op = ssa.OpSignExt16to32
-				case 28:
-					op = ssa.OpSignExt16to64
-				case 48:
-					op = ssa.OpSignExt32to64
-				default:
-					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
-				}
-			} else {
-				// zero extension
-				switch 10*ft.Size() + tt.Size() {
-				case 12:
-					op = ssa.OpZeroExt8to16
-				case 14:
-					op = ssa.OpZeroExt8to32
-				case 18:
-					op = ssa.OpZeroExt8to64
-				case 24:
-					op = ssa.OpZeroExt16to32
-				case 28:
-					op = ssa.OpZeroExt16to64
-				case 48:
-					op = ssa.OpZeroExt32to64
-				default:
-					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
-				}
-			}
-			return s.newValue1(op, n.Type, x)
-		}
-
-		if ft.IsFloat() || tt.IsFloat() {
-			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
-			if s.config.IntSize == 4 && Thearch.LinkArch.Name != "amd64p32" && Thearch.LinkArch.Family != sys.MIPS {
-				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
-					conv = conv1
-				}
-			}
-			if Thearch.LinkArch.Name == "arm64" {
-				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
-					conv = conv1
-				}
-			}
-
-			if Thearch.LinkArch.Family == sys.MIPS {
-				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
-					// tt is float32 or float64, and ft is also unsigned
-					if tt.Size() == 4 {
-						return s.uint32Tofloat32(n, x, ft, tt)
-					}
-					if tt.Size() == 8 {
-						return s.uint32Tofloat64(n, x, ft, tt)
-					}
-				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
-					// ft is float32 or float64, and tt is unsigned integer
-					if ft.Size() == 4 {
-						return s.float32ToUint32(n, x, ft, tt)
-					}
-					if ft.Size() == 8 {
-						return s.float64ToUint32(n, x, ft, tt)
-					}
-				}
-			}
-
-			if !ok {
-				s.Fatalf("weird float conversion %v -> %v", ft, tt)
-			}
-			op1, op2, it := conv.op1, conv.op2, conv.intermediateType
-
-			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
-				// normal case, not tripping over unsigned 64
-				if op1 == ssa.OpCopy {
-					if op2 == ssa.OpCopy {
-						return x
-					}
-					return s.newValue1(op2, n.Type, x)
-				}
-				if op2 == ssa.OpCopy {
-					return s.newValue1(op1, n.Type, x)
-				}
-				return s.newValue1(op2, n.Type, s.newValue1(op1, Types[it], x))
-			}
-			// Tricky 64-bit unsigned cases.
-			if ft.IsInteger() {
-				// tt is float32 or float64, and ft is also unsigned
-				if tt.Size() == 4 {
-					return s.uint64Tofloat32(n, x, ft, tt)
-				}
-				if tt.Size() == 8 {
-					return s.uint64Tofloat64(n, x, ft, tt)
-				}
-				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
-			}
-			// ft is float32 or float64, and tt is unsigned integer
-			if ft.Size() == 4 {
-				return s.float32ToUint64(n, x, ft, tt)
-			}
-			if ft.Size() == 8 {
-				return s.float64ToUint64(n, x, ft, tt)
-			}
-			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
-			return nil
-		}
-
-		if ft.IsComplex() && tt.IsComplex() {
-			var op ssa.Op
-			if ft.Size() == tt.Size() {
-				op = ssa.OpCopy
-			} else if ft.Size() == 8 && tt.Size() == 16 {
-				op = ssa.OpCvt32Fto64F
-			} else if ft.Size() == 16 && tt.Size() == 8 {
-				op = ssa.OpCvt64Fto32F
-			} else {
-				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
-			}
-			ftp := floatForComplex(ft)
-			ttp := floatForComplex(tt)
-			return s.newValue2(ssa.OpComplexMake, tt,
-				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
-				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
-		}
-
-		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
-		return nil
-
-	case ODOTTYPE:
-		res, _ := s.dottype(n, false)
-		return res
-
-	// binary ops
-	case OLT, OEQ, ONE, OLE, OGE, OGT:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Left.Type.IsComplex() {
-			pt := floatForComplex(n.Left.Type)
-			op := s.ssaOp(OEQ, pt)
-			r := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
-			i := s.newValue2(op, Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
-			c := s.newValue2(ssa.OpAndB, Types[TBOOL], r, i)
-			switch n.Op {
-			case OEQ:
-				return c
-			case ONE:
-				return s.newValue1(ssa.OpNot, Types[TBOOL], c)
-			default:
-				s.Fatalf("ordered complex compare %v", n.Op)
-			}
-		}
-		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b)
-	case OMUL:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Type.IsComplex() {
-			mulop := ssa.OpMul64F
-			addop := ssa.OpAdd64F
-			subop := ssa.OpSub64F
-			pt := floatForComplex(n.Type) // Could be Float32 or Float64
-			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancelation error
-
-			areal := s.newValue1(ssa.OpComplexReal, pt, a)
-			breal := s.newValue1(ssa.OpComplexReal, pt, b)
-			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
-			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
-
-			if pt != wt { // Widen for calculation
-				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
-				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
-				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
-				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
-			}
-
-			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
-			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
-
-			if pt != wt { // Narrow to store back
-				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
-				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
-			}
-
-			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
-		}
-		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-
-	case ODIV:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Type.IsComplex() {
-			// TODO this is not executed because the front-end substitutes a runtime call.
-			// That probably ought to change; with modest optimization the widen/narrow
-			// conversions could all be elided in larger expression trees.
-			mulop := ssa.OpMul64F
-			addop := ssa.OpAdd64F
-			subop := ssa.OpSub64F
-			divop := ssa.OpDiv64F
-			pt := floatForComplex(n.Type) // Could be Float32 or Float64
-			wt := Types[TFLOAT64]         // Compute in Float64 to minimize cancelation error
-
-			areal := s.newValue1(ssa.OpComplexReal, pt, a)
-			breal := s.newValue1(ssa.OpComplexReal, pt, b)
-			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
-			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
-
-			if pt != wt { // Widen for calculation
-				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
-				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
-				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
-				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
-			}
-
-			denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
-			xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
-			ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
-
-			// TODO not sure if this is best done in wide precision or narrow
-			// Double-rounding might be an issue.
-			// Note that the pre-SSA implementation does the entire calculation
-			// in wide format, so wide is compatible.
-			xreal = s.newValue2(divop, wt, xreal, denom)
-			ximag = s.newValue2(divop, wt, ximag, denom)
-
-			if pt != wt { // Narrow to store back
-				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
-				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
-			}
-			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
-		}
-		if n.Type.IsFloat() {
-			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-		}
-		return s.intDivide(n, a, b)
-	case OMOD:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		return s.intDivide(n, a, b)
-	case OADD, OSUB:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		if n.Type.IsComplex() {
-			pt := floatForComplex(n.Type)
-			op := s.ssaOp(n.Op, pt)
-			return s.newValue2(ssa.OpComplexMake, n.Type,
-				s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
-				s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
-		}
-		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-	case OAND, OOR, OHMUL, OXOR:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-	case OLSH, ORSH:
-		a := s.expr(n.Left)
-		b := s.expr(n.Right)
-		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
-	case OLROT:
-		a := s.expr(n.Left)
-		i := n.Right.Int64()
-		if i <= 0 || i >= n.Type.Size()*8 {
-			s.Fatalf("Wrong rotate distance for LROT, expected 1 through %d, saw %d", n.Type.Size()*8-1, i)
-		}
-		return s.newValue1I(s.ssaRotateOp(n.Op, n.Type), a.Type, i, a)
-	case OANDAND, OOROR:
-		// To implement OANDAND (and OOROR), we introduce a
-		// new temporary variable to hold the result. The
-		// variable is associated with the OANDAND node in the
-		// s.vars table (normally variables are only
-		// associated with ONAME nodes). We convert
-		//     A && B
-		// to
-		//     var = A
-		//     if var {
-		//         var = B
-		//     }
-		// Using var in the subsequent block introduces the
-		// necessary phi variable.
-		el := s.expr(n.Left)
-		s.vars[n] = el
-
-		b := s.endBlock()
-		b.Kind = ssa.BlockIf
-		b.SetControl(el)
-		// In theory, we should set b.Likely here based on context.
-		// However, gc only gives us likeliness hints
-		// in a single place, for plain OIF statements,
-		// and passing around context is finicky, so don't bother for now.
-
-		bRight := s.f.NewBlock(ssa.BlockPlain)
-		bResult := s.f.NewBlock(ssa.BlockPlain)
-		if n.Op == OANDAND {
-			b.AddEdgeTo(bRight)
-			b.AddEdgeTo(bResult)
-		} else if n.Op == OOROR {
-			b.AddEdgeTo(bResult)
-			b.AddEdgeTo(bRight)
-		}
-
-		s.startBlock(bRight)
-		er := s.expr(n.Right)
-		s.vars[n] = er
-
-		b = s.endBlock()
-		b.AddEdgeTo(bResult)
-
-		s.startBlock(bResult)
-		return s.variable(n, Types[TBOOL])
-	case OCOMPLEX:
-		r := s.expr(n.Left)
-		i := s.expr(n.Right)
-		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
-
-	// unary ops
-	case OMINUS:
-		a := s.expr(n.Left)
-		if n.Type.IsComplex() {
-			tp := floatForComplex(n.Type)
-			negop := s.ssaOp(n.Op, tp)
-			return s.newValue2(ssa.OpComplexMake, n.Type,
-				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
-				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
-		}
-		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
-	case ONOT, OCOM, OSQRT:
-		a := s.expr(n.Left)
-		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
-	case OIMAG, OREAL:
-		a := s.expr(n.Left)
-		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
-	case OPLUS:
-		return s.expr(n.Left)
-
-	case OADDR:
-		a, _ := s.addr(n.Left, n.Bounded)
-		// Note we know the volatile result is false because you can't write &f() in Go.
-		return a
-
-	case OINDREGSP:
-		addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(n.Type), n.Xoffset, s.sp)
-		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
-
-	case OIND:
-		p := s.exprPtr(n.Left, false, n.Lineno)
-		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
-
-	case ODOT:
-		t := n.Left.Type
-		if canSSAType(t) {
-			v := s.expr(n.Left)
-			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
-		}
-		p, _ := s.addr(n, false)
-		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
-
-	case ODOTPTR:
-		p := s.exprPtr(n.Left, false, n.Lineno)
-		p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
-		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
-
-	case OINDEX:
-		switch {
-		case n.Left.Type.IsString():
-			if n.Bounded && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
-				// Replace "abc"[1] with 'b'.
-				// Delayed until now because "abc"[1] is not an ideal constant.
-				// See test/fixedbugs/issue11370.go.
-				return s.newValue0I(ssa.OpConst8, Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
-			}
-			a := s.expr(n.Left)
-			i := s.expr(n.Right)
-			i = s.extendIndex(i, panicindex)
-			if !n.Bounded {
-				len := s.newValue1(ssa.OpStringLen, Types[TINT], a)
-				s.boundsCheck(i, len)
-			}
-			ptrtyp := ptrto(Types[TUINT8])
-			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
-			if Isconst(n.Right, CTINT) {
-				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
-			} else {
-				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
-			}
-			return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
-		case n.Left.Type.IsSlice():
-			p, _ := s.addr(n, false)
-			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
-		case n.Left.Type.IsArray():
-			if bound := n.Left.Type.NumElem(); bound <= 1 {
-				// SSA can handle arrays of length at most 1.
-				a := s.expr(n.Left)
-				i := s.expr(n.Right)
-				if bound == 0 {
-					// Bounds check will never succeed.  Might as well
-					// use constants for the bounds check.
-					z := s.constInt(Types[TINT], 0)
-					s.boundsCheck(z, z)
-					// The return value won't be live, return junk.
-					return s.newValue0(ssa.OpUnknown, n.Type)
-				}
-				i = s.extendIndex(i, panicindex)
-				s.boundsCheck(i, s.constInt(Types[TINT], bound))
-				return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
-			}
-			p, _ := s.addr(n, false)
-			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
-		default:
-			s.Fatalf("bad type for index %v", n.Left.Type)
-			return nil
-		}
-
-	case OLEN, OCAP:
-		switch {
-		case n.Left.Type.IsSlice():
-			op := ssa.OpSliceLen
-			if n.Op == OCAP {
-				op = ssa.OpSliceCap
-			}
-			return s.newValue1(op, Types[TINT], s.expr(n.Left))
-		case n.Left.Type.IsString(): // string; not reachable for OCAP
-			return s.newValue1(ssa.OpStringLen, Types[TINT], s.expr(n.Left))
-		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
-			return s.referenceTypeBuiltin(n, s.expr(n.Left))
-		default: // array
-			return s.constInt(Types[TINT], n.Left.Type.NumElem())
-		}
-
-	case OSPTR:
-		a := s.expr(n.Left)
-		if n.Left.Type.IsSlice() {
-			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
-		} else {
-			return s.newValue1(ssa.OpStringPtr, n.Type, a)
-		}
-
-	case OITAB:
-		a := s.expr(n.Left)
-		return s.newValue1(ssa.OpITab, n.Type, a)
-
-	case OIDATA:
-		a := s.expr(n.Left)
-		return s.newValue1(ssa.OpIData, n.Type, a)
-
-	case OEFACE:
-		tab := s.expr(n.Left)
-		data := s.expr(n.Right)
-		return s.newValue2(ssa.OpIMake, n.Type, tab, data)
-
-	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
-		v := s.expr(n.Left)
-		var i, j, k *ssa.Value
-		low, high, max := n.SliceBounds()
-		if low != nil {
-			i = s.extendIndex(s.expr(low), panicslice)
-		}
-		if high != nil {
-			j = s.extendIndex(s.expr(high), panicslice)
-		}
-		if max != nil {
-			k = s.extendIndex(s.expr(max), panicslice)
-		}
-		p, l, c := s.slice(n.Left.Type, v, i, j, k)
-		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
-
-	case OSLICESTR:
-		v := s.expr(n.Left)
-		var i, j *ssa.Value
-		low, high, _ := n.SliceBounds()
-		if low != nil {
-			i = s.extendIndex(s.expr(low), panicslice)
-		}
-		if high != nil {
-			j = s.extendIndex(s.expr(high), panicslice)
-		}
-		p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
-		return s.newValue2(ssa.OpStringMake, n.Type, p, l)
-
-	case OCALLFUNC:
-		if isIntrinsicCall(n) {
-			return s.intrinsicCall(n)
-		}
-		fallthrough
-
-	case OCALLINTER, OCALLMETH:
-		a := s.call(n, callNormal)
-		return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
-
-	case OGETG:
-		return s.newValue1(ssa.OpGetG, n.Type, s.mem())
-
-	case OAPPEND:
-		return s.append(n, false)
-
-	default:
-		s.Fatalf("unhandled expr %v", n.Op)
-		return nil
-	}
-}
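
One detail of expr worth calling out: the OMUL and ODIV complex cases compute the real and imaginary parts in float64 even for complex64 operands, to reduce cancelation error, and then narrow. A standalone Go sketch of the same arithmetic at the source level (illustrative only; mulComplex64Wide is a made-up helper, not something in this file):

package main

import "fmt"

// mulComplex64Wide mirrors the scheme described above: widen the parts to
// float64, form (ar*br - ai*bi, ar*bi + ai*br), then narrow the result.
func mulComplex64Wide(a, b complex64) complex64 {
	ar, ai := float64(real(a)), float64(imag(a))
	br, bi := float64(real(b)), float64(imag(b))
	re := ar*br - ai*bi
	im := ar*bi + ai*br
	return complex(float32(re), float32(im))
}

func main() {
	var a complex64 = 1.5 - 2.25i
	var b complex64 = -0.5 + 3i
	fmt.Println(a * b)                  // (6+5.625i)
	fmt.Println(mulComplex64Wide(a, b)) // (6+5.625i)
}
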
-
-// append converts an OAPPEND node to SSA.
-// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
-// adds it to s, and returns the Value.
-// If inplace is true, it writes the result of the OAPPEND expression n
-// back to the slice being appended to, and returns nil.
-// inplace MUST be set to false if the slice can be SSA'd.
-func (s *state) append(n *Node, inplace bool) *ssa.Value {
-	// If inplace is false, process as expression "append(s, e1, e2, e3)":
-	//
-	// ptr, len, cap := s
-	// newlen := len + 3
-	// if newlen > cap {
-	//     ptr, len, cap = growslice(s, newlen)
-	//     newlen = len + 3 // recalculate to avoid a spill
-	// }
-	// // with write barriers, if needed:
-	// *(ptr+len) = e1
-	// *(ptr+len+1) = e2
-	// *(ptr+len+2) = e3
-	// return makeslice(ptr, newlen, cap)
-	//
-	//
-	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
-	//
-	// a := &s
-	// ptr, len, cap := s
-	// newlen := len + 3
-	// if newlen > cap {
-	//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
-	//    vardef(a)       // if necessary, advise liveness we are writing a new a
-	//    *a.cap = newcap // write before ptr to avoid a spill
-	//    *a.ptr = newptr // with write barrier
-	// }
-	// newlen = len + 3 // recalculate to avoid a spill
-	// *a.len = newlen
-	// // with write barriers, if needed:
-	// *(ptr+len) = e1
-	// *(ptr+len+1) = e2
-	// *(ptr+len+2) = e3
-
-	et := n.Type.Elem()
-	pt := ptrto(et)
-
-	// Evaluate slice
-	sn := n.List.First() // the slice node is the first in the list
-
-	var slice, addr *ssa.Value
-	if inplace {
-		addr, _ = s.addr(sn, false)
-		slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
-	} else {
-		slice = s.expr(sn)
-	}
-
-	// Allocate new blocks
-	grow := s.f.NewBlock(ssa.BlockPlain)
-	assign := s.f.NewBlock(ssa.BlockPlain)
-
-	// Decide if we need to grow
-	nargs := int64(n.List.Len() - 1)
-	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
-	l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
-	c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
-	nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
-
-	cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c)
-	s.vars[&ptrVar] = p
-
-	if !inplace {
-		s.vars[&newlenVar] = nl
-		s.vars[&capVar] = c
-	} else {
-		s.vars[&lenVar] = l
-	}
-
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.Likely = ssa.BranchUnlikely
-	b.SetControl(cmp)
-	b.AddEdgeTo(grow)
-	b.AddEdgeTo(assign)
-
-	// Call growslice
-	s.startBlock(grow)
-	taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(n.Type.Elem())}, s.sb)
-
-	r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
-
-	if inplace {
-		if sn.Op == ONAME {
-			// Tell liveness we're about to build a new slice
-			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem())
-		}
-		capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_cap), addr)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem())
-		if ssa.IsStackAddr(addr) {
-			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, pt.Size(), addr, r[0], s.mem())
-		} else {
-			s.insertWBstore(pt, addr, r[0], n.Lineno, 0)
-		}
-		// load the value we just stored to avoid having to spill it
-		s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
-		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
-	} else {
-		s.vars[&ptrVar] = r[0]
-		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs))
-		s.vars[&capVar] = r[2]
-	}
-
-	b = s.endBlock()
-	b.AddEdgeTo(assign)
-
-	// assign new elements to slots
-	s.startBlock(assign)
-
-	if inplace {
-		l = s.variable(&lenVar, Types[TINT]) // generates phi for len
-		nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs))
-		lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(array_nel), addr)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem())
-	}
-
-	// Evaluate args
-	type argRec struct {
-		// if store is true, we're appending the value v.  If false, we're appending the
-		// value at *v.  If store==false, isVolatile reports whether the source
-		// is in the outargs section of the stack frame.
-		v          *ssa.Value
-		store      bool
-		isVolatile bool
-	}
-	args := make([]argRec, 0, nargs)
-	for _, n := range n.List.Slice()[1:] {
-		if canSSAType(n.Type) {
-			args = append(args, argRec{v: s.expr(n), store: true})
-		} else {
-			v, isVolatile := s.addr(n, false)
-			args = append(args, argRec{v: v, isVolatile: isVolatile})
-		}
-	}
-
-	p = s.variable(&ptrVar, pt) // generates phi for ptr
-	if !inplace {
-		nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl
-		c = s.variable(&capVar, Types[TINT])     // generates phi for cap
-	}
-	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
-	// TODO: just one write barrier call for all of these writes?
-	// TODO: maybe just one writeBarrier.enabled check?
-	for i, arg := range args {
-		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(Types[TINT], int64(i)))
-		if arg.store {
-			if haspointers(et) {
-				s.insertWBstore(et, addr, arg.v, n.Lineno, 0)
-			} else {
-				s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
-			}
-		} else {
-			if haspointers(et) {
-				s.insertWBmove(et, addr, arg.v, n.Lineno, arg.isVolatile)
-			} else {
-				s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(et), addr, arg.v, s.mem())
-			}
-		}
-	}
-
-	delete(s.vars, &ptrVar)
-	if inplace {
-		delete(s.vars, &lenVar)
-		return nil
-	}
-	delete(s.vars, &newlenVar)
-	delete(s.vars, &capVar)
-	// make result
-	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
-}
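
The growth test that append's generated code performs (newlen > cap, then growslice) is observable from ordinary Go. A standalone example, plain user code, of the statement form that the inplace path above handles:

package main

import "fmt"

func main() {
	s := make([]int, 2, 3)
	fmt.Println(len(s), cap(s)) // 2 3

	// newlen <= cap: no growslice call, the existing backing array is
	// reused and only the stored length changes.
	s = append(s, 10)
	fmt.Println(len(s), cap(s)) // 3 3

	// newlen > cap: growslice runs and the new ptr/len/cap are written
	// back into s, which is what the inplace form above implements when
	// append's result is assigned back to the same slice.
	s = append(s, 20, 30)
	fmt.Println(len(s), cap(s) >= 5) // 5 true
}
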
-
-// condBranch evaluates the boolean expression cond and branches to yes
-// if cond is true and no if cond is false.
-// This function is intended to handle && and || better than just calling
-// s.expr(cond) and branching on the result.
-func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
-	if cond.Op == OANDAND {
-		mid := s.f.NewBlock(ssa.BlockPlain)
-		s.stmtList(cond.Ninit)
-		s.condBranch(cond.Left, mid, no, max8(likely, 0))
-		s.startBlock(mid)
-		s.condBranch(cond.Right, yes, no, likely)
-		return
-		// Note: if likely==1, then both recursive calls pass 1.
-		// If likely==-1, then we don't have enough information to decide
-		// whether the first branch is likely or not. So we pass 0 for
-		// the likeliness of the first branch.
-		// TODO: have the frontend give us branch prediction hints for
-		// OANDAND and OOROR nodes (if it ever has such info).
-	}
-	if cond.Op == OOROR {
-		mid := s.f.NewBlock(ssa.BlockPlain)
-		s.stmtList(cond.Ninit)
-		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
-		s.startBlock(mid)
-		s.condBranch(cond.Right, yes, no, likely)
-		return
-		// Note: if likely==-1, then both recursive calls pass -1.
-		// If likely==1, then we don't have enough info to decide
-		// the likelihood of the first branch.
-	}
-	if cond.Op == ONOT {
-		s.stmtList(cond.Ninit)
-		s.condBranch(cond.Left, no, yes, -likely)
-		return
-	}
-	c := s.expr(cond)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(c)
-	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
-	b.AddEdgeTo(yes)
-	b.AddEdgeTo(no)
-}
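
condBranch exists so that && and || compile to branches rather than to materialized booleans; the user-visible contract is short-circuit evaluation. A small standalone example of that contract (ordinary user code, with check as a made-up helper):

package main

import "fmt"

func check(name string, v bool) bool {
	fmt.Println("evaluated", name)
	return v
}

func main() {
	// The right operand only runs when the left one does not already
	// decide the result; condBranch arranges the blocks so the second
	// expression sits in a conditionally reached block.
	if check("a", false) && check("b", true) {
		fmt.Println("both true")
	}
	if check("c", true) || check("d", false) {
		fmt.Println("at least one true")
	}
	// Prints: evaluated a, evaluated c, at least one true.
}
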
-
-type skipMask uint8
-
-const (
-	skipPtr skipMask = 1 << iota
-	skipLen
-	skipCap
-)
-
-// assign does left = right.
-// Right has already been evaluated to ssa, left has not.
-// If deref is true, then we do left = *right instead (and right has already been nil-checked).
-// If deref is true and right == nil, just do left = 0.
-// If deref is true, rightIsVolatile reports whether right points to volatile (clobbered by a call) storage.
-// Include a write barrier if wb is true.
-// skip indicates assignments (at the top level) that can be avoided.
-func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32, skip skipMask, rightIsVolatile bool) {
-	if left.Op == ONAME && isblank(left) {
-		return
-	}
-	t := left.Type
-	dowidth(t)
-	if s.canSSA(left) {
-		if deref {
-			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
-		}
-		if left.Op == ODOT {
-			// We're assigning to a field of an ssa-able value.
-			// We need to build a new structure with the new value for the
-			// field we're assigning and the old values for the other fields.
-			// For instance:
-			//   type T struct {a, b, c int}
-			//   var x T
-			//   x.b = 5
-			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
-
-			// Grab information about the structure type.
-			t := left.Left.Type
-			nf := t.NumFields()
-			idx := fieldIdx(left)
-
-			// Grab old value of structure.
-			old := s.expr(left.Left)
-
-			// Make new structure.
-			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
-
-			// Add fields as args.
-			for i := 0; i < nf; i++ {
-				if i == idx {
-					new.AddArg(right)
-				} else {
-					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
-				}
-			}
-
-			// Recursively assign the new value we've made to the base of the dot op.
-			s.assign(left.Left, new, false, false, line, 0, rightIsVolatile)
-			// TODO: do we need to update named values here?
-			return
-		}
-		if left.Op == OINDEX && left.Left.Type.IsArray() {
-			// We're assigning to an element of an ssa-able array.
-			// a[i] = v
-			t := left.Left.Type
-			n := t.NumElem()
-
-			i := s.expr(left.Right) // index
-			if n == 0 {
-				// The bounds check must fail.  Might as well
-				// ignore the actual index and just use zeros.
-				z := s.constInt(Types[TINT], 0)
-				s.boundsCheck(z, z)
-				return
-			}
-			if n != 1 {
-				s.Fatalf("assigning to non-1-length array")
-			}
-			// Rewrite to a = [1]{v}
-			i = s.extendIndex(i, panicindex)
-			s.boundsCheck(i, s.constInt(Types[TINT], 1))
-			v := s.newValue1(ssa.OpArrayMake1, t, right)
-			s.assign(left.Left, v, false, false, line, 0, rightIsVolatile)
-			return
-		}
-		// Update variable assignment.
-		s.vars[left] = right
-		s.addNamedValue(left, right)
-		return
-	}
-	// Left is not ssa-able. Compute its address.
-	addr, _ := s.addr(left, false)
-	if left.Op == ONAME && skip == 0 {
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
-	}
-	if deref {
-		// Treat as a mem->mem move.
-		if wb && !ssa.IsStackAddr(addr) {
-			s.insertWBmove(t, addr, right, line, rightIsVolatile)
-			return
-		}
-		if right == nil {
-			s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(t), addr, s.mem())
-			return
-		}
-		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(t), addr, right, s.mem())
-		return
-	}
-	// Treat as a store.
-	if wb && !ssa.IsStackAddr(addr) {
-		if skip&skipPtr != 0 {
-			// Special case: if we don't write back the pointers, don't bother
-			// doing the write barrier check.
-			s.storeTypeScalars(t, addr, right, skip)
-			return
-		}
-		s.insertWBstore(t, addr, right, line, skip)
-		return
-	}
-	if skip != 0 {
-		if skip&skipPtr == 0 {
-			s.storeTypePtrs(t, addr, right)
-		}
-		s.storeTypeScalars(t, addr, right, skip)
-		return
-	}
-	s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), addr, right, s.mem())
-}
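
The ODOT branch of assign rewrites an assignment to one field of an SSA-able struct into constructing a whole new struct value. At the source level the rewrite is behavior-preserving; an illustrative standalone sketch of the equivalence (T here is just an example type):

package main

import "fmt"

type T struct{ a, b, c int }

func main() {
	x := T{1, 2, 3}

	// Direct field assignment...
	y := x
	y.b = 5

	// ...has the same effect as rebuilding the whole struct with the new
	// field value, which is the shape the ODOT branch above produces for
	// SSA-able structs.
	z := T{x.a, 5, x.c}

	fmt.Println(y == z) // true
}
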
-
-// zeroVal returns the zero value for type t.
-func (s *state) zeroVal(t *Type) *ssa.Value {
-	switch {
-	case t.IsInteger():
-		switch t.Size() {
-		case 1:
-			return s.constInt8(t, 0)
-		case 2:
-			return s.constInt16(t, 0)
-		case 4:
-			return s.constInt32(t, 0)
-		case 8:
-			return s.constInt64(t, 0)
-		default:
-			s.Fatalf("bad sized integer type %v", t)
-		}
-	case t.IsFloat():
-		switch t.Size() {
-		case 4:
-			return s.constFloat32(t, 0)
-		case 8:
-			return s.constFloat64(t, 0)
-		default:
-			s.Fatalf("bad sized float type %v", t)
-		}
-	case t.IsComplex():
-		switch t.Size() {
-		case 8:
-			z := s.constFloat32(Types[TFLOAT32], 0)
-			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
-		case 16:
-			z := s.constFloat64(Types[TFLOAT64], 0)
-			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
-		default:
-			s.Fatalf("bad sized complex type %v", t)
-		}
-
-	case t.IsString():
-		return s.constEmptyString(t)
-	case t.IsPtrShaped():
-		return s.constNil(t)
-	case t.IsBoolean():
-		return s.constBool(false)
-	case t.IsInterface():
-		return s.constInterface(t)
-	case t.IsSlice():
-		return s.constSlice(t)
-	case t.IsStruct():
-		n := t.NumFields()
-		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
-		for i := 0; i < n; i++ {
-			v.AddArg(s.zeroVal(t.FieldType(i).(*Type)))
-		}
-		return v
-	case t.IsArray():
-		switch t.NumElem() {
-		case 0:
-			return s.entryNewValue0(ssa.OpArrayMake0, t)
-		case 1:
-			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
-		}
-	}
-	s.Fatalf("zero for type %v not implemented", t)
-	return nil
-}
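
The constants zeroVal builds are the same zero values that var declarations produce in source. A quick standalone illustration, ordinary user code:

package main

import "fmt"

func main() {
	var (
		i  int
		f  float64
		c  complex128
		s  string
		b  bool
		p  *int
		sl []byte
		st struct{ x, y int }
		ar [1]float32
	)
	// Prints: 0 0 (0+0i) "" false <nil> [] {0 0} [0]
	fmt.Println(i, f, c, fmt.Sprintf("%q", s), b, p, sl, st, ar)
}
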
-
-type callKind int8
-
-const (
-	callNormal callKind = iota
-	callDefer
-	callGo
-)
-
-// TODO: make this a field of a configuration object instead of a global.
-var intrinsics *intrinsicInfo
-
-type intrinsicInfo struct {
-	std      map[intrinsicKey]intrinsicBuilder
-	intSized map[sizedIntrinsicKey]intrinsicBuilder
-	ptrSized map[sizedIntrinsicKey]intrinsicBuilder
-}
-
-// An intrinsicBuilder converts a call node n into an ssa value that
-// implements that call as an intrinsic. args is a list of arguments to the func.
-type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
-
-type intrinsicKey struct {
-	pkg string
-	fn  string
-}
-
-type sizedIntrinsicKey struct {
-	pkg  string
-	fn   string
-	size int
-}
-
-// disableForInstrumenting returns nil when instrumenting, fn otherwise
-func disableForInstrumenting(fn intrinsicBuilder) intrinsicBuilder {
-	if instrumenting {
-		return nil
-	}
-	return fn
-}
-
-// enableOnArch returns fn on given archs, nil otherwise
-func enableOnArch(fn intrinsicBuilder, archs ...sys.ArchFamily) intrinsicBuilder {
-	if Thearch.LinkArch.InFamily(archs...) {
-		return fn
-	}
-	return nil
-}
-
-func intrinsicInit() {
-	i := &intrinsicInfo{}
-	intrinsics = i
-
-	// initial set of intrinsics.
-	i.std = map[intrinsicKey]intrinsicBuilder{
-		/******** runtime ********/
-		intrinsicKey{"runtime", "slicebytetostringtmp"}: disableForInstrumenting(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
-			// for the backend instead of slicebytetostringtmp calls
-			// when not instrumenting.
-			slice := args[0]
-			ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
-			len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
-			return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
-		}),
-		intrinsicKey{"runtime", "KeepAlive"}: func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), args[0])
-			s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem())
-			return nil
-		},
-
-		/******** runtime/internal/sys ********/
-		intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz32, Types[TUINT32], args[0])
-		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpCtz64, Types[TUINT64], args[0])
-		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0])
-		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
-		intrinsicKey{"runtime/internal/sys", "Bswap64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBswap64, Types[TUINT64], args[0])
-		}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
-
-		/******** runtime/internal/atomic ********/
-		intrinsicKey{"runtime/internal/atomic", "Load"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
-		intrinsicKey{"runtime/internal/atomic", "Loadp"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), args[0], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v)
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-
-		intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem())
-			return nil
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem())
-			return nil
-		}, sys.AMD64, sys.ARM64, sys.S390X),
-		intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem())
-			return nil
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-
-		intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
-
-		intrinsicKey{"runtime/internal/atomic", "Xadd"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
-
-		intrinsicKey{"runtime/internal/atomic", "Cas"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS),
-		intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
-			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
-			return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
-		}, sys.AMD64, sys.ARM64, sys.S390X),
-
-		intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem())
-			return nil
-		}, sys.AMD64, sys.ARM64, sys.MIPS),
-		intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem())
-			return nil
-		}, sys.AMD64, sys.ARM64, sys.MIPS),
-	}
-
-	// aliases internal to runtime/internal/atomic
-	i.std[intrinsicKey{"runtime/internal/atomic", "Loadint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
-	i.std[intrinsicKey{"runtime/internal/atomic", "Xaddint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
-
-	// intrinsics which vary depending on the size of int/ptr.
-	i.intSized = map[sizedIntrinsicKey]intrinsicBuilder{
-		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}],
-	}
-	i.ptrSized = map[sizedIntrinsicKey]intrinsicBuilder{
-		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 4}:  i.std[intrinsicKey{"runtime/internal/atomic", "Load"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 8}:  i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Store"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 4}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 8}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 4}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 8}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 4}:   i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 8}:   i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 4}:        i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}],
-		sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 8}:        i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}],
-	}
-
-	/******** sync/atomic ********/
-	if flag_race {
-		// The race detector needs to be able to intercept these calls.
-		// We can't intrinsify them.
-		return
-	}
-	// these are all aliases to runtime/internal/atomic implementations.
-	i.std[intrinsicKey{"sync/atomic", "LoadInt32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load"}]
-	i.std[intrinsicKey{"sync/atomic", "LoadInt64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
-	i.std[intrinsicKey{"sync/atomic", "LoadPointer"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Loadp"}]
-	i.std[intrinsicKey{"sync/atomic", "LoadUint32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load"}]
-	i.std[intrinsicKey{"sync/atomic", "LoadUint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 4}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 8}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
-
-	i.std[intrinsicKey{"sync/atomic", "StoreInt32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Store"}]
-	i.std[intrinsicKey{"sync/atomic", "StoreInt64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}]
-	// Note: not StorePointer, that needs a write barrier.  Same below for {CompareAnd}Swap.
-	i.std[intrinsicKey{"sync/atomic", "StoreUint32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Store"}]
-	i.std[intrinsicKey{"sync/atomic", "StoreUint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 4}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Store"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 8}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}]
-
-	i.std[intrinsicKey{"sync/atomic", "SwapInt32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}]
-	i.std[intrinsicKey{"sync/atomic", "SwapInt64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}]
-	i.std[intrinsicKey{"sync/atomic", "SwapUint32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}]
-	i.std[intrinsicKey{"sync/atomic", "SwapUint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 4}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 8}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}]
-
-	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}]
-	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}]
-	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}]
-	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 4}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 8}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}]
-
-	i.std[intrinsicKey{"sync/atomic", "AddInt32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}]
-	i.std[intrinsicKey{"sync/atomic", "AddInt64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
-	i.std[intrinsicKey{"sync/atomic", "AddUint32"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}]
-	i.std[intrinsicKey{"sync/atomic", "AddUint64"}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 4}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}]
-	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 8}] =
-		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
-
-	/******** math/big ********/
-	i.intSized[sizedIntrinsicKey{"math/big", "mulWW", 8}] =
-		enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1])
-		}, sys.AMD64)
-	i.intSized[sizedIntrinsicKey{"math/big", "divWW", 8}] =
-		enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
-			return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1], args[2])
-		}, sys.AMD64)
-}
-
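// Illustrative sketch (not part of the diffed file): the alias table above is
// what lets an ordinary sync/atomic call compile down to a single atomic
// instruction on architectures where the matching runtime/internal/atomic
// intrinsic is enabled, instead of a regular function call.
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n int64
	atomic.AddInt64(&n, 1)                 // candidate for the Xadd64 intrinsic in the table above
	fmt.Println(atomic.LoadInt64(&n) == 1) // LoadInt64 is aliased to Load64
}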
-// findIntrinsic returns a function which builds the SSA equivalent of the
-// function identified by the symbol sym. If sym does not identify an intrinsic, it returns nil.
-func findIntrinsic(sym *Sym) intrinsicBuilder {
-	if ssa.IntrinsicsDisable {
-		return nil
-	}
-	if sym == nil || sym.Pkg == nil {
-		return nil
-	}
-	if intrinsics == nil {
-		intrinsicInit()
-	}
-	pkg := sym.Pkg.Path
-	if sym.Pkg == localpkg {
-		pkg = myimportpath
-	}
-	fn := sym.Name
-	f := intrinsics.std[intrinsicKey{pkg, fn}]
-	if f != nil {
-		return f
-	}
-	f = intrinsics.intSized[sizedIntrinsicKey{pkg, fn, Widthint}]
-	if f != nil {
-		return f
-	}
-	return intrinsics.ptrSized[sizedIntrinsicKey{pkg, fn, Widthptr}]
-}
-
-func isIntrinsicCall(n *Node) bool {
-	if n == nil || n.Left == nil {
-		return false
-	}
-	return findIntrinsic(n.Left.Sym) != nil
-}
-
-// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *Node) *ssa.Value {
-	v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
-	if ssa.IntrinsicsDebug > 0 {
-		x := v
-		if x == nil {
-			x = s.mem()
-		}
-		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
-			x = x.Args[0]
-		}
-		Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
-	}
-	return v
-}
-
-type callArg struct {
-	offset int64
-	v      *ssa.Value
-}
-type byOffset []callArg
-
-func (x byOffset) Len() int      { return len(x) }
-func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x byOffset) Less(i, j int) bool {
-	return x[i].offset < x[j].offset
-}
-
-// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
-func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
-	// This code is complicated because of how walk transforms calls. For a call node,
-	// each entry in n.List is either an assignment to OINDREGSP which actually
-	// stores an arg, or an assignment to a temporary which computes an arg
-	// which is later assigned.
-	// The args can also be out of order.
-	// TODO: when walk goes away someday, this code can go away also.
-	var args []callArg
-	temps := map[*Node]*ssa.Value{}
-	for _, a := range n.List.Slice() {
-		if a.Op != OAS {
-			s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
-		}
-		l, r := a.Left, a.Right
-		switch l.Op {
-		case ONAME:
-			// Evaluate and store to "temporary".
-			// Walk ensures these temporaries are dead outside of n.
-			temps[l] = s.expr(r)
-		case OINDREGSP:
-			// Store a value to an argument slot.
-			var v *ssa.Value
-			if x, ok := temps[r]; ok {
-				// This is a previously computed temporary.
-				v = x
-			} else {
-				// This is an explicit value; evaluate it.
-				v = s.expr(r)
-			}
-			args = append(args, callArg{l.Xoffset, v})
-		default:
-			s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
-		}
-	}
-	sort.Sort(byOffset(args))
-	res := make([]*ssa.Value, len(args))
-	for i, a := range args {
-		res[i] = a.v
-	}
-	return res
-}
-
-// Calls the function n using the specified call type.
-// Returns the address of the return value (or nil if none).
-func (s *state) call(n *Node, k callKind) *ssa.Value {
-	var sym *Sym           // target symbol (if static)
-	var closure *ssa.Value // ptr to closure to run (if dynamic)
-	var codeptr *ssa.Value // ptr to target code (if dynamic)
-	var rcvr *ssa.Value    // receiver to set
-	fn := n.Left
-	switch n.Op {
-	case OCALLFUNC:
-		if k == callNormal && fn.Op == ONAME && fn.Class == PFUNC {
-			sym = fn.Sym
-			break
-		}
-		closure = s.expr(fn)
-	case OCALLMETH:
-		if fn.Op != ODOTMETH {
-			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
-		}
-		if k == callNormal {
-			sym = fn.Sym
-			break
-		}
-		// Make a name n2 for the function.
-		// fn.Sym might be sync.(*Mutex).Unlock.
-		// Make a PFUNC node out of that, then evaluate it.
-		// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
-		// We can then pass that to defer or go.
-		n2 := newname(fn.Sym)
-		n2.Class = PFUNC
-		n2.Lineno = fn.Lineno
-		n2.Type = Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
-		closure = s.expr(n2)
-		// Note: receiver is already assigned in n.List, so we don't
-		// want to set it here.
-	case OCALLINTER:
-		if fn.Op != ODOTINTER {
-			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
-		}
-		i := s.expr(fn.Left)
-		itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
-		if k != callNormal {
-			s.nilCheck(itab)
-		}
-		itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
-		itab = s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), itabidx, itab)
-		if k == callNormal {
-			codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], itab, s.mem())
-		} else {
-			closure = itab
-		}
-		rcvr = s.newValue1(ssa.OpIData, Types[TUINTPTR], i)
-	}
-	dowidth(fn.Type)
-	stksize := fn.Type.ArgWidth() // includes receiver
-
-	// Run all argument assignments. The arg slots have already
-	// been offset by the appropriate amount (+2*widthptr for go/defer,
-	// +widthptr for interface calls).
-	// For OCALLMETH, the receiver is set in these statements.
-	s.stmtList(n.List)
-
-	// Set receiver (for interface calls)
-	if rcvr != nil {
-		argStart := Ctxt.FixedFrameSize()
-		if k != callNormal {
-			argStart += int64(2 * Widthptr)
-		}
-		addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart, s.sp)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, rcvr, s.mem())
-	}
-
-	// Defer/go args
-	if k != callNormal {
-		// Write argsize and closure (args to Newproc/Deferproc).
-		argStart := Ctxt.FixedFrameSize()
-		argsize := s.constInt32(Types[TUINT32], int32(stksize))
-		addr := s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINT32]), argStart, s.sp)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, 4, addr, argsize, s.mem())
-		addr = s.entryNewValue1I(ssa.OpOffPtr, ptrto(Types[TUINTPTR]), argStart+int64(Widthptr), s.sp)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, int64(Widthptr), addr, closure, s.mem())
-		stksize += 2 * int64(Widthptr)
-	}
-
-	// call target
-	var call *ssa.Value
-	switch {
-	case k == callDefer:
-		call = s.newValue1(ssa.OpDeferCall, ssa.TypeMem, s.mem())
-	case k == callGo:
-		call = s.newValue1(ssa.OpGoCall, ssa.TypeMem, s.mem())
-	case closure != nil:
-		codeptr = s.newValue2(ssa.OpLoad, Types[TUINTPTR], closure, s.mem())
-		call = s.newValue3(ssa.OpClosureCall, ssa.TypeMem, codeptr, closure, s.mem())
-	case codeptr != nil:
-		call = s.newValue2(ssa.OpInterCall, ssa.TypeMem, codeptr, s.mem())
-	case sym != nil:
-		call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem())
-	default:
-		Fatalf("bad call type %v %v", n.Op, n)
-	}
-	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
-	s.vars[&memVar] = call
-
-	// Finish block for defers
-	if k == callDefer {
-		b := s.endBlock()
-		b.Kind = ssa.BlockDefer
-		b.SetControl(call)
-		bNext := s.f.NewBlock(ssa.BlockPlain)
-		b.AddEdgeTo(bNext)
-		// Add recover edge to exit code.
-		r := s.f.NewBlock(ssa.BlockPlain)
-		s.startBlock(r)
-		s.exit()
-		b.AddEdgeTo(r)
-		b.Likely = ssa.BranchLikely
-		s.startBlock(bNext)
-	}
-
-	res := n.Left.Type.Results()
-	if res.NumFields() == 0 || k != callNormal {
-		// call has no return value. Continue with the next statement.
-		return nil
-	}
-	fp := res.Field(0)
-	return s.entryNewValue1I(ssa.OpOffPtr, ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp)
-}
-
-// etypesign returns the signed-ness of e, for integer/pointer etypes.
-// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
-func etypesign(e EType) int8 {
-	switch e {
-	case TINT8, TINT16, TINT32, TINT64, TINT:
-		return -1
-	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
-		return +1
-	}
-	return 0
-}
-
-// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
-// This improves the effectiveness of cse by using the same Aux values for the
-// same symbols.
-func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
-	switch sym.(type) {
-	default:
-		s.Fatalf("sym %v is of unknown type %T", sym, sym)
-	case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
-		// these are the only valid types
-	}
-
-	if lsym, ok := s.varsyms[n]; ok {
-		return lsym
-	} else {
-		s.varsyms[n] = sym
-		return sym
-	}
-}
-
-// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
-// Also returns a bool reporting whether the returned value is "volatile", that is, it
-// points to the outargs section and thus the referent will be clobbered by any call.
-// The value that the returned Value represents is guaranteed to be non-nil.
-// If bounded is true then this address does not require a nil check for its operand
-// even if that would otherwise be implied.
-func (s *state) addr(n *Node, bounded bool) (*ssa.Value, bool) {
-	t := ptrto(n.Type)
-	switch n.Op {
-	case ONAME:
-		switch n.Class {
-		case PEXTERN:
-			// global variable
-			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Typ: n.Type, Sym: n.Sym})
-			v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
-			// TODO: Make OpAddr use AuxInt as well as Aux.
-			if n.Xoffset != 0 {
-				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
-			}
-			return v, false
-		case PPARAM:
-			// parameter slot
-			v := s.decladdrs[n]
-			if v != nil {
-				return v, false
-			}
-			if n == nodfp {
-				// Special arg that points to the frame pointer (Used by ORECOVER).
-				aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
-				return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp), false
-			}
-			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
-			return nil, false
-		case PAUTO:
-			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Typ: n.Type, Node: n})
-			return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
-		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
-			// ensure that we reuse symbols for out parameters so
-			// that cse works on their addresses
-			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
-			return s.newValue1A(ssa.OpAddr, t, aux, s.sp), false
-		default:
-			s.Fatalf("variable address class %v not implemented", classnames[n.Class])
-			return nil, false
-		}
-	case OINDREGSP:
-		// indirect off REGSP
-		// used for storing/loading arguments/returns to/from callees
-		return s.entryNewValue1I(ssa.OpOffPtr, t, n.Xoffset, s.sp), true
-	case OINDEX:
-		if n.Left.Type.IsSlice() {
-			a := s.expr(n.Left)
-			i := s.expr(n.Right)
-			i = s.extendIndex(i, panicindex)
-			len := s.newValue1(ssa.OpSliceLen, Types[TINT], a)
-			if !n.Bounded {
-				s.boundsCheck(i, len)
-			}
-			p := s.newValue1(ssa.OpSlicePtr, t, a)
-			return s.newValue2(ssa.OpPtrIndex, t, p, i), false
-		} else { // array
-			a, isVolatile := s.addr(n.Left, bounded)
-			i := s.expr(n.Right)
-			i = s.extendIndex(i, panicindex)
-			len := s.constInt(Types[TINT], n.Left.Type.NumElem())
-			if !n.Bounded {
-				s.boundsCheck(i, len)
-			}
-			return s.newValue2(ssa.OpPtrIndex, ptrto(n.Left.Type.Elem()), a, i), isVolatile
-		}
-	case OIND:
-		return s.exprPtr(n.Left, bounded, n.Lineno), false
-	case ODOT:
-		p, isVolatile := s.addr(n.Left, bounded)
-		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), isVolatile
-	case ODOTPTR:
-		p := s.exprPtr(n.Left, bounded, n.Lineno)
-		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p), false
-	case OCLOSUREVAR:
-		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
-			s.entryNewValue0(ssa.OpGetClosurePtr, ptrto(Types[TUINT8]))), false
-	case OCONVNOP:
-		addr, isVolatile := s.addr(n.Left, bounded)
-		return s.newValue1(ssa.OpCopy, t, addr), isVolatile // ensure that addr has the right type
-	case OCALLFUNC, OCALLINTER, OCALLMETH:
-		return s.call(n, callNormal), true
-	case ODOTTYPE:
-		v, _ := s.dottype(n, false)
-		if v.Op != ssa.OpLoad {
-			s.Fatalf("dottype of non-load")
-		}
-		if v.Args[1] != s.mem() {
-			s.Fatalf("memory no longer live from dottype load")
-		}
-		return v.Args[0], false
-	default:
-		s.Fatalf("unhandled addr %v", n.Op)
-		return nil, false
-	}
-}
-
-// canSSA reports whether n is SSA-able.
-// n must be an ONAME (or an ODOT sequence with an ONAME base).
-func (s *state) canSSA(n *Node) bool {
-	if Debug['N'] != 0 {
-		return false
-	}
-	for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
-		n = n.Left
-	}
-	if n.Op != ONAME {
-		return false
-	}
-	if n.Addrtaken {
-		return false
-	}
-	if n.isParamHeapCopy() {
-		return false
-	}
-	if n.Class == PAUTOHEAP {
-		Fatalf("canSSA of PAUTOHEAP %v", n)
-	}
-	switch n.Class {
-	case PEXTERN:
-		return false
-	case PPARAMOUT:
-		if hasdefer {
-			// TODO: handle this case?  Named return values must be
-			// in memory so that the deferred function can see them.
-			// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
-			return false
-		}
-		if s.cgoUnsafeArgs {
-			// Cgo effectively takes the address of all result args,
-			// but the compiler can't see that.
-			return false
-		}
-	}
-	if n.Class == PPARAM && n.String() == ".this" {
-		// wrappers generated by genwrapper need to update
-		// the .this pointer in place.
-		// TODO: treat as a PPARAMOUT?
-		return false
-	}
-	return canSSAType(n.Type)
-	// TODO: try to make more variables SSAable?
-}
-
-// canSSAType reports whether variables of type t are SSA-able.
-func canSSAType(t *Type) bool {
-	dowidth(t)
-	if t.Width > int64(4*Widthptr) {
-		// 4*Widthptr is an arbitrary constant. We want it
-		// to be at least 3*Widthptr so slices can be registerized.
-		// Too big and we'll introduce too much register pressure.
-		return false
-	}
-	switch t.Etype {
-	case TARRAY:
-		// We can't do larger arrays because dynamic indexing is
-		// not supported on SSA variables.
-		// TODO: allow if all indexes are constant.
-		if t.NumElem() == 0 {
-			return true
-		}
-		if t.NumElem() == 1 {
-			return canSSAType(t.Elem())
-		}
-		return false
-	case TSTRUCT:
-		if t.NumFields() > ssa.MaxStruct {
-			return false
-		}
-		for _, t1 := range t.Fields().Slice() {
-			if !canSSAType(t1.Type) {
-				return false
-			}
-		}
-		return true
-	default:
-		return true
-	}
-}
-
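// Illustrative sketch (not part of the diffed file), assuming a 64-bit target
// (Widthptr = 8, so the size cap in canSSAType is 32 bytes): how a few types
// are classified by the rules above. The type names are made up.
package main

type point struct{ x, y int }        // 16 bytes, 2 fields: SSA-able
type words []int                     // 24-byte slice header: SSA-able (why the cap is at least 3*Widthptr)
type big struct{ a, b, c, d, e int } // 40 bytes > 32: not SSA-able
type arr1 [1]point                   // single element, element SSA-able: SSA-able
type arr4 [4]byte                    // more than one element: not SSA-able (no dynamic indexing of SSA values)

func main() {}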
-// exprPtr evaluates n to a pointer and nil-checks it.
-func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value {
-	p := s.expr(n)
-	if bounded || n.NonNil {
-		if s.f.Config.Debug_checknil() && lineno > 1 {
-			s.f.Config.Warnl(lineno, "removed nil check")
-		}
-		return p
-	}
-	s.nilCheck(p)
-	return p
-}
-
-// nilCheck generates nil pointer checking code.
-// Used only for automatically inserted nil checks,
-// not for user code like 'x != nil'.
-func (s *state) nilCheck(ptr *ssa.Value) {
-	if disable_checknil != 0 {
-		return
-	}
-	s.newValue2(ssa.OpNilCheck, ssa.TypeVoid, ptr, s.mem())
-}
-
-// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
-// Starts a new block on return.
-// idx is already converted to full int width.
-func (s *state) boundsCheck(idx, len *ssa.Value) {
-	if Debug['B'] != 0 {
-		return
-	}
-
-	// bounds check
-	cmp := s.newValue2(ssa.OpIsInBounds, Types[TBOOL], idx, len)
-	s.check(cmp, panicindex)
-}
-
-// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
-// Starts a new block on return.
-// idx and len are already converted to full int width.
-func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
-	if Debug['B'] != 0 {
-		return
-	}
-
-	// bounds check
-	cmp := s.newValue2(ssa.OpIsSliceInBounds, Types[TBOOL], idx, len)
-	s.check(cmp, panicslice)
-}
-
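// Illustrative sketch (not part of the diffed file): the difference between the
// two checks above. Indexing requires 0 <= idx < len (OpIsInBounds), while
// slicing allows the bound to equal the length (OpIsSliceInBounds).
package main

import "fmt"

func main() {
	s := []int{1, 2, 3}
	fmt.Println(s[3:])  // ok: a low bound equal to len(s) yields an empty slice
	fmt.Println(s[1:3]) // ok: 0 <= 1 <= 3 and 0 <= 3 <= 3
	// fmt.Println(s[3]) // would panic: indexing needs 3 < len(s)
}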
-// If cmp (a bool) is false, panic using the given function.
-func (s *state) check(cmp *ssa.Value, fn *Node) {
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-	bNext := s.f.NewBlock(ssa.BlockPlain)
-	line := s.peekLine()
-	bPanic := s.panics[funcLine{fn, line}]
-	if bPanic == nil {
-		bPanic = s.f.NewBlock(ssa.BlockPlain)
-		s.panics[funcLine{fn, line}] = bPanic
-		s.startBlock(bPanic)
-		// The panic call takes/returns memory to ensure that the right
-		// memory state is observed if the panic happens.
-		s.rtcall(fn, false, nil)
-	}
-	b.AddEdgeTo(bNext)
-	b.AddEdgeTo(bPanic)
-	s.startBlock(bNext)
-}
-
-func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
-	needcheck := true
-	switch b.Op {
-	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
-		if b.AuxInt != 0 {
-			needcheck = false
-		}
-	}
-	if needcheck {
-		// do a size-appropriate check for zero
-		cmp := s.newValue2(s.ssaOp(ONE, n.Type), Types[TBOOL], b, s.zeroVal(n.Type))
-		s.check(cmp, panicdivide)
-	}
-	return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
-}
-
-// rtcall issues a call to the given runtime function fn with the listed args.
-// Returns a slice of results of the given result types.
-// The call is added to the end of the current block.
-// If returns is false, the block is marked as an exit block.
-func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
-	// Write args to the stack
-	off := Ctxt.FixedFrameSize()
-	for _, arg := range args {
-		t := arg.Type
-		off = Rnd(off, t.Alignment())
-		ptr := s.sp
-		if off != 0 {
-			ptr = s.newValue1I(ssa.OpOffPtr, t.PtrTo(), off, s.sp)
-		}
-		size := t.Size()
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, size, ptr, arg, s.mem())
-		off += size
-	}
-	off = Rnd(off, int64(Widthptr))
-	if Thearch.LinkArch.Name == "amd64p32" {
-		// amd64p32 wants 8-byte alignment of the start of the return values.
-		off = Rnd(off, 8)
-	}
-
-	// Issue call
-	call := s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, fn.Sym, s.mem())
-	s.vars[&memVar] = call
-
-	if !returns {
-		// Finish block
-		b := s.endBlock()
-		b.Kind = ssa.BlockExit
-		b.SetControl(call)
-		call.AuxInt = off - Ctxt.FixedFrameSize()
-		if len(results) > 0 {
-			Fatalf("panic call can't have results")
-		}
-		return nil
-	}
-
-	// Load results
-	res := make([]*ssa.Value, len(results))
-	for i, t := range results {
-		off = Rnd(off, t.Alignment())
-		ptr := s.sp
-		if off != 0 {
-			ptr = s.newValue1I(ssa.OpOffPtr, ptrto(t), off, s.sp)
-		}
-		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
-		off += t.Size()
-	}
-	off = Rnd(off, int64(Widthptr))
-
-	// Remember how much callee stack space we needed.
-	call.AuxInt = off
-
-	return res
-}
-
-// insertWBmove inserts the assignment *left = *right including a write barrier.
-// t is the type being assigned.
-// If right == nil, then we're zeroing *left.
-func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32, rightIsVolatile bool) {
-	// if writeBarrier.enabled {
-	//   typedmemmove(&t, left, right)
-	// } else {
-	//   *left = *right
-	// }
-	//
-	// or
-	//
-	// if writeBarrier.enabled {
-	//   typedmemclr(&t, left)
-	// } else {
-	//   *left = zeroValue
-	// }
-
-	if s.noWB {
-		s.Error("write barrier prohibited")
-	}
-	if s.WBLineno == 0 {
-		s.WBLineno = left.Line
-	}
-
-	var val *ssa.Value
-	if right == nil {
-		val = s.newValue2I(ssa.OpZeroWB, ssa.TypeMem, sizeAlignAuxInt(t), left, s.mem())
-	} else {
-		var op ssa.Op
-		if rightIsVolatile {
-			op = ssa.OpMoveWBVolatile
-		} else {
-			op = ssa.OpMoveWB
-		}
-		val = s.newValue3I(op, ssa.TypeMem, sizeAlignAuxInt(t), left, right, s.mem())
-	}
-	val.Aux = &ssa.ExternSymbol{Typ: Types[TUINTPTR], Sym: typenamesym(t)}
-	s.vars[&memVar] = val
-
-	// WB ops will be expanded to branches at writebarrier phase.
-	// To make it easy, we put WB ops at the end of a block, so
-	// that it does not need to split a block into two parts when
-	// expanding WB ops.
-	b := s.f.NewBlock(ssa.BlockPlain)
-	s.endBlock().AddEdgeTo(b)
-	s.startBlock(b)
-}
-
-// insertWBstore inserts the assignment *left = right including a write barrier.
-// t is the type being assigned.
-func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32, skip skipMask) {
-	// store scalar fields
-	// if writeBarrier.enabled {
-	//   writebarrierptr for pointer fields
-	// } else {
-	//   store pointer fields
-	// }
-
-	if s.noWB {
-		s.Error("write barrier prohibited")
-	}
-	if s.WBLineno == 0 {
-		s.WBLineno = left.Line
-	}
-	s.storeTypeScalars(t, left, right, skip)
-	s.storeTypePtrsWB(t, left, right)
-
-	// WB ops will be expanded to branches at writebarrier phase.
-	// To make it easy, we put WB ops at the end of a block, so
-	// that it does not need to split a block into two parts when
-	// expanding WB ops.
-	b := s.f.NewBlock(ssa.BlockPlain)
-	s.endBlock().AddEdgeTo(b)
-	s.startBlock(b)
-}
-
-// do *left = right for all scalar (non-pointer) parts of t.
-func (s *state) storeTypeScalars(t *Type, left, right *ssa.Value, skip skipMask) {
-	switch {
-	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, t.Size(), left, right, s.mem())
-	case t.IsPtrShaped():
-		// no scalar fields.
-	case t.IsString():
-		if skip&skipLen != 0 {
-			return
-		}
-		len := s.newValue1(ssa.OpStringLen, Types[TINT], right)
-		lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
-	case t.IsSlice():
-		if skip&skipLen == 0 {
-			len := s.newValue1(ssa.OpSliceLen, Types[TINT], right)
-			lenAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), s.config.IntSize, left)
-			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenAddr, len, s.mem())
-		}
-		if skip&skipCap == 0 {
-			cap := s.newValue1(ssa.OpSliceCap, Types[TINT], right)
-			capAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TINT]), 2*s.config.IntSize, left)
-			s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capAddr, cap, s.mem())
-		}
-	case t.IsInterface():
-		// itab field doesn't need a write barrier (even though it is a pointer).
-		itab := s.newValue1(ssa.OpITab, ptrto(Types[TUINT8]), right)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, left, itab, s.mem())
-	case t.IsStruct():
-		n := t.NumFields()
-		for i := 0; i < n; i++ {
-			ft := t.FieldType(i)
-			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
-			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
-			s.storeTypeScalars(ft.(*Type), addr, val, 0)
-		}
-	case t.IsArray() && t.NumElem() == 0:
-		// nothing
-	case t.IsArray() && t.NumElem() == 1:
-		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
-	default:
-		s.Fatalf("bad write barrier type %v", t)
-	}
-}
-
-// do *left = right for all pointer parts of t.
-func (s *state) storeTypePtrs(t *Type, left, right *ssa.Value) {
-	switch {
-	case t.IsPtrShaped():
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
-	case t.IsString():
-		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
-	case t.IsSlice():
-		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
-	case t.IsInterface():
-		// itab field is treated as a scalar.
-		idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right)
-		idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
-	case t.IsStruct():
-		n := t.NumFields()
-		for i := 0; i < n; i++ {
-			ft := t.FieldType(i)
-			if !haspointers(ft.(*Type)) {
-				continue
-			}
-			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
-			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
-			s.storeTypePtrs(ft.(*Type), addr, val)
-		}
-	case t.IsArray() && t.NumElem() == 0:
-		// nothing
-	case t.IsArray() && t.NumElem() == 1:
-		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
-	default:
-		s.Fatalf("bad write barrier type %v", t)
-	}
-}
-
-// do *left = right for all pointer parts of t, with write barriers if necessary.
-func (s *state) storeTypePtrsWB(t *Type, left, right *ssa.Value) {
-	switch {
-	case t.IsPtrShaped():
-		s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, right, s.mem())
-	case t.IsString():
-		ptr := s.newValue1(ssa.OpStringPtr, ptrto(Types[TUINT8]), right)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
-	case t.IsSlice():
-		ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), right)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, left, ptr, s.mem())
-	case t.IsInterface():
-		// itab field is treated as a scalar.
-		idata := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), right)
-		idataAddr := s.newValue1I(ssa.OpOffPtr, ptrto(Types[TUINT8]), s.config.PtrSize, left)
-		s.vars[&memVar] = s.newValue3I(ssa.OpStoreWB, ssa.TypeMem, s.config.PtrSize, idataAddr, idata, s.mem())
-	case t.IsStruct():
-		n := t.NumFields()
-		for i := 0; i < n; i++ {
-			ft := t.FieldType(i)
-			if !haspointers(ft.(*Type)) {
-				continue
-			}
-			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
-			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
-			s.storeTypePtrsWB(ft.(*Type), addr, val)
-		}
-	case t.IsArray() && t.NumElem() == 0:
-		// nothing
-	case t.IsArray() && t.NumElem() == 1:
-		s.storeTypePtrsWB(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
-	default:
-		s.Fatalf("bad write barrier type %v", t)
-	}
-}
-
-// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
-// i,j,k may be nil, in which case they are set to their default value.
-// t is a slice, ptr to array, or string type.
-func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
-	var elemtype *Type
-	var ptrtype *Type
-	var ptr *ssa.Value
-	var len *ssa.Value
-	var cap *ssa.Value
-	zero := s.constInt(Types[TINT], 0)
-	switch {
-	case t.IsSlice():
-		elemtype = t.Elem()
-		ptrtype = ptrto(elemtype)
-		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
-		len = s.newValue1(ssa.OpSliceLen, Types[TINT], v)
-		cap = s.newValue1(ssa.OpSliceCap, Types[TINT], v)
-	case t.IsString():
-		elemtype = Types[TUINT8]
-		ptrtype = ptrto(elemtype)
-		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
-		len = s.newValue1(ssa.OpStringLen, Types[TINT], v)
-		cap = len
-	case t.IsPtr():
-		if !t.Elem().IsArray() {
-			s.Fatalf("bad ptr to array in slice %v\n", t)
-		}
-		elemtype = t.Elem().Elem()
-		ptrtype = ptrto(elemtype)
-		s.nilCheck(v)
-		ptr = v
-		len = s.constInt(Types[TINT], t.Elem().NumElem())
-		cap = len
-	default:
-		s.Fatalf("bad type in slice %v\n", t)
-	}
-
-	// Set default values
-	if i == nil {
-		i = zero
-	}
-	if j == nil {
-		j = len
-	}
-	if k == nil {
-		k = cap
-	}
-
-	// Panic if slice indices are not in bounds.
-	s.sliceBoundsCheck(i, j)
-	if j != k {
-		s.sliceBoundsCheck(j, k)
-	}
-	if k != cap {
-		s.sliceBoundsCheck(k, cap)
-	}
-
-	// Generate the following code assuming that indexes are in bounds.
-	// The masking is to make sure that we don't generate a slice
-	// that points to the next object in memory.
-	// rlen = j - i
-	// rcap = k - i
-	// delta = i * elemsize
-	// rptr = p + delta&mask(rcap)
-	// result = (SliceMake rptr rlen rcap)
-	// where mask(x) is 0 if x==0 and -1 if x>0.
-	subOp := s.ssaOp(OSUB, Types[TINT])
-	mulOp := s.ssaOp(OMUL, Types[TINT])
-	andOp := s.ssaOp(OAND, Types[TINT])
-	rlen := s.newValue2(subOp, Types[TINT], j, i)
-	var rcap *ssa.Value
-	switch {
-	case t.IsString():
-		// Capacity of the result is unimportant. However, we use
-		// rcap to test if we've generated a zero-length slice.
-		// Use length of strings for that.
-		rcap = rlen
-	case j == k:
-		rcap = rlen
-	default:
-		rcap = s.newValue2(subOp, Types[TINT], k, i)
-	}
-
-	var rptr *ssa.Value
-	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
-		// No pointer arithmetic necessary.
-		rptr = ptr
-	} else {
-		// delta = # of bytes to offset pointer by.
-		delta := s.newValue2(mulOp, Types[TINT], i, s.constInt(Types[TINT], elemtype.Width))
-		// If we're slicing to the point where the capacity is zero,
-		// zero out the delta.
-		mask := s.newValue1(ssa.OpSlicemask, Types[TINT], rcap)
-		delta = s.newValue2(andOp, Types[TINT], delta, mask)
-		// Compute rptr = ptr + delta
-		rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
-	}
-
-	return rptr, rlen, rcap
-}
-
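// Illustrative sketch (not part of the diffed file): the masking step from the
// comment above, written in plain Go. sliceParts is a made-up helper; it only
// mirrors the rlen/rcap/delta/mask arithmetic, not the real slice operation.
package main

import "fmt"

func sliceParts(elemSize, i, j, k int64) (deltaBytes, rlen, rcap int64) {
	rlen = j - i
	rcap = k - i
	delta := i * elemSize
	var mask int64 // 0 if rcap == 0, all ones otherwise
	if rcap > 0 {
		mask = -1
	}
	return delta & mask, rlen, rcap
}

func main() {
	// v[4:4] on a []int64 of length 4: len and cap are 0, and the masked
	// delta keeps the result pointer from stepping 32 bytes past the array.
	fmt.Println(sliceParts(8, 4, 4, 4)) // 0 0 0
}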
-type u642fcvtTab struct {
-	geq, cvt2F, and, rsh, or, add ssa.Op
-	one                           func(*state, ssa.Type, int64) *ssa.Value
-}
-
-var u64_f64 u642fcvtTab = u642fcvtTab{
-	geq:   ssa.OpGeq64,
-	cvt2F: ssa.OpCvt64to64F,
-	and:   ssa.OpAnd64,
-	rsh:   ssa.OpRsh64Ux64,
-	or:    ssa.OpOr64,
-	add:   ssa.OpAdd64F,
-	one:   (*state).constInt64,
-}
-
-var u64_f32 u642fcvtTab = u642fcvtTab{
-	geq:   ssa.OpGeq64,
-	cvt2F: ssa.OpCvt64to32F,
-	and:   ssa.OpAnd64,
-	rsh:   ssa.OpRsh64Ux64,
-	or:    ssa.OpOr64,
-	add:   ssa.OpAdd32F,
-	one:   (*state).constInt64,
-}
-
-func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
-}
-
-func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
-}
-
-func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	// if x >= 0 {
-	//    result = (floatY) x
-	// } else {
-	// 	  y = uintX(x) ; y = x & 1
-	// 	  z = uintX(x) ; z = z >> 1
-	// 	  z = z >> 1
-	// 	  z = z | y
-	// 	  result = floatY(z)
-	// 	  result = result + result
-	// }
-	//
-	// Code borrowed from old code generator.
-	// What's going on: large 64-bit "unsigned" looks like
-	// negative number to hardware's integer-to-float
-	// conversion. However, because the mantissa is only
-	// 63 bits, we don't need the LSB, so instead we do an
-	// unsigned right shift (divide by two), convert, and
-	// double. However, before we do that, we need to be
-	// sure that we do not lose a "1" if that made the
-	// difference in the resulting rounding. Therefore, we
-	// preserve it, and OR (not ADD) it back in. The case
-	// that matters is when the eleven discarded bits are
-	// equal to 10000000001; that rounds up, and the 1 cannot
-	// be lost else it would round down if the LSB of the
-	// candidate mantissa is 0.
-	cmp := s.newValue2(cvttab.geq, Types[TBOOL], x, s.zeroVal(ft))
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	a0 := s.newValue1(cvttab.cvt2F, tt, x)
-	s.vars[n] = a0
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	one := cvttab.one(s, ft, 1)
-	y := s.newValue2(cvttab.and, ft, x, one)
-	z := s.newValue2(cvttab.rsh, ft, x, one)
-	z = s.newValue2(cvttab.or, ft, z, y)
-	a := s.newValue1(cvttab.cvt2F, tt, z)
-	a1 := s.newValue2(cvttab.add, tt, a, a)
-	s.vars[n] = a1
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, n.Type)
-}
-
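// Illustrative sketch (not part of the diffed file): the shift/or/double trick
// explained in the comment above, written as ordinary Go. u64toF64 is a
// made-up name; the real conversion is emitted inline by the compiler.
package main

import "fmt"

func u64toF64(x uint64) float64 {
	if int64(x) >= 0 {
		return float64(int64(x)) // high bit clear: signed conversion works directly
	}
	y := x & 1                   // keep the bit that the shift would discard...
	z := x>>1 | y                // ...by OR-ing (not adding) it back in
	return float64(int64(z)) * 2 // convert the halved value, then double
}

func main() {
	x := uint64(1)<<63 + 3
	fmt.Println(u64toF64(x) == float64(x)) // true
}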
-type u322fcvtTab struct {
-	cvtI2F, cvtF2F ssa.Op
-}
-
-var u32_f64 u322fcvtTab = u322fcvtTab{
-	cvtI2F: ssa.OpCvt32to64F,
-	cvtF2F: ssa.OpCopy,
-}
-
-var u32_f32 u322fcvtTab = u322fcvtTab{
-	cvtI2F: ssa.OpCvt32to32F,
-	cvtF2F: ssa.OpCvt64Fto32F,
-}
-
-func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
-}
-
-func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
-}
-
-func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	// if x >= 0 {
-	// 	result = floatY(x)
-	// } else {
-	// 	result = floatY(float64(x) + (1<<32))
-	// }
-	cmp := s.newValue2(ssa.OpGeq32, Types[TBOOL], x, s.zeroVal(ft))
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
-	s.vars[n] = a0
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	a1 := s.newValue1(ssa.OpCvt32to64F, Types[TFLOAT64], x)
-	twoToThe32 := s.constFloat64(Types[TFLOAT64], float64(1<<32))
-	a2 := s.newValue2(ssa.OpAdd64F, Types[TFLOAT64], a1, twoToThe32)
-	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
-
-	s.vars[n] = a3
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, n.Type)
-}
-
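// Illustrative sketch (not part of the diffed file): the uint32 branch above in
// plain Go. A uint32 with its high bit set looks negative to a signed 32-bit
// convert, so the value goes through float64 and 1<<32 is added back (the
// float32 variant then narrows the result with cvtF2F).
package main

import "fmt"

func u32toF64(x uint32) float64 {
	if int32(x) >= 0 {
		return float64(int32(x))
	}
	return float64(int32(x)) + (1 << 32)
}

func main() {
	fmt.Println(u32toF64(0xFFFFFFFF) == float64(uint32(0xFFFFFFFF))) // true
}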
-// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
-func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
-	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
-		s.Fatalf("node must be a map or a channel")
-	}
-	// if n == nil {
-	//   return 0
-	// } else {
-	//   // len
-	//   return *((*int)n)
-	//   // cap
-	//   return *(((*int)n)+1)
-	// }
-	lenType := n.Type
-	nilValue := s.constNil(Types[TUINTPTR])
-	cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchUnlikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	// length/capacity of a nil map/chan is zero
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	s.vars[n] = s.zeroVal(lenType)
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	if n.Op == OLEN {
-		// length is stored in the first word for map/chan
-		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
-	} else if n.Op == OCAP {
-		// capacity is stored in the second word for chan
-		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
-		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
-	} else {
-		s.Fatalf("op must be OLEN or OCAP")
-	}
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, lenType)
-}
-
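// Illustrative sketch (not part of the diffed file): the nil branch generated
// above is what makes len and cap well defined for nil maps and channels.
package main

import "fmt"

func main() {
	var m map[string]int
	var c chan int
	fmt.Println(len(m), len(c), cap(c)) // 0 0 0
}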
-type f2uCvtTab struct {
-	ltf, cvt2U, subf, or ssa.Op
-	floatValue           func(*state, ssa.Type, float64) *ssa.Value
-	intValue             func(*state, ssa.Type, int64) *ssa.Value
-	cutoff               uint64
-}
-
-var f32_u64 f2uCvtTab = f2uCvtTab{
-	ltf:        ssa.OpLess32F,
-	cvt2U:      ssa.OpCvt32Fto64,
-	subf:       ssa.OpSub32F,
-	or:         ssa.OpOr64,
-	floatValue: (*state).constFloat32,
-	intValue:   (*state).constInt64,
-	cutoff:     9223372036854775808,
-}
-
-var f64_u64 f2uCvtTab = f2uCvtTab{
-	ltf:        ssa.OpLess64F,
-	cvt2U:      ssa.OpCvt64Fto64,
-	subf:       ssa.OpSub64F,
-	or:         ssa.OpOr64,
-	floatValue: (*state).constFloat64,
-	intValue:   (*state).constInt64,
-	cutoff:     9223372036854775808,
-}
-
-var f32_u32 f2uCvtTab = f2uCvtTab{
-	ltf:        ssa.OpLess32F,
-	cvt2U:      ssa.OpCvt32Fto32,
-	subf:       ssa.OpSub32F,
-	or:         ssa.OpOr32,
-	floatValue: (*state).constFloat32,
-	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
-	cutoff:     2147483648,
-}
-
-var f64_u32 f2uCvtTab = f2uCvtTab{
-	ltf:        ssa.OpLess64F,
-	cvt2U:      ssa.OpCvt64Fto32,
-	subf:       ssa.OpSub64F,
-	or:         ssa.OpOr32,
-	floatValue: (*state).constFloat64,
-	intValue:   func(s *state, t ssa.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
-	cutoff:     2147483648,
-}
-
-func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.floatToUint(&f32_u64, n, x, ft, tt)
-}
-func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.floatToUint(&f64_u64, n, x, ft, tt)
-}
-
-func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.floatToUint(&f32_u32, n, x, ft, tt)
-}
-
-func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	return s.floatToUint(&f64_u32, n, x, ft, tt)
-}
-
-func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *Type) *ssa.Value {
-	// cutoff:=1<<(intY_Size-1)
-	// if x < floatX(cutoff) {
-	// 	result = uintY(x)
-	// } else {
-	// 	y = x - floatX(cutoff)
-	// 	z = uintY(y)
-	// 	result = z | -(cutoff)
-	// }
-	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
-	cmp := s.newValue2(cvttab.ltf, Types[TBOOL], x, cutoff)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cmp)
-	b.Likely = ssa.BranchLikely
-
-	bThen := s.f.NewBlock(ssa.BlockPlain)
-	bElse := s.f.NewBlock(ssa.BlockPlain)
-	bAfter := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bThen)
-	s.startBlock(bThen)
-	a0 := s.newValue1(cvttab.cvt2U, tt, x)
-	s.vars[n] = a0
-	s.endBlock()
-	bThen.AddEdgeTo(bAfter)
-
-	b.AddEdgeTo(bElse)
-	s.startBlock(bElse)
-	y := s.newValue2(cvttab.subf, ft, x, cutoff)
-	y = s.newValue1(cvttab.cvt2U, tt, y)
-	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
-	a1 := s.newValue2(cvttab.or, tt, y, z)
-	s.vars[n] = a1
-	s.endBlock()
-	bElse.AddEdgeTo(bAfter)
-
-	s.startBlock(bAfter)
-	return s.variable(n, n.Type)
-}
-
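// Illustrative sketch (not part of the diffed file): the cutoff branch above in
// plain Go for float64 -> uint64. f64toU64 is a made-up name; values below
// 1<<63 convert directly, larger values have the cutoff subtracted first and
// the top bit OR-ed back in afterwards.
package main

import "fmt"

func f64toU64(x float64) uint64 {
	const cutoff = 1 << 63
	if x < cutoff {
		return uint64(int64(x))
	}
	return uint64(int64(x-cutoff)) | (1 << 63)
}

func main() {
	fmt.Println(f64toU64(1e19) == uint64(1e19)) // true
}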
-// ifaceType returns the value for the word containing the type.
-// t is the type of the interface expression.
-// v is the corresponding value.
-func (s *state) ifaceType(t *Type, v *ssa.Value) *ssa.Value {
-	byteptr := ptrto(Types[TUINT8]) // type used in runtime prototypes for runtime type (*byte)
-
-	if t.IsEmptyInterface() {
-		// Have eface. The type is the first word in the struct.
-		return s.newValue1(ssa.OpITab, byteptr, v)
-	}
-
-	// Have iface.
-	// The first word in the struct is the itab.
-	// If the itab is nil, return 0.
-	// Otherwise, the second word in the itab is the type.
-
-	tab := s.newValue1(ssa.OpITab, byteptr, v)
-	s.vars[&typVar] = tab
-	isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(isnonnil)
-	b.Likely = ssa.BranchLikely
-
-	bLoad := s.f.NewBlock(ssa.BlockPlain)
-	bEnd := s.f.NewBlock(ssa.BlockPlain)
-
-	b.AddEdgeTo(bLoad)
-	b.AddEdgeTo(bEnd)
-	bLoad.AddEdgeTo(bEnd)
-
-	s.startBlock(bLoad)
-	off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), tab)
-	s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
-	s.endBlock()
-
-	s.startBlock(bEnd)
-	typ := s.variable(&typVar, byteptr)
-	delete(s.vars, &typVar)
-	return typ
-}
-
-// dottype generates SSA for a type assertion node.
-// commaok indicates whether to panic or return a bool.
-// If commaok is false, resok will be nil.
-func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
-	iface := s.expr(n.Left)            // input interface
-	target := s.expr(typename(n.Type)) // target type
-	byteptr := ptrto(Types[TUINT8])
-
-	if n.Type.IsInterface() {
-		if n.Type.IsEmptyInterface() {
-			// Converting to an empty interface.
-			// Input could be an empty or nonempty interface.
-			if Debug_typeassert > 0 {
-				Warnl(n.Lineno, "type assertion inlined")
-			}
-
-			// Get itab/type field from input.
-			itab := s.newValue1(ssa.OpITab, byteptr, iface)
-			// Conversion succeeds iff that field is not nil.
-			cond := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], itab, s.constNil(byteptr))
-
-			if n.Left.Type.IsEmptyInterface() && commaok {
-				// Converting empty interface to empty interface with ,ok is just a nil check.
-				return iface, cond
-			}
-
-			// Branch on nilness.
-			b := s.endBlock()
-			b.Kind = ssa.BlockIf
-			b.SetControl(cond)
-			b.Likely = ssa.BranchLikely
-			bOk := s.f.NewBlock(ssa.BlockPlain)
-			bFail := s.f.NewBlock(ssa.BlockPlain)
-			b.AddEdgeTo(bOk)
-			b.AddEdgeTo(bFail)
-
-			if !commaok {
-				// On failure, panic by calling panicnildottype.
-				s.startBlock(bFail)
-				s.rtcall(panicnildottype, false, nil, target)
-
-				// On success, return (perhaps modified) input interface.
-				s.startBlock(bOk)
-				if n.Left.Type.IsEmptyInterface() {
-					res = iface // Use input interface unchanged.
-					return
-				}
-				// Load type out of itab, build interface with existing idata.
-				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
-				typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
-				idata := s.newValue1(ssa.OpIData, n.Type, iface)
-				res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
-				return
-			}
-
-			s.startBlock(bOk)
-			// nonempty -> empty
-			// Need to load type from itab
-			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
-			s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
-			s.endBlock()
-
-			// itab is nil, might as well use that as the nil result.
-			s.startBlock(bFail)
-			s.vars[&typVar] = itab
-			s.endBlock()
-
-			// Merge point.
-			bEnd := s.f.NewBlock(ssa.BlockPlain)
-			bOk.AddEdgeTo(bEnd)
-			bFail.AddEdgeTo(bEnd)
-			s.startBlock(bEnd)
-			idata := s.newValue1(ssa.OpIData, n.Type, iface)
-			res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
-			resok = cond
-			delete(s.vars, &typVar)
-			return
-		}
-		// converting to a nonempty interface needs a runtime call.
-		if Debug_typeassert > 0 {
-			Warnl(n.Lineno, "type assertion not inlined")
-		}
-		if n.Left.Type.IsEmptyInterface() {
-			if commaok {
-				call := s.rtcall(assertE2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
-				return call[0], call[1]
-			}
-			return s.rtcall(assertE2I, true, []*Type{n.Type}, target, iface)[0], nil
-		}
-		if commaok {
-			call := s.rtcall(assertI2I2, true, []*Type{n.Type, Types[TBOOL]}, target, iface)
-			return call[0], call[1]
-		}
-		return s.rtcall(assertI2I, true, []*Type{n.Type}, target, iface)[0], nil
-	}
-
-	if Debug_typeassert > 0 {
-		Warnl(n.Lineno, "type assertion inlined")
-	}
-
-	// Converting to a concrete type.
-	direct := isdirectiface(n.Type)
-	typ := s.ifaceType(n.Left.Type, iface) // actual concrete type of input interface
-
-	var tmp *Node       // temporary for use with large types
-	var addr *ssa.Value // address of tmp
-	if commaok && !canSSAType(n.Type) {
-		// unSSAable type, use temporary.
-		// TODO: get rid of some of these temporaries.
-		tmp = temp(n.Type)
-		addr, _ = s.addr(tmp, false)
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, tmp, s.mem())
-	}
-
-	// TODO:  If we have a nonempty interface and its itab field is nil,
-	// then this test is redundant and ifaceType should just branch directly to bFail.
-	cond := s.newValue2(ssa.OpEqPtr, Types[TBOOL], typ, target)
-	b := s.endBlock()
-	b.Kind = ssa.BlockIf
-	b.SetControl(cond)
-	b.Likely = ssa.BranchLikely
-
-	bOk := s.f.NewBlock(ssa.BlockPlain)
-	bFail := s.f.NewBlock(ssa.BlockPlain)
-	b.AddEdgeTo(bOk)
-	b.AddEdgeTo(bFail)
-
-	if !commaok {
-		// on failure, panic by calling panicdottype
-		s.startBlock(bFail)
-		taddr := s.newValue1A(ssa.OpAddr, byteptr, &ssa.ExternSymbol{Typ: byteptr, Sym: typenamesym(n.Left.Type)}, s.sb)
-		s.rtcall(panicdottype, false, nil, typ, target, taddr)
-
-		// on success, return data from interface
-		s.startBlock(bOk)
-		if direct {
-			return s.newValue1(ssa.OpIData, n.Type, iface), nil
-		}
-		p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface)
-		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
-	}
-
-	// commaok is the more complicated case because we have
-	// a control flow merge point.
-	bEnd := s.f.NewBlock(ssa.BlockPlain)
-	// Note that we need a new valVar each time (unlike okVar where we can
-	// reuse the variable) because it might have a different type every time.
-	valVar := &Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "val"}}
-
-	// type assertion succeeded
-	s.startBlock(bOk)
-	if tmp == nil {
-		if direct {
-			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
-		} else {
-			p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface)
-			s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
-		}
-	} else {
-		p := s.newValue1(ssa.OpIData, ptrto(n.Type), iface)
-		s.vars[&memVar] = s.newValue3I(ssa.OpMove, ssa.TypeMem, sizeAlignAuxInt(n.Type), addr, p, s.mem())
-	}
-	s.vars[&okVar] = s.constBool(true)
-	s.endBlock()
-	bOk.AddEdgeTo(bEnd)
-
-	// type assertion failed
-	s.startBlock(bFail)
-	if tmp == nil {
-		s.vars[valVar] = s.zeroVal(n.Type)
-	} else {
-		s.vars[&memVar] = s.newValue2I(ssa.OpZero, ssa.TypeMem, sizeAlignAuxInt(n.Type), addr, s.mem())
-	}
-	s.vars[&okVar] = s.constBool(false)
-	s.endBlock()
-	bFail.AddEdgeTo(bEnd)
-
-	// merge point
-	s.startBlock(bEnd)
-	if tmp == nil {
-		res = s.variable(valVar, n.Type)
-		delete(s.vars, valVar)
-	} else {
-		res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
-		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, tmp, s.mem())
-	}
-	resok = s.variable(&okVar, Types[TBOOL])
-	delete(s.vars, &okVar)
-	return res, resok
-}
-
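// Illustrative sketch (not part of the diffed file): the two assertion shapes
// handled by dottype. The comma-ok form produces the zero value and false on
// failure; the single-result form calls a panic helper instead.
package main

import "fmt"

func main() {
	var i interface{} = "hello"
	s, ok := i.(string) // concrete-type assertion, commaok == true
	fmt.Println(s, ok)  // hello true
	n, ok := i.(int)    // fails: zero value and false
	fmt.Println(n, ok)  // 0 false
	// _ = i.(int)      // single-result form: would panic at run time
}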
-// checkgoto checks that a goto from the node "from" to the label "to" does not
-// jump into a block or jump over variable declarations.
-// It is a copy of checkgoto in the pre-SSA backend,
-// modified only for line number handling.
-// TODO: document how this works and why it is designed the way it is.
-func (s *state) checkgoto(from *Node, to *Node) {
-	if from.Sym == to.Sym {
-		return
-	}
-
-	nf := 0
-	for fs := from.Sym; fs != nil; fs = fs.Link {
-		nf++
-	}
-	nt := 0
-	for fs := to.Sym; fs != nil; fs = fs.Link {
-		nt++
-	}
-	fs := from.Sym
-	for ; nf > nt; nf-- {
-		fs = fs.Link
-	}
-	if fs != to.Sym {
-		// decide what to complain about.
-		// prefer to complain about 'into block' over declarations,
-		// so scan backward to find most recent block or else dcl.
-		var block *Sym
-
-		var dcl *Sym
-		ts := to.Sym
-		for ; nt > nf; nt-- {
-			if ts.Pkg == nil {
-				block = ts
-			} else {
-				dcl = ts
-			}
-			ts = ts.Link
-		}
-
-		for ts != fs {
-			if ts.Pkg == nil {
-				block = ts
-			} else {
-				dcl = ts
-			}
-			ts = ts.Link
-			fs = fs.Link
-		}
-
-		lno := from.Left.Lineno
-		if block != nil {
-			yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
-		} else {
-			yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
-		}
-	}
-}
-
-// variable returns the value of a variable at the current location.
-func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
-	v := s.vars[name]
-	if v != nil {
-		return v
-	}
-	v = s.fwdVars[name]
-	if v != nil {
-		return v
-	}
-
-	if s.curBlock == s.f.Entry {
-		// No variable should be live at entry.
-		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
-	}
-	// Make a FwdRef, which records a value that's live on block input.
-	// We'll find the matching definition as part of insertPhis.
-	v = s.newValue0A(ssa.OpFwdRef, t, name)
-	s.fwdVars[name] = v
-	s.addNamedValue(name, v)
-	return v
-}
-
-func (s *state) mem() *ssa.Value {
-	return s.variable(&memVar, ssa.TypeMem)
-}
-
-func (s *state) addNamedValue(n *Node, v *ssa.Value) {
-	if n.Class == Pxxx {
-		// Don't track our dummy nodes (&memVar etc.).
-		return
-	}
-	if n.IsAutoTmp() {
-		// Don't track temporary variables.
-		return
-	}
-	if n.Class == PPARAMOUT {
-		// Don't track named output values.  This prevents return values
-		// from being assigned too early. See #14591 and #14762. TODO: allow this.
-		return
-	}
-	if n.Class == PAUTO && n.Xoffset != 0 {
-		s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
-	}
-	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
-	values, ok := s.f.NamedValues[loc]
-	if !ok {
-		s.f.Names = append(s.f.Names, loc)
-	}
-	s.f.NamedValues[loc] = append(values, v)
-}
-
-// Branch is an unresolved branch.
-type Branch struct {
-	P *obj.Prog  // branch instruction
-	B *ssa.Block // target
-}
-
-// SSAGenState contains state needed during Prog generation.
-type SSAGenState struct {
-	// Branches remembers all the branch instructions we've seen
-	// and where they would like to go.
-	Branches []Branch
-
-	// bstart remembers where each block starts (indexed by block ID)
-	bstart []*obj.Prog
-
-	// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
-	SSEto387 map[int16]int16
-	// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
-	ScratchFpMem *Node
-}
-
-// Pc returns the current Prog.
-func (s *SSAGenState) Pc() *obj.Prog {
-	return pc
-}
-
-// SetLineno sets the current source line number.
-func (s *SSAGenState) SetLineno(l int32) {
-	lineno = l
-}
-
-// genssa appends entries to ptxt for each instruction in f.
-// gcargs and gclocals are filled in with pointer maps for the frame.
-func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
-	var s SSAGenState
-
-	e := f.Config.Frontend().(*ssaExport)
-
-	// Remember where each block starts.
-	s.bstart = make([]*obj.Prog, f.NumBlocks())
-
-	var valueProgs map[*obj.Prog]*ssa.Value
-	var blockProgs map[*obj.Prog]*ssa.Block
-	var logProgs = e.log
-	if logProgs {
-		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
-		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
-		f.Logf("genssa %s\n", f.Name)
-		blockProgs[pc] = f.Blocks[0]
-	}
-
-	if Thearch.Use387 {
-		s.SSEto387 = map[int16]int16{}
-	}
-
-	s.ScratchFpMem = scratchFpMem
-	scratchFpMem = nil
-
-	// Emit basic blocks
-	for i, b := range f.Blocks {
-		s.bstart[b.ID] = pc
-		// Emit values in block
-		Thearch.SSAMarkMoves(&s, b)
-		for _, v := range b.Values {
-			x := pc
-			Thearch.SSAGenValue(&s, v)
-			if logProgs {
-				for ; x != pc; x = x.Link {
-					valueProgs[x] = v
-				}
-			}
-		}
-		// Emit control flow instructions for block
-		var next *ssa.Block
-		if i < len(f.Blocks)-1 && Debug['N'] == 0 {
-			// If -N, leave next==nil so every block with successors
-			// ends in a JMP (except call blocks - plive doesn't like
-			// select{send,recv} followed by a JMP call).  Helps keep
-			// line numbers for otherwise empty blocks.
-			next = f.Blocks[i+1]
-		}
-		x := pc
-		Thearch.SSAGenBlock(&s, b, next)
-		if logProgs {
-			for ; x != pc; x = x.Link {
-				blockProgs[x] = b
-			}
-		}
-	}
-
-	// Resolve branches
-	for _, br := range s.Branches {
-		br.P.To.Val = s.bstart[br.B.ID]
-	}
-
-	if logProgs {
-		for p := ptxt; p != nil; p = p.Link {
-			var s string
-			if v, ok := valueProgs[p]; ok {
-				s = v.String()
-			} else if b, ok := blockProgs[p]; ok {
-				s = b.String()
-			} else {
-				s = "   " // most value and branch strings are 2-3 characters long
-			}
-			f.Logf("%s\t%s\n", s, p)
-		}
-		if f.Config.HTML != nil {
-			saved := ptxt.Ctxt.LineHist.PrintFilenameOnly
-			ptxt.Ctxt.LineHist.PrintFilenameOnly = true
-			var buf bytes.Buffer
-			buf.WriteString("<code>")
-			buf.WriteString("<dl class=\"ssa-gen\">")
-			for p := ptxt; p != nil; p = p.Link {
-				buf.WriteString("<dt class=\"ssa-prog-src\">")
-				if v, ok := valueProgs[p]; ok {
-					buf.WriteString(v.HTML())
-				} else if b, ok := blockProgs[p]; ok {
-					buf.WriteString(b.HTML())
-				}
-				buf.WriteString("</dt>")
-				buf.WriteString("<dd class=\"ssa-prog\">")
-				buf.WriteString(html.EscapeString(p.String()))
-				buf.WriteString("</dd>")
-			}
-			buf.WriteString("</dl>")
-			buf.WriteString("</code>")
-			f.Config.HTML.WriteColumn("genssa", buf.String())
-			ptxt.Ctxt.LineHist.PrintFilenameOnly = saved
-		}
-	}
-
-	// Emit static data
-	if f.StaticData != nil {
-		for _, n := range f.StaticData.([]*Node) {
-			if !gen_as_init(n, false) {
-				Fatalf("non-static data marked as static: %v\n\n", n)
-			}
-		}
-	}
-
-	// Generate gc bitmaps.
-	liveness(Curfn, ptxt, gcargs, gclocals)
-
-	// Add frame prologue. Zero ambiguously live variables.
-	Thearch.Defframe(ptxt)
-	if Debug['f'] != 0 {
-		frame(0)
-	}
-
-	// Remove leftover instrumentation from the instruction stream.
-	removevardef(ptxt)
-
-	f.Config.HTML.Close()
-	f.Config.HTML = nil
-}
-
-type FloatingEQNEJump struct {
-	Jump  obj.As
-	Index int
-}
-
-func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
-	p := Prog(jumps.Jump)
-	p.To.Type = obj.TYPE_BRANCH
-	to := jumps.Index
-	branches = append(branches, Branch{p, b.Succs[to].Block()})
-	if to == 1 {
-		likely = -likely
-	}
-	// liblink reorders the instruction stream as it sees fit.
-	// Pass along what we know so liblink can make use of it.
-	// TODO: Once we've fully switched to SSA,
-	// make liblink leave our output alone.
-	switch likely {
-	case ssa.BranchUnlikely:
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 0
-	case ssa.BranchLikely:
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 1
-	}
-	return branches
-}
-
-func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
-	likely := b.Likely
-	switch next {
-	case b.Succs[0].Block():
-		s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
-		s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
-	case b.Succs[1].Block():
-		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
-		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
-	default:
-		s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
-		s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
-		q := Prog(obj.AJMP)
-		q.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
-	}
-}
-
-func AuxOffset(v *ssa.Value) (offset int64) {
-	if v.Aux == nil {
-		return 0
-	}
-	switch sym := v.Aux.(type) {
-
-	case *ssa.AutoSymbol:
-		n := sym.Node.(*Node)
-		return n.Xoffset
-	}
-	return 0
-}
-
-// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
-func AddAux(a *obj.Addr, v *ssa.Value) {
-	AddAux2(a, v, v.AuxInt)
-}
-func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
-	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
-		v.Fatalf("bad AddAux addr %v", a)
-	}
-	// add integer offset
-	a.Offset += offset
-
-	// If no additional symbol offset, we're done.
-	if v.Aux == nil {
-		return
-	}
-	// Add symbol's offset from its base register.
-	switch sym := v.Aux.(type) {
-	case *ssa.ExternSymbol:
-		a.Name = obj.NAME_EXTERN
-		switch s := sym.Sym.(type) {
-		case *Sym:
-			a.Sym = Linksym(s)
-		case *obj.LSym:
-			a.Sym = s
-		default:
-			v.Fatalf("ExternSymbol.Sym is %T", s)
-		}
-	case *ssa.ArgSymbol:
-		n := sym.Node.(*Node)
-		a.Name = obj.NAME_PARAM
-		a.Node = n
-		a.Sym = Linksym(n.Orig.Sym)
-		a.Offset += n.Xoffset
-	case *ssa.AutoSymbol:
-		n := sym.Node.(*Node)
-		a.Name = obj.NAME_AUTO
-		a.Node = n
-		a.Sym = Linksym(n.Sym)
-		a.Offset += n.Xoffset
-	default:
-		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
-	}
-}
-
-// sizeAlignAuxInt returns an AuxInt encoding the size and alignment of type t.
-func sizeAlignAuxInt(t *Type) int64 {
-	return ssa.MakeSizeAndAlign(t.Size(), t.Alignment()).Int64()
-}
-
-// extendIndex extends v to a full int width.
-// It panics using the given function if v does not fit in an int (only on 32-bit archs).
-func (s *state) extendIndex(v *ssa.Value, panicfn *Node) *ssa.Value {
-	size := v.Type.Size()
-	if size == s.config.IntSize {
-		return v
-	}
-	if size > s.config.IntSize {
-		// truncate 64-bit indexes on 32-bit pointer archs. Test the
-		// high word and branch to out-of-bounds failure if it is not 0.
-		if Debug['B'] == 0 {
-			hi := s.newValue1(ssa.OpInt64Hi, Types[TUINT32], v)
-			cmp := s.newValue2(ssa.OpEq32, Types[TBOOL], hi, s.constInt32(Types[TUINT32], 0))
-			s.check(cmp, panicfn)
-		}
-		return s.newValue1(ssa.OpTrunc64to32, Types[TINT], v)
-	}
-
-	// Extend value to the required size
-	var op ssa.Op
-	if v.Type.IsSigned() {
-		switch 10*size + s.config.IntSize {
-		case 14:
-			op = ssa.OpSignExt8to32
-		case 18:
-			op = ssa.OpSignExt8to64
-		case 24:
-			op = ssa.OpSignExt16to32
-		case 28:
-			op = ssa.OpSignExt16to64
-		case 48:
-			op = ssa.OpSignExt32to64
-		default:
-			s.Fatalf("bad signed index extension %s", v.Type)
-		}
-	} else {
-		switch 10*size + s.config.IntSize {
-		case 14:
-			op = ssa.OpZeroExt8to32
-		case 18:
-			op = ssa.OpZeroExt8to64
-		case 24:
-			op = ssa.OpZeroExt16to32
-		case 28:
-			op = ssa.OpZeroExt16to64
-		case 48:
-			op = ssa.OpZeroExt32to64
-		default:
-			s.Fatalf("bad unsigned index extension %s", v.Type)
-		}
-	}
-	return s.newValue1(op, Types[TINT], v)
-}
-
-// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
-// Called during ssaGenValue.
-func CheckLoweredPhi(v *ssa.Value) {
-	if v.Op != ssa.OpPhi {
-		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
-	}
-	if v.Type.IsMemory() {
-		return
-	}
-	f := v.Block.Func
-	loc := f.RegAlloc[v.ID]
-	for _, a := range v.Args {
-		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
-			v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
-		}
-	}
-}
-
-// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
-// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
-// That register contains the closure pointer on closure entry.
-func CheckLoweredGetClosurePtr(v *ssa.Value) {
-	entry := v.Block.Func.Entry
-	if entry != v.Block || entry.Values[0] != v {
-		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
-	}
-}
-
-// KeepAlive marks the variable referenced by OpKeepAlive as live.
-// Called during ssaGenValue.
-func KeepAlive(v *ssa.Value) {
-	if v.Op != ssa.OpKeepAlive {
-		v.Fatalf("KeepAlive called with non-KeepAlive value: %v", v.LongString())
-	}
-	if !v.Args[0].Type.IsPtrShaped() {
-		v.Fatalf("keeping non-pointer alive %v", v.Args[0])
-	}
-	n, _ := AutoVar(v.Args[0])
-	if n == nil {
-		v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0])
-	}
-	// Note: KeepAlive arg may be a small part of a larger variable n.  We keep the
-	// whole variable n alive at this point. (Typically, this happens when
-	// we are requested to keep the idata portion of an interface{} alive, and
-	// we end up keeping the whole interface{} alive.  That's ok.)
-	Gvarlive(n)
-}
-
-// AutoVar returns a *Node and int64 representing the auto variable and offset within it
-// where v should be spilled.
-func AutoVar(v *ssa.Value) (*Node, int64) {
-	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
-	if v.Type.Size() > loc.Type.Size() {
-		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
-	}
-	return loc.N.(*Node), loc.Off
-}
-
-func AddrAuto(a *obj.Addr, v *ssa.Value) {
-	n, off := AutoVar(v)
-	a.Type = obj.TYPE_MEM
-	a.Node = n
-	a.Sym = Linksym(n.Sym)
-	a.Offset = n.Xoffset + off
-	if n.Class == PPARAM || n.Class == PPARAMOUT {
-		a.Name = obj.NAME_PARAM
-	} else {
-		a.Name = obj.NAME_AUTO
-	}
-}
-
-func (s *SSAGenState) AddrScratch(a *obj.Addr) {
-	if s.ScratchFpMem == nil {
-		panic("no scratch memory available; forgot to declare usesScratch for Op?")
-	}
-	a.Type = obj.TYPE_MEM
-	a.Name = obj.NAME_AUTO
-	a.Node = s.ScratchFpMem
-	a.Sym = Linksym(s.ScratchFpMem.Sym)
-	a.Reg = int16(Thearch.REGSP)
-	a.Offset = s.ScratchFpMem.Xoffset
-}
-
-// fieldIdx finds the index of the field referred to by the ODOT node n.
-func fieldIdx(n *Node) int {
-	t := n.Left.Type
-	f := n.Sym
-	if !t.IsStruct() {
-		panic("ODOT's LHS is not a struct")
-	}
-
-	var i int
-	for _, t1 := range t.Fields().Slice() {
-		if t1.Sym != f {
-			i++
-			continue
-		}
-		if t1.Offset != n.Xoffset {
-			panic("field offset doesn't match")
-		}
-		return i
-	}
-	panic(fmt.Sprintf("can't find field in expr %v\n", n))
-
-	// TODO: keep the result of this function somewhere in the ODOT Node
-	// so we don't have to recompute it each time we need it.
-}
-
-// ssaExport exports a bunch of compiler services for the ssa backend.
-type ssaExport struct {
-	log bool
-}
-
-func (s *ssaExport) TypeBool() ssa.Type    { return Types[TBOOL] }
-func (s *ssaExport) TypeInt8() ssa.Type    { return Types[TINT8] }
-func (s *ssaExport) TypeInt16() ssa.Type   { return Types[TINT16] }
-func (s *ssaExport) TypeInt32() ssa.Type   { return Types[TINT32] }
-func (s *ssaExport) TypeInt64() ssa.Type   { return Types[TINT64] }
-func (s *ssaExport) TypeUInt8() ssa.Type   { return Types[TUINT8] }
-func (s *ssaExport) TypeUInt16() ssa.Type  { return Types[TUINT16] }
-func (s *ssaExport) TypeUInt32() ssa.Type  { return Types[TUINT32] }
-func (s *ssaExport) TypeUInt64() ssa.Type  { return Types[TUINT64] }
-func (s *ssaExport) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
-func (s *ssaExport) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
-func (s *ssaExport) TypeInt() ssa.Type     { return Types[TINT] }
-func (s *ssaExport) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
-func (s *ssaExport) TypeString() ssa.Type  { return Types[TSTRING] }
-func (s *ssaExport) TypeBytePtr() ssa.Type { return ptrto(Types[TUINT8]) }
-
-// StringData returns a symbol (a *Sym wrapped in an interface) which
-// is the data component of a global string constant containing s.
-func (*ssaExport) StringData(s string) interface{} {
-	// TODO: is idealstring correct?  It might not matter...
-	data := stringsym(s)
-	return &ssa.ExternSymbol{Typ: idealstring, Sym: data}
-}
-
-func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
-	n := temp(t.(*Type)) // Note: adds new auto to Curfn.Func.Dcl list
-	return n
-}
-
-func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	n := name.N.(*Node)
-	ptrType := ptrto(Types[TUINT8])
-	lenType := Types[TINT]
-	if n.Class == PAUTO && !n.Addrtaken {
-		// Split this string up into two separate variables.
-		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
-		l := e.namedAuto(n.Sym.Name+".len", lenType)
-		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
-	}
-	// Return the two parts of the larger variable.
-	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
-}
-
-func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	n := name.N.(*Node)
-	t := ptrto(Types[TUINT8])
-	if n.Class == PAUTO && !n.Addrtaken {
-		// Split this interface up into two separate variables.
-		f := ".itab"
-		if n.Type.IsEmptyInterface() {
-			f = ".type"
-		}
-		c := e.namedAuto(n.Sym.Name+f, t)
-		d := e.namedAuto(n.Sym.Name+".data", t)
-		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
-	}
-	// Return the two parts of the larger variable.
-	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
-}
-
-func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
-	n := name.N.(*Node)
-	ptrType := ptrto(name.Type.ElemType().(*Type))
-	lenType := Types[TINT]
-	if n.Class == PAUTO && !n.Addrtaken {
-		// Split this slice up into three separate variables.
-		p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
-		l := e.namedAuto(n.Sym.Name+".len", lenType)
-		c := e.namedAuto(n.Sym.Name+".cap", lenType)
-		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
-	}
-	// Return the three parts of the larger variable.
-	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
-		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
-		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
-}
-
-func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	n := name.N.(*Node)
-	s := name.Type.Size() / 2
-	var t *Type
-	if s == 8 {
-		t = Types[TFLOAT64]
-	} else {
-		t = Types[TFLOAT32]
-	}
-	if n.Class == PAUTO && !n.Addrtaken {
-		// Split this complex up into two separate variables.
-		c := e.namedAuto(n.Sym.Name+".real", t)
-		d := e.namedAuto(n.Sym.Name+".imag", t)
-		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
-	}
-	// Return the two parts of the larger variable.
-	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
-}
-
-func (e *ssaExport) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
-	n := name.N.(*Node)
-	var t *Type
-	if name.Type.IsSigned() {
-		t = Types[TINT32]
-	} else {
-		t = Types[TUINT32]
-	}
-	if n.Class == PAUTO && !n.Addrtaken {
-		// Split this int64 up into two separate variables.
-		h := e.namedAuto(n.Sym.Name+".hi", t)
-		l := e.namedAuto(n.Sym.Name+".lo", Types[TUINT32])
-		return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: Types[TUINT32], Off: 0}
-	}
-	// Return the two parts of the larger variable.
-	if Thearch.LinkArch.ByteOrder == binary.BigEndian {
-		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off + 4}
-	}
-	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: Types[TUINT32], Off: name.Off}
-}
-
-func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
-	n := name.N.(*Node)
-	st := name.Type
-	ft := st.FieldType(i)
-	if n.Class == PAUTO && !n.Addrtaken {
-		// Note: the _ field may appear several times.  But
-		// have no fear, identically-named but distinct Autos are
-		// ok, albeit maybe confusing for a debugger.
-		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft)
-		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
-	}
-	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
-}
-
-func (e *ssaExport) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
-	n := name.N.(*Node)
-	at := name.Type
-	if at.NumElem() != 1 {
-		Fatalf("bad array size")
-	}
-	et := at.ElemType()
-	if n.Class == PAUTO && !n.Addrtaken {
-		x := e.namedAuto(n.Sym.Name+"[0]", et)
-		return ssa.LocalSlot{N: x, Type: et, Off: 0}
-	}
-	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
-}
-
-// namedAuto returns a new AUTO variable with the given name and type.
-// These are exposed to the debugger.
-func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
-	t := typ.(*Type)
-	s := &Sym{Name: name, Pkg: localpkg}
-	n := nod(ONAME, nil, nil)
-	s.Def = n
-	s.Def.Used = true
-	n.Sym = s
-	n.Type = t
-	n.Class = PAUTO
-	n.Addable = true
-	n.Ullman = 1
-	n.Esc = EscNever
-	n.Xoffset = 0
-	n.Name.Curfn = Curfn
-	Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
-
-	dowidth(t)
-	return n
-}
-
-func (e *ssaExport) CanSSA(t ssa.Type) bool {
-	return canSSAType(t.(*Type))
-}
-
-func (e *ssaExport) Line(line int32) string {
-	return linestr(line)
-}
-
-// Log logs a message from the compiler.
-func (e *ssaExport) Logf(msg string, args ...interface{}) {
-	if e.log {
-		fmt.Printf(msg, args...)
-	}
-}
-
-func (e *ssaExport) Log() bool {
-	return e.log
-}
-
-// Fatal reports a compiler error and exits.
-func (e *ssaExport) Fatalf(line int32, msg string, args ...interface{}) {
-	lineno = line
-	Fatalf(msg, args...)
-}
-
-// Warnl reports a "warning", which is usually flag-triggered
-// logging output for the benefit of tests.
-func (e *ssaExport) Warnl(line int32, fmt_ string, args ...interface{}) {
-	Warnl(line, fmt_, args...)
-}
-
-func (e *ssaExport) Debug_checknil() bool {
-	return Debug_checknil != 0
-}
-
-func (e *ssaExport) Debug_wb() bool {
-	return Debug_wb != 0
-}
-
-func (e *ssaExport) Syslook(name string) interface{} {
-	return syslook(name).Sym
-}
-
-func (n *Node) Typ() ssa.Type {
-	return n.Type
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/ssa_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/ssa_test.go
deleted file mode 100644
index ec7e27c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/ssa_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/ssa_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/ssa_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"internal/testenv"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"testing"
-)
-
-// TODO: move all these tests elsewhere?
-// Perhaps teach test/run.go how to run them with a new action verb.
-func runTest(t *testing.T, filename string) {
-	t.Parallel()
-	doTest(t, filename, "run")
-}
-func buildTest(t *testing.T, filename string) {
-	t.Parallel()
-	doTest(t, filename, "build")
-}
-func doTest(t *testing.T, filename string, kind string) {
-	testenv.MustHaveGoBuild(t)
-	var stdout, stderr bytes.Buffer
-	cmd := exec.Command(testenv.GoToolPath(t), kind, filepath.Join("testdata", filename))
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-	if err := cmd.Run(); err != nil {
-		t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
-	}
-	if s := stdout.String(); s != "" {
-		t.Errorf("Stdout = %s\nWant empty", s)
-	}
-	if s := stderr.String(); strings.Contains(s, "SSA unimplemented") {
-		t.Errorf("Unimplemented message found in stderr:\n%s", s)
-	}
-}
-
-// TestShortCircuit tests OANDAND and OOROR expressions and short circuiting.
-func TestShortCircuit(t *testing.T) { runTest(t, "short.go") }
-
-// TestBreakContinue tests that continue and break statements do what they say.
-func TestBreakContinue(t *testing.T) { runTest(t, "break.go") }
-
-// TestTypeAssertion tests type assertions.
-func TestTypeAssertion(t *testing.T) { runTest(t, "assert.go") }
-
-// TestArithmetic tests that both backends have the same result for arithmetic expressions.
-func TestArithmetic(t *testing.T) { runTest(t, "arith.go") }
-
-// TestFP tests that both backends have the same result for floating point expressions.
-func TestFP(t *testing.T) { runTest(t, "fp.go") }
-
-// TestArithmeticBoundary tests boundary results for arithmetic operations.
-func TestArithmeticBoundary(t *testing.T) { runTest(t, "arithBoundary.go") }
-
-// TestArithmeticConst tests results for arithmetic operations against constants.
-func TestArithmeticConst(t *testing.T) { runTest(t, "arithConst.go") }
-
-func TestChan(t *testing.T) { runTest(t, "chan.go") }
-
-func TestCompound(t *testing.T) { runTest(t, "compound.go") }
-
-func TestCtl(t *testing.T) { runTest(t, "ctl.go") }
-
-func TestLoadStore(t *testing.T) { runTest(t, "loadstore.go") }
-
-func TestMap(t *testing.T) { runTest(t, "map.go") }
-
-func TestRegalloc(t *testing.T) { runTest(t, "regalloc.go") }
-
-func TestString(t *testing.T) { runTest(t, "string.go") }
-
-func TestDeferNoReturn(t *testing.T) { buildTest(t, "deferNoReturn.go") }
-
-// TestClosure tests closure related behavior.
-func TestClosure(t *testing.T) { runTest(t, "closure.go") }
-
-func TestArray(t *testing.T) { runTest(t, "array.go") }
-
-func TestAppend(t *testing.T) { runTest(t, "append.go") }
-
-func TestZero(t *testing.T) { runTest(t, "zero.go") }
-
-func TestAddressed(t *testing.T) { runTest(t, "addressed.go") }
-
-func TestCopy(t *testing.T) { runTest(t, "copy.go") }
-
-func TestUnsafe(t *testing.T) { runTest(t, "unsafe.go") }
-
-func TestPhi(t *testing.T) { runTest(t, "phi.go") }
-
-func TestSlice(t *testing.T) { runTest(t, "slice.go") }
-
-func TestNamedReturn(t *testing.T) { runTest(t, "namedReturn.go") }
-
-func TestDuplicateLoad(t *testing.T) { runTest(t, "dupLoad.go") }
-
-func TestSqrt(t *testing.T) { runTest(t, "sqrt_const.go") }
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/subr.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/subr.go
deleted file mode 100644
index 2413ad5..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/subr.go
+++ /dev/null
@@ -1,2248 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/subr.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/subr.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bytes"
-	"bootstrap/cmd/internal/obj"
-	"crypto/md5"
-	"encoding/binary"
-	"fmt"
-	"os"
-	"runtime/debug"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-type Error struct {
-	lineno int32
-	msg    string
-}
-
-var errors []Error
-
-func errorexit() {
-	flusherrors()
-	if outfile != "" {
-		os.Remove(outfile)
-	}
-	os.Exit(2)
-}
-
-func adderrorname(n *Node) {
-	if n.Op != ODOT {
-		return
-	}
-	old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
-	if len(errors) > 0 && errors[len(errors)-1].lineno == n.Lineno && errors[len(errors)-1].msg == old {
-		errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
-	}
-}
-
-func adderr(line int32, format string, args ...interface{}) {
-	errors = append(errors, Error{
-		lineno: line,
-		msg:    fmt.Sprintf("%v: %s\n", linestr(line), fmt.Sprintf(format, args...)),
-	})
-}
-
-// byLineno sorts errors by lineno.
-type byLineno []Error
-
-func (x byLineno) Len() int           { return len(x) }
-func (x byLineno) Less(i, j int) bool { return x[i].lineno < x[j].lineno }
-func (x byLineno) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-
-// flusherrors sorts errors seen so far by line number, prints them to stdout,
-// and empties the errors array.
-func flusherrors() {
-	Ctxt.Bso.Flush()
-	if len(errors) == 0 {
-		return
-	}
-	sort.Stable(byLineno(errors))
-	for i := 0; i < len(errors); i++ {
-		if i == 0 || errors[i].msg != errors[i-1].msg {
-			fmt.Printf("%s", errors[i].msg)
-		}
-	}
-	errors = errors[:0]
-}
-
-func hcrash() {
-	if Debug['h'] != 0 {
-		flusherrors()
-		if outfile != "" {
-			os.Remove(outfile)
-		}
-		var x *int
-		*x = 0
-	}
-}
-
-func linestr(line int32) string {
-	return Ctxt.Line(int(line))
-}
-
-// lasterror keeps track of the most recently issued error.
-// It is used to avoid multiple error messages on the same
-// line.
-var lasterror struct {
-	syntax int32  // line of last syntax error
-	other  int32  // line of last non-syntax error
-	msg    string // error message of last non-syntax error
-}
-
-func yyerrorl(line int32, format string, args ...interface{}) {
-	msg := fmt.Sprintf(format, args...)
-
-	if strings.HasPrefix(msg, "syntax error") {
-		nsyntaxerrors++
-		// only one syntax error per line, no matter what error
-		if lasterror.syntax == line {
-			return
-		}
-		lasterror.syntax = line
-	} else {
-		// only one of multiple equal non-syntax errors per line
-		// (flusherrors shows only one of them, so we filter them
-		// here as best as we can (they may not appear in order)
-		// so that we don't count them here and exit early, and
-		// then have nothing to show for.)
-		if lasterror.other == line && lasterror.msg == msg {
-			return
-		}
-		lasterror.other = line
-		lasterror.msg = msg
-	}
-
-	adderr(line, "%s", msg)
-
-	hcrash()
-	nerrors++
-	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
-		flusherrors()
-		fmt.Printf("%v: too many errors\n", linestr(line))
-		errorexit()
-	}
-}
-
-func yyerror(format string, args ...interface{}) {
-	yyerrorl(lineno, format, args...)
-}
-
-func Warn(fmt_ string, args ...interface{}) {
-	adderr(lineno, fmt_, args...)
-
-	hcrash()
-}
-
-func Warnl(line int32, fmt_ string, args ...interface{}) {
-	adderr(line, fmt_, args...)
-	if Debug['m'] != 0 {
-		flusherrors()
-	}
-}
-
-func Fatalf(fmt_ string, args ...interface{}) {
-	flusherrors()
-
-	fmt.Printf("%v: internal compiler error: ", linestr(lineno))
-	fmt.Printf(fmt_, args...)
-	fmt.Printf("\n")
-
-	// If this is a released compiler version, ask for a bug report.
-	if strings.HasPrefix(obj.Version, "release") {
-		fmt.Printf("\n")
-		fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
-		fmt.Printf("https://golang.org/issue/new\n")
-	} else {
-		// Not a release; dump a stack trace, too.
-		fmt.Println()
-		os.Stdout.Write(debug.Stack())
-		fmt.Println()
-	}
-
-	hcrash()
-	errorexit()
-}
-
-func linehistpragma(file string) {
-	if Debug['i'] != 0 {
-		fmt.Printf("pragma %s at line %v\n", file, linestr(lexlineno))
-	}
-	Ctxt.AddImport(file)
-}
-
-func linehistpush(file string) {
-	if Debug['i'] != 0 {
-		fmt.Printf("import %s at line %v\n", file, linestr(lexlineno))
-	}
-	Ctxt.LineHist.Push(int(lexlineno), file)
-}
-
-func linehistpop() {
-	if Debug['i'] != 0 {
-		fmt.Printf("end of import at line %v\n", linestr(lexlineno))
-	}
-	Ctxt.LineHist.Pop(int(lexlineno))
-}
-
-func linehistupdate(file string, off int) {
-	if Debug['i'] != 0 {
-		fmt.Printf("line %s at line %v\n", file, linestr(lexlineno))
-	}
-	Ctxt.LineHist.Update(int(lexlineno), file, off)
-}
-
-func setlineno(n *Node) int32 {
-	lno := lineno
-	if n != nil {
-		switch n.Op {
-		case ONAME, OPACK:
-			break
-
-		case OLITERAL, OTYPE:
-			if n.Sym != nil {
-				break
-			}
-			fallthrough
-
-		default:
-			lineno = n.Lineno
-			if lineno == 0 {
-				if Debug['K'] != 0 {
-					Warn("setlineno: line 0")
-				}
-				lineno = lno
-			}
-		}
-	}
-
-	return lno
-}
-
-func lookup(name string) *Sym {
-	return localpkg.Lookup(name)
-}
-
-func lookupf(format string, a ...interface{}) *Sym {
-	return lookup(fmt.Sprintf(format, a...))
-}
-
-func lookupBytes(name []byte) *Sym {
-	return localpkg.LookupBytes(name)
-}
-
-// lookupN looks up the symbol starting with prefix and ending with
-// the decimal n. If prefix is too long, lookupN panics.
-func lookupN(prefix string, n int) *Sym {
-	var buf [20]byte // plenty long enough for all current users
-	copy(buf[:], prefix)
-	b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
-	return lookupBytes(b)
-}
-
-// autolabel generates a new Name node for use with
-// an automatically generated label.
-// prefix is a short mnemonic (e.g. ".s" for switch)
-// to help with debugging.
-// It should begin with "." to avoid conflicts with
-// user labels.
-func autolabel(prefix string) *Node {
-	if prefix[0] != '.' {
-		Fatalf("autolabel prefix must start with '.', have %q", prefix)
-	}
-	fn := Curfn
-	if Curfn == nil {
-		Fatalf("autolabel outside function")
-	}
-	n := fn.Func.Label
-	fn.Func.Label++
-	return newname(lookupN(prefix, int(n)))
-}
-
-var initSyms []*Sym
-
-var nopkg = &Pkg{
-	Syms: make(map[string]*Sym),
-}
-
-func (pkg *Pkg) Lookup(name string) *Sym {
-	if pkg == nil {
-		pkg = nopkg
-	}
-	if s := pkg.Syms[name]; s != nil {
-		return s
-	}
-
-	s := &Sym{
-		Name: name,
-		Pkg:  pkg,
-	}
-	if name == "init" {
-		initSyms = append(initSyms, s)
-	}
-	pkg.Syms[name] = s
-	return s
-}
-
-func (pkg *Pkg) LookupBytes(name []byte) *Sym {
-	if pkg == nil {
-		pkg = nopkg
-	}
-	if s := pkg.Syms[string(name)]; s != nil {
-		return s
-	}
-	str := internString(name)
-	return pkg.Lookup(str)
-}
-
-func Pkglookup(name string, pkg *Pkg) *Sym {
-	return pkg.Lookup(name)
-}
-
-func restrictlookup(name string, pkg *Pkg) *Sym {
-	if !exportname(name) && pkg != localpkg {
-		yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
-	}
-	return Pkglookup(name, pkg)
-}
-
-// find all the exported symbols in package opkg
-// and make them available in the current package
-func importdot(opkg *Pkg, pack *Node) {
-	var s1 *Sym
-	var pkgerror string
-
-	n := 0
-	for _, s := range opkg.Syms {
-		if s.Def == nil {
-			continue
-		}
-		if !exportname(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
-			continue
-		}
-		s1 = lookup(s.Name)
-		if s1.Def != nil {
-			pkgerror = fmt.Sprintf("during import %q", opkg.Path)
-			redeclare(s1, pkgerror)
-			continue
-		}
-
-		s1.Def = s.Def
-		s1.Block = s.Block
-		if s1.Def.Name == nil {
-			Dump("s1def", s1.Def)
-			Fatalf("missing Name")
-		}
-		s1.Def.Name.Pack = pack
-		s1.Origpkg = opkg
-		n++
-	}
-
-	if n == 0 {
-		// can't possibly be used - there were no symbols
-		yyerrorl(pack.Lineno, "imported and not used: %q", opkg.Path)
-	}
-}
-
-func nod(op Op, nleft *Node, nright *Node) *Node {
-	n := new(Node)
-	n.Op = op
-	n.Left = nleft
-	n.Right = nright
-	n.Lineno = lineno
-	n.Xoffset = BADWIDTH
-	n.Orig = n
-	switch op {
-	case OCLOSURE, ODCLFUNC:
-		n.Func = new(Func)
-		n.Func.IsHiddenClosure = Curfn != nil
-	case ONAME:
-		n.Name = new(Name)
-		n.Name.Param = new(Param)
-	case OLABEL, OPACK:
-		n.Name = new(Name)
-	}
-	if n.Name != nil {
-		n.Name.Curfn = Curfn
-	}
-	return n
-}
-
-// nodSym makes a Node with Op op and with the Left field set to left
-// and the Sym field set to sym. This is for ODOT and friends.
-func nodSym(op Op, left *Node, sym *Sym) *Node {
-	n := nod(op, left, nil)
-	n.Sym = sym
-	return n
-}
-
-func saveorignode(n *Node) {
-	if n.Orig != nil {
-		return
-	}
-	norig := nod(n.Op, nil, nil)
-	*norig = *n
-	n.Orig = norig
-}
-
-// methcmp sorts by symbol, then by package path for unexported symbols.
-type methcmp []*Field
-
-func (x methcmp) Len() int      { return len(x) }
-func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x methcmp) Less(i, j int) bool {
-	a := x[i]
-	b := x[j]
-	if a.Sym == nil && b.Sym == nil {
-		return false
-	}
-	if a.Sym == nil {
-		return true
-	}
-	if b.Sym == nil {
-		return false
-	}
-	if a.Sym.Name != b.Sym.Name {
-		return a.Sym.Name < b.Sym.Name
-	}
-	if !exportname(a.Sym.Name) {
-		if a.Sym.Pkg.Path != b.Sym.Pkg.Path {
-			return a.Sym.Pkg.Path < b.Sym.Pkg.Path
-		}
-	}
-
-	return false
-}
-
-func nodintconst(v int64) *Node {
-	c := nod(OLITERAL, nil, nil)
-	c.Addable = true
-	c.SetVal(Val{new(Mpint)})
-	c.Val().U.(*Mpint).SetInt64(v)
-	c.Type = Types[TIDEAL]
-	ullmancalc(c)
-	return c
-}
-
-func nodfltconst(v *Mpflt) *Node {
-	c := nod(OLITERAL, nil, nil)
-	c.Addable = true
-	c.SetVal(Val{newMpflt()})
-	c.Val().U.(*Mpflt).Set(v)
-	c.Type = Types[TIDEAL]
-	ullmancalc(c)
-	return c
-}
-
-func Nodconst(n *Node, t *Type, v int64) {
-	*n = Node{}
-	n.Op = OLITERAL
-	n.Addable = true
-	ullmancalc(n)
-	n.SetVal(Val{new(Mpint)})
-	n.Val().U.(*Mpint).SetInt64(v)
-	n.Type = t
-
-	if t.IsFloat() {
-		Fatalf("nodconst: bad type %v", t)
-	}
-}
-
-func nodnil() *Node {
-	c := nodintconst(0)
-	c.SetVal(Val{new(NilVal)})
-	c.Type = Types[TNIL]
-	return c
-}
-
-func nodbool(b bool) *Node {
-	c := nodintconst(0)
-	c.SetVal(Val{b})
-	c.Type = idealbool
-	return c
-}
-
-// treecopy recursively copies n, with the exception of
-// ONAME, OLITERAL, OTYPE, and non-iota ONONAME leaves.
-// Copies of iota ONONAME nodes are assigned the current
-// value of iota_. If lineno != 0, it sets the line number
-// of newly allocated nodes to lineno.
-func treecopy(n *Node, lineno int32) *Node {
-	if n == nil {
-		return nil
-	}
-
-	switch n.Op {
-	default:
-		m := *n
-		m.Orig = &m
-		m.Left = treecopy(n.Left, lineno)
-		m.Right = treecopy(n.Right, lineno)
-		m.List.Set(listtreecopy(n.List.Slice(), lineno))
-		if lineno != 0 {
-			m.Lineno = lineno
-		}
-		if m.Name != nil && n.Op != ODCLFIELD {
-			Dump("treecopy", n)
-			Fatalf("treecopy Name")
-		}
-		return &m
-
-	case ONONAME:
-		if n.Sym == lookup("iota") {
-			// Not sure yet whether this is the real iota,
-			// but make a copy of the Node* just in case,
-			// so that all the copies of this const definition
-			// don't have the same iota value.
-			m := *n
-			if lineno != 0 {
-				m.Lineno = lineno
-			}
-			m.SetIota(iota_)
-			return &m
-		}
-		return n
-
-	case OPACK:
-		// OPACK nodes are never valid in const value declarations,
-		// but allow them like any other declared symbol to avoid
-		// crashing (golang.org/issue/11361).
-		fallthrough
-
-	case ONAME, OLITERAL, OTYPE:
-		return n
-
-	}
-}
-
-// isnil reports whether n represents the universal untyped zero value "nil".
-func isnil(n *Node) bool {
-	// Check n.Orig because constant propagation may produce typed nil constants,
-	// which don't exist in the Go spec.
-	return Isconst(n.Orig, CTNIL)
-}
-
-func isptrto(t *Type, et EType) bool {
-	if t == nil {
-		return false
-	}
-	if !t.IsPtr() {
-		return false
-	}
-	t = t.Elem()
-	if t == nil {
-		return false
-	}
-	if t.Etype != et {
-		return false
-	}
-	return true
-}
-
-func isblank(n *Node) bool {
-	if n == nil {
-		return false
-	}
-	return isblanksym(n.Sym)
-}
-
-func isblanksym(s *Sym) bool {
-	return s != nil && s.Name == "_"
-}
-
-// methtype returns the underlying type, if any,
-// that owns methods with receiver parameter t.
-// The result is either a named type or an anonymous struct.
-func methtype(t *Type) *Type {
-	if t == nil {
-		return nil
-	}
-
-	// Strip away pointer if it's there.
-	if t.IsPtr() {
-		if t.Sym != nil {
-			return nil
-		}
-		t = t.Elem()
-		if t == nil {
-			return nil
-		}
-	}
-
-	// Must be a named type or anonymous struct.
-	if t.Sym == nil && !t.IsStruct() {
-		return nil
-	}
-
-	// Check types.
-	if issimple[t.Etype] {
-		return t
-	}
-	switch t.Etype {
-	case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
-		return t
-	}
-	return nil
-}
-
-func cplxsubtype(et EType) EType {
-	switch et {
-	case TCOMPLEX64:
-		return TFLOAT32
-
-	case TCOMPLEX128:
-		return TFLOAT64
-	}
-
-	Fatalf("cplxsubtype: %v\n", et)
-	return 0
-}
-
-// eqtype reports whether t1 and t2 are identical, following the spec rules.
-//
-// Any cyclic type must go through a named type, and if one is
-// named, it is only identical to the other if they are the same
-// pointer (t1 == t2), so there's no chance of chasing cycles
-// ad infinitum, so no need for a depth counter.
-func eqtype(t1, t2 *Type) bool {
-	return eqtype1(t1, t2, true, nil)
-}
-
-// eqtypeIgnoreTags is like eqtype but it ignores struct tags for struct identity.
-func eqtypeIgnoreTags(t1, t2 *Type) bool {
-	return eqtype1(t1, t2, false, nil)
-}
-
-type typePair struct {
-	t1 *Type
-	t2 *Type
-}
-
-func eqtype1(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool {
-	if t1 == t2 {
-		return true
-	}
-	if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke || t2.Broke {
-		return false
-	}
-	if t1.Sym != nil || t2.Sym != nil {
-		// Special case: we keep byte/uint8 and rune/int32
-		// separate for error messages. Treat them as equal.
-		switch t1.Etype {
-		case TUINT8:
-			return (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype)
-		case TINT32:
-			return (t1 == Types[TINT32] || t1 == runetype) && (t2 == Types[TINT32] || t2 == runetype)
-		default:
-			return false
-		}
-	}
-
-	if assumedEqual == nil {
-		assumedEqual = make(map[typePair]struct{})
-	} else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
-		return true
-	}
-	assumedEqual[typePair{t1, t2}] = struct{}{}
-
-	switch t1.Etype {
-	case TINTER, TSTRUCT:
-		t1, i1 := iterFields(t1)
-		t2, i2 := iterFields(t2)
-		for ; t1 != nil && t2 != nil; t1, t2 = i1.Next(), i2.Next() {
-			if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, cmpTags, assumedEqual) || cmpTags && t1.Note != t2.Note {
-				return false
-			}
-		}
-
-		if t1 == nil && t2 == nil {
-			return true
-		}
-		return false
-
-	case TFUNC:
-		// Check parameters and result parameters for type equality.
-		// We intentionally ignore receiver parameters for type
-		// equality, because they're never relevant.
-		for _, f := range paramsResults {
-			// Loop over fields in structs, ignoring argument names.
-			ta, ia := iterFields(f(t1))
-			tb, ib := iterFields(f(t2))
-			for ; ta != nil && tb != nil; ta, tb = ia.Next(), ib.Next() {
-				if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, cmpTags, assumedEqual) {
-					return false
-				}
-			}
-			if ta != nil || tb != nil {
-				return false
-			}
-		}
-		return true
-
-	case TARRAY:
-		if t1.NumElem() != t2.NumElem() {
-			return false
-		}
-
-	case TCHAN:
-		if t1.ChanDir() != t2.ChanDir() {
-			return false
-		}
-
-	case TMAP:
-		if !eqtype1(t1.Key(), t2.Key(), cmpTags, assumedEqual) {
-			return false
-		}
-		return eqtype1(t1.Val(), t2.Val(), cmpTags, assumedEqual)
-	}
-
-	return eqtype1(t1.Elem(), t2.Elem(), cmpTags, assumedEqual)
-}
-
-// Are t1 and t2 equal struct types when field names are ignored?
-// For deciding whether the result struct from g can be copied
-// directly when compiling f(g()).
-func eqtypenoname(t1 *Type, t2 *Type) bool {
-	if t1 == nil || t2 == nil || !t1.IsStruct() || !t2.IsStruct() {
-		return false
-	}
-
-	f1, i1 := iterFields(t1)
-	f2, i2 := iterFields(t2)
-	for {
-		if !eqtype(f1.Type, f2.Type) {
-			return false
-		}
-		if f1 == nil {
-			return true
-		}
-		f1 = i1.Next()
-		f2 = i2.Next()
-	}
-}
-
-// Is type src assignment compatible to type dst?
-// If so, return op code to use in conversion.
-// If not, return 0.
-func assignop(src *Type, dst *Type, why *string) Op {
-	if why != nil {
-		*why = ""
-	}
-
-	// TODO(rsc,lvd): This behaves poorly in the presence of inlining.
-	// https://golang.org/issue/2795
-	if safemode && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR {
-		yyerror("cannot use unsafe.Pointer")
-		errorexit()
-	}
-
-	if src == dst {
-		return OCONVNOP
-	}
-	if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
-		return 0
-	}
-
-	// 1. src type is identical to dst.
-	if eqtype(src, dst) {
-		return OCONVNOP
-	}
-
-	// 2. src and dst have identical underlying types
-	// and either src or dst is not a named type or
-	// both are empty interface types.
-	// For assignable but different non-empty interface types,
-	// we want to recompute the itab.
-	if eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || src.IsEmptyInterface()) {
-		return OCONVNOP
-	}
-
-	// 3. dst is an interface type and src implements dst.
-	if dst.IsInterface() && src.Etype != TNIL {
-		var missing, have *Field
-		var ptr int
-		if implements(src, dst, &missing, &have, &ptr) {
-			return OCONVIFACE
-		}
-
-		// we'll have complained about this method anyway, suppress spurious messages.
-		if have != nil && have.Sym == missing.Sym && (have.Type.Broke || missing.Type.Broke) {
-			return OCONVIFACE
-		}
-
-		if why != nil {
-			if isptrto(src, TINTER) {
-				*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
-			} else if have != nil && have.Sym == missing.Sym && have.Nointerface {
-				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
-			} else if have != nil && have.Sym == missing.Sym {
-				*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
-					"\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-			} else if ptr != 0 {
-				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
-			} else if have != nil {
-				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
-					"\t\thave %v%0S\n\t\twant %v%0S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-			} else {
-				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
-			}
-		}
-
-		return 0
-	}
-
-	if isptrto(dst, TINTER) {
-		if why != nil {
-			*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
-		}
-		return 0
-	}
-
-	if src.IsInterface() && dst.Etype != TBLANK {
-		var missing, have *Field
-		var ptr int
-		if why != nil && implements(dst, src, &missing, &have, &ptr) {
-			*why = ": need type assertion"
-		}
-		return 0
-	}
-
-	// 4. src is a bidirectional channel value, dst is a channel type,
-	// src and dst have identical element types, and
-	// either src or dst is not a named type.
-	if src.IsChan() && src.ChanDir() == Cboth && dst.IsChan() {
-		if eqtype(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
-			return OCONVNOP
-		}
-	}
-
-	// 5. src is the predeclared identifier nil and dst is a nillable type.
-	if src.Etype == TNIL {
-		switch dst.Etype {
-		case TPTR32,
-			TPTR64,
-			TFUNC,
-			TMAP,
-			TCHAN,
-			TINTER,
-			TSLICE:
-			return OCONVNOP
-		}
-	}
-
-	// 6. rule about untyped constants - already converted by defaultlit.
-
-	// 7. Any typed value can be assigned to the blank identifier.
-	if dst.Etype == TBLANK {
-		return OCONVNOP
-	}
-
-	return 0
-}
-
-// Can we convert a value of type src to a value of type dst?
-// If so, return op code to use in conversion (maybe OCONVNOP).
-// If not, return 0.
-func convertop(src *Type, dst *Type, why *string) Op {
-	if why != nil {
-		*why = ""
-	}
-
-	if src == dst {
-		return OCONVNOP
-	}
-	if src == nil || dst == nil {
-		return 0
-	}
-
-	// Conversions from regular to go:notinheap are not allowed
-	// (unless it's unsafe.Pointer). This is a runtime-specific
-	// rule.
-	if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap && !src.Elem().NotInHeap {
-		if why != nil {
-			*why = fmt.Sprintf(":\n\t%v is go:notinheap, but %v is not", dst.Elem(), src.Elem())
-		}
-		return 0
-	}
-
-	// 1. src can be assigned to dst.
-	op := assignop(src, dst, why)
-	if op != 0 {
-		return op
-	}
-
-	// The rules for interfaces are no different in conversions
-	// than assignments. If interfaces are involved, stop now
-	// with the good message from assignop.
-	// Otherwise clear the error.
-	if src.IsInterface() || dst.IsInterface() {
-		return 0
-	}
-	if why != nil {
-		*why = ""
-	}
-
-	// 2. Ignoring struct tags, src and dst have identical underlying types.
-	if eqtypeIgnoreTags(src.Orig, dst.Orig) {
-		return OCONVNOP
-	}
-
-	// 3. src and dst are unnamed pointer types and, ignoring struct tags,
-	// their base types have identical underlying types.
-	if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
-		if eqtypeIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
-			return OCONVNOP
-		}
-	}
-
-	// 4. src and dst are both integer or floating point types.
-	if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
-		if simtype[src.Etype] == simtype[dst.Etype] {
-			return OCONVNOP
-		}
-		return OCONV
-	}
-
-	// 5. src and dst are both complex types.
-	if src.IsComplex() && dst.IsComplex() {
-		if simtype[src.Etype] == simtype[dst.Etype] {
-			return OCONVNOP
-		}
-		return OCONV
-	}
-
-	// 6. src is an integer or has type []byte or []rune
-	// and dst is a string type.
-	if src.IsInteger() && dst.IsString() {
-		return ORUNESTR
-	}
-
-	if src.IsSlice() && dst.IsString() {
-		if src.Elem().Etype == bytetype.Etype {
-			return OARRAYBYTESTR
-		}
-		if src.Elem().Etype == runetype.Etype {
-			return OARRAYRUNESTR
-		}
-	}
-
-	// 7. src is a string and dst is []byte or []rune.
-	// String to slice.
-	if src.IsString() && dst.IsSlice() {
-		if dst.Elem().Etype == bytetype.Etype {
-			return OSTRARRAYBYTE
-		}
-		if dst.Elem().Etype == runetype.Etype {
-			return OSTRARRAYRUNE
-		}
-	}
-
-	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
-	if (src.IsPtr() || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR {
-		return OCONVNOP
-	}
-
-	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
-	if src.Etype == TUNSAFEPTR && (dst.IsPtr() || dst.Etype == TUINTPTR) {
-		return OCONVNOP
-	}
-
-	return 0
-}
-
-func assignconv(n *Node, t *Type, context string) *Node {
-	return assignconvfn(n, t, func() string { return context })
-}
-
-// Convert node n for assignment to type t.
-func assignconvfn(n *Node, t *Type, context func() string) *Node {
-	if n == nil || n.Type == nil || n.Type.Broke {
-		return n
-	}
-
-	if t.Etype == TBLANK && n.Type.Etype == TNIL {
-		yyerror("use of untyped nil")
-	}
-
-	old := n
-	od := old.Diag
-	old.Diag = true // silence errors about n; we'll issue one below
-	n = defaultlit(n, t)
-	old.Diag = od
-	if t.Etype == TBLANK {
-		return n
-	}
-
-	// Convert ideal bool from comparison to plain bool
-	// if the next step is non-bool (like interface{}).
-	if n.Type == idealbool && !t.IsBoolean() {
-		if n.Op == ONAME || n.Op == OLITERAL {
-			r := nod(OCONVNOP, n, nil)
-			r.Type = Types[TBOOL]
-			r.Typecheck = 1
-			r.Implicit = true
-			n = r
-		}
-	}
-
-	if eqtype(n.Type, t) {
-		return n
-	}
-
-	var why string
-	op := assignop(n.Type, t, &why)
-	if op == 0 {
-		yyerror("cannot use %L as type %v in %s%s", n, t, context(), why)
-		op = OCONV
-	}
-
-	r := nod(op, n, nil)
-	r.Type = t
-	r.Typecheck = 1
-	r.Implicit = true
-	r.Orig = n.Orig
-	return r
-}
-
-// IsMethod reports whether n is a method.
-// n must be a function or a method.
-func (n *Node) IsMethod() bool {
-	return n.Type.Recv() != nil
-}
-
-// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
-// n must be a slice expression. max is nil if n is a simple slice expression.
-func (n *Node) SliceBounds() (low, high, max *Node) {
-	if n.List.Len() == 0 {
-		return nil, nil, nil
-	}
-
-	switch n.Op {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		s := n.List.Slice()
-		return s[0], s[1], nil
-	case OSLICE3, OSLICE3ARR:
-		s := n.List.Slice()
-		return s[0], s[1], s[2]
-	}
-	Fatalf("SliceBounds op %v: %v", n.Op, n)
-	return nil, nil, nil
-}
-
-// SetSliceBounds sets n's slice bounds, where n is a slice expression.
-// n must be a slice expression. If max is non-nil, n must be a full slice expression.
-func (n *Node) SetSliceBounds(low, high, max *Node) {
-	switch n.Op {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		if max != nil {
-			Fatalf("SetSliceBounds %v given three bounds", n.Op)
-		}
-		s := n.List.Slice()
-		if s == nil {
-			if low == nil && high == nil {
-				return
-			}
-			n.List.Set([]*Node{low, high})
-			return
-		}
-		s[0] = low
-		s[1] = high
-		return
-	case OSLICE3, OSLICE3ARR:
-		s := n.List.Slice()
-		if s == nil {
-			if low == nil && high == nil && max == nil {
-				return
-			}
-			n.List.Set([]*Node{low, high, max})
-			return
-		}
-		s[0] = low
-		s[1] = high
-		s[2] = max
-		return
-	}
-	Fatalf("SetSliceBounds op %v: %v", n.Op, n)
-}
-
-// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
-// o must be a slicing op.
-func (o Op) IsSlice3() bool {
-	switch o {
-	case OSLICE, OSLICEARR, OSLICESTR:
-		return false
-	case OSLICE3, OSLICE3ARR:
-		return true
-	}
-	Fatalf("IsSlice3 op %v", o)
-	return false
-}
-
-func syslook(name string) *Node {
-	s := Pkglookup(name, Runtimepkg)
-	if s == nil || s.Def == nil {
-		Fatalf("syslook: can't find runtime.%s", name)
-	}
-	return s.Def
-}
-
-// typehash computes a hash value for type t to use in type switch
-// statements.
-func typehash(t *Type) uint32 {
-	// t.tconv(FmtLeft | FmtUnsigned) already contains all the necessary logic
-	// to generate a representation that completely describes the type, so using
-	// it here avoids duplicating that code.
-	// See the comments in exprSwitch.checkDupCases.
-	p := t.tconv(FmtLeft | FmtUnsigned)
-
-	// Using MD5 is overkill, but reduces accidental collisions.
-	h := md5.Sum([]byte(p))
-	return binary.LittleEndian.Uint32(h[:4])
-}
-
-// ptrto returns the Type *t.
-// The returned struct must not be modified.
-func ptrto(t *Type) *Type {
-	if Tptr == 0 {
-		Fatalf("ptrto: no tptr")
-	}
-	if t == nil {
-		Fatalf("ptrto: nil ptr")
-	}
-	return typPtr(t)
-}
-
-func frame(context int) {
-	if context != 0 {
-		fmt.Printf("--- external frame ---\n")
-		for _, n := range externdcl {
-			printframenode(n)
-		}
-		return
-	}
-
-	if Curfn != nil {
-		fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym)
-		for _, ln := range Curfn.Func.Dcl {
-			printframenode(ln)
-		}
-	}
-}
-
-func printframenode(n *Node) {
-	w := int64(-1)
-	if n.Type != nil {
-		w = n.Type.Width
-	}
-	switch n.Op {
-	case ONAME:
-		fmt.Printf("%v %v G%d %v width=%d\n", n.Op, n.Sym, n.Name.Vargen, n.Type, w)
-	case OTYPE:
-		fmt.Printf("%v %v width=%d\n", n.Op, n.Type, w)
-	}
-}
-
-// calculate sethi/ullman number
-// roughly how many registers needed to
-// compile a node. used to compile the
-// hardest side first to minimize registers.
-func ullmancalc(n *Node) {
-	if n == nil {
-		return
-	}
-
-	var ul int
-	var ur int
-	if n.Ninit.Len() != 0 {
-		ul = UINF
-		goto out
-	}
-
-	switch n.Op {
-	case OLITERAL, ONAME:
-		ul = 1
-		if n.Class == PAUTOHEAP {
-			ul++
-		}
-		goto out
-
-	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OASWB:
-		ul = UINF
-		goto out
-
-		// hard with instrumented code
-	case OANDAND, OOROR:
-		if instrumenting {
-			ul = UINF
-			goto out
-		}
-	case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
-		OIND, ODOTPTR, ODOTTYPE, ODIV, OMOD:
-		// These ops might panic, make sure they are done
-		// before we start marshaling args for a call. See issue 16760.
-		ul = UINF
-		goto out
-	}
-
-	ul = 1
-	if n.Left != nil {
-		ul = int(n.Left.Ullman)
-	}
-	ur = 1
-	if n.Right != nil {
-		ur = int(n.Right.Ullman)
-	}
-	if ul == ur {
-		ul += 1
-	}
-	if ur > ul {
-		ul = ur
-	}
-
-out:
-	if ul > 200 {
-		ul = 200 // clamp to uchar with room to grow
-	}
-	n.Ullman = uint8(ul)
-}
-
-func badtype(op Op, tl *Type, tr *Type) {
-	fmt_ := ""
-	if tl != nil {
-		fmt_ += fmt.Sprintf("\n\t%v", tl)
-	}
-	if tr != nil {
-		fmt_ += fmt.Sprintf("\n\t%v", tr)
-	}
-
-	// common mistake: *struct and *interface.
-	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
-		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
-			fmt_ += "\n\t(*struct vs *interface)"
-		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
-			fmt_ += "\n\t(*interface vs *struct)"
-		}
-	}
-
-	s := fmt_
-	yyerror("illegal types for operand: %v%s", op, s)
-}
-
-// brcom returns !(op).
-// For example, brcom(==) is !=.
-func brcom(op Op) Op {
-	switch op {
-	case OEQ:
-		return ONE
-	case ONE:
-		return OEQ
-	case OLT:
-		return OGE
-	case OGT:
-		return OLE
-	case OLE:
-		return OGT
-	case OGE:
-		return OLT
-	}
-	Fatalf("brcom: no com for %v\n", op)
-	return op
-}
-
-// brrev returns reverse(op).
-// For example, Brrev(<) is >.
-func brrev(op Op) Op {
-	switch op {
-	case OEQ:
-		return OEQ
-	case ONE:
-		return ONE
-	case OLT:
-		return OGT
-	case OGT:
-		return OLT
-	case OLE:
-		return OGE
-	case OGE:
-		return OLE
-	}
-	Fatalf("brrev: no rev for %v\n", op)
-	return op
-}
-
-// return side effect-free n, appending side effects to init.
-// result is assignable if n is.
-func safeexpr(n *Node, init *Nodes) *Node {
-	if n == nil {
-		return nil
-	}
-
-	if n.Ninit.Len() != 0 {
-		walkstmtlist(n.Ninit.Slice())
-		init.AppendNodes(&n.Ninit)
-	}
-
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-
-	case ODOT, OLEN, OCAP:
-		l := safeexpr(n.Left, init)
-		if l == n.Left {
-			return n
-		}
-		r := nod(OXXX, nil, nil)
-		*r = *n
-		r.Left = l
-		r = typecheck(r, Erv)
-		r = walkexpr(r, init)
-		return r
-
-	case ODOTPTR, OIND:
-		l := safeexpr(n.Left, init)
-		if l == n.Left {
-			return n
-		}
-		a := nod(OXXX, nil, nil)
-		*a = *n
-		a.Left = l
-		a = walkexpr(a, init)
-		return a
-
-	case OINDEX, OINDEXMAP:
-		l := safeexpr(n.Left, init)
-		r := safeexpr(n.Right, init)
-		if l == n.Left && r == n.Right {
-			return n
-		}
-		a := nod(OXXX, nil, nil)
-		*a = *n
-		a.Left = l
-		a.Right = r
-		a = walkexpr(a, init)
-		return a
-
-	case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
-		if isStaticCompositeLiteral(n) {
-			return n
-		}
-	}
-
-	// make a copy; must not be used as an lvalue
-	if islvalue(n) {
-		Fatalf("missing lvalue case in safeexpr: %v", n)
-	}
-	return cheapexpr(n, init)
-}
-
-func copyexpr(n *Node, t *Type, init *Nodes) *Node {
-	l := temp(t)
-	a := nod(OAS, l, n)
-	a = typecheck(a, Etop)
-	a = walkexpr(a, init)
-	init.Append(a)
-	return l
-}
-
-// return side-effect free and cheap n, appending side effects to init.
-// result may not be assignable.
-func cheapexpr(n *Node, init *Nodes) *Node {
-	switch n.Op {
-	case ONAME, OLITERAL:
-		return n
-	}
-
-	return copyexpr(n, n.Type, init)
-}
-
-// Code to resolve elided DOTs in embedded types.
-
-// A Dlist stores a pointer to a TFIELD Type embedded within
-// a TSTRUCT or TINTER Type.
-type Dlist struct {
-	field *Field
-}
-
-// dotlist is used by adddot1 to record the path of embedded fields
-// used to access a target field or method.
-// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
-var dotlist = make([]Dlist, 10)
-
-// lookdot0 returns the number of fields or methods named s associated
-// with Type t. If exactly one exists, it will be returned in *save
-// (if save is not nil).
-func lookdot0(s *Sym, t *Type, save **Field, ignorecase bool) int {
-	u := t
-	if u.IsPtr() {
-		u = u.Elem()
-	}
-
-	c := 0
-	if u.IsStruct() || u.IsInterface() {
-		for _, f := range u.Fields().Slice() {
-			if f.Sym == s || (ignorecase && f.Type.Etype == TFUNC && f.Type.Recv() != nil && strings.EqualFold(f.Sym.Name, s.Name)) {
-				if save != nil {
-					*save = f
-				}
-				c++
-			}
-		}
-	}
-
-	u = methtype(t)
-	if u != nil {
-		for _, f := range u.Methods().Slice() {
-			if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
-				if save != nil {
-					*save = f
-				}
-				c++
-			}
-		}
-	}
-
-	return c
-}
-
-// adddot1 returns the number of fields or methods named s at depth d in Type t.
-// If exactly one exists, it will be returned in *save (if save is not nil),
-// and dotlist will contain the path of embedded fields traversed to find it,
-// in reverse order. If none exist, more will indicate whether t contains any
-// embedded fields at depth d, so callers can decide whether to retry at
-// a greater depth.
-func adddot1(s *Sym, t *Type, d int, save **Field, ignorecase bool) (c int, more bool) {
-	if t.Trecur != 0 {
-		return
-	}
-	t.Trecur = 1
-
-	var u *Type
-	d--
-	if d < 0 {
-		// We've reached our target depth. If t has any fields/methods
-		// named s, then we're done. Otherwise, we still need to check
-		// below for embedded fields.
-		c = lookdot0(s, t, save, ignorecase)
-		if c != 0 {
-			goto out
-		}
-	}
-
-	u = t
-	if u.IsPtr() {
-		u = u.Elem()
-	}
-	if !u.IsStruct() && !u.IsInterface() {
-		goto out
-	}
-
-	for _, f := range u.Fields().Slice() {
-		if f.Embedded == 0 || f.Sym == nil {
-			continue
-		}
-		if d < 0 {
-			// Found an embedded field at target depth.
-			more = true
-			goto out
-		}
-		a, more1 := adddot1(s, f.Type, d, save, ignorecase)
-		if a != 0 && c == 0 {
-			dotlist[d].field = f
-		}
-		c += a
-		if more1 {
-			more = true
-		}
-	}
-
-out:
-	t.Trecur = 0
-	return c, more
-}
-
-// dotpath computes the unique shortest explicit selector path to fully qualify
-// a selection expression x.f, where x is of type t and f is the symbol s.
-// If no such path exists, dotpath returns nil.
-// If there are multiple shortest paths to the same depth, ambig is true.
-func dotpath(s *Sym, t *Type, save **Field, ignorecase bool) (path []Dlist, ambig bool) {
-	// The embedding of types within structs imposes a tree structure onto
-	// types: structs parent the types they embed, and types parent their
-	// fields or methods. Our goal here is to find the shortest path to
-	// a field or method named s in the subtree rooted at t. To accomplish
-	// that, we iteratively perform depth-first searches of increasing depth
-	// until we either find the named field/method or exhaust the tree.
-	for d := 0; ; d++ {
-		if d > len(dotlist) {
-			dotlist = append(dotlist, Dlist{})
-		}
-		if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
-			return dotlist[:d], false
-		} else if c > 1 {
-			return nil, true
-		} else if !more {
-			return nil, false
-		}
-	}
-}
-
-// in T.field
-// find missing fields that
-// will give shortest unique addressing.
-// modify the tree with missing type names.
-func adddot(n *Node) *Node {
-	n.Left = typecheck(n.Left, Etype|Erv)
-	if n.Left.Diag {
-		n.Diag = true
-	}
-	t := n.Left.Type
-	if t == nil {
-		return n
-	}
-
-	if n.Left.Op == OTYPE {
-		return n
-	}
-
-	s := n.Sym
-	if s == nil {
-		return n
-	}
-
-	switch path, ambig := dotpath(s, t, nil, false); {
-	case path != nil:
-		// rebuild elided dots
-		for c := len(path) - 1; c >= 0; c-- {
-			n.Left = nodSym(ODOT, n.Left, path[c].field.Sym)
-			n.Left.Implicit = true
-		}
-	case ambig:
-		yyerror("ambiguous selector %v", n)
-		n.Left = nil
-	}
-
-	return n
-}
-
-// code to help generate trampoline
-// functions for methods on embedded
-// subtypes.
-// these are approx the same as
-// the corresponding adddot routines
-// except that they expect to be called
-// with unique tasks and they return
-// the actual methods.
-type Symlink struct {
-	field     *Field
-	followptr bool
-}
-
-var slist []Symlink
-
-func expand0(t *Type, followptr bool) {
-	u := t
-	if u.IsPtr() {
-		followptr = true
-		u = u.Elem()
-	}
-
-	if u.IsInterface() {
-		for _, f := range u.Fields().Slice() {
-			if f.Sym.Flags&SymUniq != 0 {
-				continue
-			}
-			f.Sym.Flags |= SymUniq
-			slist = append(slist, Symlink{field: f, followptr: followptr})
-		}
-
-		return
-	}
-
-	u = methtype(t)
-	if u != nil {
-		for _, f := range u.Methods().Slice() {
-			if f.Sym.Flags&SymUniq != 0 {
-				continue
-			}
-			f.Sym.Flags |= SymUniq
-			slist = append(slist, Symlink{field: f, followptr: followptr})
-		}
-	}
-}
-
-func expand1(t *Type, top, followptr bool) {
-	if t.Trecur != 0 {
-		return
-	}
-	t.Trecur = 1
-
-	if !top {
-		expand0(t, followptr)
-	}
-
-	u := t
-	if u.IsPtr() {
-		followptr = true
-		u = u.Elem()
-	}
-
-	if !u.IsStruct() && !u.IsInterface() {
-		goto out
-	}
-
-	for _, f := range u.Fields().Slice() {
-		if f.Embedded == 0 {
-			continue
-		}
-		if f.Sym == nil {
-			continue
-		}
-		expand1(f.Type, false, followptr)
-	}
-
-out:
-	t.Trecur = 0
-}
-
-func expandmeth(t *Type) {
-	if t == nil || t.AllMethods().Len() != 0 {
-		return
-	}
-
-	// mark top-level method symbols
-	// so that expand1 doesn't consider them.
-	for _, f := range t.Methods().Slice() {
-		f.Sym.Flags |= SymUniq
-	}
-
-	// generate all reachable methods
-	slist = slist[:0]
-	expand1(t, true, false)
-
-	// check each method to be uniquely reachable
-	var ms []*Field
-	for i, sl := range slist {
-		slist[i].field = nil
-		sl.field.Sym.Flags &^= SymUniq
-
-		var f *Field
-		if path, _ := dotpath(sl.field.Sym, t, &f, false); path == nil {
-			continue
-		}
-
-		// dotpath may have dug out arbitrary fields, we only want methods.
-		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
-			continue
-		}
-
-		// add it to the base type method list
-		f = f.Copy()
-		f.Embedded = 1 // needs a trampoline
-		if sl.followptr {
-			f.Embedded = 2
-		}
-		ms = append(ms, f)
-	}
-
-	for _, f := range t.Methods().Slice() {
-		f.Sym.Flags &^= SymUniq
-	}
-
-	ms = append(ms, t.Methods().Slice()...)
-	t.AllMethods().Set(ms)
-}
-
-// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl *Type, mustname bool) []*Node {
-	var args []*Node
-	gen := 0
-	for _, t := range tl.Fields().Slice() {
-		var n *Node
-		if mustname && (t.Sym == nil || t.Sym.Name == "_") {
-			// invent a name so that we can refer to it in the trampoline
-			buf := fmt.Sprintf(".anon%d", gen)
-			gen++
-			n = newname(lookup(buf))
-		} else if t.Sym != nil {
-			n = newname(t.Sym)
-		}
-		a := nod(ODCLFIELD, n, typenod(t.Type))
-		a.Isddd = t.Isddd
-		if n != nil {
-			n.Isddd = t.Isddd
-		}
-		args = append(args, a)
-	}
-
-	return args
-}
-
-// Generate a wrapper function to convert from
-// a receiver of type T to a receiver of type U.
-// That is,
-//
-//	func (t T) M() {
-//		...
-//	}
-//
-// already exists; this function generates
-//
-//	func (u U) M() {
-//		u.M()
-//	}
-//
-// where the types T and U are such that u.M() is valid
-// and calls the T.M method.
-// The resulting function is for use in method tables.
-//
-//	rcvr - U
-//	method - M func (t T)(), a TFIELD type struct
-//	newnam - the eventual mangled name of this function
-
-var genwrapper_linehistdone int = 0
-
-func genwrapper(rcvr *Type, method *Field, newnam *Sym, iface int) {
-	if false && Debug['r'] != 0 {
-		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
-	}
-
-	lexlineno++
-	lineno = lexlineno
-	if genwrapper_linehistdone == 0 {
-		// All the wrappers can share the same linehist entry.
-		linehistpush("<autogenerated>")
-
-		genwrapper_linehistdone = 1
-	}
-
-	dclcontext = PEXTERN
-	markdcl()
-
-	this := nod(ODCLFIELD, newname(lookup(".this")), typenod(rcvr))
-	this.Left.Name.Param.Ntype = this.Right
-	in := structargs(method.Type.Params(), true)
-	out := structargs(method.Type.Results(), false)
-
-	t := nod(OTFUNC, nil, nil)
-	l := []*Node{this}
-	if iface != 0 && rcvr.Width < Types[Tptr].Width {
-		// Building method for interface table and receiver
-		// is smaller than the single pointer-sized word
-		// that the interface call will pass in.
-		// Add a dummy padding argument after the
-		// receiver to make up the difference.
-		tpad := typArray(Types[TUINT8], Types[Tptr].Width-rcvr.Width)
-		pad := nod(ODCLFIELD, newname(lookup(".pad")), typenod(tpad))
-		l = append(l, pad)
-	}
-
-	t.List.Set(append(l, in...))
-	t.Rlist.Set(out)
-
-	fn := nod(ODCLFUNC, nil, nil)
-	fn.Func.Nname = newname(newnam)
-	fn.Func.Nname.Name.Defn = fn
-	fn.Func.Nname.Name.Param.Ntype = t
-	declare(fn.Func.Nname, PFUNC)
-	funchdr(fn)
-
-	// arg list
-	var args []*Node
-
-	isddd := false
-	for _, n := range in {
-		args = append(args, n.Left)
-		isddd = n.Left.Isddd
-	}
-
-	methodrcvr := method.Type.Recv().Type
-
-	// generate nil pointer check for better error
-	if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
-		// generating wrapper from *T to T.
-		n := nod(OIF, nil, nil)
-
-		n.Left = nod(OEQ, this.Left, nodnil())
-
-		// these strings are already in the reflect tables,
-		// so no space cost to use them here.
-		var l []*Node
-
-		var v Val
-		v.U = rcvr.Elem().Sym.Pkg.Name // package name
-		l = append(l, nodlit(v))
-		v.U = rcvr.Elem().Sym.Name // type name
-		l = append(l, nodlit(v))
-		v.U = method.Sym.Name
-		l = append(l, nodlit(v)) // method name
-		call := nod(OCALL, syslook("panicwrap"), nil)
-		call.List.Set(l)
-		n.Nbody.Set1(call)
-		fn.Nbody.Append(n)
-	}
-
-	dot := adddot(nodSym(OXDOT, this.Left, method.Sym))
-
-	// generate call
-	// It's not possible to use a tail call when dynamic linking on ppc64le. The
-	// bad scenario is when a local call is made to the wrapper: the wrapper will
-	// call the implementation, which might be in a different module and so set
-	// the TOC to the appropriate value for that module. But if it returns
-	// directly to the wrapper's caller, nothing will reset it to the correct
-	// value for that function.
-	if !instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !isifacemethod(method.Type) && !(Thearch.LinkArch.Name == "ppc64le" && Ctxt.Flag_dynlink) {
-		// generate tail call: adjust pointer receiver and jump to embedded method.
-		dot = dot.Left // skip final .M
-		// TODO(mdempsky): Remove dependency on dotlist.
-		if !dotlist[0].field.Type.IsPtr() {
-			dot = nod(OADDR, dot, nil)
-		}
-		as := nod(OAS, this.Left, nod(OCONVNOP, dot, nil))
-		as.Right.Type = rcvr
-		fn.Nbody.Append(as)
-		n := nod(ORETJMP, nil, nil)
-		n.Left = newname(methodsym(method.Sym, methodrcvr, 0))
-		fn.Nbody.Append(n)
-		// When tail-calling, we can't use a frame pointer.
-		fn.Func.NoFramePointer = true
-	} else {
-		fn.Func.Wrapper = true // ignore frame for panic+recover matching
-		call := nod(OCALL, dot, nil)
-		call.List.Set(args)
-		call.Isddd = isddd
-		if method.Type.Results().NumFields() > 0 {
-			n := nod(ORETURN, nil, nil)
-			n.List.Set1(call)
-			call = n
-		}
-
-		fn.Nbody.Append(call)
-	}
-
-	if false && Debug['r'] != 0 {
-		dumplist("genwrapper body", fn.Nbody)
-	}
-
-	funcbody(fn)
-	Curfn = fn
-	popdcl()
-	if debug_dclstack != 0 {
-		testdclstack()
-	}
-
-	// wrappers where T is anonymous (struct or interface) can be duplicated.
-	if rcvr.IsStruct() || rcvr.IsInterface() || rcvr.IsPtr() && rcvr.Elem().IsStruct() {
-		fn.Func.Dupok = true
-	}
-	fn = typecheck(fn, Etop)
-	typecheckslice(fn.Nbody.Slice(), Etop)
-
-	inlcalls(fn)
-	escAnalyze([]*Node{fn}, false)
-
-	Curfn = nil
-	funccompile(fn)
-}
-
-func hashmem(t *Type) *Node {
-	sym := Pkglookup("memhash", Runtimepkg)
-
-	n := newname(sym)
-	n.Class = PFUNC
-	tfn := nod(OTFUNC, nil, nil)
-	tfn.List.Append(nod(ODCLFIELD, nil, typenod(ptrto(t))))
-	tfn.List.Append(nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
-	tfn.List.Append(nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
-	tfn.Rlist.Append(nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
-	tfn = typecheck(tfn, Etype)
-	n.Type = tfn.Type
-	return n
-}
-
-func ifacelookdot(s *Sym, t *Type, followptr *bool, ignorecase bool) *Field {
-	*followptr = false
-
-	if t == nil {
-		return nil
-	}
-
-	var m *Field
-	path, ambig := dotpath(s, t, &m, ignorecase)
-	if path == nil {
-		if ambig {
-			yyerror("%v.%v is ambiguous", t, s)
-		}
-		return nil
-	}
-
-	for _, d := range path {
-		if d.field.Type.IsPtr() {
-			*followptr = true
-			break
-		}
-	}
-
-	if m.Type.Etype != TFUNC || m.Type.Recv() == nil {
-		yyerror("%v.%v is a field, not a method", t, s)
-		return nil
-	}
-
-	return m
-}
-
-func implements(t, iface *Type, m, samename **Field, ptr *int) bool {
-	t0 := t
-	if t == nil {
-		return false
-	}
-
-	// if this is too slow,
-	// could sort these first
-	// and then do one loop.
-
-	if t.IsInterface() {
-		for _, im := range iface.Fields().Slice() {
-			for _, tm := range t.Fields().Slice() {
-				if tm.Sym == im.Sym {
-					if eqtype(tm.Type, im.Type) {
-						goto found
-					}
-					*m = im
-					*samename = tm
-					*ptr = 0
-					return false
-				}
-			}
-
-			*m = im
-			*samename = nil
-			*ptr = 0
-			return false
-		found:
-		}
-
-		return true
-	}
-
-	t = methtype(t)
-	if t != nil {
-		expandmeth(t)
-	}
-	for _, im := range iface.Fields().Slice() {
-		if im.Broke {
-			continue
-		}
-		var followptr bool
-		tm := ifacelookdot(im.Sym, t, &followptr, false)
-		if tm == nil || tm.Nointerface || !eqtype(tm.Type, im.Type) {
-			if tm == nil {
-				tm = ifacelookdot(im.Sym, t, &followptr, true)
-			}
-			*m = im
-			*samename = tm
-			*ptr = 0
-			return false
-		}
-
-		// if pointer receiver in method,
-		// the method does not exist for value types.
-		rcvr := tm.Type.Recv().Type
-
-		if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !isifacemethod(tm.Type) {
-			if false && Debug['r'] != 0 {
-				yyerror("interface pointer mismatch")
-			}
-
-			*m = im
-			*samename = nil
-			*ptr = 1
-			return false
-		}
-	}
-
-	return true
-}
-
-// even simpler simtype; get rid of ptr, bool.
-// assuming that the front end has rejected
-// all the invalid conversions (like ptr -> bool)
-func Simsimtype(t *Type) EType {
-	if t == nil {
-		return 0
-	}
-
-	et := simtype[t.Etype]
-	switch et {
-	case TPTR32:
-		et = TUINT32
-
-	case TPTR64:
-		et = TUINT64
-
-	case TBOOL:
-		et = TUINT8
-	}
-
-	return et
-}
-
-func listtreecopy(l []*Node, lineno int32) []*Node {
-	var out []*Node
-	for _, n := range l {
-		out = append(out, treecopy(n, lineno))
-	}
-	return out
-}
-
-func liststmt(l []*Node) *Node {
-	n := nod(OBLOCK, nil, nil)
-	n.List.Set(l)
-	if len(l) != 0 {
-		n.Lineno = l[0].Lineno
-	}
-	return n
-}
-
-// return the base-2 exponent of the constant
-// operand. -1 if it is not a power of 2.
-// 1000+exponent if it is a -(power of 2).
-func powtwo(n *Node) int {
-	if n == nil || n.Op != OLITERAL || n.Type == nil {
-		return -1
-	}
-	if !n.Type.IsInteger() {
-		return -1
-	}
-
-	v := uint64(n.Int64())
-	b := uint64(1)
-	for i := 0; i < 64; i++ {
-		if b == v {
-			return i
-		}
-		b = b << 1
-	}
-
-	if !n.Type.IsSigned() {
-		return -1
-	}
-
-	v = -v
-	b = 1
-	for i := 0; i < 64; i++ {
-		if b == v {
-			return i + 1000
-		}
-		b = b << 1
-	}
-
-	return -1
-}
-
-func ngotype(n *Node) *Sym {
-	if n.Type != nil {
-		return typenamesym(n.Type)
-	}
-	return nil
-}
-
-// Convert raw string to the prefix that will be used in the symbol
-// table. All control characters, space, '%' and '"', as well as
-// non-7-bit clean bytes turn into %xx. The period needs escaping
-// only in the last segment of the path, and it makes for happier
-// users if we escape that as little as possible.
-//
-// If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
-func pathtoprefix(s string) string {
-	slash := strings.LastIndex(s, "/")
-	for i := 0; i < len(s); i++ {
-		c := s[i]
-		if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
-			var buf bytes.Buffer
-			for i := 0; i < len(s); i++ {
-				c := s[i]
-				if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
-					fmt.Fprintf(&buf, "%%%02x", c)
-					continue
-				}
-				buf.WriteByte(c)
-			}
-			return buf.String()
-		}
-	}
-	return s
-}
-
-var pkgMap = make(map[string]*Pkg)
-var pkgs []*Pkg
-
-func mkpkg(path string) *Pkg {
-	if p := pkgMap[path]; p != nil {
-		return p
-	}
-
-	p := new(Pkg)
-	p.Path = path
-	p.Prefix = pathtoprefix(path)
-	p.Syms = make(map[string]*Sym)
-	pkgMap[path] = p
-	pkgs = append(pkgs, p)
-	return p
-}
-
-// The result of addinit MUST be assigned back to n, e.g.
-// 	n.Left = addinit(n.Left, init)
-func addinit(n *Node, init []*Node) *Node {
-	if len(init) == 0 {
-		return n
-	}
-
-	switch n.Op {
-	// There may be multiple refs to this node;
-	// introduce OCONVNOP to hold init list.
-	case ONAME, OLITERAL:
-		n = nod(OCONVNOP, n, nil)
-		n.Type = n.Left.Type
-		n.Typecheck = 1
-	}
-
-	n.Ninit.Prepend(init...)
-	n.Ullman = UINF
-	return n
-}
-
-var reservedimports = []string{
-	"go",
-	"type",
-}
-
-func isbadimport(path string) bool {
-	if strings.Contains(path, "\x00") {
-		yyerror("import path contains NUL")
-		return true
-	}
-
-	for _, ri := range reservedimports {
-		if path == ri {
-			yyerror("import path %q is reserved and cannot be used", path)
-			return true
-		}
-	}
-
-	for _, r := range path {
-		if r == utf8.RuneError {
-			yyerror("import path contains invalid UTF-8 sequence: %q", path)
-			return true
-		}
-
-		if r < 0x20 || r == 0x7f {
-			yyerror("import path contains control character: %q", path)
-			return true
-		}
-
-		if r == '\\' {
-			yyerror("import path contains backslash; use slash: %q", path)
-			return true
-		}
-
-		if unicode.IsSpace(r) {
-			yyerror("import path contains space character: %q", path)
-			return true
-		}
-
-		if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
-			yyerror("import path contains invalid character '%c': %q", r, path)
-			return true
-		}
-	}
-
-	return false
-}
-
-func checknil(x *Node, init *Nodes) {
-	x = walkexpr(x, nil) // caller has not done this yet
-	if x.Type.IsInterface() {
-		x = nod(OITAB, x, nil)
-		x = typecheck(x, Erv)
-	}
-
-	n := nod(OCHECKNIL, x, nil)
-	n.Typecheck = 1
-	init.Append(n)
-}
-
-// Can this type be stored directly in an interface word?
-// Yes, if the representation is a single pointer.
-func isdirectiface(t *Type) bool {
-	switch t.Etype {
-	case TPTR32,
-		TPTR64,
-		TCHAN,
-		TMAP,
-		TFUNC,
-		TUNSAFEPTR:
-		return true
-
-	case TARRAY:
-		// Array of 1 direct iface type can be direct.
-		return t.NumElem() == 1 && isdirectiface(t.Elem())
-
-	case TSTRUCT:
-		// Struct with 1 field of direct iface type can be direct.
-		return t.NumFields() == 1 && isdirectiface(t.Field(0).Type)
-	}
-
-	return false
-}
-
-// itabType loads the _type field from a runtime.itab struct.
-func itabType(itab *Node) *Node {
-	typ := nodSym(ODOTPTR, itab, nil)
-	typ.Type = ptrto(Types[TUINT8])
-	typ.Typecheck = 1
-	typ.Xoffset = int64(Widthptr) // offset of _type in runtime.itab
-	typ.Bounded = true            // guaranteed not to fault
-	return typ
-}
-
-// ifaceData loads the data field from an interface.
-// The concrete type must be known to have type t.
-// It follows the pointer if !isdirectiface(t).
-func ifaceData(n *Node, t *Type) *Node {
-	ptr := nodSym(OIDATA, n, nil)
-	if isdirectiface(t) {
-		ptr.Type = t
-		ptr.Typecheck = 1
-		return ptr
-	}
-	ptr.Type = ptrto(t)
-	ptr.Bounded = true
-	ptr.Typecheck = 1
-	ind := nod(OIND, ptr, nil)
-	ind.Type = t
-	ind.Typecheck = 1
-	return ind
-}
-
-// iet returns 'T' if t is a concrete type,
-// 'I' if t is an interface type, and 'E' if t is an empty interface type.
-// It is used to build calls to the conv* and assert* runtime routines.
-func (t *Type) iet() byte {
-	if t.IsEmptyInterface() {
-		return 'E'
-	}
-	if t.IsInterface() {
-		return 'I'
-	}
-	return 'T'
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/swt.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/swt.go
deleted file mode 100644
index 384d543..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/swt.go
+++ /dev/null
@@ -1,947 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/swt.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/swt.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import "sort"
-
-const (
-	// expression switch
-	switchKindExpr  = iota // switch a {...} or switch 5 {...}
-	switchKindTrue         // switch true {...} or switch {...}
-	switchKindFalse        // switch false {...}
-)
-
-const (
-	binarySearchMin = 4 // minimum number of cases for binary search
-	integerRangeMin = 2 // minimum size of integer ranges
-)
-
-// An exprSwitch walks an expression switch.
-type exprSwitch struct {
-	exprname *Node // node for the expression being switched on
-	kind     int   // kind of switch statement (switchKind*)
-}
-
-// A typeSwitch walks a type switch.
-type typeSwitch struct {
-	hashname *Node // node for the hash of the type of the variable being switched on
-	facename *Node // node for the concrete type of the variable being switched on
-	okname   *Node // boolean node used for comma-ok type assertions
-}
-
-// A caseClause is a single case clause in a switch statement.
-type caseClause struct {
-	node    *Node  // points at case statement
-	ordinal int    // position in switch
-	hash    uint32 // hash of a type switch
-	// isconst indicates whether this case clause is a constant,
-	// for the purposes of the switch code generation.
-	// For expression switches, that's generally literals (case 5:, not case x:).
-	// For type switches, that's concrete types (case time.Time:), not interfaces (case io.Reader:).
-	isconst bool
-}
-
-// caseClauses are all the case clauses in a switch statement.
-type caseClauses struct {
-	list   []caseClause // general cases
-	defjmp *Node        // OGOTO for default case or OBREAK if no default case present
-	niljmp *Node        // OGOTO for nil type case in a type switch
-}
-
-// typecheckswitch typechecks a switch statement.
-func typecheckswitch(n *Node) {
-	lno := lineno
-	typecheckslice(n.Ninit.Slice(), Etop)
-
-	var nilonly string
-	var top int
-	var t *Type
-
-	if n.Left != nil && n.Left.Op == OTYPESW {
-		// type switch
-		top = Etype
-		n.Left.Right = typecheck(n.Left.Right, Erv)
-		t = n.Left.Right.Type
-		if t != nil && !t.IsInterface() {
-			yyerror("cannot type switch on non-interface value %L", n.Left.Right)
-		}
-	} else {
-		// expression switch
-		top = Erv
-		if n.Left != nil {
-			n.Left = typecheck(n.Left, Erv)
-			n.Left = defaultlit(n.Left, nil)
-			t = n.Left.Type
-		} else {
-			t = Types[TBOOL]
-		}
-		if t != nil {
-			switch {
-			case !okforeq[t.Etype]:
-				yyerror("cannot switch on %L", n.Left)
-			case t.IsSlice():
-				nilonly = "slice"
-			case t.IsArray() && !t.IsComparable():
-				yyerror("cannot switch on %L", n.Left)
-			case t.IsStruct():
-				if f := t.IncomparableField(); f != nil {
-					yyerror("cannot switch on %L (struct containing %v cannot be compared)", n.Left, f.Type)
-				}
-			case t.Etype == TFUNC:
-				nilonly = "func"
-			case t.IsMap():
-				nilonly = "map"
-			}
-		}
-	}
-
-	n.Type = t
-
-	var def, niltype *Node
-	for _, ncase := range n.List.Slice() {
-		setlineno(n)
-		if ncase.List.Len() == 0 {
-			// default
-			if def != nil {
-				setlineno(ncase)
-				yyerror("multiple defaults in switch (first at %v)", def.Line())
-			} else {
-				def = ncase
-			}
-		} else {
-			ls := ncase.List.Slice()
-			for i1, n1 := range ls {
-				setlineno(n1)
-				ls[i1] = typecheck(ls[i1], Erv|Etype)
-				n1 = ls[i1]
-				if n1.Type == nil || t == nil {
-					continue
-				}
-				setlineno(ncase)
-				switch top {
-				// expression switch
-				case Erv:
-					ls[i1] = defaultlit(ls[i1], t)
-					n1 = ls[i1]
-					switch {
-					case n1.Op == OTYPE:
-						yyerror("type %v is not an expression", n1.Type)
-					case n1.Type != nil && assignop(n1.Type, t, nil) == 0 && assignop(t, n1.Type, nil) == 0:
-						if n.Left != nil {
-							yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left, n1.Type, t)
-						} else {
-							yyerror("invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type)
-						}
-					case nilonly != "" && !isnil(n1):
-						yyerror("invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left)
-					case t.IsInterface() && !n1.Type.IsInterface() && !n1.Type.IsComparable():
-						yyerror("invalid case %L in switch (incomparable type)", n1)
-					}
-
-				// type switch
-				case Etype:
-					var missing, have *Field
-					var ptr int
-					switch {
-					case n1.Op == OLITERAL && n1.Type.IsKind(TNIL):
-						// case nil:
-						if niltype != nil {
-							yyerror("multiple nil cases in type switch (first at %v)", niltype.Line())
-						} else {
-							niltype = ncase
-						}
-					case n1.Op != OTYPE && n1.Type != nil: // should this be ||?
-						yyerror("%L is not a type", n1)
-						// reset to original type
-						n1 = n.Left.Right
-						ls[i1] = n1
-					case !n1.Type.IsInterface() && t.IsInterface() && !implements(n1.Type, t, &missing, &have, &ptr):
-						if have != nil && !missing.Broke && !have.Broke {
-							yyerror("impossible type switch case: %L cannot have dynamic type %v"+
-								" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", n.Left.Right, n1.Type, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-						} else if !missing.Broke {
-							yyerror("impossible type switch case: %L cannot have dynamic type %v"+
-								" (missing %v method)", n.Left.Right, n1.Type, missing.Sym)
-						}
-					}
-				}
-			}
-		}
-
-		if top == Etype && n.Type != nil {
-			ll := ncase.List
-			if ncase.Rlist.Len() != 0 {
-				nvar := ncase.Rlist.First()
-				if ll.Len() == 1 && ll.First().Type != nil && !ll.First().Type.IsKind(TNIL) {
-					// single entry type switch
-					nvar.Name.Param.Ntype = typenod(ll.First().Type)
-				} else {
-					// multiple entry type switch or default
-					nvar.Name.Param.Ntype = typenod(n.Type)
-				}
-
-				nvar = typecheck(nvar, Erv|Easgn)
-				ncase.Rlist.SetIndex(0, nvar)
-			}
-		}
-
-		typecheckslice(ncase.Nbody.Slice(), Etop)
-	}
-
-	lineno = lno
-}
-
-// walkswitch walks a switch statement.
-func walkswitch(sw *Node) {
-	// convert switch {...} to switch true {...}
-	if sw.Left == nil {
-		sw.Left = nodbool(true)
-		sw.Left = typecheck(sw.Left, Erv)
-	}
-
-	if sw.Left.Op == OTYPESW {
-		var s typeSwitch
-		s.walk(sw)
-	} else {
-		var s exprSwitch
-		s.walk(sw)
-	}
-}
-
-// walk generates an AST implementing sw.
-// sw is an expression switch.
-// The AST is generally of the form of a linear
-// search using if..goto, although binary search
-// is used with long runs of constants.
-func (s *exprSwitch) walk(sw *Node) {
-	casebody(sw, nil)
-
-	cond := sw.Left
-	sw.Left = nil
-
-	s.kind = switchKindExpr
-	if Isconst(cond, CTBOOL) {
-		s.kind = switchKindTrue
-		if !cond.Val().U.(bool) {
-			s.kind = switchKindFalse
-		}
-	}
-
-	cond = walkexpr(cond, &sw.Ninit)
-	t := sw.Type
-	if t == nil {
-		return
-	}
-
-	// convert the switch into OIF statements
-	var cas []*Node
-	if s.kind == switchKindTrue || s.kind == switchKindFalse {
-		s.exprname = nodbool(s.kind == switchKindTrue)
-	} else if consttype(cond) >= 0 {
-		// leave constants to enable dead code elimination (issue 9608)
-		s.exprname = cond
-	} else {
-		s.exprname = temp(cond.Type)
-		cas = []*Node{nod(OAS, s.exprname, cond)}
-		typecheckslice(cas, Etop)
-	}
-
-	// Enumerate the cases and prepare the default case.
-	clauses := s.genCaseClauses(sw.List.Slice())
-	sw.List.Set(nil)
-	cc := clauses.list
-
-	// handle the cases in order
-	for len(cc) > 0 {
-		// deal with expressions one at a time
-		if !okforcmp[t.Etype] || !cc[0].isconst {
-			a := s.walkCases(cc[:1])
-			cas = append(cas, a)
-			cc = cc[1:]
-			continue
-		}
-
-		// do binary search on runs of constants
-		var run int
-		for run = 1; run < len(cc) && cc[run].isconst; run++ {
-		}
-
-		// sort and compile constants
-		sort.Sort(caseClauseByConstVal(cc[:run]))
-		a := s.walkCases(cc[:run])
-		cas = append(cas, a)
-		cc = cc[run:]
-	}
-
-	// handle default case
-	if nerrors == 0 {
-		cas = append(cas, clauses.defjmp)
-		sw.Nbody.Prepend(cas...)
-		walkstmtlist(sw.Nbody.Slice())
-	}
-}
-
-// walkCases generates an AST implementing the cases in cc.
-func (s *exprSwitch) walkCases(cc []caseClause) *Node {
-	if len(cc) < binarySearchMin {
-		// linear search
-		var cas []*Node
-		for _, c := range cc {
-			n := c.node
-			lno := setlineno(n)
-
-			a := nod(OIF, nil, nil)
-			if rng := n.List.Slice(); rng != nil {
-				// Integer range.
-				// exprname is a temp or a constant,
-				// so it is safe to evaluate twice.
-				// In most cases, this conjunction will be
-				// rewritten by walkinrange into a single comparison.
-				low := nod(OGE, s.exprname, rng[0])
-				high := nod(OLE, s.exprname, rng[1])
-				a.Left = nod(OANDAND, low, high)
-				a.Left = typecheck(a.Left, Erv)
-				a.Left = walkexpr(a.Left, nil) // give walk the opportunity to optimize the range check
-			} else if (s.kind != switchKindTrue && s.kind != switchKindFalse) || assignop(n.Left.Type, s.exprname.Type, nil) == OCONVIFACE || assignop(s.exprname.Type, n.Left.Type, nil) == OCONVIFACE {
-				a.Left = nod(OEQ, s.exprname, n.Left) // if name == val
-				a.Left = typecheck(a.Left, Erv)
-			} else if s.kind == switchKindTrue {
-				a.Left = n.Left // if val
-			} else {
-				// s.kind == switchKindFalse
-				a.Left = nod(ONOT, n.Left, nil) // if !val
-				a.Left = typecheck(a.Left, Erv)
-			}
-			a.Nbody.Set1(n.Right) // goto l
-
-			cas = append(cas, a)
-			lineno = lno
-		}
-		return liststmt(cas)
-	}
-
-	// find the middle and recur
-	half := len(cc) / 2
-	a := nod(OIF, nil, nil)
-	n := cc[half-1].node
-	var mid *Node
-	if rng := n.List.Slice(); rng != nil {
-		mid = rng[1] // high end of range
-	} else {
-		mid = n.Left
-	}
-	le := nod(OLE, s.exprname, mid)
-	if Isconst(mid, CTSTR) {
-		// Search by length and then by value; see caseClauseByConstVal.
-		lenlt := nod(OLT, nod(OLEN, s.exprname, nil), nod(OLEN, mid, nil))
-		leneq := nod(OEQ, nod(OLEN, s.exprname, nil), nod(OLEN, mid, nil))
-		a.Left = nod(OOROR, lenlt, nod(OANDAND, leneq, le))
-	} else {
-		a.Left = le
-	}
-	a.Left = typecheck(a.Left, Erv)
-	a.Nbody.Set1(s.walkCases(cc[:half]))
-	a.Rlist.Set1(s.walkCases(cc[half:]))
-	return a
-}
-
-// casebody builds separate lists of statements and cases.
-// It makes labels between cases and statements
-// and deals with fallthrough, break, and unreachable statements.
-func casebody(sw *Node, typeswvar *Node) {
-	if sw.List.Len() == 0 {
-		return
-	}
-
-	lno := setlineno(sw)
-
-	var cas []*Node  // cases
-	var stat []*Node // statements
-	var def *Node    // defaults
-	br := nod(OBREAK, nil, nil)
-
-	for i, n := range sw.List.Slice() {
-		setlineno(n)
-		if n.Op != OXCASE {
-			Fatalf("casebody %v", n.Op)
-		}
-		n.Op = OCASE
-		needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL
-
-		jmp := nod(OGOTO, autolabel(".s"), nil)
-		switch n.List.Len() {
-		case 0:
-			// default
-			if def != nil {
-				yyerror("more than one default case")
-			}
-			// reuse original default case
-			n.Right = jmp
-			def = n
-		case 1:
-			// one case -- reuse OCASE node
-			n.Left = n.List.First()
-			n.Right = jmp
-			n.List.Set(nil)
-			cas = append(cas, n)
-		default:
-			// Expand multi-valued cases and detect ranges of integer cases.
-			if typeswvar != nil || sw.Left.Type.IsInterface() || !n.List.First().Type.IsInteger() || n.List.Len() < integerRangeMin {
-				// Can't use integer ranges. Expand each case into a separate node.
-				for _, n1 := range n.List.Slice() {
-					cas = append(cas, nod(OCASE, n1, jmp))
-				}
-				break
-			}
-			// Find integer ranges within runs of constants.
-			s := n.List.Slice()
-			j := 0
-			for j < len(s) {
-				// Find a run of constants.
-				var run int
-				for run = j; run < len(s) && Isconst(s[run], CTINT); run++ {
-				}
-				if run-j >= integerRangeMin {
-					// Search for integer ranges in s[j:run].
-					// Typechecking is done, so all values are already in an appropriate range.
-					search := s[j:run]
-					sort.Sort(constIntNodesByVal(search))
-					for beg, end := 0, 1; end <= len(search); end++ {
-						if end < len(search) && search[end].Int64() == search[end-1].Int64()+1 {
-							continue
-						}
-						if end-beg >= integerRangeMin {
-							// Record range in List.
-							c := nod(OCASE, nil, jmp)
-							c.List.Set2(search[beg], search[end-1])
-							cas = append(cas, c)
-						} else {
-							// Not large enough for range; record separately.
-							for _, n := range search[beg:end] {
-								cas = append(cas, nod(OCASE, n, jmp))
-							}
-						}
-						beg = end
-					}
-					j = run
-				}
-				// Advance to next constant, adding individual non-constant
-				// or as-yet-unhandled constant cases as we go.
-				for ; j < len(s) && (j < run || !Isconst(s[j], CTINT)); j++ {
-					cas = append(cas, nod(OCASE, s[j], jmp))
-				}
-			}
-		}
-
-		stat = append(stat, nod(OLABEL, jmp.Left, nil))
-		if typeswvar != nil && needvar && n.Rlist.Len() != 0 {
-			l := []*Node{
-				nod(ODCL, n.Rlist.First(), nil),
-				nod(OAS, n.Rlist.First(), typeswvar),
-			}
-			typecheckslice(l, Etop)
-			stat = append(stat, l...)
-		}
-		stat = append(stat, n.Nbody.Slice()...)
-
-		// Search backwards for the index of the fallthrough
-		// statement. Do not assume it'll be in the last
-		// position, since in some cases (e.g. when the statement
-		// list contains autotmp_ variables), one or more OVARKILL
-		// nodes will be at the end of the list.
-		fallIndex := len(stat) - 1
-		for stat[fallIndex].Op == OVARKILL {
-			fallIndex--
-		}
-		last := stat[fallIndex]
-
-		// botch - shouldn't fall through declaration
-		if last.Xoffset == n.Xoffset && last.Op == OXFALL {
-			if typeswvar != nil {
-				setlineno(last)
-				yyerror("cannot fallthrough in type switch")
-			}
-
-			if i+1 >= sw.List.Len() {
-				setlineno(last)
-				yyerror("cannot fallthrough final case in switch")
-			}
-
-			last.Op = OFALL
-		} else {
-			stat = append(stat, br)
-		}
-	}
-
-	stat = append(stat, br)
-	if def != nil {
-		cas = append(cas, def)
-	}
-
-	sw.List.Set(cas)
-	sw.Nbody.Set(stat)
-	lineno = lno
-}
-
-// genCaseClauses generates the caseClauses value for clauses.
-func (s *exprSwitch) genCaseClauses(clauses []*Node) caseClauses {
-	var cc caseClauses
-	for _, n := range clauses {
-		if n.Left == nil && n.List.Len() == 0 {
-			// default case
-			if cc.defjmp != nil {
-				Fatalf("duplicate default case not detected during typechecking")
-			}
-			cc.defjmp = n.Right
-			continue
-		}
-		c := caseClause{node: n, ordinal: len(cc.list)}
-		if n.List.Len() > 0 {
-			c.isconst = true
-		}
-		switch consttype(n.Left) {
-		case CTFLT, CTINT, CTRUNE, CTSTR:
-			c.isconst = true
-		}
-		cc.list = append(cc.list, c)
-	}
-
-	if cc.defjmp == nil {
-		cc.defjmp = nod(OBREAK, nil, nil)
-	}
-
-	// diagnose duplicate cases
-	s.checkDupCases(cc.list)
-	return cc
-}
-
-// genCaseClauses generates the caseClauses value for clauses.
-func (s *typeSwitch) genCaseClauses(clauses []*Node) caseClauses {
-	var cc caseClauses
-	for _, n := range clauses {
-		switch {
-		case n.Left == nil:
-			// default case
-			if cc.defjmp != nil {
-				Fatalf("duplicate default case not detected during typechecking")
-			}
-			cc.defjmp = n.Right
-			continue
-		case n.Left.Op == OLITERAL:
-			// nil case in type switch
-			if cc.niljmp != nil {
-				Fatalf("duplicate nil case not detected during typechecking")
-			}
-			cc.niljmp = n.Right
-			continue
-		}
-
-		// general case
-		c := caseClause{
-			node:    n,
-			ordinal: len(cc.list),
-			isconst: !n.Left.Type.IsInterface(),
-			hash:    typehash(n.Left.Type),
-		}
-		cc.list = append(cc.list, c)
-	}
-
-	if cc.defjmp == nil {
-		cc.defjmp = nod(OBREAK, nil, nil)
-	}
-
-	// diagnose duplicate cases
-	s.checkDupCases(cc.list)
-	return cc
-}
-
-func (s *typeSwitch) checkDupCases(cc []caseClause) {
-	if len(cc) < 2 {
-		return
-	}
-	// We store seen types in a map keyed by type hash.
-	// It is possible, but very unlikely, for multiple distinct types to have the same hash.
-	seen := make(map[uint32][]*Node)
-	// To avoid many small allocations of length 1 slices,
-	// also set up a single large slice to slice into.
-	nn := make([]*Node, 0, len(cc))
-Outer:
-	for _, c := range cc {
-		prev, ok := seen[c.hash]
-		if !ok {
-			// First entry for this hash.
-			nn = append(nn, c.node)
-			seen[c.hash] = nn[len(nn)-1 : len(nn):len(nn)]
-			continue
-		}
-		for _, n := range prev {
-			if eqtype(n.Left.Type, c.node.Left.Type) {
-				yyerrorl(c.node.Lineno, "duplicate case %v in type switch\n\tprevious case at %v", c.node.Left.Type, n.Line())
-				// avoid double-reporting errors
-				continue Outer
-			}
-		}
-		seen[c.hash] = append(seen[c.hash], c.node)
-	}
-}
-
-func (s *exprSwitch) checkDupCases(cc []caseClause) {
-	if len(cc) < 2 {
-		return
-	}
-	// The common case is that s's expression is not an interface.
-	// In that case, all constant clauses have the same type,
-	// so checking for duplicates can be done solely by value.
-	if !s.exprname.Type.IsInterface() {
-		seen := make(map[interface{}]*Node)
-		for _, c := range cc {
-			switch {
-			case c.node.Left != nil:
-				// Single constant.
-
-				// Can't check for duplicates that aren't constants, per the spec. Issue 15896.
-				// Don't check for duplicate bools. Although the spec allows it,
-				// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
-				// (2) it would disallow useful things like
-				//       case GOARCH == "arm" && GOARM == "5":
-				//       case GOARCH == "arm":
-				//     which would both evaluate to false for non-ARM compiles.
-				if ct := consttype(c.node.Left); ct < 0 || ct == CTBOOL {
-					continue
-				}
-
-				val := c.node.Left.Val().Interface()
-				prev, dup := seen[val]
-				if !dup {
-					seen[val] = c.node
-					continue
-				}
-				setlineno(c.node)
-				yyerror("duplicate case %#v in switch\n\tprevious case at %v", val, prev.Line())
-
-			case c.node.List.Len() == 2:
-				// Range of integers.
-				low := c.node.List.Index(0).Int64()
-				high := c.node.List.Index(1).Int64()
-				for i := low; i <= high; i++ {
-					prev, dup := seen[i]
-					if !dup {
-						seen[i] = c.node
-						continue
-					}
-					setlineno(c.node)
-					yyerror("duplicate case %d in switch\n\tprevious case at %v", i, prev.Line())
-				}
-
-			default:
-				Fatalf("bad caseClause node in checkDupCases: %v", c.node)
-			}
-		}
-		return
-	}
-	// s's expression is an interface. This is fairly rare, so keep this simple.
-	// Duplicates are only duplicates if they have the same type and the same value.
-	type typeVal struct {
-		typ string
-		val interface{}
-	}
-	seen := make(map[typeVal]*Node)
-	for _, c := range cc {
-		if ct := consttype(c.node.Left); ct < 0 || ct == CTBOOL {
-			continue
-		}
-		n := c.node.Left
-		tv := typeVal{
-			// n.Type.tconv(FmtLeft | FmtUnsigned) here serves to completely describe the type.
-			// See the comments in func typehash.
-			typ: n.Type.tconv(FmtLeft | FmtUnsigned),
-			val: n.Val().Interface(),
-		}
-		prev, dup := seen[tv]
-		if !dup {
-			seen[tv] = c.node
-			continue
-		}
-		setlineno(c.node)
-		yyerror("duplicate case %v in switch\n\tprevious case at %v", prev.Left, prev.Line())
-	}
-}
-
-// walk generates an AST that implements sw,
-// where sw is a type switch.
-// The AST is generally of the form of a linear
-// search using if..goto, although binary search
-// is used with long runs of concrete types.
-func (s *typeSwitch) walk(sw *Node) {
-	cond := sw.Left
-	sw.Left = nil
-
-	if cond == nil {
-		sw.List.Set(nil)
-		return
-	}
-	if cond.Right == nil {
-		setlineno(sw)
-		yyerror("type switch must have an assignment")
-		return
-	}
-
-	cond.Right = walkexpr(cond.Right, &sw.Ninit)
-	if !cond.Right.Type.IsInterface() {
-		yyerror("type switch must be on an interface")
-		return
-	}
-
-	var cas []*Node
-
-	// predeclare temporary variables and the boolean var
-	s.facename = temp(cond.Right.Type)
-
-	a := nod(OAS, s.facename, cond.Right)
-	a = typecheck(a, Etop)
-	cas = append(cas, a)
-
-	s.okname = temp(Types[TBOOL])
-	s.okname = typecheck(s.okname, Erv)
-
-	s.hashname = temp(Types[TUINT32])
-	s.hashname = typecheck(s.hashname, Erv)
-
-	// set up labels and jumps
-	casebody(sw, s.facename)
-
-	clauses := s.genCaseClauses(sw.List.Slice())
-	sw.List.Set(nil)
-	def := clauses.defjmp
-
-	// For empty interfaces, do:
-	//     if e._type == nil {
-	//         do nil case if it exists, otherwise default
-	//     }
-	//     h := e._type.hash
-	// Use a similar strategy for non-empty interfaces.
-
-	// Get interface descriptor word.
-	typ := nod(OITAB, s.facename, nil)
-
-	// Check for nil first.
-	i := nod(OIF, nil, nil)
-	i.Left = nod(OEQ, typ, nodnil())
-	if clauses.niljmp != nil {
-		// Do explicit nil case right here.
-		i.Nbody.Set1(clauses.niljmp)
-	} else {
-		// Jump to default case.
-		lbl := autolabel(".s")
-		i.Nbody.Set1(nod(OGOTO, lbl, nil))
-		// Wrap default case with label.
-		blk := nod(OBLOCK, nil, nil)
-		blk.List.Set([]*Node{nod(OLABEL, lbl, nil), def})
-		def = blk
-	}
-	i.Left = typecheck(i.Left, Erv)
-	cas = append(cas, i)
-
-	if !cond.Right.Type.IsEmptyInterface() {
-		// Load type from itab.
-		typ = itabType(typ)
-	}
-	// Load hash from type.
-	h := nodSym(ODOTPTR, typ, nil)
-	h.Type = Types[TUINT32]
-	h.Typecheck = 1
-	h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type
-	h.Bounded = true                // guaranteed not to fault
-	a = nod(OAS, s.hashname, h)
-	a = typecheck(a, Etop)
-	cas = append(cas, a)
-
-	cc := clauses.list
-
-	// insert type equality check into each case block
-	for _, c := range cc {
-		c.node.Right = s.typeone(c.node)
-	}
-
-	// generate list of if statements, binary search for constant sequences
-	for len(cc) > 0 {
-		if !cc[0].isconst {
-			n := cc[0].node
-			cas = append(cas, n.Right)
-			cc = cc[1:]
-			continue
-		}
-
-		// identify run of constants
-		var run int
-		for run = 1; run < len(cc) && cc[run].isconst; run++ {
-		}
-
-		// sort by hash
-		sort.Sort(caseClauseByType(cc[:run]))
-
-		// for debugging: linear search
-		if false {
-			for i := 0; i < run; i++ {
-				n := cc[i].node
-				cas = append(cas, n.Right)
-			}
-			continue
-		}
-
-		// combine adjacent cases with the same hash
-		ncase := 0
-		for i := 0; i < run; i++ {
-			ncase++
-			hash := []*Node{cc[i].node.Right}
-			for j := i + 1; j < run && cc[i].hash == cc[j].hash; j++ {
-				hash = append(hash, cc[j].node.Right)
-			}
-			cc[i].node.Right = liststmt(hash)
-		}
-
-		// binary search among cases to narrow by hash
-		cas = append(cas, s.walkCases(cc[:ncase]))
-		cc = cc[ncase:]
-	}
-
-	// handle default case
-	if nerrors == 0 {
-		cas = append(cas, def)
-		sw.Nbody.Prepend(cas...)
-		sw.List.Set(nil)
-		walkstmtlist(sw.Nbody.Slice())
-	}
-}
-
-// typeone generates an AST that jumps to the
-// case body if the variable is of type t.
-func (s *typeSwitch) typeone(t *Node) *Node {
-	var name *Node
-	var init []*Node
-	if t.Rlist.Len() == 0 {
-		name = nblank
-		nblank = typecheck(nblank, Erv|Easgn)
-	} else {
-		name = t.Rlist.First()
-		init = []*Node{nod(ODCL, name, nil)}
-		a := nod(OAS, name, nil)
-		a = typecheck(a, Etop)
-		init = append(init, a)
-	}
-
-	a := nod(OAS2, nil, nil)
-	a.List.Set([]*Node{name, s.okname}) // name, ok =
-	b := nod(ODOTTYPE, s.facename, nil)
-	b.Type = t.Left.Type // interface.(type)
-	a.Rlist.Set1(b)
-	a = typecheck(a, Etop)
-	init = append(init, a)
-
-	c := nod(OIF, nil, nil)
-	c.Left = s.okname
-	c.Nbody.Set1(t.Right) // if ok { goto l }
-
-	return liststmt(append(init, c))
-}
-
-// walkCases generates an AST implementing the cases in cc.
-func (s *typeSwitch) walkCases(cc []caseClause) *Node {
-	if len(cc) < binarySearchMin {
-		var cas []*Node
-		for _, c := range cc {
-			n := c.node
-			if !c.isconst {
-				Fatalf("typeSwitch walkCases")
-			}
-			a := nod(OIF, nil, nil)
-			a.Left = nod(OEQ, s.hashname, nodintconst(int64(c.hash)))
-			a.Left = typecheck(a.Left, Erv)
-			a.Nbody.Set1(n.Right)
-			cas = append(cas, a)
-		}
-		return liststmt(cas)
-	}
-
-	// find the middle and recur
-	half := len(cc) / 2
-	a := nod(OIF, nil, nil)
-	a.Left = nod(OLE, s.hashname, nodintconst(int64(cc[half-1].hash)))
-	a.Left = typecheck(a.Left, Erv)
-	a.Nbody.Set1(s.walkCases(cc[:half]))
-	a.Rlist.Set1(s.walkCases(cc[half:]))
-	return a
-}
-
-// caseClauseByConstVal sorts clauses by constant value to enable binary search.
-type caseClauseByConstVal []caseClause
-
-func (x caseClauseByConstVal) Len() int      { return len(x) }
-func (x caseClauseByConstVal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x caseClauseByConstVal) Less(i, j int) bool {
-	// n1 and n2 might be individual constants or integer ranges.
-	// We have checked for duplicates already,
-	// so ranges can be safely represented by any value in the range.
-	n1 := x[i].node
-	var v1 interface{}
-	if s := n1.List.Slice(); s != nil {
-		v1 = s[0].Val().U
-	} else {
-		v1 = n1.Left.Val().U
-	}
-
-	n2 := x[j].node
-	var v2 interface{}
-	if s := n2.List.Slice(); s != nil {
-		v2 = s[0].Val().U
-	} else {
-		v2 = n2.Left.Val().U
-	}
-
-	switch v1 := v1.(type) {
-	case *Mpflt:
-		return v1.Cmp(v2.(*Mpflt)) < 0
-	case *Mpint:
-		return v1.Cmp(v2.(*Mpint)) < 0
-	case string:
-		// Sort strings by length and then by value.
-		// It is much cheaper to compare lengths than values,
-		// and all we need here is consistency.
-		// We respect this sorting in exprSwitch.walkCases.
-		a := v1
-		b := v2.(string)
-		if len(a) != len(b) {
-			return len(a) < len(b)
-		}
-		return a < b
-	}
-
-	Fatalf("caseClauseByConstVal passed bad clauses %v < %v", x[i].node.Left, x[j].node.Left)
-	return false
-}
-
-type caseClauseByType []caseClause
-
-func (x caseClauseByType) Len() int      { return len(x) }
-func (x caseClauseByType) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x caseClauseByType) Less(i, j int) bool {
-	c1, c2 := x[i], x[j]
-	// sort by hash code, then ordinal (for the rare case of hash collisions)
-	if c1.hash != c2.hash {
-		return c1.hash < c2.hash
-	}
-	return c1.ordinal < c2.ordinal
-}
-
-type constIntNodesByVal []*Node
-
-func (x constIntNodesByVal) Len() int      { return len(x) }
-func (x constIntNodesByVal) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x constIntNodesByVal) Less(i, j int) bool {
-	return x[i].Val().U.(*Mpint).Cmp(x[j].Val().U.(*Mpint)) < 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/swt_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/swt_test.go
deleted file mode 100644
index b3c45b7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/swt_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/swt_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/swt_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/math/big"
-	"testing"
-)
-
-func nodrune(r rune) *Node {
-	return nodlit(Val{&Mpint{Val: *big.NewInt(int64(r)), Rune: true}})
-}
-
-func nodflt(f float64) *Node {
-	return nodlit(Val{&Mpflt{Val: *big.NewFloat(f)}})
-}
-
-func TestCaseClauseByConstVal(t *testing.T) {
-	tests := []struct {
-		a, b *Node
-	}{
-		// CTFLT
-		{nodflt(0.1), nodflt(0.2)},
-		// CTINT
-		{nodintconst(0), nodintconst(1)},
-		// CTRUNE
-		{nodrune('a'), nodrune('b')},
-		// CTSTR
-		{nodlit(Val{"ab"}), nodlit(Val{"abc"})},
-		{nodlit(Val{"ab"}), nodlit(Val{"xyz"})},
-		{nodlit(Val{"abc"}), nodlit(Val{"xyz"})},
-	}
-	for i, test := range tests {
-		a := caseClause{node: nod(OXXX, test.a, nil)}
-		b := caseClause{node: nod(OXXX, test.b, nil)}
-		s := caseClauseByConstVal{a, b}
-		if less := s.Less(0, 1); !less {
-			t.Errorf("%d: caseClauseByConstVal(%v, %v) = false", i, test.a, test.b)
-		}
-		if less := s.Less(1, 0); less {
-			t.Errorf("%d: caseClauseByConstVal(%v, %v) = true", i, test.a, test.b)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/syntax.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/syntax.go
deleted file mode 100644
index bb77dc2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/syntax.go
+++ /dev/null
@@ -1,630 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/syntax.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/syntax.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// “Abstract” syntax representation.
-
-package gc
-
-// A Node is a single node in the syntax tree.
-// Actually the syntax tree is a syntax DAG, because there is only one
-// node with Op=ONAME for a given instance of a variable x.
-// The same is true for Op=OTYPE and Op=OLITERAL.
-type Node struct {
-	// Tree structure.
-	// Generic recursive walks should follow these fields.
-	Left  *Node
-	Right *Node
-	Ninit Nodes
-	Nbody Nodes
-	List  Nodes
-	Rlist Nodes
-
-	// most nodes
-	Type *Type
-	Orig *Node // original form, for printing, and tracking copies of ONAMEs
-
-	// func
-	Func *Func
-
-	// ONAME
-	Name *Name
-
-	Sym *Sym        // various
-	E   interface{} // Opt or Val, see methods below
-
-	// Various. Usually an offset into a struct. For example:
-	// - ONAME nodes that refer to local variables use it to identify their stack frame position.
-	// - ODOT, ODOTPTR, and OINDREGSP use it to indicate offset relative to their base address.
-	// - OSTRUCTKEY uses it to store the named field's offset.
-	// - OXCASE and OXFALL use it to validate the use of fallthrough.
-	// - ONONAME uses it to store the current value of iota, see Node.Iota
-	// Possibly still more uses. If you find any, document them.
-	Xoffset int64
-
-	Lineno int32
-
-	Esc uint16 // EscXXX
-
-	Op        Op
-	Ullman    uint8 // sethi/ullman number
-	Addable   bool  // addressable
-	Etype     EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg, ChanDir for OTCHAN, for OINDEXMAP 1=LHS,0=RHS
-	Bounded   bool  // bounds check unnecessary
-	NonNil    bool  // guaranteed to be non-nil
-	Class     Class // PPARAM, PAUTO, PEXTERN, etc
-	Embedded  uint8 // ODCLFIELD embedded type
-	Colas     bool  // OAS resulting from :=
-	Diag      bool  // already printed error about this
-	Noescape  bool  // func arguments do not escape; TODO(rsc): move Noescape to Func struct (see CL 7360)
-	Walkdef   uint8 // tracks state during typecheckdef; 2 == loop detected
-	Typecheck uint8 // tracks state during typechecking; 2 == loop detected
-	Local     bool
-	IsStatic  bool // whether this Node will be converted to purely static data
-	Initorder uint8
-	Used      bool // for variable/label declared and not used error
-	Isddd     bool // is the argument variadic
-	Implicit  bool
-	Addrtaken bool  // address taken, even if not moved to heap
-	Assigned  bool  // is the variable ever assigned to
-	Likely    int8  // likeliness of if statement
-	hasVal    int8  // +1 for Val, -1 for Opt, 0 for not yet set
-	flags     uint8 // TODO: store more bool fields in this flag field
-}
-
-// IsAutoTmp indicates if n was created by the compiler as a temporary,
-// based on the setting of the .AutoTemp flag in n's Name.
-func (n *Node) IsAutoTmp() bool {
-	if n == nil || n.Op != ONAME {
-		return false
-	}
-	return n.Name.AutoTemp
-}
-
-const (
-	hasBreak = 1 << iota
-	isClosureVar
-	isOutputParamHeapAddr
-	noInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
-)
-
-func (n *Node) HasBreak() bool {
-	return n.flags&hasBreak != 0
-}
-func (n *Node) SetHasBreak(b bool) {
-	if b {
-		n.flags |= hasBreak
-	} else {
-		n.flags &^= hasBreak
-	}
-}
-func (n *Node) isClosureVar() bool {
-	return n.flags&isClosureVar != 0
-}
-func (n *Node) setIsClosureVar(b bool) {
-	if b {
-		n.flags |= isClosureVar
-	} else {
-		n.flags &^= isClosureVar
-	}
-}
-func (n *Node) noInline() bool {
-	return n.flags&noInline != 0
-}
-func (n *Node) setNoInline(b bool) {
-	if b {
-		n.flags |= noInline
-	} else {
-		n.flags &^= noInline
-	}
-}
-
-func (n *Node) IsOutputParamHeapAddr() bool {
-	return n.flags&isOutputParamHeapAddr != 0
-}
-func (n *Node) setIsOutputParamHeapAddr(b bool) {
-	if b {
-		n.flags |= isOutputParamHeapAddr
-	} else {
-		n.flags &^= isOutputParamHeapAddr
-	}
-}
-
-// Val returns the Val for the node.
-func (n *Node) Val() Val {
-	if n.hasVal != +1 {
-		return Val{}
-	}
-	return Val{n.E}
-}
-
-// SetVal sets the Val for the node, which must not have been used with SetOpt.
-func (n *Node) SetVal(v Val) {
-	if n.hasVal == -1 {
-		Debug['h'] = 1
-		Dump("have Opt", n)
-		Fatalf("have Opt")
-	}
-	n.hasVal = +1
-	n.E = v.U
-}
-
-// Opt returns the optimizer data for the node.
-func (n *Node) Opt() interface{} {
-	if n.hasVal != -1 {
-		return nil
-	}
-	return n.E
-}
-
-// SetOpt sets the optimizer data for the node, which must not have been used with SetVal.
-// SetOpt(nil) is ignored for Vals to simplify call sites that are clearing Opts.
-func (n *Node) SetOpt(x interface{}) {
-	if x == nil && n.hasVal >= 0 {
-		return
-	}
-	if n.hasVal == +1 {
-		Debug['h'] = 1
-		Dump("have Val", n)
-		Fatalf("have Val")
-	}
-	n.hasVal = -1
-	n.E = x
-}
-
-func (n *Node) Iota() int64 {
-	return n.Xoffset
-}
-
-func (n *Node) SetIota(x int64) {
-	n.Xoffset = x
-}
-
-// Name holds Node fields used only by named nodes (ONAME, OPACK, OLABEL, some OLITERAL).
-type Name struct {
-	Pack      *Node  // real package for import . names
-	Pkg       *Pkg   // pkg for OPACK nodes
-	Heapaddr  *Node  // temp holding heap address of param (could move to Param?)
-	Defn      *Node  // initializing assignment
-	Curfn     *Node  // function for local variables
-	Param     *Param // additional fields for ONAME
-	Decldepth int32  // declaration loop depth, increased for every loop or label
-	Vargen    int32  // unique name for ONAME within a function.  Function outputs are numbered starting at one.
-	Funcdepth int32
-	Readonly  bool
-	Captured  bool // is the variable captured by a closure
-	Byval     bool // is the variable captured by value or by reference
-	Needzero  bool // if it contains pointers, needs to be zeroed on function entry
-	Keepalive bool // mark value live across unknown assembly call
-	AutoTemp  bool // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
-}
-
-type Param struct {
-	Ntype *Node
-
-	// ONAME PAUTOHEAP
-	Stackcopy *Node // the PPARAM/PPARAMOUT on-stack slot (moved func params only)
-
-	// ONAME PPARAM
-	Field *Field // TFIELD in arg struct
-
-	// ONAME closure linkage
-	// Consider:
-	//
-	//	func f() {
-	//		x := 1 // x1
-	//		func() {
-	//			use(x) // x2
-	//			func() {
-	//				use(x) // x3
-	//				--- parser is here ---
-	//			}()
-	//		}()
-	//	}
-	//
-	// There is an original declaration of x and then a chain of mentions of x
-	// leading into the current function. Each time x is mentioned in a new closure,
-	// we create a variable representing x for use in that specific closure,
-	// since the way you get to x is different in each closure.
-	//
-	// Let's number the specific variables as shown in the code:
-	// x1 is the original x, x2 is when mentioned in the closure,
-	// and x3 is when mentioned in the closure in the closure.
-	//
-	// We keep these linked (assume N > 1):
-	//
-	//   - x1.Defn = original declaration statement for x (like most variables)
-	//   - x1.Innermost = current innermost closure x (in this case x3), or nil for none
-	//   - x1.isClosureVar() = false
-	//
-	//   - xN.Defn = x1, N > 1
-	//   - xN.isClosureVar() = true, N > 1
-	//   - x2.Outer = nil
-	//   - xN.Outer = x(N-1), N > 2
-	//
-	//
-	// When we look up x in the symbol table, we always get x1.
-	// Then we can use x1.Innermost (if not nil) to get the x
-	// for the innermost known closure function,
-	// but the first reference in a closure will find either no x1.Innermost
-	// or an x1.Innermost with .Funcdepth < Funcdepth.
-	// In that case, a new xN must be created, linked in with:
-	//
-	//     xN.Defn = x1
-	//     xN.Outer = x1.Innermost
-	//     x1.Innermost = xN
-	//
-	// When we finish the function, we'll process its closure variables
-	// and find xN and pop it off the list using:
-	//
-	//     x1 := xN.Defn
-	//     x1.Innermost = xN.Outer
-	//
-	// We leave xN.Innermost set so that we can still get to the original
-	// variable quickly. Not shown here, but once we're
-	// done parsing a function and no longer need xN.Outer for the
-	// lexical x reference links as described above, closurebody
-	// recomputes xN.Outer as the semantic x reference link tree,
-	// even filling in x in intermediate closures that might not
-	// have mentioned it along the way to inner closures that did.
-	// See closurebody for details.
-	//
-	// During the eventual compilation, then, for closure variables we have:
-	//
-	//     xN.Defn = original variable
-	//     xN.Outer = variable captured in next outward scope
-	//                to make closure where xN appears
-	//
-	// Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
-	// and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
-	Innermost *Node
-	Outer     *Node
-
-	// OTYPE pragmas
-	//
-	// TODO: Should Func pragmas also be stored on the Name?
-	Pragma Pragma
-}
-
-// Func holds Node fields used only with function-like nodes.
-type Func struct {
-	Shortname  *Node
-	Enter      Nodes // for example, allocate and initialize memory for escaping parameters
-	Exit       Nodes
-	Cvars      Nodes   // closure params
-	Dcl        []*Node // autodcl for this func/closure
-	Inldcl     Nodes   // copy of dcl for use in inlining
-	Closgen    int
-	Outerfunc  *Node // outer function (for closure)
-	FieldTrack map[*Sym]struct{}
-	Ntype      *Node // signature
-	Top        int   // top context (Ecall, Eproc, etc)
-	Closure    *Node // OCLOSURE <-> ODCLFUNC
-	Nname      *Node
-
-	Inl     Nodes // copy of the body for use in inlining
-	InlCost int32
-	Depth   int32
-
-	Label int32 // largest auto-generated label in this function
-
-	Endlineno int32
-	WBLineno  int32 // line number of first write barrier
-
-	Pragma          Pragma // go:xxx function annotations
-	Dupok           bool   // duplicate definitions ok
-	Wrapper         bool   // is method wrapper
-	Needctxt        bool   // function uses context register (has closure variables)
-	ReflectMethod   bool   // function calls reflect.Type.Method or MethodByName
-	IsHiddenClosure bool
-	NoFramePointer  bool // Must not use a frame pointer for this function
-}
-
-type Op uint8
-
-// Node ops.
-const (
-	OXXX = Op(iota)
-
-	// names
-	ONAME    // var, const or func name
-	ONONAME  // unnamed arg or return value: f(int, string) (int, error) { etc }
-	OTYPE    // type name
-	OPACK    // import
-	OLITERAL // literal
-
-	// expressions
-	OADD             // Left + Right
-	OSUB             // Left - Right
-	OOR              // Left | Right
-	OXOR             // Left ^ Right
-	OADDSTR          // +{List} (string addition, list elements are strings)
-	OADDR            // &Left
-	OANDAND          // Left && Right
-	OAPPEND          // append(List)
-	OARRAYBYTESTR    // Type(Left) (Type is string, Left is a []byte)
-	OARRAYBYTESTRTMP // Type(Left) (Type is string, Left is a []byte, ephemeral)
-	OARRAYRUNESTR    // Type(Left) (Type is string, Left is a []rune)
-	OSTRARRAYBYTE    // Type(Left) (Type is []byte, Left is a string)
-	OSTRARRAYBYTETMP // Type(Left) (Type is []byte, Left is a string, ephemeral)
-	OSTRARRAYRUNE    // Type(Left) (Type is []rune, Left is a string)
-	OAS              // Left = Right or (if Colas=true) Left := Right
-	OAS2             // List = Rlist (x, y, z = a, b, c)
-	OAS2FUNC         // List = Rlist (x, y = f())
-	OAS2RECV         // List = Rlist (x, ok = <-c)
-	OAS2MAPR         // List = Rlist (x, ok = m["foo"])
-	OAS2DOTTYPE      // List = Rlist (x, ok = I.(int))
-	OASOP            // Left Etype= Right (x += y)
-	OASWB            // Left = Right (with write barrier)
-	OCALL            // Left(List) (function call, method call or type conversion)
-	OCALLFUNC        // Left(List) (function call f(args))
-	OCALLMETH        // Left(List) (direct method call x.Method(args))
-	OCALLINTER       // Left(List) (interface method call x.Method(args))
-	OCALLPART        // Left.Right (method expression x.Method, not called)
-	OCAP             // cap(Left)
-	OCLOSE           // close(Left)
-	OCLOSURE         // func Type { Body } (func literal)
-	OCMPIFACE        // Left Etype Right (interface comparison, x == y or x != y)
-	OCMPSTR          // Left Etype Right (string comparison, x == y, x < y, etc)
-	OCOMPLIT         // Right{List} (composite literal, not yet lowered to specific form)
-	OMAPLIT          // Type{List} (composite literal, Type is map)
-	OSTRUCTLIT       // Type{List} (composite literal, Type is struct)
-	OARRAYLIT        // Type{List} (composite literal, Type is array)
-	OSLICELIT        // Type{List} (composite literal, Type is slice)
-	OPTRLIT          // &Left (left is composite literal)
-	OCONV            // Type(Left) (type conversion)
-	OCONVIFACE       // Type(Left) (type conversion, to interface)
-	OCONVNOP         // Type(Left) (type conversion, no effect)
-	OCOPY            // copy(Left, Right)
-	ODCL             // var Left (declares Left of type Left.Type)
-
-	// Used during parsing but don't last.
-	ODCLFUNC  // func f() or func (r) f()
-	ODCLFIELD // struct field, interface field, or func/method argument/return value.
-	ODCLCONST // const pi = 3.14
-	ODCLTYPE  // type Int int
-
-	ODELETE    // delete(Left, Right)
-	ODOT       // Left.Sym (Left is of struct type)
-	ODOTPTR    // Left.Sym (Left is of pointer to struct type)
-	ODOTMETH   // Left.Sym (Left is non-interface, Right is method name)
-	ODOTINTER  // Left.Sym (Left is interface, Right is method name)
-	OXDOT      // Left.Sym (before rewrite to one of the preceding)
-	ODOTTYPE   // Left.Right or Left.Type (.Right during parsing, .Type once resolved)
-	ODOTTYPE2  // Left.Right or Left.Type (.Right during parsing, .Type once resolved; on rhs of OAS2DOTTYPE)
-	OEQ        // Left == Right
-	ONE        // Left != Right
-	OLT        // Left < Right
-	OLE        // Left <= Right
-	OGE        // Left >= Right
-	OGT        // Left > Right
-	OIND       // *Left
-	OINDEX     // Left[Right] (index of array or slice)
-	OINDEXMAP  // Left[Right] (index of map)
-	OKEY       // Left:Right (key:value in struct/array/map literal)
-	OSTRUCTKEY // Sym:Left (key:value in struct literal, after type checking)
-	OLEN       // len(Left)
-	OMAKE      // make(List) (before type checking converts to one of the following)
-	OMAKECHAN  // make(Type, Left) (type is chan)
-	OMAKEMAP   // make(Type, Left) (type is map)
-	OMAKESLICE // make(Type, Left, Right) (type is slice)
-	OMUL       // Left * Right
-	ODIV       // Left / Right
-	OMOD       // Left % Right
-	OLSH       // Left << Right
-	ORSH       // Left >> Right
-	OAND       // Left & Right
-	OANDNOT    // Left &^ Right
-	ONEW       // new(Left)
-	ONOT       // !Left
-	OCOM       // ^Left
-	OPLUS      // +Left
-	OMINUS     // -Left
-	OOROR      // Left || Right
-	OPANIC     // panic(Left)
-	OPRINT     // print(List)
-	OPRINTN    // println(List)
-	OPAREN     // (Left)
-	OSEND      // Left <- Right
-	OSLICE     // Left[List[0] : List[1]] (Left is untypechecked or slice)
-	OSLICEARR  // Left[List[0] : List[1]] (Left is array)
-	OSLICESTR  // Left[List[0] : List[1]] (Left is string)
-	OSLICE3    // Left[List[0] : List[1] : List[2]] (Left is untypechecked or slice)
-	OSLICE3ARR // Left[List[0] : List[1] : List[2]] (Left is array)
-	ORECOVER   // recover()
-	ORECV      // <-Left
-	ORUNESTR   // Type(Left) (Type is string, Left is rune)
-	OSELRECV   // Left = <-Right.Left: (appears as .Left of OCASE; Right.Op == ORECV)
-	OSELRECV2  // List = <-Right.Left: (appears as .Left of OCASE; count(List) == 2, Right.Op == ORECV)
-	OIOTA      // iota
-	OREAL      // real(Left)
-	OIMAG      // imag(Left)
-	OCOMPLEX   // complex(Left, Right)
-	OALIGNOF   // unsafe.Alignof(Left)
-	OOFFSETOF  // unsafe.Offsetof(Left)
-	OSIZEOF    // unsafe.Sizeof(Left)
-
-	// statements
-	OBLOCK    // { List } (block of code)
-	OBREAK    // break
-	OCASE     // case Left or List[0]..List[1]: Nbody (select case after processing; Left==nil and List==nil means default)
-	OXCASE    // case List: Nbody (select case before processing; List==nil means default)
-	OCONTINUE // continue
-	ODEFER    // defer Left (Left must be call)
-	OEMPTY    // no-op (empty statement)
-	OFALL     // fallthrough (after processing)
-	OXFALL    // fallthrough (before processing)
-	OFOR      // for Ninit; Left; Right { Nbody }
-	OGOTO     // goto Left
-	OIF       // if Ninit; Left { Nbody } else { Rlist }
-	OLABEL    // Left:
-	OPROC     // go Left (Left must be call)
-	ORANGE    // for List = range Right { Nbody }
-	ORETURN   // return List
-	OSELECT   // select { List } (List is list of OXCASE or OCASE)
-	OSWITCH   // switch Ninit; Left { List } (List is a list of OXCASE or OCASE)
-	OTYPESW   // List = Left.(type) (appears as .Left of OSWITCH)
-
-	// types
-	OTCHAN   // chan int
-	OTMAP    // map[string]int
-	OTSTRUCT // struct{}
-	OTINTER  // interface{}
-	OTFUNC   // func()
-	OTARRAY  // []int, [8]int, [N]int or [...]int
-
-	// misc
-	ODDD        // func f(args ...int) or f(l...) or var a = [...]int{0, 1, 2}.
-	ODDDARG     // func f(args ...int), introduced by escape analysis.
-	OINLCALL    // intermediary representation of an inlined call.
-	OEFACE      // itable and data words of an empty-interface value.
-	OITAB       // itable word of an interface value.
-	OIDATA      // data word of an interface value in Left
-	OSPTR       // base pointer of a slice or string.
-	OCLOSUREVAR // variable reference at beginning of closure function
-	OCFUNC      // reference to c function pointer (not go func value)
-	OCHECKNIL   // emit code to ensure pointer/interface not nil
-	OVARKILL    // variable is dead
-	OVARLIVE    // variable is alive
-	OINDREGSP   // offset plus indirect of REGSP, such as 8(SP).
-
-	// arch-specific opcodes
-	OCMP    // compare: ACMP.
-	ODEC    // decrement: ADEC.
-	OINC    // increment: AINC.
-	OEXTEND // extend: ACWD/ACDQ/ACQO.
-	OHMUL   // high mul: AMUL/AIMUL for unsigned/signed (OMUL uses AIMUL for both).
-	OLROT   // left rotate: AROL.
-	ORROTC  // right rotate-carry: ARCR.
-	ORETJMP // return to other function
-	OPS     // compare parity set (for x86 NaN check)
-	OPC     // compare parity clear (for x86 NaN check)
-	OSQRT   // sqrt(float64), on systems that have hw support
-	OGETG   // runtime.getg() (read g pointer)
-
-	OEND
-)
-
-// Nodes is a pointer to a slice of *Node.
-// For fields that are not used in most nodes, this is used instead of
-// a slice to save space.
-type Nodes struct{ slice *[]*Node }
-
-// Slice returns the entries in Nodes as a slice.
-// Changes to the slice entries (as in s[i] = n) will be reflected in
-// the Nodes.
-func (n Nodes) Slice() []*Node {
-	if n.slice == nil {
-		return nil
-	}
-	return *n.slice
-}
-
-// Len returns the number of entries in Nodes.
-func (n Nodes) Len() int {
-	if n.slice == nil {
-		return 0
-	}
-	return len(*n.slice)
-}
-
-// Index returns the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Index(i int) *Node {
-	return (*n.slice)[i]
-}
-
-// First returns the first element of Nodes (same as n.Index(0)).
-// It panics if n has no elements.
-func (n Nodes) First() *Node {
-	return (*n.slice)[0]
-}
-
-// Second returns the second element of Nodes (same as n.Index(1)).
-// It panics if n has fewer than two elements.
-func (n Nodes) Second() *Node {
-	return (*n.slice)[1]
-}
-
-// Set sets n to a slice.
-// This takes ownership of the slice.
-func (n *Nodes) Set(s []*Node) {
-	if len(s) == 0 {
-		n.slice = nil
-	} else {
-		// Copy s and take address of t rather than s to avoid
-		// allocation in the case where len(s) == 0 (which is
-		// over 3x more common, dynamically, for make.bash).
-		t := s
-		n.slice = &t
-	}
-}
-
-// Set1 sets n to a slice containing a single node.
-func (n *Nodes) Set1(node *Node) {
-	n.slice = &[]*Node{node}
-}
-
-// Set2 sets n to a slice containing two nodes.
-func (n *Nodes) Set2(n1, n2 *Node) {
-	n.slice = &[]*Node{n1, n2}
-}
-
-// MoveNodes sets n to the contents of n2, then clears n2.
-func (n *Nodes) MoveNodes(n2 *Nodes) {
-	n.slice = n2.slice
-	n2.slice = nil
-}
-
-// SetIndex sets the i'th element of Nodes to node.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) SetIndex(i int, node *Node) {
-	(*n.slice)[i] = node
-}
-
-// Addr returns the address of the i'th element of Nodes.
-// It panics if n does not have at least i+1 elements.
-func (n Nodes) Addr(i int) **Node {
-	return &(*n.slice)[i]
-}
-
-// Append appends entries to Nodes.
-// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Append(a ...*Node) {
-	if len(a) == 0 {
-		return
-	}
-	if n.slice == nil {
-		n.slice = &a
-	} else {
-		*n.slice = append(*n.slice, a...)
-	}
-}
-
-// Prepend prepends entries to Nodes.
-// If a slice is passed in, this will take ownership of it.
-func (n *Nodes) Prepend(a ...*Node) {
-	if len(a) == 0 {
-		return
-	}
-	if n.slice == nil {
-		n.slice = &a
-	} else {
-		*n.slice = append(a, *n.slice...)
-	}
-}
-
-// AppendNodes appends the contents of *n2 to n, then clears n2.
-func (n *Nodes) AppendNodes(n2 *Nodes) {
-	switch {
-	case n2.slice == nil:
-	case n.slice == nil:
-		n.slice = n2.slice
-	default:
-		*n.slice = append(*n.slice, *n2.slice...)
-	}
-	n2.slice = nil
-}
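
The Nodes helpers above exist to keep the Node struct small: a nil *[]*Node costs one machine word, whereas an embedded []*Node costs a full three-word slice header even when empty. A minimal standalone sketch of the same pointer-to-slice trick follows; the names nodeList and node are hypothetical illustrations, not compiler identifiers.

package main

import (
	"fmt"
	"unsafe"
)

type node struct{ op string }

// nodeList stores one pointer instead of a three-word slice header,
// so structs whose lists are usually empty stay smaller.
type nodeList struct{ slice *[]*node }

// Append adds entries, taking ownership of the argument slice when
// the list was previously empty (mirroring Nodes.Append above).
func (l *nodeList) Append(a ...*node) {
	if len(a) == 0 {
		return
	}
	if l.slice == nil {
		l.slice = &a
		return
	}
	*l.slice = append(*l.slice, a...)
}

// Len reports the number of entries, treating a nil slice as empty.
func (l nodeList) Len() int {
	if l.slice == nil {
		return 0
	}
	return len(*l.slice)
}

func main() {
	var l nodeList
	l.Append(&node{op: "ADD"}, &node{op: "SUB"})
	fmt.Println(l.Len())                                    // 2
	fmt.Println(unsafe.Sizeof(l), unsafe.Sizeof([]*node{})) // 8 24 on 64-bit
}
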
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/timings.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/timings.go
deleted file mode 100644
index 1affa1f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/timings.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/timings.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/timings.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"fmt"
-	"io"
-	"strings"
-	"time"
-)
-
-// Timings collects the execution times of labeled phases
-// which are added through a sequence of Start/Stop calls.
-// Events may be associated with each phase via AddEvent.
-type Timings struct {
-	list   []timestamp
-	events map[int][]*event // lazily allocated
-}
-
-type timestamp struct {
-	time  time.Time
-	label string
-	start bool
-}
-
-type event struct {
-	size int64  // count or amount of data processed (allocations, data size, lines, funcs, ...)
-	unit string // unit of size measure (count, MB, lines, funcs, ...)
-}
-
-func (t *Timings) append(labels []string, start bool) {
-	t.list = append(t.list, timestamp{time.Now(), strings.Join(labels, ":"), start})
-}
-
-// Start marks the beginning of a new phase and implicitly stops the previous phase.
-// The phase name is the colon-separated concatenation of the labels.
-func (t *Timings) Start(labels ...string) {
-	t.append(labels, true)
-}
-
-// Stop marks the end of a phase and implicitly starts a new phase.
-// The labels are added to the labels of the ended phase.
-func (t *Timings) Stop(labels ...string) {
-	t.append(labels, false)
-}
-
-// AddEvent associates an event, i.e., a count, or an amount of data,
-// with the most recently started or stopped phase; or the very first
-// phase if Start or Stop hasn't been called yet. The unit specifies
-// the unit of measurement (e.g., MB, lines, no. of funcs, etc.).
-func (t *Timings) AddEvent(size int64, unit string) {
-	m := t.events
-	if m == nil {
-		m = make(map[int][]*event)
-		t.events = m
-	}
-	i := len(t.list)
-	if i > 0 {
-		i--
-	}
-	m[i] = append(m[i], &event{size, unit})
-}
-
-// Write prints the phase times to w.
-// The prefix is printed at the start of each line.
-func (t *Timings) Write(w io.Writer, prefix string) {
-	if len(t.list) > 0 {
-		var lines lines
-
-		// group of phases with shared non-empty label prefix
-		var group struct {
-			label string        // label prefix
-			tot   time.Duration // accumulated phase time
-			size  int           // number of phases collected in group
-		}
-
-		// accumulated time between Stop/Start timestamps
-		var unaccounted time.Duration
-
-		// process Start/Stop timestamps
-		pt := &t.list[0] // previous timestamp
-		tot := t.list[len(t.list)-1].time.Sub(pt.time)
-		for i := 1; i < len(t.list); i++ {
-			qt := &t.list[i] // current timestamp
-			dt := qt.time.Sub(pt.time)
-
-			var label string
-			var events []*event
-			if pt.start {
-				// previous phase started
-				label = pt.label
-				events = t.events[i-1]
-				if qt.start {
-					// start implicitly ended previous phase; nothing to do
-				} else {
-					// stop ended previous phase; append stop labels, if any
-					if qt.label != "" {
-						label += ":" + qt.label
-					}
-					// events associated with stop replace prior events
-					if e := t.events[i]; e != nil {
-						events = e
-					}
-				}
-			} else {
-				// previous phase stopped
-				if qt.start {
-					// between a stopped and started phase; unaccounted time
-					unaccounted += dt
-				} else {
-					// previous stop implicitly started current phase
-					label = qt.label
-					events = t.events[i]
-				}
-			}
-			if label != "" {
-				// add phase to existing group, or start a new group
-				l := commonPrefix(group.label, label)
-				if group.size == 1 && l != "" || group.size > 1 && l == group.label {
-					// add to existing group
-					group.label = l
-					group.tot += dt
-					group.size++
-				} else {
-					// start a new group
-					if group.size > 1 {
-						lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
-					}
-					group.label = label
-					group.tot = dt
-					group.size = 1
-				}
-
-				// write phase
-				lines.add(prefix+label, 1, dt, tot, events)
-			}
-
-			pt = qt
-		}
-
-		if group.size > 1 {
-			lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
-		}
-
-		if unaccounted != 0 {
-			lines.add(prefix+"unaccounted", 1, unaccounted, tot, nil)
-		}
-
-		lines.add(prefix+"total", 1, tot, tot, nil)
-
-		lines.write(w)
-	}
-}
-
-func commonPrefix(a, b string) string {
-	i := 0
-	for i < len(a) && i < len(b) && a[i] == b[i] {
-		i++
-	}
-	return a[:i]
-}
-
-type lines [][]string
-
-func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) {
-	var line []string
-	add := func(format string, args ...interface{}) {
-		line = append(line, fmt.Sprintf(format, args...))
-	}
-
-	add("%s", label)
-	add("    %d", n)
-	add("    %d ns/op", dt)
-	add("    %.2f %%", float64(dt)/float64(tot)*100)
-
-	for _, e := range events {
-		add("    %d", e.size)
-		add(" %s", e.unit)
-		add("    %d", int64(float64(e.size)/dt.Seconds()+0.5))
-		add(" %s/s", e.unit)
-	}
-
-	*lines = append(*lines, line)
-}
-
-func (lines lines) write(w io.Writer) {
-	// determine column widths and contents
-	var widths []int
-	var number []bool
-	for _, line := range lines {
-		for i, col := range line {
-			if i < len(widths) {
-				if len(col) > widths[i] {
-					widths[i] = len(col)
-				}
-			} else {
-				widths = append(widths, len(col))
-				number = append(number, isnumber(col)) // first line determines column contents
-			}
-		}
-	}
-
-	// make column widths a multiple of align for more stable output
-	const align = 1 // set to a value > 1 to enable
-	if align > 1 {
-		for i, w := range widths {
-			w += align - 1
-			widths[i] = w - w%align
-		}
-	}
-
-	// print lines taking column widths and contents into account
-	for _, line := range lines {
-		for i, col := range line {
-			format := "%-*s"
-			if number[i] {
-				format = "%*s" // numbers are right-aligned
-			}
-			fmt.Fprintf(w, format, widths[i], col)
-		}
-		fmt.Fprintln(w)
-	}
-}
-
-func isnumber(s string) bool {
-	for _, ch := range s {
-		if ch <= ' ' {
-			continue // ignore leading whitespace
-		}
-		return '0' <= ch && ch <= '9' || ch == '.' || ch == '-' || ch == '+'
-	}
-	return false
-}
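
The Timings type deleted above drives compiler phase reporting through paired Start/Stop calls, where each Start implicitly ends the previous phase. A self-contained, simplified sketch of that calling convention follows; timer and phase are hypothetical names, not the compiler's implementation.

package main

import (
	"fmt"
	"time"
)

type phase struct {
	label string
	start time.Time
}

// timer accumulates per-label durations; Start both opens a phase and
// closes the previous one, so call sites need a single line per phase.
type timer struct {
	cur    *phase
	totals map[string]time.Duration
}

func newTimer() *timer { return &timer{totals: map[string]time.Duration{}} }

// Start ends the current phase, if any, and begins a new one.
func (t *timer) Start(label string) {
	now := time.Now()
	if t.cur != nil {
		t.totals[t.cur.label] += now.Sub(t.cur.start)
	}
	t.cur = &phase{label: label, start: now}
}

// Stop ends the current phase without starting a new one.
func (t *timer) Stop() {
	if t.cur != nil {
		t.totals[t.cur.label] += time.Since(t.cur.start)
		t.cur = nil
	}
}

func main() {
	t := newTimer()
	t.Start("parse")
	time.Sleep(10 * time.Millisecond)
	t.Start("typecheck") // implicitly stops "parse"
	time.Sleep(5 * time.Millisecond)
	t.Stop()
	for label, d := range t.totals {
		fmt.Printf("%-10s %v\n", label, d)
	}
}
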
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/trace.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/trace.go
deleted file mode 100644
index 711d1db..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/trace.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/trace.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/trace.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.7
-
-package gc
-
-import (
-	"os"
-	tracepkg "runtime/trace"
-)
-
-func init() {
-	traceHandler = traceHandlerGo17
-}
-
-func traceHandlerGo17(traceprofile string) {
-	f, err := os.Create(traceprofile)
-	if err != nil {
-		Fatalf("%v", err)
-	}
-	if err := tracepkg.Start(f); err != nil {
-		Fatalf("%v", err)
-	}
-	atExit(tracepkg.Stop)
-}
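
trace.go above uses a common optional-feature pattern: a package-level hook variable stays nil unless a build-constrained file's init registers an implementation (here, runtime/trace support for go1.7+). Below is a standalone sketch of the pattern with the build tag collapsed into one file so it runs anywhere; the identifiers are illustrative only.

package main

import "fmt"

// traceHandler is nil unless an implementation is registered in init().
// In the deleted code the registration lives in a file guarded by
// "// +build go1.7", so older bootstrap toolchains simply leave it nil.
var traceHandler func(path string)

func init() {
	traceHandler = func(path string) { fmt.Println("tracing to", path) }
}

func main() {
	if traceHandler != nil { // feature is optional; check before calling
		traceHandler("trace.out")
	}
}
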
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/type.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/type.go
deleted file mode 100644
index 6ed9348..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/type.go
+++ /dev/null
@@ -1,1253 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/type.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/type.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides methods that let us export a Type as an ../ssa:Type.
-// We don't export this package's Type directly because it would lead
-// to an import cycle with this package and ../ssa.
-// TODO: move Type to its own package, then we don't need to dance around import cycles.
-
-package gc
-
-import (
-	"bootstrap/cmd/compile/internal/ssa"
-	"fmt"
-)
-
-// EType describes a kind of type.
-type EType uint8
-
-const (
-	Txxx = iota
-
-	TINT8
-	TUINT8
-	TINT16
-	TUINT16
-	TINT32
-	TUINT32
-	TINT64
-	TUINT64
-	TINT
-	TUINT
-	TUINTPTR
-
-	TCOMPLEX64
-	TCOMPLEX128
-
-	TFLOAT32
-	TFLOAT64
-
-	TBOOL
-
-	TPTR32
-	TPTR64
-
-	TFUNC
-	TSLICE
-	TARRAY
-	TSTRUCT
-	TCHAN
-	TMAP
-	TINTER
-	TFORW
-	TANY
-	TSTRING
-	TUNSAFEPTR
-
-	// pseudo-types for literals
-	TIDEAL
-	TNIL
-	TBLANK
-
-	// pseudo-types for frame layout
-	TFUNCARGS
-	TCHANARGS
-	TINTERMETH
-
-	// pseudo-types for import/export
-	TDDDFIELD // wrapper: contained type is a ... field
-
-	NTYPE
-)
-
-// ChanDir is whether a channel can send, receive, or both.
-type ChanDir uint8
-
-func (c ChanDir) CanRecv() bool { return c&Crecv != 0 }
-func (c ChanDir) CanSend() bool { return c&Csend != 0 }
-
-const (
-	// types of channel
-	// must match ../../../../reflect/type.go:/ChanDir
-	Crecv ChanDir = 1 << 0
-	Csend ChanDir = 1 << 1
-	Cboth ChanDir = Crecv | Csend
-)
-
-// Types stores pointers to predeclared named types.
-//
-// It also stores pointers to several special types:
-//   - Types[TANY] is the placeholder "any" type recognized by substArgTypes.
-//   - Types[TBLANK] represents the blank variable's type.
-//   - Types[TIDEAL] represents untyped numeric constants.
-//   - Types[TNIL] represents the predeclared "nil" value's type.
-//   - Types[TUNSAFEPTR] is package unsafe's Pointer type.
-var Types [NTYPE]*Type
-
-var (
-	// Predeclared alias types. Kept separate for better error messages.
-	bytetype *Type
-	runetype *Type
-
-	// Predeclared error interface type.
-	errortype *Type
-
-	// Types to represent untyped string and boolean constants.
-	idealstring *Type
-	idealbool   *Type
-
-	// Types to represent untyped numeric constants.
-	// Note: Currently these are only used within the binary export
-	// data format. The rest of the compiler only uses Types[TIDEAL].
-	idealint     = typ(TIDEAL)
-	idealrune    = typ(TIDEAL)
-	idealfloat   = typ(TIDEAL)
-	idealcomplex = typ(TIDEAL)
-)
-
-// A Type represents a Go type.
-type Type struct {
-	// Extra contains extra etype-specific fields.
-	// As an optimization, those etype-specific structs which contain exactly
-	// one pointer-shaped field are stored as values rather than pointers when possible.
-	//
-	// TMAP: *MapType
-	// TFORW: *ForwardType
-	// TFUNC: *FuncType
-	// TINTERMETH: InterMethType
-	// TSTRUCT: *StructType
-	// TINTER: *InterType
-	// TDDDFIELD: DDDFieldType
-	// TFUNCARGS: FuncArgsType
-	// TCHANARGS: ChanArgsType
-	// TCHAN: *ChanType
-	// TPTR32, TPTR64: PtrType
-	// TARRAY: *ArrayType
-	// TSLICE: SliceType
-	Extra interface{}
-
-	// Width is the width of this Type in bytes.
-	Width int64
-
-	methods    Fields
-	allMethods Fields
-
-	nod  *Node // canonical OTYPE node
-	Orig *Type // original type (type literal or predefined type)
-
-	sliceOf *Type
-	ptrTo   *Type
-
-	Sym    *Sym  // symbol containing name, for named types
-	Vargen int32 // unique name for OTYPE/ONAME
-	Lineno int32 // line at which this type was declared, implicitly or explicitly
-
-	Etype      EType // kind of type
-	Noalg      bool  // suppress hash and eq algorithm generation
-	Trecur     uint8 // to detect loops
-	Local      bool  // created in this file
-	Deferwidth bool
-	Broke      bool  // broken type definition.
-	Align      uint8 // the required alignment of this type, in bytes
-	NotInHeap  bool  // type cannot be heap allocated
-}
-
-// MapType contains Type fields specific to maps.
-type MapType struct {
-	Key *Type // Key type
-	Val *Type // Val (elem) type
-
-	Bucket *Type // internal struct type representing a hash bucket
-	Hmap   *Type // internal struct type representing the Hmap (map header object)
-	Hiter  *Type // internal struct type representing hash iterator state
-}
-
-// MapType returns t's extra map-specific fields.
-func (t *Type) MapType() *MapType {
-	t.wantEtype(TMAP)
-	return t.Extra.(*MapType)
-}
-
-// ForwardType contains Type fields specific to forward types.
-type ForwardType struct {
-	Copyto      []*Node // where to copy the eventual value to
-	Embedlineno int32   // first use of this type as an embedded type
-}
-
-// ForwardType returns t's extra forward-type-specific fields.
-func (t *Type) ForwardType() *ForwardType {
-	t.wantEtype(TFORW)
-	return t.Extra.(*ForwardType)
-}
-
-// FuncType contains Type fields specific to func types.
-type FuncType struct {
-	Receiver *Type // function receiver
-	Results  *Type // function results
-	Params   *Type // function params
-
-	Nname *Node
-
-	// Argwid is the total width of the function receiver, params, and results.
-	// It gets calculated via a temporary TFUNCARGS type.
-	// Note that TFUNC's Width is Widthptr.
-	Argwid int64
-
-	Outnamed bool
-}
-
-// FuncType returns t's extra func-specific fields.
-func (t *Type) FuncType() *FuncType {
-	t.wantEtype(TFUNC)
-	return t.Extra.(*FuncType)
-}
-
-// InterMethType contains Type fields specific to interface method pseudo-types.
-type InterMethType struct {
-	Nname *Node
-}
-
-// StructType contains Type fields specific to struct types.
-type StructType struct {
-	fields Fields
-
-	// Maps have three associated internal structs (see struct MapType).
-	// Map links such structs back to their map type.
-	Map *Type
-
-	Funarg      Funarg // type of function arguments for arg struct
-	Haspointers uint8  // 0 unknown, 1 no, 2 yes
-}
-
-// Funarg records the kind of function argument.
-type Funarg uint8
-
-const (
-	FunargNone    Funarg = iota
-	FunargRcvr           // receiver
-	FunargParams         // input parameters
-	FunargResults        // output results
-)
-
-// StructType returns t's extra struct-specific fields.
-func (t *Type) StructType() *StructType {
-	t.wantEtype(TSTRUCT)
-	return t.Extra.(*StructType)
-}
-
-// InterType contains Type fields specific to interface types.
-type InterType struct {
-	fields Fields
-}
-
-// PtrType contains Type fields specific to pointer types.
-type PtrType struct {
-	Elem *Type // element type
-}
-
-// DDDFieldType contains Type fields specific to TDDDFIELD types.
-type DDDFieldType struct {
-	T *Type // reference to a slice type for ... args
-}
-
-// ChanArgsType contains Type fields specific to TCHANARGS types.
-type ChanArgsType struct {
-	T *Type // reference to a chan type whose elements need a width check
-}
-
-// FuncArgsType contains Type fields specific to TFUNCARGS types.
-type FuncArgsType struct {
-	T *Type // reference to a func type whose elements need a width check
-}
-
-// ChanType contains Type fields specific to channel types.
-type ChanType struct {
-	Elem *Type   // element type
-	Dir  ChanDir // channel direction
-}
-
-// ChanType returns t's extra channel-specific fields.
-func (t *Type) ChanType() *ChanType {
-	t.wantEtype(TCHAN)
-	return t.Extra.(*ChanType)
-}
-
-// ArrayType contains Type fields specific to array types.
-type ArrayType struct {
-	Elem        *Type // element type
-	Bound       int64 // number of elements; <0 if unknown yet
-	Haspointers uint8 // 0 unknown, 1 no, 2 yes
-}
-
-// SliceType contains Type fields specific to slice types.
-type SliceType struct {
-	Elem *Type // element type
-}
-
-// A Field represents a field in a struct or a method in an interface or
-// associated with a named type.
-type Field struct {
-	Nointerface bool
-	Embedded    uint8 // embedded field
-	Funarg      Funarg
-	Broke       bool // broken field definition
-	Isddd       bool // field is ... argument
-
-	Sym   *Sym
-	Nname *Node
-
-	Type *Type // field type
-
-	// Offset in bytes of this field or method within its enclosing struct
-	// or interface Type.
-	Offset int64
-
-	Note string // literal string annotation
-}
-
-// End returns the offset of the first byte immediately after this field.
-func (f *Field) End() int64 {
-	return f.Offset + f.Type.Width
-}
-
-// Fields is a pointer to a slice of *Field.
-// This saves space in Types that do not have fields or methods
-// compared to a simple slice of *Field.
-type Fields struct {
-	s *[]*Field
-}
-
-// Len returns the number of entries in f.
-func (f *Fields) Len() int {
-	if f.s == nil {
-		return 0
-	}
-	return len(*f.s)
-}
-
-// Slice returns the entries in f as a slice.
-// Changes to the slice entries will be reflected in f.
-func (f *Fields) Slice() []*Field {
-	if f.s == nil {
-		return nil
-	}
-	return *f.s
-}
-
-// Index returns the i'th element of Fields.
-// It panics if f does not have at least i+1 elements.
-func (f *Fields) Index(i int) *Field {
-	return (*f.s)[i]
-}
-
-// Set sets f to a slice.
-// This takes ownership of the slice.
-func (f *Fields) Set(s []*Field) {
-	if len(s) == 0 {
-		f.s = nil
-	} else {
-		// Copy s and take address of t rather than s to avoid
-		// allocation in the case where len(s) == 0.
-		t := s
-		f.s = &t
-	}
-}
-
-// Append appends entries to f.
-func (f *Fields) Append(s ...*Field) {
-	if f.s == nil {
-		f.s = new([]*Field)
-	}
-	*f.s = append(*f.s, s...)
-}
-
-// typ returns a new Type of the specified kind.
-func typ(et EType) *Type {
-	t := &Type{
-		Etype:  et,
-		Width:  BADWIDTH,
-		Lineno: lineno,
-	}
-	t.Orig = t
-	// TODO(josharian): lazily initialize some of these?
-	switch t.Etype {
-	case TMAP:
-		t.Extra = new(MapType)
-	case TFORW:
-		t.Extra = new(ForwardType)
-	case TFUNC:
-		t.Extra = new(FuncType)
-	case TINTERMETH:
-		t.Extra = InterMethType{}
-	case TSTRUCT:
-		t.Extra = new(StructType)
-	case TINTER:
-		t.Extra = new(InterType)
-	case TPTR32, TPTR64:
-		t.Extra = PtrType{}
-	case TCHANARGS:
-		t.Extra = ChanArgsType{}
-	case TFUNCARGS:
-		t.Extra = FuncArgsType{}
-	case TDDDFIELD:
-		t.Extra = DDDFieldType{}
-	case TCHAN:
-		t.Extra = new(ChanType)
-	}
-	return t
-}
-
-// typArray returns a new fixed-length array Type.
-func typArray(elem *Type, bound int64) *Type {
-	if bound < 0 {
-		Fatalf("typArray: invalid bound %v", bound)
-	}
-	t := typ(TARRAY)
-	t.Extra = &ArrayType{Elem: elem, Bound: bound}
-	t.NotInHeap = elem.NotInHeap
-	return t
-}
-
-// typSlice returns the slice Type with element type elem.
-func typSlice(elem *Type) *Type {
-	if t := elem.sliceOf; t != nil {
-		if t.Elem() != elem {
-			Fatalf("elem mismatch")
-		}
-		return t
-	}
-
-	t := typ(TSLICE)
-	t.Extra = SliceType{Elem: elem}
-	elem.sliceOf = t
-	return t
-}
-
-// typDDDArray returns a new [...]T array Type.
-func typDDDArray(elem *Type) *Type {
-	t := typ(TARRAY)
-	t.Extra = &ArrayType{Elem: elem, Bound: -1}
-	t.NotInHeap = elem.NotInHeap
-	return t
-}
-
-// typChan returns a new chan Type with direction dir.
-func typChan(elem *Type, dir ChanDir) *Type {
-	t := typ(TCHAN)
-	ct := t.ChanType()
-	ct.Elem = elem
-	ct.Dir = dir
-	return t
-}
-
-// typMap returns a new map Type with key type k and element (aka value) type v.
-func typMap(k, v *Type) *Type {
-	t := typ(TMAP)
-	mt := t.MapType()
-	mt.Key = k
-	mt.Val = v
-	return t
-}
-
-// typPtr returns the pointer type pointing to t.
-func typPtr(elem *Type) *Type {
-	if t := elem.ptrTo; t != nil {
-		if t.Elem() != elem {
-			Fatalf("elem mismatch")
-		}
-		return t
-	}
-
-	t := typ(Tptr)
-	t.Extra = PtrType{Elem: elem}
-	t.Width = int64(Widthptr)
-	t.Align = uint8(Widthptr)
-	elem.ptrTo = t
-	return t
-}
-
-// typDDDField returns a new TDDDFIELD type for slice type s.
-func typDDDField(s *Type) *Type {
-	t := typ(TDDDFIELD)
-	t.Extra = DDDFieldType{T: s}
-	return t
-}
-
-// typChanArgs returns a new TCHANARGS type for channel type c.
-func typChanArgs(c *Type) *Type {
-	t := typ(TCHANARGS)
-	t.Extra = ChanArgsType{T: c}
-	return t
-}
-
-// typFuncArgs returns a new TFUNCARGS type for func type f.
-func typFuncArgs(f *Type) *Type {
-	t := typ(TFUNCARGS)
-	t.Extra = FuncArgsType{T: f}
-	return t
-}
-
-func newField() *Field {
-	return &Field{
-		Offset: BADWIDTH,
-	}
-}
-
-// substArgTypes substitutes the given list of types for
-// successive occurrences of the "any" placeholder in the
-// type syntax expression n.Type.
-// The result of substArgTypes MUST be assigned back to old, e.g.
-// 	n.Left = substArgTypes(n.Left, t1, t2)
-func substArgTypes(old *Node, types ...*Type) *Node {
-	n := *old // make shallow copy
-
-	for _, t := range types {
-		dowidth(t)
-	}
-	n.Type = substAny(n.Type, &types)
-	if len(types) > 0 {
-		Fatalf("substArgTypes: too many argument types")
-	}
-	return &n
-}
-
-// substAny walks t, replacing instances of "any" with successive
-// elements removed from types.  It returns the substituted type.
-func substAny(t *Type, types *[]*Type) *Type {
-	if t == nil {
-		return nil
-	}
-
-	switch t.Etype {
-	default:
-		// Leave the type unchanged.
-
-	case TANY:
-		if len(*types) == 0 {
-			Fatalf("substArgTypes: not enough argument types")
-		}
-		t = (*types)[0]
-		*types = (*types)[1:]
-
-	case TPTR32, TPTR64:
-		elem := substAny(t.Elem(), types)
-		if elem != t.Elem() {
-			t = t.Copy()
-			t.Extra = PtrType{Elem: elem}
-		}
-
-	case TARRAY:
-		elem := substAny(t.Elem(), types)
-		if elem != t.Elem() {
-			t = t.Copy()
-			t.Extra.(*ArrayType).Elem = elem
-		}
-
-	case TSLICE:
-		elem := substAny(t.Elem(), types)
-		if elem != t.Elem() {
-			t = t.Copy()
-			t.Extra = SliceType{Elem: elem}
-		}
-
-	case TCHAN:
-		elem := substAny(t.Elem(), types)
-		if elem != t.Elem() {
-			t = t.Copy()
-			t.Extra.(*ChanType).Elem = elem
-		}
-
-	case TMAP:
-		key := substAny(t.Key(), types)
-		val := substAny(t.Val(), types)
-		if key != t.Key() || val != t.Val() {
-			t = t.Copy()
-			t.Extra.(*MapType).Key = key
-			t.Extra.(*MapType).Val = val
-		}
-
-	case TFUNC:
-		recvs := substAny(t.Recvs(), types)
-		params := substAny(t.Params(), types)
-		results := substAny(t.Results(), types)
-		if recvs != t.Recvs() || params != t.Params() || results != t.Results() {
-			t = t.Copy()
-			t.FuncType().Receiver = recvs
-			t.FuncType().Results = results
-			t.FuncType().Params = params
-		}
-
-	case TSTRUCT:
-		fields := t.FieldSlice()
-		var nfs []*Field
-		for i, f := range fields {
-			nft := substAny(f.Type, types)
-			if nft == f.Type {
-				continue
-			}
-			if nfs == nil {
-				nfs = append([]*Field(nil), fields...)
-			}
-			nfs[i] = f.Copy()
-			nfs[i].Type = nft
-		}
-		if nfs != nil {
-			t = t.Copy()
-			t.SetFields(nfs)
-		}
-	}
-
-	return t
-}
-
-// Copy returns a shallow copy of the Type.
-func (t *Type) Copy() *Type {
-	if t == nil {
-		return nil
-	}
-	nt := *t
-	// copy any *T Extra fields, to avoid aliasing
-	switch t.Etype {
-	case TMAP:
-		x := *t.Extra.(*MapType)
-		nt.Extra = &x
-	case TFORW:
-		x := *t.Extra.(*ForwardType)
-		nt.Extra = &x
-	case TFUNC:
-		x := *t.Extra.(*FuncType)
-		nt.Extra = &x
-	case TSTRUCT:
-		x := *t.Extra.(*StructType)
-		nt.Extra = &x
-	case TINTER:
-		x := *t.Extra.(*InterType)
-		nt.Extra = &x
-	case TCHAN:
-		x := *t.Extra.(*ChanType)
-		nt.Extra = &x
-	case TARRAY:
-		x := *t.Extra.(*ArrayType)
-		nt.Extra = &x
-	}
-	// TODO(mdempsky): Find out why this is necessary and explain.
-	if t.Orig == t {
-		nt.Orig = &nt
-	}
-	return &nt
-}
-
-func (f *Field) Copy() *Field {
-	nf := *f
-	return &nf
-}
-
-// Iter provides an abstraction for iterating across struct fields and
-// interface methods.
-type Iter struct {
-	s []*Field
-}
-
-// iterFields returns the first field or method in struct or interface type t
-// and an Iter value to continue iterating across the rest.
-func iterFields(t *Type) (*Field, Iter) {
-	return t.Fields().Iter()
-}
-
-// Iter returns the first field in fs and an Iter value to continue iterating
-// across its successor fields.
-// Deprecated: New code should use Slice instead.
-func (fs *Fields) Iter() (*Field, Iter) {
-	i := Iter{s: fs.Slice()}
-	f := i.Next()
-	return f, i
-}
-
-// Next returns the next field or method, if any.
-func (i *Iter) Next() *Field {
-	if len(i.s) == 0 {
-		return nil
-	}
-	f := i.s[0]
-	i.s = i.s[1:]
-	return f
-}
-
-func (t *Type) wantEtype(et EType) {
-	if t.Etype != et {
-		Fatalf("want %v, but have %v", et, t)
-	}
-}
-
-func (t *Type) Recvs() *Type   { return t.FuncType().Receiver }
-func (t *Type) Params() *Type  { return t.FuncType().Params }
-func (t *Type) Results() *Type { return t.FuncType().Results }
-
-// Recv returns the receiver of function type t, if any.
-func (t *Type) Recv() *Field {
-	s := t.Recvs()
-	if s.NumFields() == 0 {
-		return nil
-	}
-	return s.Field(0)
-}
-
-// recvsParamsResults stores the accessor functions for a function Type's
-// receiver, parameters, and result parameters, in that order.
-// It can be used to iterate over all of a function's parameter lists.
-var recvsParamsResults = [3]func(*Type) *Type{
-	(*Type).Recvs, (*Type).Params, (*Type).Results,
-}
-
-// paramsResults is like recvsParamsResults, but omits receiver parameters.
-var paramsResults = [2]func(*Type) *Type{
-	(*Type).Params, (*Type).Results,
-}
-
-// Key returns the key type of map type t.
-func (t *Type) Key() *Type {
-	t.wantEtype(TMAP)
-	return t.Extra.(*MapType).Key
-}
-
-// Val returns the value type of map type t.
-func (t *Type) Val() *Type {
-	t.wantEtype(TMAP)
-	return t.Extra.(*MapType).Val
-}
-
-// Elem returns the type of elements of t.
-// Usable with pointers, channels, arrays, and slices.
-func (t *Type) Elem() *Type {
-	switch t.Etype {
-	case TPTR32, TPTR64:
-		return t.Extra.(PtrType).Elem
-	case TARRAY:
-		return t.Extra.(*ArrayType).Elem
-	case TSLICE:
-		return t.Extra.(SliceType).Elem
-	case TCHAN:
-		return t.Extra.(*ChanType).Elem
-	}
-	Fatalf("Type.Elem %s", t.Etype)
-	return nil
-}
-
-// DDDField returns the slice ... type for TDDDFIELD type t.
-func (t *Type) DDDField() *Type {
-	t.wantEtype(TDDDFIELD)
-	return t.Extra.(DDDFieldType).T
-}
-
-// ChanArgs returns the channel type for TCHANARGS type t.
-func (t *Type) ChanArgs() *Type {
-	t.wantEtype(TCHANARGS)
-	return t.Extra.(ChanArgsType).T
-}
-
-// FuncArgs returns the func type for TFUNCARGS type t.
-func (t *Type) FuncArgs() *Type {
-	t.wantEtype(TFUNCARGS)
-	return t.Extra.(FuncArgsType).T
-}
-
-// Nname returns the associated function's nname.
-func (t *Type) Nname() *Node {
-	switch t.Etype {
-	case TFUNC:
-		return t.Extra.(*FuncType).Nname
-	case TINTERMETH:
-		return t.Extra.(InterMethType).Nname
-	}
-	Fatalf("Type.Nname %v %v", t.Etype, t)
-	return nil
-}
-
-// SetNname sets the associated function's nname.
-func (t *Type) SetNname(n *Node) {
-	switch t.Etype {
-	case TFUNC:
-		t.Extra.(*FuncType).Nname = n
-	case TINTERMETH:
-		t.Extra = InterMethType{Nname: n}
-	default:
-		Fatalf("Type.SetNname %v %v", t.Etype, t)
-	}
-}
-
-// IsFuncArgStruct reports whether t is a struct representing function parameters.
-func (t *Type) IsFuncArgStruct() bool {
-	return t.Etype == TSTRUCT && t.Extra.(*StructType).Funarg != FunargNone
-}
-
-func (t *Type) Methods() *Fields {
-	// TODO(mdempsky): Validate t?
-	return &t.methods
-}
-
-func (t *Type) AllMethods() *Fields {
-	// TODO(mdempsky): Validate t?
-	return &t.allMethods
-}
-
-func (t *Type) Fields() *Fields {
-	switch t.Etype {
-	case TSTRUCT:
-		return &t.Extra.(*StructType).fields
-	case TINTER:
-		return &t.Extra.(*InterType).fields
-	}
-	Fatalf("Fields: type %v does not have fields", t)
-	return nil
-}
-
-// Field returns the i'th field/method of struct/interface type t.
-func (t *Type) Field(i int) *Field {
-	return t.Fields().Slice()[i]
-}
-
-// FieldSlice returns a slice containing all fields/methods of
-// struct/interface type t.
-func (t *Type) FieldSlice() []*Field {
-	return t.Fields().Slice()
-}
-
-// SetFields sets struct/interface type t's fields/methods to fields.
-func (t *Type) SetFields(fields []*Field) {
-	for _, f := range fields {
-		// If type T contains a field F with a go:notinheap
-		// type, then T must also be go:notinheap. Otherwise,
-		// you could heap allocate T and then get a pointer F,
-		// which would be a heap pointer to a go:notinheap
-		// type.
-		if f.Type != nil && f.Type.NotInHeap {
-			t.NotInHeap = true
-			break
-		}
-	}
-	t.Fields().Set(fields)
-}
-
-func (t *Type) isDDDArray() bool {
-	if t.Etype != TARRAY {
-		return false
-	}
-	return t.Extra.(*ArrayType).Bound < 0
-}
-
-// ArgWidth returns the total aligned argument size for a function.
-// It includes the receiver, parameters, and results.
-func (t *Type) ArgWidth() int64 {
-	t.wantEtype(TFUNC)
-	return t.Extra.(*FuncType).Argwid
-}
-
-func (t *Type) Size() int64 {
-	dowidth(t)
-	return t.Width
-}
-
-func (t *Type) Alignment() int64 {
-	dowidth(t)
-	return int64(t.Align)
-}
-
-func (t *Type) SimpleString() string {
-	return t.Etype.String()
-}
-
-// Compare compares types for purposes of the SSA back
-// end, returning an ssa.Cmp (one of CMPlt, CMPeq, CMPgt).
-// The answers are correct for an optimizer
-// or code generator, but not necessarily typechecking.
-// The order chosen is arbitrary, only consistency and division
-// into equivalence classes (Types that compare CMPeq) matters.
-func (t *Type) Compare(u ssa.Type) ssa.Cmp {
-	x, ok := u.(*Type)
-	// ssa.CompilerType is smaller than gc.Type
-	// bare pointer equality is easy.
-	if !ok {
-		return ssa.CMPgt
-	}
-	if x == t {
-		return ssa.CMPeq
-	}
-	return t.cmp(x)
-}
-
-func cmpForNe(x bool) ssa.Cmp {
-	if x {
-		return ssa.CMPlt
-	}
-	return ssa.CMPgt
-}
-
-func (r *Sym) cmpsym(s *Sym) ssa.Cmp {
-	if r == s {
-		return ssa.CMPeq
-	}
-	if r == nil {
-		return ssa.CMPlt
-	}
-	if s == nil {
-		return ssa.CMPgt
-	}
-	// Fast sort, not pretty sort
-	if len(r.Name) != len(s.Name) {
-		return cmpForNe(len(r.Name) < len(s.Name))
-	}
-	if r.Pkg != s.Pkg {
-		if len(r.Pkg.Prefix) != len(s.Pkg.Prefix) {
-			return cmpForNe(len(r.Pkg.Prefix) < len(s.Pkg.Prefix))
-		}
-		if r.Pkg.Prefix != s.Pkg.Prefix {
-			return cmpForNe(r.Pkg.Prefix < s.Pkg.Prefix)
-		}
-	}
-	if r.Name != s.Name {
-		return cmpForNe(r.Name < s.Name)
-	}
-	return ssa.CMPeq
-}
-
-// cmp compares two *Types t and x, returning ssa.CMPlt,
-// ssa.CMPeq, ssa.CMPgt as t<x, t==x, t>x, for an arbitrary
-// and optimizer-centric notion of comparison.
-func (t *Type) cmp(x *Type) ssa.Cmp {
-	// This follows the structure of eqtype in subr.go
-	// with two exceptions.
-	// 1. Symbols are compared more carefully because a <,=,> result is desired.
-	// 2. Maps are treated specially to avoid endless recursion -- maps
-	//    contain an internal data type not expressible in Go source code.
-	if t == x {
-		return ssa.CMPeq
-	}
-	if t == nil {
-		return ssa.CMPlt
-	}
-	if x == nil {
-		return ssa.CMPgt
-	}
-
-	if t.Etype != x.Etype {
-		return cmpForNe(t.Etype < x.Etype)
-	}
-
-	if t.Sym != nil || x.Sym != nil {
-		// Special case: we keep byte and uint8 separate
-		// for error messages. Treat them as equal.
-		switch t.Etype {
-		case TUINT8:
-			if (t == Types[TUINT8] || t == bytetype) && (x == Types[TUINT8] || x == bytetype) {
-				return ssa.CMPeq
-			}
-
-		case TINT32:
-			if (t == Types[runetype.Etype] || t == runetype) && (x == Types[runetype.Etype] || x == runetype) {
-				return ssa.CMPeq
-			}
-		}
-	}
-
-	if c := t.Sym.cmpsym(x.Sym); c != ssa.CMPeq {
-		return c
-	}
-
-	if x.Sym != nil {
-		// Syms non-nil, if vargens match then equal.
-		if t.Vargen != x.Vargen {
-			return cmpForNe(t.Vargen < x.Vargen)
-		}
-		return ssa.CMPeq
-	}
-	// both syms nil, look at structure below.
-
-	switch t.Etype {
-	case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
-		TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
-		return ssa.CMPeq
-	}
-
-	switch t.Etype {
-	case TMAP:
-		if c := t.Key().cmp(x.Key()); c != ssa.CMPeq {
-			return c
-		}
-		return t.Val().cmp(x.Val())
-
-	case TPTR32, TPTR64, TSLICE:
-		// No special cases for these, they are handled
-		// by the general code after the switch.
-
-	case TSTRUCT:
-		if t.StructType().Map == nil {
-			if x.StructType().Map != nil {
-				return ssa.CMPlt // nil < non-nil
-			}
-			// both Maps nil; continue to the fallthrough below
-		} else if x.StructType().Map == nil {
-			return ssa.CMPgt // nil > non-nil
-		} else if t.StructType().Map.MapType().Bucket == t {
-			// Both have non-nil Map
-			// Special case for Maps which include a recursive type where the recursion is not broken with a named type
-			if x.StructType().Map.MapType().Bucket != x {
-				return ssa.CMPlt // bucket maps are least
-			}
-			return t.StructType().Map.cmp(x.StructType().Map)
-		} else if x.StructType().Map.MapType().Bucket == x {
-			return ssa.CMPgt // bucket maps are least
-		} // If t != t.Map.Bucket, fall through to general case
-
-		fallthrough
-	case TINTER:
-		t1, ti := iterFields(t)
-		x1, xi := iterFields(x)
-		for ; t1 != nil && x1 != nil; t1, x1 = ti.Next(), xi.Next() {
-			if t1.Embedded != x1.Embedded {
-				return cmpForNe(t1.Embedded < x1.Embedded)
-			}
-			if t1.Note != x1.Note {
-				return cmpForNe(t1.Note < x1.Note)
-			}
-			if c := t1.Sym.cmpsym(x1.Sym); c != ssa.CMPeq {
-				return c
-			}
-			if c := t1.Type.cmp(x1.Type); c != ssa.CMPeq {
-				return c
-			}
-		}
-		if t1 != x1 {
-			return cmpForNe(t1 == nil)
-		}
-		return ssa.CMPeq
-
-	case TFUNC:
-		for _, f := range recvsParamsResults {
-			// Loop over fields in structs, ignoring argument names.
-			ta, ia := iterFields(f(t))
-			tb, ib := iterFields(f(x))
-			for ; ta != nil && tb != nil; ta, tb = ia.Next(), ib.Next() {
-				if ta.Isddd != tb.Isddd {
-					return cmpForNe(!ta.Isddd)
-				}
-				if c := ta.Type.cmp(tb.Type); c != ssa.CMPeq {
-					return c
-				}
-			}
-			if ta != tb {
-				return cmpForNe(ta == nil)
-			}
-		}
-		return ssa.CMPeq
-
-	case TARRAY:
-		if t.NumElem() != x.NumElem() {
-			return cmpForNe(t.NumElem() < x.NumElem())
-		}
-
-	case TCHAN:
-		if t.ChanDir() != x.ChanDir() {
-			return cmpForNe(t.ChanDir() < x.ChanDir())
-		}
-
-	default:
-		e := fmt.Sprintf("Do not know how to compare %v with %v", t, x)
-		panic(e)
-	}
-
-	// Common element type comparison for TARRAY, TCHAN, TPTR32, TPTR64, and TSLICE.
-	return t.Elem().cmp(x.Elem())
-}
-
-// IsKind reports whether t is a Type of the specified kind.
-func (t *Type) IsKind(et EType) bool {
-	return t != nil && t.Etype == et
-}
-
-func (t *Type) IsBoolean() bool {
-	return t.Etype == TBOOL
-}
-
-var unsignedEType = [...]EType{
-	TINT8:    TUINT8,
-	TUINT8:   TUINT8,
-	TINT16:   TUINT16,
-	TUINT16:  TUINT16,
-	TINT32:   TUINT32,
-	TUINT32:  TUINT32,
-	TINT64:   TUINT64,
-	TUINT64:  TUINT64,
-	TINT:     TUINT,
-	TUINT:    TUINT,
-	TUINTPTR: TUINTPTR,
-}
-
-// toUnsigned returns the unsigned equivalent of integer type t.
-func (t *Type) toUnsigned() *Type {
-	if !t.IsInteger() {
-		Fatalf("unsignedType(%v)", t)
-	}
-	return Types[unsignedEType[t.Etype]]
-}
-
-func (t *Type) IsInteger() bool {
-	switch t.Etype {
-	case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
-		return true
-	}
-	return false
-}
-
-func (t *Type) IsSigned() bool {
-	switch t.Etype {
-	case TINT8, TINT16, TINT32, TINT64, TINT:
-		return true
-	}
-	return false
-}
-
-func (t *Type) IsFloat() bool {
-	return t.Etype == TFLOAT32 || t.Etype == TFLOAT64
-}
-
-func (t *Type) IsComplex() bool {
-	return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128
-}
-
-// IsPtr reports whether t is a regular Go pointer type.
-// This does not include unsafe.Pointer.
-func (t *Type) IsPtr() bool {
-	return t.Etype == TPTR32 || t.Etype == TPTR64
-}
-
-// IsUnsafePtr reports whether t is an unsafe pointer.
-func (t *Type) IsUnsafePtr() bool {
-	return t.Etype == TUNSAFEPTR
-}
-
-// IsPtrShaped reports whether t is represented by a single machine pointer.
-// In addition to regular Go pointer types, this includes map, channel, and
-// function types and unsafe.Pointer. It does not include array or struct types
-// that consist of a single pointer shaped type.
-// TODO(mdempsky): Should it? See golang.org/issue/15028.
-func (t *Type) IsPtrShaped() bool {
-	return t.Etype == TPTR32 || t.Etype == TPTR64 || t.Etype == TUNSAFEPTR ||
-		t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
-}
-
-func (t *Type) IsString() bool {
-	return t.Etype == TSTRING
-}
-
-func (t *Type) IsMap() bool {
-	return t.Etype == TMAP
-}
-
-func (t *Type) IsChan() bool {
-	return t.Etype == TCHAN
-}
-
-func (t *Type) IsSlice() bool {
-	return t.Etype == TSLICE
-}
-
-func (t *Type) IsArray() bool {
-	return t.Etype == TARRAY
-}
-
-func (t *Type) IsStruct() bool {
-	return t.Etype == TSTRUCT
-}
-
-func (t *Type) IsInterface() bool {
-	return t.Etype == TINTER
-}
-
-// IsEmptyInterface reports whether t is an empty interface type.
-func (t *Type) IsEmptyInterface() bool {
-	return t.IsInterface() && t.NumFields() == 0
-}
-
-func (t *Type) ElemType() ssa.Type {
-	// TODO(josharian): If Type ever moves to a shared
-	// internal package, remove this silly wrapper.
-	return t.Elem()
-}
-func (t *Type) PtrTo() ssa.Type {
-	return ptrto(t)
-}
-
-func (t *Type) NumFields() int {
-	return t.Fields().Len()
-}
-func (t *Type) FieldType(i int) ssa.Type {
-	return t.Field(i).Type
-}
-func (t *Type) FieldOff(i int) int64 {
-	return t.Field(i).Offset
-}
-func (t *Type) FieldName(i int) string {
-	return t.Field(i).Sym.Name
-}
-
-func (t *Type) NumElem() int64 {
-	t.wantEtype(TARRAY)
-	at := t.Extra.(*ArrayType)
-	if at.Bound < 0 {
-		Fatalf("NumElem array %v does not have bound yet", t)
-	}
-	return at.Bound
-}
-
-// SetNumElem sets the number of elements in an array type.
-// The only allowed use is on array types created with typDDDArray.
-// For other uses, create a new array with typArray instead.
-func (t *Type) SetNumElem(n int64) {
-	t.wantEtype(TARRAY)
-	at := t.Extra.(*ArrayType)
-	if at.Bound >= 0 {
-		Fatalf("SetNumElem array %v already has bound %d", t, at.Bound)
-	}
-	at.Bound = n
-}
-
-// ChanDir returns the direction of a channel type t.
-// The direction will be one of Crecv, Csend, or Cboth.
-func (t *Type) ChanDir() ChanDir {
-	t.wantEtype(TCHAN)
-	return t.Extra.(*ChanType).Dir
-}
-
-func (t *Type) IsMemory() bool { return false }
-func (t *Type) IsFlags() bool  { return false }
-func (t *Type) IsVoid() bool   { return false }
-func (t *Type) IsTuple() bool  { return false }
-
-// IsUntyped reports whether t is an untyped type.
-func (t *Type) IsUntyped() bool {
-	if t == nil {
-		return false
-	}
-	if t == idealstring || t == idealbool {
-		return true
-	}
-	switch t.Etype {
-	case TNIL, TIDEAL:
-		return true
-	}
-	return false
-}
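
Two patterns in the deleted type.go are worth noting: etype-specific data hangs off the single Extra field, and derived types are memoized on their element type (typSlice and typPtr return the cached sliceOf/ptrTo when present, so repeated requests yield pointer-identical types). A simplified, self-contained sketch of the memoization follows; the typ struct and kind constants are hypothetical stand-ins, not the compiler's types.

package main

import "fmt"

type kind int

const (
	kindInt kind = iota
	kindSlice
	kindPtr
)

type typ struct {
	kind    kind
	elem    *typ
	sliceOf *typ // cached []T, built on first request
	ptrTo   *typ // cached *T, built on first request
}

// sliceOf returns the slice type for elem, creating and caching it once.
func sliceOf(elem *typ) *typ {
	if elem.sliceOf == nil {
		elem.sliceOf = &typ{kind: kindSlice, elem: elem}
	}
	return elem.sliceOf
}

// ptrTo returns the pointer type for elem, creating and caching it once.
func ptrTo(elem *typ) *typ {
	if elem.ptrTo == nil {
		elem.ptrTo = &typ{kind: kindPtr, elem: elem}
	}
	return elem.ptrTo
}

func main() {
	intT := &typ{kind: kindInt}
	fmt.Println(sliceOf(intT) == sliceOf(intT)) // true: same cached *typ
	fmt.Println(ptrTo(intT) == ptrTo(intT))     // true
}
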
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/typecheck.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/typecheck.go
deleted file mode 100644
index 3349c7b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/typecheck.go
+++ /dev/null
@@ -1,3955 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/typecheck.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/typecheck.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"math"
-	"strings"
-)
-
-const (
-	Etop      = 1 << iota // evaluated at statement level
-	Erv                   // evaluated in value context
-	Etype                 // evaluated in type context
-	Ecall                 // call-only expressions are ok
-	Efnstruct             // multivalue function returns are ok
-	Easgn                 // assigning to expression
-	Ecomplit              // type in composite literal
-)
-
-// type check the whole tree of an expression.
-// calculates expression types.
-// evaluates compile time constants.
-// marks variables that escape the local frame.
-// rewrites n->op to be more specific in some cases.
-var typecheckdefstack []*Node
-
-// resolve ONONAME to definition, if any.
-func resolve(n *Node) *Node {
-	if n != nil && n.Op == ONONAME && n.Sym != nil {
-		r := n.Sym.Def
-		if r != nil {
-			if r.Op != OIOTA {
-				n = r
-			} else if n.Iota() >= 0 {
-				n = nodintconst(n.Iota())
-			}
-		}
-	}
-
-	return n
-}
-
-func typecheckslice(l []*Node, top int) {
-	for i := range l {
-		l[i] = typecheck(l[i], top)
-	}
-}
-
-var _typekind = []string{
-	TINT:        "int",
-	TUINT:       "uint",
-	TINT8:       "int8",
-	TUINT8:      "uint8",
-	TINT16:      "int16",
-	TUINT16:     "uint16",
-	TINT32:      "int32",
-	TUINT32:     "uint32",
-	TINT64:      "int64",
-	TUINT64:     "uint64",
-	TUINTPTR:    "uintptr",
-	TCOMPLEX64:  "complex64",
-	TCOMPLEX128: "complex128",
-	TFLOAT32:    "float32",
-	TFLOAT64:    "float64",
-	TBOOL:       "bool",
-	TSTRING:     "string",
-	TPTR32:      "pointer",
-	TPTR64:      "pointer",
-	TUNSAFEPTR:  "unsafe.Pointer",
-	TSTRUCT:     "struct",
-	TINTER:      "interface",
-	TCHAN:       "chan",
-	TMAP:        "map",
-	TARRAY:      "array",
-	TSLICE:      "slice",
-	TFUNC:       "func",
-	TNIL:        "nil",
-	TIDEAL:      "untyped number",
-}
-
-func typekind(t *Type) string {
-	if t.IsSlice() {
-		return "slice"
-	}
-	et := t.Etype
-	if int(et) < len(_typekind) {
-		s := _typekind[et]
-		if s != "" {
-			return s
-		}
-	}
-	return fmt.Sprintf("etype=%d", et)
-}
-
-// sprint_depchain prints a dependency chain of nodes into fmt.
-// It is used by typecheck in the case of OLITERAL nodes
-// to print constant definition loops.
-func sprint_depchain(fmt_ *string, stack []*Node, cur *Node, first *Node) {
-	for i := len(stack) - 1; i >= 0; i-- {
-		if n := stack[i]; n.Op == cur.Op {
-			if n != first {
-				sprint_depchain(fmt_, stack[:i], n, first)
-			}
-			*fmt_ += fmt.Sprintf("\n\t%v: %v uses %v", n.Line(), n, cur)
-			return
-		}
-	}
-}
-
-var typecheck_tcstack []*Node
-
-// typecheck type checks node n.
-// The result of typecheck MUST be assigned back to n, e.g.
-// 	n.Left = typecheck(n.Left, top)
-func typecheck(n *Node, top int) *Node {
-	// cannot type check until all the source has been parsed
-	if !typecheckok {
-		Fatalf("early typecheck")
-	}
-
-	if n == nil {
-		return nil
-	}
-
-	lno := setlineno(n)
-
-	// Skip over parens.
-	for n.Op == OPAREN {
-		n = n.Left
-	}
-
-	// Resolve definition of name and value of iota lazily.
-	n = resolve(n)
-
-	// Skip typecheck if already done.
-	// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
-	if n.Typecheck == 1 {
-		switch n.Op {
-		case ONAME, OTYPE, OLITERAL, OPACK:
-			break
-
-		default:
-			lineno = lno
-			return n
-		}
-	}
-
-	if n.Typecheck == 2 {
-		// Typechecking loop. Try printing a meaningful message,
-		// otherwise a stack trace of typechecking.
-		var fmt_ string
-		switch n.Op {
-		// We can already diagnose variables used as types.
-		case ONAME:
-			if top&(Erv|Etype) == Etype {
-				yyerror("%v is not a type", n)
-			}
-
-		case OLITERAL:
-			if top&(Erv|Etype) == Etype {
-				yyerror("%v is not a type", n)
-				break
-			}
-			sprint_depchain(&fmt_, typecheck_tcstack, n, n)
-			yyerrorl(n.Lineno, "constant definition loop%s", fmt_)
-		}
-
-		if nsavederrors+nerrors == 0 {
-			fmt_ = ""
-			for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
-				x := typecheck_tcstack[i]
-				fmt_ += fmt.Sprintf("\n\t%v %v", x.Line(), x)
-			}
-			yyerror("typechecking loop involving %v%s", n, fmt_)
-		}
-
-		lineno = lno
-		return n
-	}
-
-	n.Typecheck = 2
-
-	typecheck_tcstack = append(typecheck_tcstack, n)
-	n = typecheck1(n, top)
-
-	n.Typecheck = 1
-
-	last := len(typecheck_tcstack) - 1
-	typecheck_tcstack[last] = nil
-	typecheck_tcstack = typecheck_tcstack[:last]
-
-	lineno = lno
-	return n
-}
-
-// does n contain a call or receive operation?
-func callrecv(n *Node) bool {
-	if n == nil {
-		return false
-	}
-
-	switch n.Op {
-	case OCALL,
-		OCALLMETH,
-		OCALLINTER,
-		OCALLFUNC,
-		ORECV,
-		OCAP,
-		OLEN,
-		OCOPY,
-		ONEW,
-		OAPPEND,
-		ODELETE:
-		return true
-	}
-
-	return callrecv(n.Left) || callrecv(n.Right) || callrecvlist(n.Ninit) || callrecvlist(n.Nbody) || callrecvlist(n.List) || callrecvlist(n.Rlist)
-}
-
-func callrecvlist(l Nodes) bool {
-	for _, n := range l.Slice() {
-		if callrecv(n) {
-			return true
-		}
-	}
-	return false
-}
-
-// indexlit implements typechecking of untyped values as
-// array/slice indexes. It is equivalent to defaultlit
-// except for constants of numerical kind, which are acceptable
-// whenever they can be represented by a value of type int.
-// The result of indexlit MUST be assigned back to n, e.g.
-// 	n.Left = indexlit(n.Left)
-func indexlit(n *Node) *Node {
-	if n == nil || !n.Type.IsUntyped() {
-		return n
-	}
-	switch consttype(n) {
-	case CTINT, CTRUNE, CTFLT, CTCPLX:
-		n = defaultlit(n, Types[TINT])
-	}
-
-	n = defaultlit(n, nil)
-	return n
-}
-
-// The result of typecheck1 MUST be assigned back to n, e.g.
-// 	n.Left = typecheck1(n.Left, top)
-func typecheck1(n *Node, top int) *Node {
-	switch n.Op {
-	case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER:
-		// n.Sym is a field/method name, not a variable.
-	default:
-		if n.Sym != nil {
-			if n.Op == ONAME && n.Etype != 0 && top&Ecall == 0 {
-				yyerror("use of builtin %v not in function call", n.Sym)
-				n.Type = nil
-				return n
-			}
-
-			typecheckdef(n)
-			if n.Op == ONONAME {
-				n.Type = nil
-				return n
-			}
-		}
-	}
-
-	ok := 0
-OpSwitch:
-	switch n.Op {
-	// until typecheck is complete, do nothing.
-	default:
-		Dump("typecheck", n)
-
-		Fatalf("typecheck %v", n.Op)
-
-	// names
-	case OLITERAL:
-		ok |= Erv
-
-		if n.Type == nil && n.Val().Ctype() == CTSTR {
-			n.Type = idealstring
-		}
-		break OpSwitch
-
-	case ONONAME:
-		ok |= Erv
-		break OpSwitch
-
-	case ONAME:
-		if n.Name.Decldepth == 0 {
-			n.Name.Decldepth = decldepth
-		}
-		if n.Etype != 0 {
-			ok |= Ecall
-			break OpSwitch
-		}
-
-		if top&Easgn == 0 {
-			// not a write to the variable
-			if isblank(n) {
-				yyerror("cannot use _ as value")
-				n.Type = nil
-				return n
-			}
-
-			n.Used = true
-		}
-
-		ok |= Erv
-		break OpSwitch
-
-	case OPACK:
-		yyerror("use of package %v without selector", n.Sym)
-		n.Type = nil
-		return n
-
-	case ODDD:
-		break
-
-	// types (OIND is with exprs)
-	case OTYPE:
-		ok |= Etype
-
-		if n.Type == nil {
-			return n
-		}
-
-	case OTARRAY:
-		ok |= Etype
-		r := typecheck(n.Right, Etype)
-		if r.Type == nil {
-			n.Type = nil
-			return n
-		}
-
-		var t *Type
-		if n.Left == nil {
-			t = typSlice(r.Type)
-		} else if n.Left.Op == ODDD {
-			if top&Ecomplit == 0 {
-				if !n.Diag {
-					n.Diag = true
-					yyerror("use of [...] array outside of array literal")
-				}
-				n.Type = nil
-				return n
-			}
-			t = typDDDArray(r.Type)
-		} else {
-			n.Left = indexlit(typecheck(n.Left, Erv))
-			l := n.Left
-			if consttype(l) != CTINT {
-				if l.Type != nil && l.Type.IsInteger() && l.Op != OLITERAL {
-					yyerror("non-constant array bound %v", l)
-				} else {
-					yyerror("invalid array bound %v", l)
-				}
-				n.Type = nil
-				return n
-			}
-
-			v := l.Val()
-			if doesoverflow(v, Types[TINT]) {
-				yyerror("array bound is too large")
-				n.Type = nil
-				return n
-			}
-
-			bound := v.U.(*Mpint).Int64()
-			if bound < 0 {
-				yyerror("array bound must be non-negative")
-				n.Type = nil
-				return n
-			}
-			t = typArray(r.Type, bound)
-		}
-
-		n.Op = OTYPE
-		n.Type = t
-		n.Left = nil
-		n.Right = nil
-		if !t.isDDDArray() {
-			checkwidth(t)
-		}
-
-	case OTMAP:
-		ok |= Etype
-		n.Left = typecheck(n.Left, Etype)
-		n.Right = typecheck(n.Right, Etype)
-		l := n.Left
-		r := n.Right
-		if l.Type == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Type.NotInHeap {
-			yyerror("go:notinheap map key not allowed")
-		}
-		if r.Type.NotInHeap {
-			yyerror("go:notinheap map value not allowed")
-		}
-		n.Op = OTYPE
-		n.Type = typMap(l.Type, r.Type)
-
-		// map key validation
-		alg, bad := algtype1(l.Type)
-		if alg == ANOEQ {
-			if bad.Etype == TFORW {
-				// queue check for map until all the types are done settling.
-				mapqueue = append(mapqueue, mapqueueval{l, n.Lineno})
-			} else if bad.Etype != TANY {
-				// no need to queue, key is already bad
-				yyerror("invalid map key type %v", l.Type)
-			}
-		}
-		n.Left = nil
-		n.Right = nil
-
-	case OTCHAN:
-		ok |= Etype
-		n.Left = typecheck(n.Left, Etype)
-		l := n.Left
-		if l.Type == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Type.NotInHeap {
-			yyerror("chan of go:notinheap type not allowed")
-		}
-		t := typChan(l.Type, ChanDir(n.Etype)) // TODO(marvin): Fix Node.EType type union.
-		n.Op = OTYPE
-		n.Type = t
-		n.Left = nil
-		n.Etype = 0
-
-	case OTSTRUCT:
-		ok |= Etype
-		n.Op = OTYPE
-		n.Type = tostruct(n.List.Slice())
-		if n.Type == nil || n.Type.Broke {
-			n.Type = nil
-			return n
-		}
-		n.List.Set(nil)
-
-	case OTINTER:
-		ok |= Etype
-		n.Op = OTYPE
-		n.Type = tointerface(n.List.Slice())
-		if n.Type == nil {
-			return n
-		}
-
-	case OTFUNC:
-		ok |= Etype
-		n.Op = OTYPE
-		n.Type = functype(n.Left, n.List.Slice(), n.Rlist.Slice())
-		if n.Type == nil {
-			return n
-		}
-		n.Left = nil
-		n.List.Set(nil)
-		n.Rlist.Set(nil)
-
-	// type or expr
-	case OIND:
-		n.Left = typecheck(n.Left, Erv|Etype|top&Ecomplit)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if l.Op == OTYPE {
-			ok |= Etype
-			n.Op = OTYPE
-			n.Type = ptrto(l.Type)
-			n.Left = nil
-			break OpSwitch
-		}
-
-		if !t.IsPtr() {
-			if top&(Erv|Etop) != 0 {
-				yyerror("invalid indirect of %L", n.Left)
-				n.Type = nil
-				return n
-			}
-
-			break OpSwitch
-		}
-
-		ok |= Erv
-		n.Type = t.Elem()
-		break OpSwitch
-
-	// arithmetic exprs
-	case OASOP,
-		OADD,
-		OAND,
-		OANDAND,
-		OANDNOT,
-		ODIV,
-		OEQ,
-		OGE,
-		OGT,
-		OHMUL,
-		OLE,
-		OLT,
-		OLSH,
-		ORSH,
-		OMOD,
-		OMUL,
-		ONE,
-		OOR,
-		OOROR,
-		OSUB,
-		OXOR:
-		var l *Node
-		var op Op
-		var r *Node
-		if n.Op == OASOP {
-			ok |= Etop
-			n.Left = typecheck(n.Left, Erv)
-			n.Right = typecheck(n.Right, Erv)
-			l = n.Left
-			r = n.Right
-			checkassign(n, n.Left)
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			if n.Implicit && !okforarith[l.Type.Etype] {
-				yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
-				n.Type = nil
-				return n
-			}
-			// TODO(marvin): Fix Node.EType type union.
-			op = Op(n.Etype)
-		} else {
-			ok |= Erv
-			n.Left = typecheck(n.Left, Erv)
-			n.Right = typecheck(n.Right, Erv)
-			l = n.Left
-			r = n.Right
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			op = n.Op
-		}
-		if op == OLSH || op == ORSH {
-			r = defaultlit(r, Types[TUINT])
-			n.Right = r
-			t := r.Type
-			if !t.IsInteger() || t.IsSigned() {
-				yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", n, r.Type)
-				n.Type = nil
-				return n
-			}
-
-			t = l.Type
-			if t != nil && t.Etype != TIDEAL && !t.IsInteger() {
-				yyerror("invalid operation: %v (shift of type %v)", n, t)
-				n.Type = nil
-				return n
-			}
-
-			// no defaultlit for left
-			// the outer context gives the type
-			n.Type = l.Type
-
-			break OpSwitch
-		}
-
-		// ideal mixed with non-ideal
-		l, r = defaultlit2(l, r, false)
-
-		n.Left = l
-		n.Right = r
-		if l.Type == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		t := l.Type
-		if t.Etype == TIDEAL {
-			t = r.Type
-		}
-		et := t.Etype
-		if et == TIDEAL {
-			et = TINT
-		}
-		var aop Op = OXXX
-		if iscmp[n.Op] && t.Etype != TIDEAL && !eqtype(l.Type, r.Type) {
-			// comparison is okay as long as one side is
-			// assignable to the other.  convert so they have
-			// the same type.
-			//
-			// the only conversion that isn't a no-op is concrete == interface.
-			// in that case, check comparability of the concrete type.
-			// The conversion allocates, so only do it if the concrete type is huge.
-			if r.Type.Etype != TBLANK {
-				aop = assignop(l.Type, r.Type, nil)
-				if aop != 0 {
-					if r.Type.IsInterface() && !l.Type.IsInterface() && !l.Type.IsComparable() {
-						yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type))
-						n.Type = nil
-						return n
-					}
-
-					dowidth(l.Type)
-					if r.Type.IsInterface() == l.Type.IsInterface() || l.Type.Width >= 1<<16 {
-						l = nod(aop, l, nil)
-						l.Type = r.Type
-						l.Typecheck = 1
-						n.Left = l
-					}
-
-					t = r.Type
-					goto converted
-				}
-			}
-
-			if l.Type.Etype != TBLANK {
-				aop = assignop(r.Type, l.Type, nil)
-				if aop != 0 {
-					if l.Type.IsInterface() && !r.Type.IsInterface() && !r.Type.IsComparable() {
-						yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type))
-						n.Type = nil
-						return n
-					}
-
-					dowidth(r.Type)
-					if r.Type.IsInterface() == l.Type.IsInterface() || r.Type.Width >= 1<<16 {
-						r = nod(aop, r, nil)
-						r.Type = l.Type
-						r.Typecheck = 1
-						n.Right = r
-					}
-
-					t = l.Type
-				}
-			}
-
-		converted:
-			et = t.Etype
-		}
-
-		if t.Etype != TIDEAL && !eqtype(l.Type, r.Type) {
-			l, r = defaultlit2(l, r, true)
-			if r.Type.IsInterface() == l.Type.IsInterface() || aop == 0 {
-				yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
-				n.Type = nil
-				return n
-			}
-		}
-
-		if !okfor[op][et] {
-			yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
-			n.Type = nil
-			return n
-		}
-
-		// okfor allows any array == array, map == map, func == func.
-		// restrict to slice/map/func == nil and nil == slice/map/func.
-		if l.Type.IsArray() && !l.Type.IsComparable() {
-			yyerror("invalid operation: %v (%v cannot be compared)", n, l.Type)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.IsSlice() && !isnil(l) && !isnil(r) {
-			yyerror("invalid operation: %v (slice can only be compared to nil)", n)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.IsMap() && !isnil(l) && !isnil(r) {
-			yyerror("invalid operation: %v (map can only be compared to nil)", n)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.Etype == TFUNC && !isnil(l) && !isnil(r) {
-			yyerror("invalid operation: %v (func can only be compared to nil)", n)
-			n.Type = nil
-			return n
-		}
-
-		if l.Type.IsStruct() {
-			if f := l.Type.IncomparableField(); f != nil {
-				yyerror("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
-				n.Type = nil
-				return n
-			}
-		}
-
-		t = l.Type
-		if iscmp[n.Op] {
-			evconst(n)
-			t = idealbool
-			if n.Op != OLITERAL {
-				l, r = defaultlit2(l, r, true)
-				n.Left = l
-				n.Right = r
-			}
-		}
-
-		if et == TSTRING {
-			if iscmp[n.Op] {
-				// TODO(marvin): Fix Node.EType type union.
-				n.Etype = EType(n.Op)
-				n.Op = OCMPSTR
-			} else if n.Op == OADD {
-				// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
-				n.Op = OADDSTR
-
-				if l.Op == OADDSTR {
-					n.List.Set(l.List.Slice())
-				} else {
-					n.List.Set1(l)
-				}
-				if r.Op == OADDSTR {
-					n.List.AppendNodes(&r.List)
-				} else {
-					n.List.Append(r)
-				}
-				n.Left = nil
-				n.Right = nil
-			}
-		}
-
-		if et == TINTER {
-			if l.Op == OLITERAL && l.Val().Ctype() == CTNIL {
-				// swap for back end
-				n.Left = r
-
-				n.Right = l
-			} else if r.Op == OLITERAL && r.Val().Ctype() == CTNIL {
-			} else // leave alone for back end
-			if r.Type.IsInterface() == l.Type.IsInterface() {
-				// TODO(marvin): Fix Node.EType type union.
-				n.Etype = EType(n.Op)
-				n.Op = OCMPIFACE
-			}
-		}
-
-		if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
-			if r.Val().U.(*Mpint).CmpInt64(0) == 0 {
-				yyerror("division by zero")
-				n.Type = nil
-				return n
-			}
-		}
-
-		n.Type = t
-		break OpSwitch
-
-	case OCOM, OMINUS, ONOT, OPLUS:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !okfor[n.Op][t.Etype] {
-			yyerror("invalid operation: %v %v", n.Op, t)
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-		break OpSwitch
-
-	// exprs
-	case OADDR:
-		ok |= Erv
-
-		n.Left = typecheck(n.Left, Erv)
-		if n.Left.Type == nil {
-			n.Type = nil
-			return n
-		}
-		checklvalue(n.Left, "take the address of")
-		r := outervalue(n.Left)
-		var l *Node
-		for l = n.Left; l != r; l = l.Left {
-			l.Addrtaken = true
-			if l.isClosureVar() {
-				l.Name.Defn.Addrtaken = true
-			}
-		}
-
-		if l.Orig != l && l.Op == ONAME {
-			Fatalf("found non-orig name node %v", l)
-		}
-		l.Addrtaken = true
-		if l.isClosureVar() {
-			l.Name.Defn.Addrtaken = true
-		}
-		n.Left = defaultlit(n.Left, nil)
-		l = n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		n.Type = ptrto(t)
-		break OpSwitch
-
-	case OCOMPLIT:
-		ok |= Erv
-		n = typecheckcomplit(n)
-		if n.Type == nil {
-			return n
-		}
-		break OpSwitch
-
-	case OXDOT, ODOT:
-		if n.Op == OXDOT {
-			n = adddot(n)
-			n.Op = ODOT
-			if n.Left == nil {
-				n.Type = nil
-				return n
-			}
-		}
-
-		n.Left = typecheck(n.Left, Erv|Etype)
-
-		n.Left = defaultlit(n.Left, nil)
-
-		t := n.Left.Type
-		if t == nil {
-			adderrorname(n)
-			n.Type = nil
-			return n
-		}
-
-		s := n.Sym
-
-		if n.Left.Op == OTYPE {
-			if !looktypedot(n, t, 0) {
-				if looktypedot(n, t, 1) {
-					yyerror("%v undefined (cannot refer to unexported method %v)", n, n.Sym)
-				} else {
-					yyerror("%v undefined (type %v has no method %v)", n, t, n.Sym)
-				}
-				n.Type = nil
-				return n
-			}
-
-			if n.Type.Etype != TFUNC || !n.IsMethod() {
-				yyerror("type %v has no method %S", n.Left.Type, n.Sym)
-				n.Type = nil
-				return n
-			}
-
-			n.Op = ONAME
-			if n.Name == nil {
-				n.Name = new(Name)
-			}
-			n.Right = newname(n.Sym)
-			n.Type = methodfunc(n.Type, n.Left.Type)
-			n.Xoffset = 0
-			n.Class = PFUNC
-			ok = Erv
-			break OpSwitch
-		}
-
-		if t.IsPtr() && !t.Elem().IsInterface() {
-			t = t.Elem()
-			if t == nil {
-				n.Type = nil
-				return n
-			}
-			n.Op = ODOTPTR
-			checkwidth(t)
-		}
-
-		if isblanksym(n.Sym) {
-			yyerror("cannot refer to blank field or method")
-			n.Type = nil
-			return n
-		}
-
-		if lookdot(n, t, 0) == nil {
-			// Legitimate field or method lookup failed, try to explain the error
-			switch {
-			case t.IsEmptyInterface():
-				yyerror("%v undefined (type %v is interface with no methods)", n, n.Left.Type)
-
-			case t.IsPtr() && t.Elem().IsInterface():
-				// Pointer to interface is almost always a mistake.
-				yyerror("%v undefined (type %v is pointer to interface, not interface)", n, n.Left.Type)
-
-			case lookdot(n, t, 1) != nil:
-				// Field or method matches by name, but it is not exported.
-				yyerror("%v undefined (cannot refer to unexported field or method %v)", n, n.Sym)
-
-			default:
-				if mt := lookdot(n, t, 2); mt != nil { // Case-insensitive lookup.
-					yyerror("%v undefined (type %v has no field or method %v, but does have %v)", n, n.Left.Type, n.Sym, mt.Sym)
-				} else {
-					yyerror("%v undefined (type %v has no field or method %v)", n, n.Left.Type, n.Sym)
-				}
-			}
-			n.Type = nil
-			return n
-		}
-
-		switch n.Op {
-		case ODOTINTER, ODOTMETH:
-			if top&Ecall != 0 {
-				ok |= Ecall
-			} else {
-				typecheckpartialcall(n, s)
-				ok |= Erv
-			}
-
-		default:
-			ok |= Erv
-		}
-
-		break OpSwitch
-
-	case ODOTTYPE:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsInterface() {
-			yyerror("invalid type assertion: %v (non-interface type %v on left)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if n.Right != nil {
-			n.Right = typecheck(n.Right, Etype)
-			n.Type = n.Right.Type
-			n.Right = nil
-			if n.Type == nil {
-				return n
-			}
-		}
-
-		if n.Type != nil && !n.Type.IsInterface() {
-			var missing, have *Field
-			var ptr int
-			if !implements(n.Type, t, &missing, &have, &ptr) {
-				if have != nil && have.Sym == missing.Sym {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
-						"\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-				} else if ptr != 0 {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type, t, missing.Sym)
-				} else if have != nil {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
-						"\t\thave %v%0S\n\t\twant %v%0S", n.Type, t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
-				} else {
-					yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type, t, missing.Sym)
-				}
-				n.Type = nil
-				return n
-			}
-		}
-
-		break OpSwitch
-
-	case OINDEX:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		n.Left = implicitstar(n.Left)
-		l := n.Left
-		n.Right = typecheck(n.Right, Erv)
-		r := n.Right
-		t := l.Type
-		if t == nil || r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		switch t.Etype {
-		default:
-			yyerror("invalid operation: %v (type %v does not support indexing)", n, t)
-			n.Type = nil
-			return n
-
-		case TSTRING, TARRAY, TSLICE:
-			n.Right = indexlit(n.Right)
-			if t.IsString() {
-				n.Type = bytetype
-			} else {
-				n.Type = t.Elem()
-			}
-			why := "string"
-			if t.IsArray() {
-				why = "array"
-			} else if t.IsSlice() {
-				why = "slice"
-			}
-
-			if n.Right.Type != nil && !n.Right.Type.IsInteger() {
-				yyerror("non-integer %s index %v", why, n.Right)
-				break
-			}
-
-			if !n.Bounded && Isconst(n.Right, CTINT) {
-				x := n.Right.Int64()
-				if x < 0 {
-					yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
-				} else if t.IsArray() && x >= t.NumElem() {
-					yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.NumElem())
-				} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val().U.(string))) {
-					yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.Val().U.(string)))
-				} else if n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-					yyerror("invalid %s index %v (index too large)", why, n.Right)
-				}
-			}
-
-		case TMAP:
-			n.Etype = 0
-			n.Right = defaultlit(n.Right, t.Key())
-			if n.Right.Type != nil {
-				n.Right = assignconv(n.Right, t.Key(), "map index")
-			}
-			n.Type = t.Val()
-			n.Op = OINDEXMAP
-		}
-
-		break OpSwitch
-
-	case ORECV:
-		ok |= Etop | Erv
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsChan() {
-			yyerror("invalid operation: %v (receive from non-chan type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if !t.ChanDir().CanRecv() {
-			yyerror("invalid operation: %v (receive from send-only type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t.Elem()
-		break OpSwitch
-
-	case OSEND:
-		ok |= Etop
-		n.Left = typecheck(n.Left, Erv)
-		l := n.Left
-		n.Right = typecheck(n.Right, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		l = n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsChan() {
-			yyerror("invalid operation: %v (send to non-chan type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if !t.ChanDir().CanSend() {
-			yyerror("invalid operation: %v (send to receive-only type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		n.Right = defaultlit(n.Right, t.Elem())
-		r := n.Right
-		if r.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Right = assignconv(r, l.Type.Elem(), "send")
-
-		// TODO: more aggressive
-		n.Etype = 0
-
-		n.Type = nil
-		break OpSwitch
-
-	case OSLICE, OSLICE3:
-		ok |= Erv
-		n.Left = typecheck(n.Left, top)
-		low, high, max := n.SliceBounds()
-		hasmax := n.Op.IsSlice3()
-		low = typecheck(low, Erv)
-		high = typecheck(high, Erv)
-		max = typecheck(max, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		low = indexlit(low)
-		high = indexlit(high)
-		max = indexlit(max)
-		n.SetSliceBounds(low, high, max)
-		l := n.Left
-		if l.Type.IsArray() {
-			if !islvalue(n.Left) {
-				yyerror("invalid operation %v (slice of unaddressable value)", n)
-				n.Type = nil
-				return n
-			}
-
-			n.Left = nod(OADDR, n.Left, nil)
-			n.Left.Implicit = true
-			n.Left = typecheck(n.Left, Erv)
-			l = n.Left
-		}
-
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		var tp *Type
-		if t.IsString() {
-			if hasmax {
-				yyerror("invalid operation %v (3-index slice of string)", n)
-				n.Type = nil
-				return n
-			}
-			n.Type = t
-			n.Op = OSLICESTR
-		} else if t.IsPtr() && t.Elem().IsArray() {
-			tp = t.Elem()
-			n.Type = typSlice(tp.Elem())
-			dowidth(n.Type)
-			if hasmax {
-				n.Op = OSLICE3ARR
-			} else {
-				n.Op = OSLICEARR
-			}
-		} else if t.IsSlice() {
-			n.Type = t
-		} else {
-			yyerror("cannot slice %v (type %v)", l, t)
-			n.Type = nil
-			return n
-		}
-
-		if low != nil && !checksliceindex(l, low, tp) {
-			n.Type = nil
-			return n
-		}
-		if high != nil && !checksliceindex(l, high, tp) {
-			n.Type = nil
-			return n
-		}
-		if max != nil && !checksliceindex(l, max, tp) {
-			n.Type = nil
-			return n
-		}
-		if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) {
-			n.Type = nil
-			return n
-		}
-		break OpSwitch
-
-	// call and call like
-	case OCALL:
-		n.Left = typecheck(n.Left, Erv|Etype|Ecall)
-		if n.Left.Diag {
-			n.Diag = true
-		}
-
-		l := n.Left
-
-		if l.Op == ONAME && l.Etype != 0 {
-			// TODO(marvin): Fix Node.EType type union.
-			if n.Isddd && Op(l.Etype) != OAPPEND {
-				yyerror("invalid use of ... with builtin %v", l)
-			}
-
-			// builtin: OLEN, OCAP, etc.
-			// TODO(marvin): Fix Node.EType type union.
-			n.Op = Op(l.Etype)
-			n.Left = n.Right
-			n.Right = nil
-			n = typecheck1(n, top)
-			return n
-		}
-
-		n.Left = defaultlit(n.Left, nil)
-		l = n.Left
-		if l.Op == OTYPE {
-			if n.Isddd || l.Type.isDDDArray() {
-				if !l.Type.Broke {
-					yyerror("invalid use of ... in type conversion to %v", l.Type)
-				}
-				n.Diag = true
-			}
-
-			// pick off before type-checking arguments
-			ok |= Erv
-
-			// turn CALL(type, arg) into CONV(arg) w/ type
-			n.Left = nil
-
-			n.Op = OCONV
-			n.Type = l.Type
-			if !onearg(n, "conversion to %v", l.Type) {
-				n.Type = nil
-				return n
-			}
-			n = typecheck1(n, top)
-			return n
-		}
-
-		if n.List.Len() == 1 && !n.Isddd {
-			n.List.SetIndex(0, typecheck(n.List.Index(0), Erv|Efnstruct))
-		} else {
-			typecheckslice(n.List.Slice(), Erv)
-		}
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		checkwidth(t)
-
-		switch l.Op {
-		case ODOTINTER:
-			n.Op = OCALLINTER
-
-		case ODOTMETH:
-			n.Op = OCALLMETH
-
-			// typecheckaste was used here but there wasn't enough
-			// information further down the call chain to know if we
-			// were testing a method receiver for unexported fields.
-			// It isn't necessary, so just do a sanity check.
-			tp := t.Recv().Type
-
-			if l.Left == nil || !eqtype(l.Left.Type, tp) {
-				Fatalf("method receiver")
-			}
-
-		default:
-			n.Op = OCALLFUNC
-			if t.Etype != TFUNC {
-				yyerror("cannot call non-function %v (type %v)", l, t)
-				n.Type = nil
-				return n
-			}
-		}
-
-		typecheckaste(OCALL, n.Left, n.Isddd, t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
-		ok |= Etop
-		if t.Results().NumFields() == 0 {
-			break OpSwitch
-		}
-		ok |= Erv
-		if t.Results().NumFields() == 1 {
-			n.Type = l.Type.Results().Field(0).Type
-
-			if n.Op == OCALLFUNC && n.Left.Op == ONAME && (compiling_runtime || n.Left.Sym.Pkg == Runtimepkg) && n.Left.Sym.Name == "getg" {
-				// Emit code for runtime.getg() directly instead of calling function.
-				// Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
-				// so that the ordering pass can make sure to preserve the semantics of the original code
-				// (in particular, the exact time of the function call) by introducing temporaries.
-				// In this case, we know getg() always returns the same result within a given function
-				// and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
-				n.Op = OGETG
-			}
-
-			break OpSwitch
-		}
-
-		// multiple return
-		if top&(Efnstruct|Etop) == 0 {
-			yyerror("multiple-value %v() in single-value context", l)
-			break OpSwitch
-		}
-
-		n.Type = l.Type.Results()
-
-		break OpSwitch
-
-	case OALIGNOF, OOFFSETOF, OSIZEOF:
-		ok |= Erv
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-
-		// any side effects disappear; ignore init
-		var r Node
-		Nodconst(&r, Types[TUINTPTR], evalunsafe(n))
-		r.Orig = n
-		n = &r
-
-		break OpSwitch
-
-	case OCAP, OLEN, OREAL, OIMAG:
-		ok |= Erv
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		n.Left = implicitstar(n.Left)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		switch n.Op {
-		case OCAP:
-			if !okforcap[t.Etype] {
-				goto badcall1
-			}
-
-		case OLEN:
-			if !okforlen[t.Etype] {
-				goto badcall1
-			}
-
-		case OREAL, OIMAG:
-			if !t.IsComplex() {
-				goto badcall1
-			}
-			if Isconst(l, CTCPLX) {
-				r := n
-				if n.Op == OREAL {
-					n = nodfltconst(&l.Val().U.(*Mpcplx).Real)
-				} else {
-					n = nodfltconst(&l.Val().U.(*Mpcplx).Imag)
-				}
-				n.Orig = r
-			}
-
-			n.Type = Types[cplxsubtype(t.Etype)]
-			break OpSwitch
-		}
-
-		// might be constant
-		switch t.Etype {
-		case TSTRING:
-			if Isconst(l, CTSTR) {
-				var r Node
-				Nodconst(&r, Types[TINT], int64(len(l.Val().U.(string))))
-				r.Orig = n
-				n = &r
-			}
-
-		case TARRAY:
-			if callrecv(l) { // has call or receive
-				break
-			}
-			var r Node
-			Nodconst(&r, Types[TINT], t.NumElem())
-			r.Orig = n
-			n = &r
-		}
-
-		n.Type = Types[TINT]
-		break OpSwitch
-
-	badcall1:
-		yyerror("invalid argument %L for %v", n.Left, n.Op)
-		n.Type = nil
-		return n
-
-	case OCOMPLEX:
-		ok |= Erv
-		var r *Node
-		var l *Node
-		if n.List.Len() == 1 {
-			typecheckslice(n.List.Slice(), Efnstruct)
-			if n.List.First().Op != OCALLFUNC && n.List.First().Op != OCALLMETH {
-				yyerror("invalid operation: complex expects two arguments")
-				n.Type = nil
-				return n
-			}
-
-			t := n.List.First().Left.Type
-			if !t.IsKind(TFUNC) {
-				// Bail. This error will be reported elsewhere.
-				return n
-			}
-			if t.Results().NumFields() != 2 {
-				yyerror("invalid operation: complex expects two arguments, %v returns %d results", n.List.First(), t.Results().NumFields())
-				n.Type = nil
-				return n
-			}
-
-			t = n.List.First().Type
-			l = t.Field(0).Nname
-			r = t.Field(1).Nname
-		} else {
-			if !twoarg(n) {
-				n.Type = nil
-				return n
-			}
-			n.Left = typecheck(n.Left, Erv)
-			n.Right = typecheck(n.Right, Erv)
-			l = n.Left
-			r = n.Right
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			l, r = defaultlit2(l, r, false)
-			if l.Type == nil || r.Type == nil {
-				n.Type = nil
-				return n
-			}
-			n.Left = l
-			n.Right = r
-		}
-
-		if !eqtype(l.Type, r.Type) {
-			yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
-			n.Type = nil
-			return n
-		}
-
-		var t *Type
-		switch l.Type.Etype {
-		default:
-			yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type)
-			n.Type = nil
-			return n
-
-		case TIDEAL:
-			t = Types[TIDEAL]
-
-		case TFLOAT32:
-			t = Types[TCOMPLEX64]
-
-		case TFLOAT64:
-			t = Types[TCOMPLEX128]
-		}
-
-		if l.Op == OLITERAL && r.Op == OLITERAL {
-			// make it a complex literal
-			r = nodcplxlit(l.Val(), r.Val())
-
-			r.Orig = n
-			n = r
-		}
-
-		n.Type = t
-		break OpSwitch
-
-	case OCLOSE:
-		if !onearg(n, "%v", n.Op) {
-			n.Type = nil
-			return n
-		}
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		l := n.Left
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsChan() {
-			yyerror("invalid operation: %v (non-chan type %v)", n, t)
-			n.Type = nil
-			return n
-		}
-
-		if !t.ChanDir().CanSend() {
-			yyerror("invalid operation: %v (cannot close receive-only channel)", n)
-			n.Type = nil
-			return n
-		}
-
-		ok |= Etop
-		break OpSwitch
-
-	case ODELETE:
-		args := n.List
-		if args.Len() == 0 {
-			yyerror("missing arguments to delete")
-			n.Type = nil
-			return n
-		}
-
-		if args.Len() == 1 {
-			yyerror("missing second (key) argument to delete")
-			n.Type = nil
-			return n
-		}
-
-		if args.Len() != 2 {
-			yyerror("too many arguments to delete")
-			n.Type = nil
-			return n
-		}
-
-		ok |= Etop
-		typecheckslice(args.Slice(), Erv)
-		l := args.First()
-		r := args.Second()
-		if l.Type != nil && !l.Type.IsMap() {
-			yyerror("first argument to delete must be map; have %L", l.Type)
-			n.Type = nil
-			return n
-		}
-
-		args.SetIndex(1, assignconv(r, l.Type.Key(), "delete"))
-		break OpSwitch
-
-	case OAPPEND:
-		ok |= Erv
-		args := n.List
-		if args.Len() == 0 {
-			yyerror("missing arguments to append")
-			n.Type = nil
-			return n
-		}
-
-		if args.Len() == 1 && !n.Isddd {
-			args.SetIndex(0, typecheck(args.Index(0), Erv|Efnstruct))
-		} else {
-			typecheckslice(args.Slice(), Erv)
-		}
-
-		t := args.First().Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-
-		// Unpack multiple-return result before type-checking.
-		var funarg *Type
-		if t.IsFuncArgStruct() {
-			funarg = t
-			t = t.Field(0).Type
-		}
-
-		n.Type = t
-		if !t.IsSlice() {
-			if Isconst(args.First(), CTNIL) {
-				yyerror("first argument to append must be typed slice; have untyped nil")
-				n.Type = nil
-				return n
-			}
-
-			yyerror("first argument to append must be slice; have %L", t)
-			n.Type = nil
-			return n
-		}
-
-		if n.Isddd {
-			if args.Len() == 1 {
-				yyerror("cannot use ... on first argument to append")
-				n.Type = nil
-				return n
-			}
-
-			if args.Len() != 2 {
-				yyerror("too many arguments to append")
-				n.Type = nil
-				return n
-			}
-
-			if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() {
-				args.SetIndex(1, defaultlit(args.Index(1), Types[TSTRING]))
-				break OpSwitch
-			}
-
-			args.SetIndex(1, assignconv(args.Index(1), t.Orig, "append"))
-			break OpSwitch
-		}
-
-		if funarg != nil {
-			_, it := iterFields(funarg) // Skip first field
-			for t := it.Next(); t != nil; t = it.Next() {
-				if assignop(t.Type, n.Type.Elem(), nil) == 0 {
-					yyerror("cannot append %v value to []%v", t.Type, n.Type.Elem())
-				}
-			}
-		} else {
-			as := args.Slice()[1:]
-			for i, n := range as {
-				if n.Type == nil {
-					continue
-				}
-				as[i] = assignconv(n, t.Elem(), "append")
-			}
-		}
-
-		break OpSwitch
-
-	case OCOPY:
-		ok |= Etop | Erv
-		args := n.List
-		if args.Len() < 2 {
-			yyerror("missing arguments to copy")
-			n.Type = nil
-			return n
-		}
-
-		if args.Len() > 2 {
-			yyerror("too many arguments to copy")
-			n.Type = nil
-			return n
-		}
-
-		n.Left = args.First()
-		n.Right = args.Second()
-		n.List.Set(nil)
-		n.Type = Types[TINT]
-		n.Left = typecheck(n.Left, Erv)
-		n.Right = typecheck(n.Right, Erv)
-		if n.Left.Type == nil || n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-		n.Left = defaultlit(n.Left, nil)
-		n.Right = defaultlit(n.Right, nil)
-		if n.Left.Type == nil || n.Right.Type == nil {
-			n.Type = nil
-			return n
-		}
-
-		// copy([]byte, string)
-		if n.Left.Type.IsSlice() && n.Right.Type.IsString() {
-			if eqtype(n.Left.Type.Elem(), bytetype) {
-				break OpSwitch
-			}
-			yyerror("arguments to copy have different element types: %L and string", n.Left.Type)
-			n.Type = nil
-			return n
-		}
-
-		if !n.Left.Type.IsSlice() || !n.Right.Type.IsSlice() {
-			if !n.Left.Type.IsSlice() && !n.Right.Type.IsSlice() {
-				yyerror("arguments to copy must be slices; have %L, %L", n.Left.Type, n.Right.Type)
-			} else if !n.Left.Type.IsSlice() {
-				yyerror("first argument to copy should be slice; have %L", n.Left.Type)
-			} else {
-				yyerror("second argument to copy should be slice or string; have %L", n.Right.Type)
-			}
-			n.Type = nil
-			return n
-		}
-
-		if !eqtype(n.Left.Type.Elem(), n.Right.Type.Elem()) {
-			yyerror("arguments to copy have different element types: %L and %L", n.Left.Type, n.Right.Type)
-			n.Type = nil
-			return n
-		}
-
-		break OpSwitch
-
-	case OCONV:
-		ok |= Erv
-		saveorignode(n)
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = convlit1(n.Left, n.Type, true, noReuse)
-		t := n.Left.Type
-		if t == nil || n.Type == nil {
-			n.Type = nil
-			return n
-		}
-		var why string
-		n.Op = convertop(t, n.Type, &why)
-		if n.Op == 0 {
-			if !n.Diag && !n.Type.Broke {
-				yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
-				n.Diag = true
-			}
-
-			n.Op = OCONV
-		}
-
-		switch n.Op {
-		case OCONVNOP:
-			if n.Left.Op == OLITERAL {
-				r := nod(OXXX, nil, nil)
-				n.Op = OCONV
-				n.Orig = r
-				*r = *n
-				n.Op = OLITERAL
-				n.SetVal(n.Left.Val())
-			}
-
-		// do not use stringtoarraylit.
-		// generated code and compiler memory footprint is better without it.
-		case OSTRARRAYBYTE:
-			break
-
-		case OSTRARRAYRUNE:
-			if n.Left.Op == OLITERAL {
-				n = stringtoarraylit(n)
-			}
-		}
-
-		break OpSwitch
-
-	case OMAKE:
-		ok |= Erv
-		args := n.List.Slice()
-		if len(args) == 0 {
-			yyerror("missing argument to make")
-			n.Type = nil
-			return n
-		}
-
-		n.List.Set(nil)
-		l := args[0]
-		l = typecheck(l, Etype)
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-
-		i := 1
-		switch t.Etype {
-		default:
-			yyerror("cannot make type %v", t)
-			n.Type = nil
-			return n
-
-		case TSLICE:
-			if i >= len(args) {
-				yyerror("missing len argument to make(%v)", t)
-				n.Type = nil
-				return n
-			}
-
-			l = args[i]
-			i++
-			l = typecheck(l, Erv)
-			var r *Node
-			if i < len(args) {
-				r = args[i]
-				i++
-				r = typecheck(r, Erv)
-			}
-
-			if l.Type == nil || (r != nil && r.Type == nil) {
-				n.Type = nil
-				return n
-			}
-			if !checkmake(t, "len", l) || r != nil && !checkmake(t, "cap", r) {
-				n.Type = nil
-				return n
-			}
-			if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && l.Val().U.(*Mpint).Cmp(r.Val().U.(*Mpint)) > 0 {
-				yyerror("len larger than cap in make(%v)", t)
-				n.Type = nil
-				return n
-			}
-
-			n.Left = l
-			n.Right = r
-			n.Op = OMAKESLICE
-
-		case TMAP:
-			if i < len(args) {
-				l = args[i]
-				i++
-				l = typecheck(l, Erv)
-				l = defaultlit(l, Types[TINT])
-				if l.Type == nil {
-					n.Type = nil
-					return n
-				}
-				if !checkmake(t, "size", l) {
-					n.Type = nil
-					return n
-				}
-				n.Left = l
-			} else {
-				n.Left = nodintconst(0)
-			}
-			n.Op = OMAKEMAP
-
-		case TCHAN:
-			l = nil
-			if i < len(args) {
-				l = args[i]
-				i++
-				l = typecheck(l, Erv)
-				l = defaultlit(l, Types[TINT])
-				if l.Type == nil {
-					n.Type = nil
-					return n
-				}
-				if !checkmake(t, "buffer", l) {
-					n.Type = nil
-					return n
-				}
-				n.Left = l
-			} else {
-				n.Left = nodintconst(0)
-			}
-			n.Op = OMAKECHAN
-		}
-
-		if i < len(args) {
-			yyerror("too many arguments to make(%v)", t)
-			n.Op = OMAKE
-			n.Type = nil
-			return n
-		}
-
-		n.Type = t
-		break OpSwitch
-
-	case ONEW:
-		ok |= Erv
-		args := n.List
-		if args.Len() == 0 {
-			yyerror("missing argument to new")
-			n.Type = nil
-			return n
-		}
-
-		l := args.First()
-		l = typecheck(l, Etype)
-		t := l.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if args.Len() > 1 {
-			yyerror("too many arguments to new(%v)", t)
-			n.Type = nil
-			return n
-		}
-
-		n.Left = l
-		n.Type = ptrto(t)
-		break OpSwitch
-
-	case OPRINT, OPRINTN:
-		ok |= Etop
-		typecheckslice(n.List.Slice(), Erv)
-		ls := n.List.Slice()
-		for i1, n1 := range ls {
-			// Special case for print: int constant is int64, not int.
-			if Isconst(n1, CTINT) {
-				ls[i1] = defaultlit(ls[i1], Types[TINT64])
-			} else {
-				ls[i1] = defaultlit(ls[i1], nil)
-			}
-		}
-
-		break OpSwitch
-
-	case OPANIC:
-		ok |= Etop
-		if !onearg(n, "panic") {
-			n.Type = nil
-			return n
-		}
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, Types[TINTER])
-		if n.Left.Type == nil {
-			n.Type = nil
-			return n
-		}
-		break OpSwitch
-
-	case ORECOVER:
-		ok |= Erv | Etop
-		if n.List.Len() != 0 {
-			yyerror("too many arguments to recover")
-			n.Type = nil
-			return n
-		}
-
-		n.Type = Types[TINTER]
-		break OpSwitch
-
-	case OCLOSURE:
-		ok |= Erv
-		typecheckclosure(n, top)
-		if n.Type == nil {
-			return n
-		}
-		break OpSwitch
-
-	case OITAB:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		t := n.Left.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsInterface() {
-			Fatalf("OITAB of %v", t)
-		}
-		n.Type = ptrto(Types[TUINTPTR])
-		break OpSwitch
-
-	case OIDATA:
-		// Whoever creates the OIDATA node must know a priori the concrete type at that moment,
-		// usually by just having checked the OITAB.
-		Fatalf("cannot typecheck interface data %v", n)
-		break OpSwitch
-
-	case OSPTR:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		t := n.Left.Type
-		if t == nil {
-			n.Type = nil
-			return n
-		}
-		if !t.IsSlice() && !t.IsString() {
-			Fatalf("OSPTR of %v", t)
-		}
-		if t.IsString() {
-			n.Type = ptrto(Types[TUINT8])
-		} else {
-			n.Type = ptrto(t.Elem())
-		}
-		break OpSwitch
-
-	case OCLOSUREVAR:
-		ok |= Erv
-		break OpSwitch
-
-	case OCFUNC:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		n.Type = Types[TUINTPTR]
-		break OpSwitch
-
-	case OCONVNOP:
-		ok |= Erv
-		n.Left = typecheck(n.Left, Erv)
-		break OpSwitch
-
-	// statements
-	case OAS:
-		ok |= Etop
-
-		typecheckas(n)
-
-		// Code that creates temps does not bother to set defn, so do it here.
-		if n.Left.Op == ONAME && n.Left.IsAutoTmp() {
-			n.Left.Name.Defn = n
-		}
-		break OpSwitch
-
-	case OAS2:
-		ok |= Etop
-		typecheckas2(n)
-		break OpSwitch
-
-	case OBREAK,
-		OCONTINUE,
-		ODCL,
-		OEMPTY,
-		OGOTO,
-		OXFALL,
-		OVARKILL,
-		OVARLIVE:
-		ok |= Etop
-		break OpSwitch
-
-	case OLABEL:
-		ok |= Etop
-		decldepth++
-		break OpSwitch
-
-	case ODEFER:
-		ok |= Etop
-		n.Left = typecheck(n.Left, Etop|Erv)
-		if !n.Left.Diag {
-			checkdefergo(n)
-		}
-		break OpSwitch
-
-	case OPROC:
-		ok |= Etop
-		n.Left = typecheck(n.Left, Etop|Erv)
-		checkdefergo(n)
-		break OpSwitch
-
-	case OFOR:
-		ok |= Etop
-		typecheckslice(n.Ninit.Slice(), Etop)
-		decldepth++
-		n.Left = typecheck(n.Left, Erv)
-		if n.Left != nil {
-			t := n.Left.Type
-			if t != nil && !t.IsBoolean() {
-				yyerror("non-bool %L used as for condition", n.Left)
-			}
-		}
-		n.Right = typecheck(n.Right, Etop)
-		typecheckslice(n.Nbody.Slice(), Etop)
-		decldepth--
-		break OpSwitch
-
-	case OIF:
-		ok |= Etop
-		typecheckslice(n.Ninit.Slice(), Etop)
-		n.Left = typecheck(n.Left, Erv)
-		if n.Left != nil {
-			t := n.Left.Type
-			if t != nil && !t.IsBoolean() {
-				yyerror("non-bool %L used as if condition", n.Left)
-			}
-		}
-		typecheckslice(n.Nbody.Slice(), Etop)
-		typecheckslice(n.Rlist.Slice(), Etop)
-		break OpSwitch
-
-	case ORETURN:
-		ok |= Etop
-		if n.List.Len() == 1 {
-			typecheckslice(n.List.Slice(), Erv|Efnstruct)
-		} else {
-			typecheckslice(n.List.Slice(), Erv)
-		}
-		if Curfn == nil {
-			yyerror("return outside function")
-			n.Type = nil
-			return n
-		}
-
-		if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 {
-			break OpSwitch
-		}
-		typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
-		break OpSwitch
-
-	case ORETJMP:
-		ok |= Etop
-		break OpSwitch
-
-	case OSELECT:
-		ok |= Etop
-		typecheckselect(n)
-		break OpSwitch
-
-	case OSWITCH:
-		ok |= Etop
-		typecheckswitch(n)
-		break OpSwitch
-
-	case ORANGE:
-		ok |= Etop
-		typecheckrange(n)
-		break OpSwitch
-
-	case OTYPESW:
-		yyerror("use of .(type) outside type switch")
-		n.Type = nil
-		return n
-
-	case OXCASE:
-		ok |= Etop
-		typecheckslice(n.List.Slice(), Erv)
-		typecheckslice(n.Nbody.Slice(), Etop)
-		break OpSwitch
-
-	case ODCLFUNC:
-		ok |= Etop
-		typecheckfunc(n)
-		break OpSwitch
-
-	case ODCLCONST:
-		ok |= Etop
-		n.Left = typecheck(n.Left, Erv)
-		break OpSwitch
-
-	case ODCLTYPE:
-		ok |= Etop
-		n.Left = typecheck(n.Left, Etype)
-		checkwidth(n.Left.Type)
-		if n.Left.Type != nil && n.Left.Type.NotInHeap && n.Left.Name.Param.Pragma&NotInHeap == 0 {
-			// The type contains go:notinheap types, so it
-			// must be marked as such (alternatively, we
-			// could silently propagate go:notinheap).
-			yyerror("type %v must be go:notinheap", n.Left.Type)
-		}
-		break OpSwitch
-	}
-
-	t := n.Type
-	if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE {
-		switch t.Etype {
-		case TFUNC, // might have TANY; wait until it's called
-			TANY, TFORW, TIDEAL, TNIL, TBLANK:
-			break
-
-		default:
-			checkwidth(t)
-		}
-	}
-
-	if safemode && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR {
-		yyerror("cannot use unsafe.Pointer")
-	}
-
-	evconst(n)
-	if n.Op == OTYPE && top&Etype == 0 {
-		yyerror("type %v is not an expression", n.Type)
-		n.Type = nil
-		return n
-	}
-
-	if top&(Erv|Etype) == Etype && n.Op != OTYPE {
-		yyerror("%v is not a type", n)
-		n.Type = nil
-		return n
-	}
-
-	// TODO(rsc): simplify
-	if (top&(Ecall|Erv|Etype) != 0) && top&Etop == 0 && ok&(Erv|Etype|Ecall) == 0 {
-		yyerror("%v used as value", n)
-		n.Type = nil
-		return n
-	}
-
-	if (top&Etop != 0) && top&(Ecall|Erv|Etype) == 0 && ok&Etop == 0 {
-		if !n.Diag {
-			yyerror("%v evaluated but not used", n)
-			n.Diag = true
-		}
-
-		n.Type = nil
-		return n
-	}
-
-	/* TODO
-	if(n->type == T)
-		fatal("typecheck nil type");
-	*/
-	return n
-}
-
-func checksliceindex(l *Node, r *Node, tp *Type) bool {
-	t := r.Type
-	if t == nil {
-		return false
-	}
-	if !t.IsInteger() {
-		yyerror("invalid slice index %v (type %v)", r, t)
-		return false
-	}
-
-	if r.Op == OLITERAL {
-		if r.Int64() < 0 {
-			yyerror("invalid slice index %v (index must be non-negative)", r)
-			return false
-		} else if tp != nil && tp.NumElem() > 0 && r.Int64() > tp.NumElem() {
-			yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
-			return false
-		} else if Isconst(l, CTSTR) && r.Int64() > int64(len(l.Val().U.(string))) {
-			yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.Val().U.(string)))
-			return false
-		} else if r.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-			yyerror("invalid slice index %v (index too large)", r)
-			return false
-		}
-	}
-
-	return true
-}
-
-func checksliceconst(lo *Node, hi *Node) bool {
-	if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && lo.Val().U.(*Mpint).Cmp(hi.Val().U.(*Mpint)) > 0 {
-		yyerror("invalid slice index: %v > %v", lo, hi)
-		return false
-	}
-
-	return true
-}
-
-func checkdefergo(n *Node) {
-	what := "defer"
-	if n.Op == OPROC {
-		what = "go"
-	}
-
-	switch n.Left.Op {
-	// ok
-	case OCALLINTER,
-		OCALLMETH,
-		OCALLFUNC,
-		OCLOSE,
-		OCOPY,
-		ODELETE,
-		OPANIC,
-		OPRINT,
-		OPRINTN,
-		ORECOVER:
-		return
-
-	case OAPPEND,
-		OCAP,
-		OCOMPLEX,
-		OIMAG,
-		OLEN,
-		OMAKE,
-		OMAKESLICE,
-		OMAKECHAN,
-		OMAKEMAP,
-		ONEW,
-		OREAL,
-		OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
-		if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
-			break
-		}
-		yyerror("%s discards result of %v", what, n.Left)
-		return
-	}
-
-	// type is broken or missing, most likely a method call on a broken type
-	// we will warn about the broken type elsewhere. no need to emit a potentially confusing error
-	if n.Left.Type == nil || n.Left.Type.Broke {
-		return
-	}
-
-	if !n.Diag {
-		// The syntax made sure it was a call, so this must be
-		// a conversion.
-		n.Diag = true
-		yyerror("%s requires function call, not conversion", what)
-	}
-}
-
-// The result of implicitstar MUST be assigned back to n, e.g.
-// 	n.Left = implicitstar(n.Left)
-func implicitstar(n *Node) *Node {
-	// insert implicit * if needed for fixed array
-	t := n.Type
-	if t == nil || !t.IsPtr() {
-		return n
-	}
-	t = t.Elem()
-	if t == nil {
-		return n
-	}
-	if !t.IsArray() {
-		return n
-	}
-	n = nod(OIND, n, nil)
-	n.Implicit = true
-	n = typecheck(n, Erv)
-	return n
-}
-
-func onearg(n *Node, f string, args ...interface{}) bool {
-	if n.Left != nil {
-		return true
-	}
-	if n.List.Len() == 0 {
-		p := fmt.Sprintf(f, args...)
-		yyerror("missing argument to %s: %v", p, n)
-		return false
-	}
-
-	if n.List.Len() > 1 {
-		p := fmt.Sprintf(f, args...)
-		yyerror("too many arguments to %s: %v", p, n)
-		n.Left = n.List.First()
-		n.List.Set(nil)
-		return false
-	}
-
-	n.Left = n.List.First()
-	n.List.Set(nil)
-	return true
-}
-
-func twoarg(n *Node) bool {
-	if n.Left != nil {
-		return true
-	}
-	if n.List.Len() == 0 {
-		yyerror("missing argument to %v - %v", n.Op, n)
-		return false
-	}
-
-	n.Left = n.List.First()
-	if n.List.Len() == 1 {
-		yyerror("missing argument to %v - %v", n.Op, n)
-		n.List.Set(nil)
-		return false
-	}
-
-	if n.List.Len() > 2 {
-		yyerror("too many arguments to %v - %v", n.Op, n)
-		n.List.Set(nil)
-		return false
-	}
-
-	n.Right = n.List.Second()
-	n.List.Set(nil)
-	return true
-}
-
-func lookdot1(errnode *Node, s *Sym, t *Type, fs *Fields, dostrcmp int) *Field {
-	var r *Field
-	for _, f := range fs.Slice() {
-		if dostrcmp != 0 && f.Sym.Name == s.Name {
-			return f
-		}
-		if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
-			return f
-		}
-		if f.Sym != s {
-			continue
-		}
-		if r != nil {
-			if errnode != nil {
-				yyerror("ambiguous selector %v", errnode)
-			} else if t.IsPtr() {
-				yyerror("ambiguous selector (%v).%v", t, s)
-			} else {
-				yyerror("ambiguous selector %v.%v", t, s)
-			}
-			break
-		}
-
-		r = f
-	}
-
-	return r
-}
-
-func looktypedot(n *Node, t *Type, dostrcmp int) bool {
-	s := n.Sym
-
-	if t.IsInterface() {
-		f1 := lookdot1(n, s, t, t.Fields(), dostrcmp)
-		if f1 == nil {
-			return false
-		}
-
-		n.Sym = methodsym(n.Sym, t, 0)
-		n.Xoffset = f1.Offset
-		n.Type = f1.Type
-		n.Op = ODOTINTER
-		return true
-	}
-
-	// Find the base type: methtype will fail if t
-	// is not of the form T or *T.
-	mt := methtype(t)
-	if mt == nil {
-		return false
-	}
-
-	expandmeth(mt)
-	f2 := lookdot1(n, s, mt, mt.AllMethods(), dostrcmp)
-	if f2 == nil {
-		return false
-	}
-
-	// disallow T.m if m requires *T receiver
-	if f2.Type.Recv().Type.IsPtr() && !t.IsPtr() && f2.Embedded != 2 && !isifacemethod(f2.Type) {
-		yyerror("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, f2.Sym)
-		return false
-	}
-
-	n.Sym = methodsym(n.Sym, t, 0)
-	n.Xoffset = f2.Offset
-	n.Type = f2.Type
-	n.Op = ODOTMETH
-	return true
-}
-
-func derefall(t *Type) *Type {
-	for t != nil && t.Etype == Tptr {
-		t = t.Elem()
-	}
-	return t
-}
-
-type typeSym struct {
-	t *Type
-	s *Sym
-}
-
-// dotField maps (*Type, *Sym) pairs to the corresponding struct field (*Type with Etype==TFIELD).
-// It is a cache for use during usefield in walk.go, only enabled when field tracking.
-var dotField = map[typeSym]*Field{}
-
-func lookdot(n *Node, t *Type, dostrcmp int) *Field {
-	s := n.Sym
-
-	dowidth(t)
-	var f1 *Field
-	if t.IsStruct() || t.IsInterface() {
-		f1 = lookdot1(n, s, t, t.Fields(), dostrcmp)
-	}
-
-	var f2 *Field
-	if n.Left.Type == t || n.Left.Type.Sym == nil {
-		mt := methtype(t)
-		if mt != nil {
-			// Use f2->method, not f2->xmethod: adddot has
-			// already inserted all the necessary embedded dots.
-			f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
-		}
-	}
-
-	if f1 != nil {
-		if dostrcmp > 1 {
-			// Already in the process of diagnosing an error.
-			return f1
-		}
-		if f2 != nil {
-			yyerror("%v is both field and method", n.Sym)
-		}
-		if f1.Offset == BADWIDTH {
-			Fatalf("lookdot badwidth %v %p", f1, f1)
-		}
-		n.Xoffset = f1.Offset
-		n.Type = f1.Type
-		if obj.Fieldtrack_enabled > 0 {
-			dotField[typeSym{t.Orig, s}] = f1
-		}
-		if t.IsInterface() {
-			if n.Left.Type.IsPtr() {
-				n.Left = nod(OIND, n.Left, nil) // implicitstar
-				n.Left.Implicit = true
-				n.Left = typecheck(n.Left, Erv)
-			}
-
-			n.Op = ODOTINTER
-		}
-
-		return f1
-	}
-
-	if f2 != nil {
-		if dostrcmp > 1 {
-			// Already in the process of diagnosing an error.
-			return f2
-		}
-		tt := n.Left.Type
-		dowidth(tt)
-		rcvr := f2.Type.Recv().Type
-		if !eqtype(rcvr, tt) {
-			if rcvr.Etype == Tptr && eqtype(rcvr.Elem(), tt) {
-				checklvalue(n.Left, "call pointer method on")
-				n.Left = nod(OADDR, n.Left, nil)
-				n.Left.Implicit = true
-				n.Left = typecheck(n.Left, Etype|Erv)
-			} else if tt.Etype == Tptr && rcvr.Etype != Tptr && eqtype(tt.Elem(), rcvr) {
-				n.Left = nod(OIND, n.Left, nil)
-				n.Left.Implicit = true
-				n.Left = typecheck(n.Left, Etype|Erv)
-			} else if tt.Etype == Tptr && tt.Elem().Etype == Tptr && eqtype(derefall(tt), derefall(rcvr)) {
-				yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
-				for tt.Etype == Tptr {
-					// Stop one level early for method with pointer receiver.
-					if rcvr.Etype == Tptr && tt.Elem().Etype != Tptr {
-						break
-					}
-					n.Left = nod(OIND, n.Left, nil)
-					n.Left.Implicit = true
-					n.Left = typecheck(n.Left, Etype|Erv)
-					tt = tt.Elem()
-				}
-			} else {
-				Fatalf("method mismatch: %v for %v", rcvr, tt)
-			}
-		}
-
-		pll := n
-		ll := n.Left
-		for ll.Left != nil && (ll.Op == ODOT || ll.Op == ODOTPTR || ll.Op == OIND) {
-			pll = ll
-			ll = ll.Left
-		}
-		if pll.Implicit && ll.Type.IsPtr() && ll.Type.Sym != nil && ll.Type.Sym.Def != nil && ll.Type.Sym.Def.Op == OTYPE {
-			// It is invalid to automatically dereference a named pointer type when selecting a method.
-			// Make n->left == ll to clarify error message.
-			n.Left = ll
-			return nil
-		}
-
-		n.Sym = methodsym(n.Sym, n.Left.Type, 0)
-		n.Xoffset = f2.Offset
-		n.Type = f2.Type
-
-		//		print("lookdot found [%p] %T\n", f2->type, f2->type);
-		n.Op = ODOTMETH
-
-		return f2
-	}
-
-	return nil
-}
-
-func nokeys(l Nodes) bool {
-	for _, n := range l.Slice() {
-		if n.Op == OKEY || n.Op == OSTRUCTKEY {
-			return false
-		}
-	}
-	return true
-}
-
-func hasddd(t *Type) bool {
-	for _, tl := range t.Fields().Slice() {
-		if tl.Isddd {
-			return true
-		}
-	}
-
-	return false
-}
-
-// typecheck assignment: type list = expression list
-func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl Nodes, desc func() string) {
-	var t *Type
-	var n *Node
-	var n1 int
-	var n2 int
-	var i int
-
-	lno := lineno
-
-	if tstruct.Broke {
-		goto out
-	}
-
-	n = nil
-	if nl.Len() == 1 {
-		n = nl.First()
-		if n.Type != nil {
-			if n.Type.IsFuncArgStruct() {
-				if !hasddd(tstruct) {
-					n1 := tstruct.NumFields()
-					n2 := n.Type.NumFields()
-					if n2 > n1 {
-						goto toomany
-					}
-					if n2 < n1 {
-						goto notenough
-					}
-				}
-
-				tn, it := iterFields(n.Type)
-				var why string
-				for _, tl := range tstruct.Fields().Slice() {
-					if tl.Isddd {
-						for ; tn != nil; tn = it.Next() {
-							if assignop(tn.Type, tl.Type.Elem(), &why) == 0 {
-								if call != nil {
-									yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type.Elem(), call, why)
-								} else {
-									yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type.Elem(), desc(), why)
-								}
-							}
-						}
-
-						goto out
-					}
-
-					if tn == nil {
-						goto notenough
-					}
-					if assignop(tn.Type, tl.Type, &why) == 0 {
-						if call != nil {
-							yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type, call, why)
-						} else {
-							yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type, desc(), why)
-						}
-					}
-
-					tn = it.Next()
-				}
-
-				if tn != nil {
-					goto toomany
-				}
-				goto out
-			}
-		}
-	}
-
-	n1 = tstruct.NumFields()
-	n2 = nl.Len()
-	if !hasddd(tstruct) {
-		if n2 > n1 {
-			goto toomany
-		}
-		if n2 < n1 {
-			goto notenough
-		}
-	} else {
-		if !isddd {
-			if n2 < n1-1 {
-				goto notenough
-			}
-		} else {
-			if n2 > n1 {
-				goto toomany
-			}
-			if n2 < n1 {
-				goto notenough
-			}
-		}
-	}
-
-	i = 0
-	for _, tl := range tstruct.Fields().Slice() {
-		t = tl.Type
-		if tl.Isddd {
-			if isddd {
-				if i >= nl.Len() {
-					goto notenough
-				}
-				if nl.Len()-i > 1 {
-					goto toomany
-				}
-				n = nl.Index(i)
-				setlineno(n)
-				if n.Type != nil {
-					nl.SetIndex(i, assignconvfn(n, t, desc))
-				}
-				goto out
-			}
-
-			for ; i < nl.Len(); i++ {
-				n = nl.Index(i)
-				setlineno(n)
-				if n.Type != nil {
-					nl.SetIndex(i, assignconvfn(n, t.Elem(), desc))
-				}
-			}
-
-			goto out
-		}
-
-		if i >= nl.Len() {
-			goto notenough
-		}
-		n = nl.Index(i)
-		setlineno(n)
-		if n.Type != nil {
-			nl.SetIndex(i, assignconvfn(n, t, desc))
-		}
-		i++
-	}
-
-	if i < nl.Len() {
-		goto toomany
-	}
-	if isddd {
-		if call != nil {
-			yyerror("invalid use of ... in call to %v", call)
-		} else {
-			yyerror("invalid use of ... in %v", op)
-		}
-	}
-
-out:
-	lineno = lno
-	return
-
-notenough:
-	if n == nil || !n.Diag {
-		if call != nil {
-			// call is the expression being called, not the overall call.
-			// Method expressions have the form T.M, and the compiler has
-			// rewritten those to ONAME nodes but left T in Left.
-			if call.Op == ONAME && call.Left != nil && call.Left.Op == OTYPE {
-				yyerror("not enough arguments in call to method expression %v\n\thave %s\n\twant %v", call, nl.retsigerr(isddd), tstruct)
-			} else {
-				yyerror("not enough arguments in call to %v\n\thave %s\n\twant %v", call, nl.retsigerr(isddd), tstruct)
-			}
-		} else {
-			yyerror("not enough arguments to %v\n\thave %s\n\twant %v", op, nl.retsigerr(isddd), tstruct)
-		}
-		if n != nil {
-			n.Diag = true
-		}
-	}
-
-	goto out
-
-toomany:
-	if call != nil {
-		yyerror("too many arguments in call to %v\n\thave %s\n\twant %v", call, nl.retsigerr(isddd), tstruct)
-	} else {
-		yyerror("too many arguments to %v\n\thave %s\n\twant %v", op, nl.retsigerr(isddd), tstruct)
-	}
-	goto out
-}
-
-// sigrepr is a type's representation to the outside world,
-// in string representations of return signatures
-// e.g in error messages about wrong arguments to return.
-func (t *Type) sigrepr() string {
-	switch t {
-	default:
-		return t.String()
-
-	case Types[TIDEAL]:
-		// "untyped number" is not commonly used
-		// outside of the compiler, so let's use "number".
-		return "number"
-
-	case idealstring:
-		return "string"
-
-	case idealbool:
-		return "bool"
-	}
-}
-
-// retsigerr returns the signature of the types
-// at the respective return call site of a function.
-func (nl Nodes) retsigerr(isddd bool) string {
-	if nl.Len() < 1 {
-		return "()"
-	}
-
-	var typeStrings []string
-	if nl.Len() == 1 && nl.First().Type != nil && nl.First().Type.IsFuncArgStruct() {
-		for _, f := range nl.First().Type.Fields().Slice() {
-			typeStrings = append(typeStrings, f.Type.sigrepr())
-		}
-	} else {
-		for _, n := range nl.Slice() {
-			typeStrings = append(typeStrings, n.Type.sigrepr())
-		}
-	}
-
-	ddd := ""
-	if isddd {
-		ddd = "..."
-	}
-	return fmt.Sprintf("(%s%s)", strings.Join(typeStrings, ", "), ddd)
-}
-
-// type check composite
-func fielddup(name string, hash map[string]bool) {
-	if hash[name] {
-		yyerror("duplicate field name in struct literal: %s", name)
-		return
-	}
-	hash[name] = true
-}
-
-func keydup(n *Node, hash map[uint32][]*Node) {
-	orign := n
-	if n.Op == OCONVIFACE {
-		n = n.Left
-	}
-	evconst(n)
-	if n.Op != OLITERAL {
-		return // we don't check variables
-	}
-
-	const PRIME1 = 3
-
-	var h uint32
-	switch v := n.Val().U.(type) {
-	default: // unknown, bool, nil
-		h = 23
-
-	case *Mpint:
-		h = uint32(v.Int64())
-
-	case *Mpflt:
-		x := math.Float64bits(v.Float64())
-		for i := 0; i < 8; i++ {
-			h = h*PRIME1 + uint32(x&0xFF)
-			x >>= 8
-		}
-
-	case string:
-		for i := 0; i < len(v); i++ {
-			h = h*PRIME1 + uint32(v[i])
-		}
-	}
-
-	var cmp Node
-	for _, a := range hash[h] {
-		cmp.Op = OEQ
-		cmp.Left = n
-		if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
-			a = a.Left
-		}
-		if !eqtype(a.Type, n.Type) {
-			continue
-		}
-		cmp.Right = a
-		evconst(&cmp)
-		if cmp.Op != OLITERAL {
-			// Sometimes evconst fails. See issue 12536.
-			continue
-		}
-		if cmp.Val().U.(bool) {
-			yyerror("duplicate key %v in map literal", n)
-			return
-		}
-	}
-
-	hash[h] = append(hash[h], orign)
-}
-
-// iscomptype reports whether type t is a composite literal type
-// or a pointer to one.
-func iscomptype(t *Type) bool {
-	if t.IsPtr() {
-		t = t.Elem()
-	}
-
-	switch t.Etype {
-	case TARRAY, TSLICE, TSTRUCT, TMAP:
-		return true
-	default:
-		return false
-	}
-}
-
-func pushtype(n *Node, t *Type) {
-	if n == nil || n.Op != OCOMPLIT || !iscomptype(t) {
-		return
-	}
-
-	if n.Right == nil {
-		n.Right = typenod(t)
-		n.Implicit = true       // don't print
-		n.Right.Implicit = true // * is okay
-	} else if Debug['s'] != 0 {
-		n.Right = typecheck(n.Right, Etype)
-		if n.Right.Type != nil && eqtype(n.Right.Type, t) {
-			fmt.Printf("%v: redundant type: %v\n", n.Line(), t)
-		}
-	}
-}
-
-// The result of typecheckcomplit MUST be assigned back to n, e.g.
-// 	n.Left = typecheckcomplit(n.Left)
-func typecheckcomplit(n *Node) *Node {
-	lno := lineno
-	defer func() {
-		lineno = lno
-	}()
-
-	if n.Right == nil {
-		if n.List.Len() != 0 {
-			setlineno(n.List.First())
-		}
-		yyerror("missing type in composite literal")
-		n.Type = nil
-		return n
-	}
-
-	// Save original node (including n->right)
-	norig := nod(n.Op, nil, nil)
-
-	*norig = *n
-
-	setlineno(n.Right)
-	n.Right = typecheck(n.Right, Etype|Ecomplit)
-	l := n.Right // sic
-	t := l.Type
-	if t == nil {
-		n.Type = nil
-		return n
-	}
-	nerr := nerrors
-	n.Type = t
-
-	if t.IsPtr() {
-		// For better or worse, we don't allow pointers as the composite literal type,
-		// except when using the &T syntax, which sets implicit on the OIND.
-		if !n.Right.Implicit {
-			yyerror("invalid pointer type %v for composite literal (use &%v instead)", t, t.Elem())
-			n.Type = nil
-			return n
-		}
-
-		// Also, the underlying type must be a struct, map, slice, or array.
-		if !iscomptype(t) {
-			yyerror("invalid pointer type %v for composite literal", t)
-			n.Type = nil
-			return n
-		}
-
-		t = t.Elem()
-	}
-
-	switch t.Etype {
-	default:
-		yyerror("invalid type for composite literal: %v", t)
-		n.Type = nil
-
-	case TARRAY, TSLICE:
-		// If there are key/value pairs, create a map to keep seen
-		// keys so we can check for duplicate indices.
-		var indices map[int64]bool
-		for _, n1 := range n.List.Slice() {
-			if n1.Op == OKEY {
-				indices = make(map[int64]bool)
-				break
-			}
-		}
-
-		var length, i int64
-		checkBounds := t.IsArray() && !t.isDDDArray()
-		nl := n.List.Slice()
-		for i2, l := range nl {
-			setlineno(l)
-			vp := &nl[i2]
-			if l.Op == OKEY {
-				l.Left = typecheck(l.Left, Erv)
-				evconst(l.Left)
-				i = nonnegintconst(l.Left)
-				if i < 0 && !l.Left.Diag {
-					yyerror("index must be non-negative integer constant")
-					l.Left.Diag = true
-					i = -(1 << 30) // stay negative for a while
-				}
-				vp = &l.Right
-			}
-
-			if i >= 0 && indices != nil {
-				if indices[i] {
-					yyerror("duplicate index in array literal: %d", i)
-				} else {
-					indices[i] = true
-				}
-			}
-
-			r := *vp
-			pushtype(r, t.Elem())
-			r = typecheck(r, Erv)
-			r = defaultlit(r, t.Elem())
-			*vp = assignconv(r, t.Elem(), "array or slice literal")
-
-			i++
-			if i > length {
-				length = i
-				if checkBounds && length > t.NumElem() {
-					setlineno(l)
-					yyerror("array index %d out of bounds [0:%d]", length-1, t.NumElem())
-					checkBounds = false
-				}
-			}
-		}
-
-		if t.isDDDArray() {
-			t.SetNumElem(length)
-		}
-		if t.IsSlice() {
-			n.Right = nodintconst(length)
-			n.Op = OSLICELIT
-		} else {
-			n.Op = OARRAYLIT
-		}
-
-	case TMAP:
-		hash := make(map[uint32][]*Node)
-		for i3, l := range n.List.Slice() {
-			setlineno(l)
-			if l.Op != OKEY {
-				n.List.SetIndex(i3, typecheck(n.List.Index(i3), Erv))
-				yyerror("missing key in map literal")
-				continue
-			}
-
-			r := l.Left
-			pushtype(r, t.Key())
-			r = typecheck(r, Erv)
-			r = defaultlit(r, t.Key())
-			l.Left = assignconv(r, t.Key(), "map key")
-			if l.Left.Op != OCONV {
-				keydup(l.Left, hash)
-			}
-
-			r = l.Right
-			pushtype(r, t.Val())
-			r = typecheck(r, Erv)
-			r = defaultlit(r, t.Val())
-			l.Right = assignconv(r, t.Val(), "map value")
-		}
-
-		n.Op = OMAPLIT
-
-	case TSTRUCT:
-		// Need valid field offsets for Xoffset below.
-		dowidth(t)
-
-		bad := 0
-		if n.List.Len() != 0 && nokeys(n.List) {
-			// simple list of variables
-			f, it := iterFields(t)
-
-			ls := n.List.Slice()
-			for i1, n1 := range ls {
-				setlineno(n1)
-				ls[i1] = typecheck(ls[i1], Erv)
-				n1 = ls[i1]
-				if f == nil {
-					if bad == 0 {
-						yyerror("too many values in struct initializer")
-					}
-					bad++
-					continue
-				}
-
-				s := f.Sym
-				if s != nil && !exportname(s.Name) && s.Pkg != localpkg {
-					yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
-				}
-				// No pushtype allowed here. Must name fields for that.
-				n1 = assignconv(n1, f.Type, "field value")
-				n1 = nodSym(OSTRUCTKEY, n1, f.Sym)
-				n1.Xoffset = f.Offset
-				ls[i1] = n1
-				f = it.Next()
-			}
-
-			if f != nil {
-				yyerror("too few values in struct initializer")
-			}
-		} else {
-			hash := make(map[string]bool)
-
-			// keyed list
-			ls := n.List.Slice()
-			for i, l := range ls {
-				setlineno(l)
-
-				if l.Op == OKEY {
-					key := l.Left
-
-					l.Op = OSTRUCTKEY
-					l.Left = l.Right
-					l.Right = nil
-
-					// An OXDOT uses the Sym field to hold
-					// the field to the right of the dot,
-					// so s will be non-nil, but an OXDOT
-					// is never a valid struct literal key.
-					if key.Sym == nil || key.Op == OXDOT {
-						yyerror("invalid field name %v in struct initializer", key)
-						l.Left = typecheck(l.Left, Erv)
-						continue
-					}
-
-					// Sym might have resolved to name in other top-level
-					// package, because of import dot. Redirect to correct sym
-					// before we do the lookup.
-					s := key.Sym
-					if s.Pkg != localpkg && exportname(s.Name) {
-						s1 := lookup(s.Name)
-						if s1.Origpkg == s.Pkg {
-							s = s1
-						}
-					}
-					l.Sym = s
-				}
-
-				if l.Op != OSTRUCTKEY {
-					if bad == 0 {
-						yyerror("mixture of field:value and value initializers")
-					}
-					bad++
-					ls[i] = typecheck(ls[i], Erv)
-					continue
-				}
-
-				f := lookdot1(nil, l.Sym, t, t.Fields(), 0)
-				if f == nil {
-					yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t)
-					continue
-				}
-				fielddup(f.Sym.Name, hash)
-				l.Xoffset = f.Offset
-
-				// No pushtype allowed here. Tried and rejected.
-				l.Left = typecheck(l.Left, Erv)
-				l.Left = assignconv(l.Left, f.Type, "field value")
-			}
-		}
-
-		n.Op = OSTRUCTLIT
-	}
-
-	if nerr != nerrors {
-		return n
-	}
-
-	n.Orig = norig
-	if n.Type.IsPtr() {
-		n = nod(OPTRLIT, n, nil)
-		n.Typecheck = 1
-		n.Type = n.Left.Type
-		n.Left.Type = t
-		n.Left.Typecheck = 1
-	}
-
-	n.Orig = norig
-	return n
-}
-
-// lvalue etc
-func islvalue(n *Node) bool {
-	switch n.Op {
-	case OINDEX:
-		if n.Left.Type != nil && n.Left.Type.IsArray() {
-			return islvalue(n.Left)
-		}
-		if n.Left.Type != nil && n.Left.Type.IsString() {
-			return false
-		}
-		fallthrough
-	case OIND, ODOTPTR, OCLOSUREVAR:
-		return true
-
-	case ODOT:
-		return islvalue(n.Left)
-
-	case ONAME:
-		if n.Class == PFUNC {
-			return false
-		}
-		return true
-	}
-
-	return false
-}
-
-func checklvalue(n *Node, verb string) {
-	if !islvalue(n) {
-		yyerror("cannot %s %v", verb, n)
-	}
-}
-
-func checkassign(stmt *Node, n *Node) {
-	// Variables declared in ORANGE are assigned on every iteration.
-	if n.Name == nil || n.Name.Defn != stmt || stmt.Op == ORANGE {
-		r := outervalue(n)
-		var l *Node
-		for l = n; l != r; l = l.Left {
-			l.Assigned = true
-			if l.isClosureVar() {
-				l.Name.Defn.Assigned = true
-			}
-		}
-
-		l.Assigned = true
-		if l.isClosureVar() {
-			l.Name.Defn.Assigned = true
-		}
-	}
-
-	if islvalue(n) {
-		return
-	}
-	if n.Op == OINDEXMAP {
-		n.Etype = 1
-		return
-	}
-
-	// have already complained about n being undefined
-	if n.Op == ONONAME {
-		return
-	}
-
-	if n.Op == ODOT && n.Left.Op == OINDEXMAP {
-		yyerror("cannot assign to struct field %v in map", n)
-		return
-	}
-
-	yyerror("cannot assign to %v", n)
-}
-
-func checkassignlist(stmt *Node, l Nodes) {
-	for _, n := range l.Slice() {
-		checkassign(stmt, n)
-	}
-}
-
-// Check whether l and r are the same side effect-free expression,
-// so that it is safe to reuse one instead of computing both.
-func samesafeexpr(l *Node, r *Node) bool {
-	if l.Op != r.Op || !eqtype(l.Type, r.Type) {
-		return false
-	}
-
-	switch l.Op {
-	case ONAME, OCLOSUREVAR:
-		return l == r
-
-	case ODOT, ODOTPTR:
-		return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
-
-	case OIND, OCONVNOP:
-		return samesafeexpr(l.Left, r.Left)
-
-	case OCONV:
-		// Some conversions can't be reused, such as []byte(str).
-		// Allow only numeric-ish types. This is a bit conservative.
-		return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
-
-	case OINDEX:
-		return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
-
-	case OLITERAL:
-		return eqval(l.Val(), r.Val())
-	}
-
-	return false
-}
-
-// type check assignment.
-// if this assignment is the definition of a var on the left side,
-// fill in the var's type.
-func typecheckas(n *Node) {
-	// delicate little dance.
-	// the definition of n may refer to this assignment
-	// as its definition, in which case it will call typecheckas.
-	// in that case, do not call typecheck back, or it will cycle.
-	// if the variable has a type (ntype) then typechecking
-	// will not look at defn, so it is okay (and desirable,
-	// so that the conversion below happens).
-	n.Left = resolve(n.Left)
-
-	if n.Left.Name == nil || n.Left.Name.Defn != n || n.Left.Name.Param.Ntype != nil {
-		n.Left = typecheck(n.Left, Erv|Easgn)
-	}
-
-	n.Right = typecheck(n.Right, Erv)
-	checkassign(n, n.Left)
-	if n.Right != nil && n.Right.Type != nil {
-		if n.Left.Type != nil {
-			n.Right = assignconv(n.Right, n.Left.Type, "assignment")
-		}
-	}
-
-	if n.Left.Name != nil && n.Left.Name.Defn == n && n.Left.Name.Param.Ntype == nil {
-		n.Right = defaultlit(n.Right, nil)
-		n.Left.Type = n.Right.Type
-	}
-
-	// second half of dance.
-	// now that right is done, typecheck the left
-	// just to get it over with.  see dance above.
-	n.Typecheck = 1
-
-	if n.Left.Typecheck == 0 {
-		n.Left = typecheck(n.Left, Erv|Easgn)
-	}
-}
-
-func checkassignto(src *Type, dst *Node) {
-	var why string
-
-	if assignop(src, dst.Type, &why) == 0 {
-		yyerror("cannot assign %v to %L in multiple assignment%s", src, dst, why)
-		return
-	}
-}
-
-func typecheckas2(n *Node) {
-	ls := n.List.Slice()
-	for i1, n1 := range ls {
-		// delicate little dance.
-		n1 = resolve(n1)
-		ls[i1] = n1
-
-		if n1.Name == nil || n1.Name.Defn != n || n1.Name.Param.Ntype != nil {
-			ls[i1] = typecheck(ls[i1], Erv|Easgn)
-		}
-	}
-
-	cl := n.List.Len()
-	cr := n.Rlist.Len()
-	if cl > 1 && cr == 1 {
-		n.Rlist.SetIndex(0, typecheck(n.Rlist.Index(0), Erv|Efnstruct))
-	} else {
-		typecheckslice(n.Rlist.Slice(), Erv)
-	}
-	checkassignlist(n, n.List)
-
-	var l *Node
-	var r *Node
-	if cl == cr {
-		// easy
-		ls := n.List.Slice()
-		rs := n.Rlist.Slice()
-		for il, nl := range ls {
-			nr := rs[il]
-			if nl.Type != nil && nr.Type != nil {
-				rs[il] = assignconv(nr, nl.Type, "assignment")
-			}
-			if nl.Name != nil && nl.Name.Defn == n && nl.Name.Param.Ntype == nil {
-				rs[il] = defaultlit(rs[il], nil)
-				nl.Type = rs[il].Type
-			}
-		}
-
-		goto out
-	}
-
-	l = n.List.First()
-	r = n.Rlist.First()
-
-	// x,y,z = f()
-	if cr == 1 {
-		if r.Type == nil {
-			goto out
-		}
-		switch r.Op {
-		case OCALLMETH, OCALLINTER, OCALLFUNC:
-			if !r.Type.IsFuncArgStruct() {
-				break
-			}
-			cr = r.Type.NumFields()
-			if cr != cl {
-				goto mismatch
-			}
-			n.Op = OAS2FUNC
-			t, s := iterFields(r.Type)
-			for _, n3 := range n.List.Slice() {
-				if t.Type != nil && n3.Type != nil {
-					checkassignto(t.Type, n3)
-				}
-				if n3.Name != nil && n3.Name.Defn == n && n3.Name.Param.Ntype == nil {
-					n3.Type = t.Type
-				}
-				t = s.Next()
-			}
-
-			goto out
-		}
-	}
-
-	// x, ok = y
-	if cl == 2 && cr == 1 {
-		if r.Type == nil {
-			goto out
-		}
-		switch r.Op {
-		case OINDEXMAP, ORECV, ODOTTYPE:
-			switch r.Op {
-			case OINDEXMAP:
-				n.Op = OAS2MAPR
-
-			case ORECV:
-				n.Op = OAS2RECV
-
-			case ODOTTYPE:
-				n.Op = OAS2DOTTYPE
-				r.Op = ODOTTYPE2
-			}
-
-			if l.Type != nil {
-				checkassignto(r.Type, l)
-			}
-			if l.Name != nil && l.Name.Defn == n {
-				l.Type = r.Type
-			}
-			l := n.List.Second()
-			if l.Type != nil && !l.Type.IsBoolean() {
-				checkassignto(Types[TBOOL], l)
-			}
-			if l.Name != nil && l.Name.Defn == n && l.Name.Param.Ntype == nil {
-				l.Type = Types[TBOOL]
-			}
-			goto out
-		}
-	}
-
-mismatch:
-	yyerror("assignment count mismatch: %d = %d", cl, cr)
-
-	// second half of dance
-out:
-	n.Typecheck = 1
-	ls = n.List.Slice()
-	for i1, n1 := range ls {
-		if n1.Typecheck == 0 {
-			ls[i1] = typecheck(ls[i1], Erv|Easgn)
-		}
-	}
-}
-
-// type check function definition
-func typecheckfunc(n *Node) {
-	for _, ln := range n.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class == PPARAM || ln.Class == PPARAMOUT) {
-			ln.Name.Decldepth = 1
-		}
-	}
-
-	n.Func.Nname = typecheck(n.Func.Nname, Erv|Easgn)
-	t := n.Func.Nname.Type
-	if t == nil {
-		return
-	}
-	n.Type = t
-	t.SetNname(n.Func.Nname)
-	rcvr := t.Recv()
-	if rcvr != nil && n.Func.Shortname != nil {
-		addmethod(n.Func.Shortname.Sym, t, true, n.Func.Pragma&Nointerface != 0)
-	}
-}
-
-// The result of stringtoarraylit MUST be assigned back to n, e.g.
-// 	n.Left = stringtoarraylit(n.Left)
-func stringtoarraylit(n *Node) *Node {
-	if n.Left.Op != OLITERAL || n.Left.Val().Ctype() != CTSTR {
-		Fatalf("stringtoarraylit %v", n)
-	}
-
-	s := n.Left.Val().U.(string)
-	var l []*Node
-	if n.Type.Elem().Etype == TUINT8 {
-		// []byte
-		for i := 0; i < len(s); i++ {
-			l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(s[0]))))
-		}
-	} else {
-		// []rune
-		i := 0
-		for _, r := range s {
-			l = append(l, nod(OKEY, nodintconst(int64(i)), nodintconst(int64(r))))
-			i++
-		}
-	}
-
-	nn := nod(OCOMPLIT, nil, typenod(n.Type))
-	nn.List.Set(l)
-	nn = typecheck(nn, Erv)
-	return nn
-}
-
-var ntypecheckdeftype int
-
-var methodqueue []*Node
-
-func domethod(n *Node) {
-	nt := n.Type.Nname()
-	nt = typecheck(nt, Etype)
-	if nt.Type == nil {
-		// type check failed; leave empty func
-		// TODO(mdempsky): Fix Type rekinding.
-		n.Type.Etype = TFUNC
-		n.Type.nod = nil
-		return
-	}
-
-	// If we have
-	//	type I interface {
-	//		M(_ int)
-	//	}
-	// then even though I.M looks like it doesn't care about the
-	// value of its argument, a specific implementation of I may
-	// care. The _ would suppress the assignment to that argument
-	// while generating a call, so remove it.
-	for _, t := range nt.Type.Params().Fields().Slice() {
-		if t.Sym != nil && t.Sym.Name == "_" {
-			t.Sym = nil
-		}
-	}
-
-	// TODO(mdempsky): Fix Type rekinding.
-	*n.Type = *nt.Type
-	n.Type.nod = nil
-	checkwidth(n.Type)
-}
-
-type mapqueueval struct {
-	n   *Node
-	lno int32
-}
-
-// tracks the line numbers at which forward types are first used as map keys
-var mapqueue []mapqueueval
-
-func copytype(n *Node, t *Type) {
-	if t.Etype == TFORW {
-		// This type isn't computed yet; when it is, update n.
-		t.ForwardType().Copyto = append(t.ForwardType().Copyto, n)
-		return
-	}
-
-	embedlineno := n.Type.ForwardType().Embedlineno
-	l := n.Type.ForwardType().Copyto
-
-	ptrTo := n.Type.ptrTo
-	sliceOf := n.Type.sliceOf
-
-	// TODO(mdempsky): Fix Type rekinding.
-	*n.Type = *t
-
-	t = n.Type
-	t.Sym = n.Sym
-	t.Local = n.Local
-	if n.Name != nil {
-		t.Vargen = n.Name.Vargen
-	}
-	t.methods = Fields{}
-	t.allMethods = Fields{}
-	t.nod = nil
-	t.Deferwidth = false
-	t.ptrTo = ptrTo
-	t.sliceOf = sliceOf
-
-	// Propagate go:notinheap pragma from the Name to the Type.
-	if n.Name != nil && n.Name.Param != nil && n.Name.Param.Pragma&NotInHeap != 0 {
-		t.NotInHeap = true
-	}
-
-	// Update nodes waiting on this type.
-	for _, n := range l {
-		copytype(n, t)
-	}
-
-	// Double-check use of type as embedded type.
-	lno := lineno
-
-	if embedlineno != 0 {
-		lineno = embedlineno
-		if t.IsPtr() || t.IsUnsafePtr() {
-			yyerror("embedded type cannot be a pointer")
-		}
-	}
-
-	lineno = lno
-}
-
-func typecheckdeftype(n *Node) {
-	ntypecheckdeftype++
-	lno := lineno
-	setlineno(n)
-	n.Type.Sym = n.Sym
-	n.Typecheck = 1
-	n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, Etype)
-	t := n.Name.Param.Ntype.Type
-	if t == nil {
-		n.Diag = true
-		n.Type = nil
-		goto ret
-	}
-
-	if n.Type == nil {
-		n.Diag = true
-		goto ret
-	}
-
-	// copy new type and clear fields
-	// that don't come along.
-	// anything zeroed here must be zeroed in
-	// typedcl2 too.
-	copytype(n, t)
-
-ret:
-	lineno = lno
-
-	// if there are no type definitions going on, it's safe to
-	// try to resolve the method types for the interfaces
-	// we just read.
-	if ntypecheckdeftype == 1 {
-		for {
-			s := methodqueue
-			if len(s) == 0 {
-				break
-			}
-			methodqueue = nil
-			for _, n := range s {
-				domethod(n)
-			}
-		}
-		for _, e := range mapqueue {
-			lineno = e.lno
-			if !e.n.Type.IsComparable() {
-				yyerror("invalid map key type %v", e.n.Type)
-			}
-		}
-		mapqueue = nil
-		lineno = lno
-	}
-
-	ntypecheckdeftype--
-}
-
-func queuemethod(n *Node) {
-	if ntypecheckdeftype == 0 {
-		domethod(n)
-		return
-	}
-
-	methodqueue = append(methodqueue, n)
-}
-
-func typecheckdef(n *Node) *Node {
-	lno := lineno
-	setlineno(n)
-
-	if n.Op == ONONAME {
-		if !n.Diag {
-			n.Diag = true
-			if n.Lineno != 0 {
-				lineno = n.Lineno
-			}
-
-			// Note: adderrorname looks for this string and
-			// adds context about the outer expression
-			yyerror("undefined: %v", n.Sym)
-		}
-
-		return n
-	}
-
-	if n.Walkdef == 1 {
-		return n
-	}
-
-	typecheckdefstack = append(typecheckdefstack, n)
-	if n.Walkdef == 2 {
-		flusherrors()
-		fmt.Printf("typecheckdef loop:")
-		for i := len(typecheckdefstack) - 1; i >= 0; i-- {
-			n := typecheckdefstack[i]
-			fmt.Printf(" %v", n.Sym)
-		}
-		fmt.Printf("\n")
-		Fatalf("typecheckdef loop")
-	}
-
-	n.Walkdef = 2
-
-	if n.Type != nil || n.Sym == nil { // builtin or no name
-		goto ret
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("typecheckdef %v", n.Op)
-
-	case OGOTO, OLABEL, OPACK:
-		// nothing to do here
-
-	case OLITERAL:
-		if n.Name.Param.Ntype != nil {
-			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, Etype)
-			n.Type = n.Name.Param.Ntype.Type
-			n.Name.Param.Ntype = nil
-			if n.Type == nil {
-				n.Diag = true
-				goto ret
-			}
-		}
-
-		e := n.Name.Defn
-		n.Name.Defn = nil
-		if e == nil {
-			lineno = n.Lineno
-			Dump("typecheckdef nil defn", n)
-			yyerror("xxx")
-		}
-
-		e = typecheck(e, Erv)
-		if Isconst(e, CTNIL) {
-			yyerror("const initializer cannot be nil")
-			goto ret
-		}
-
-		if e.Type != nil && e.Op != OLITERAL || !isgoconst(e) {
-			if !e.Diag {
-				yyerror("const initializer %v is not a constant", e)
-				e.Diag = true
-			}
-
-			goto ret
-		}
-
-		t := n.Type
-		if t != nil {
-			if !okforconst[t.Etype] {
-				yyerror("invalid constant type %v", t)
-				goto ret
-			}
-
-			if !e.Type.IsUntyped() && !eqtype(t, e.Type) {
-				yyerror("cannot use %L as type %v in const initializer", e, t)
-				goto ret
-			}
-
-			e = convlit(e, t)
-		}
-
-		n.SetVal(e.Val())
-		n.Type = e.Type
-
-	case ONAME:
-		if n.Name.Param.Ntype != nil {
-			n.Name.Param.Ntype = typecheck(n.Name.Param.Ntype, Etype)
-			n.Type = n.Name.Param.Ntype.Type
-			if n.Type == nil {
-				n.Diag = true
-				goto ret
-			}
-		}
-
-		if n.Type != nil {
-			break
-		}
-		if n.Name.Defn == nil {
-			if n.Etype != 0 { // like OPRINTN
-				break
-			}
-			if nsavederrors+nerrors > 0 {
-				// Can have undefined variables in x := foo
-				// that make x have an n->ndefn == nil.
-				// If there are other errors anyway, don't
-				// bother adding to the noise.
-				break
-			}
-
-			Fatalf("var without type, init: %v", n.Sym)
-		}
-
-		if n.Name.Defn.Op == ONAME {
-			n.Name.Defn = typecheck(n.Name.Defn, Erv)
-			n.Type = n.Name.Defn.Type
-			break
-		}
-
-		n.Name.Defn = typecheck(n.Name.Defn, Etop) // fills in n->type
-
-	case OTYPE:
-		if Curfn != nil {
-			defercheckwidth()
-		}
-		n.Walkdef = 1
-		n.Type = typ(TFORW)
-		n.Type.Sym = n.Sym
-		nerrors0 := nerrors
-		typecheckdeftype(n)
-		if n.Type.Etype == TFORW && nerrors > nerrors0 {
-			// Something went wrong during type-checking,
-			// but it was reported. Silence future errors.
-			n.Type.Broke = true
-		}
-
-		if Curfn != nil {
-			resumecheckwidth()
-		}
-	}
-
-ret:
-	if n.Op != OLITERAL && n.Type != nil && n.Type.IsUntyped() {
-		Fatalf("got %v for %v", n.Type, n)
-	}
-	last := len(typecheckdefstack) - 1
-	if typecheckdefstack[last] != n {
-		Fatalf("typecheckdefstack mismatch")
-	}
-	typecheckdefstack[last] = nil
-	typecheckdefstack = typecheckdefstack[:last]
-
-	lineno = lno
-	n.Walkdef = 1
-	return n
-}
-
-func checkmake(t *Type, arg string, n *Node) bool {
-	if !n.Type.IsInteger() && n.Type.Etype != TIDEAL {
-		yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
-		return false
-	}
-
-	// Do range checks for constants before defaultlit
-	// to avoid redundant "constant NNN overflows int" errors.
-	switch consttype(n) {
-	case CTINT, CTRUNE, CTFLT, CTCPLX:
-		n.SetVal(toint(n.Val()))
-		if n.Val().U.(*Mpint).CmpInt64(0) < 0 {
-			yyerror("negative %s argument in make(%v)", arg, t)
-			return false
-		}
-		if n.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-			yyerror("%s argument too large in make(%v)", arg, t)
-			return false
-		}
-	}
-
-	// defaultlit is necessary for non-constants too: n might be 1.1<<k.
-	n = defaultlit(n, Types[TINT])
-
-	return true
-}
-
-func markbreak(n *Node, implicit *Node) {
-	if n == nil {
-		return
-	}
-
-	switch n.Op {
-	case OBREAK:
-		if n.Left == nil {
-			if implicit != nil {
-				implicit.SetHasBreak(true)
-			}
-		} else {
-			lab := n.Left.Sym.Label
-			if lab != nil {
-				lab.SetHasBreak(true)
-			}
-		}
-
-	case OFOR,
-		OSWITCH,
-		OTYPESW,
-		OSELECT,
-		ORANGE:
-		implicit = n
-		fallthrough
-	default:
-		markbreak(n.Left, implicit)
-		markbreak(n.Right, implicit)
-		markbreaklist(n.Ninit, implicit)
-		markbreaklist(n.Nbody, implicit)
-		markbreaklist(n.List, implicit)
-		markbreaklist(n.Rlist, implicit)
-	}
-}
-
-func markbreaklist(l Nodes, implicit *Node) {
-	s := l.Slice()
-	for i := 0; i < len(s); i++ {
-		n := s[i]
-		if n == nil {
-			continue
-		}
-		if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] {
-			switch n.Name.Defn.Op {
-			case OFOR, OSWITCH, OTYPESW, OSELECT, ORANGE:
-				n.Left.Sym.Label = n.Name.Defn
-				markbreak(n.Name.Defn, n.Name.Defn)
-				n.Left.Sym.Label = nil
-				i++
-				continue
-			}
-		}
-
-		markbreak(n, implicit)
-	}
-}
-
-// Isterminating whether the Nodes list ends with a terminating
-// statement.
-func (l Nodes) isterminating() bool {
-	s := l.Slice()
-	c := len(s)
-	if c == 0 {
-		return false
-	}
-	return s[c-1].isterminating()
-}
-
-// Isterminating returns whether the node n, the last one in a
-// statement list, is a terminating statement.
-func (n *Node) isterminating() bool {
-	switch n.Op {
-	// NOTE: OLABEL is treated as a separate statement,
-	// not a separate prefix, so skipping to the last statement
-	// in the block handles the labeled statement case by
-	// skipping over the label. No case OLABEL here.
-
-	case OBLOCK:
-		return n.List.isterminating()
-
-	case OGOTO,
-		ORETURN,
-		ORETJMP,
-		OPANIC,
-		OXFALL:
-		return true
-
-	case OFOR:
-		if n.Left != nil {
-			return false
-		}
-		if n.HasBreak() {
-			return false
-		}
-		return true
-
-	case OIF:
-		return n.Nbody.isterminating() && n.Rlist.isterminating()
-
-	case OSWITCH, OTYPESW, OSELECT:
-		if n.HasBreak() {
-			return false
-		}
-		def := 0
-		for _, n1 := range n.List.Slice() {
-			if !n1.Nbody.isterminating() {
-				return false
-			}
-			if n1.List.Len() == 0 { // default
-				def = 1
-			}
-		}
-
-		if n.Op != OSELECT && def == 0 {
-			return false
-		}
-		return true
-	}
-
-	return false
-}
-
-func checkreturn(fn *Node) {
-	if fn.Type.Results().NumFields() != 0 && fn.Nbody.Len() != 0 {
-		markbreaklist(fn.Nbody, nil)
-		if !fn.Nbody.isterminating() {
-			yyerrorl(fn.Func.Endlineno, "missing return at end of function")
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/universe.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/universe.go
deleted file mode 100644
index 74e3689..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/universe.go
+++ /dev/null
@@ -1,468 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/universe.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/universe.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// builtinpkg is a fake package that declares the universe block.
-var builtinpkg *Pkg
-
-var itable *Type // distinguished *byte
-
-var basicTypes = [...]struct {
-	name  string
-	etype EType
-}{
-	{"int8", TINT8},
-	{"int16", TINT16},
-	{"int32", TINT32},
-	{"int64", TINT64},
-	{"uint8", TUINT8},
-	{"uint16", TUINT16},
-	{"uint32", TUINT32},
-	{"uint64", TUINT64},
-	{"float32", TFLOAT32},
-	{"float64", TFLOAT64},
-	{"complex64", TCOMPLEX64},
-	{"complex128", TCOMPLEX128},
-	{"bool", TBOOL},
-	{"string", TSTRING},
-}
-
-var typedefs = [...]struct {
-	name     string
-	etype    EType
-	width    *int
-	sameas32 EType
-	sameas64 EType
-}{
-	{"int", TINT, &Widthint, TINT32, TINT64},
-	{"uint", TUINT, &Widthint, TUINT32, TUINT64},
-	{"uintptr", TUINTPTR, &Widthptr, TUINT32, TUINT64},
-}
-
-var builtinFuncs = [...]struct {
-	name string
-	op   Op
-}{
-	{"append", OAPPEND},
-	{"cap", OCAP},
-	{"close", OCLOSE},
-	{"complex", OCOMPLEX},
-	{"copy", OCOPY},
-	{"delete", ODELETE},
-	{"imag", OIMAG},
-	{"len", OLEN},
-	{"make", OMAKE},
-	{"new", ONEW},
-	{"panic", OPANIC},
-	{"print", OPRINT},
-	{"println", OPRINTN},
-	{"real", OREAL},
-	{"recover", ORECOVER},
-}
-
-var unsafeFuncs = [...]struct {
-	name string
-	op   Op
-}{
-	{"Alignof", OALIGNOF},
-	{"Offsetof", OOFFSETOF},
-	{"Sizeof", OSIZEOF},
-}
-
-// initUniverse initializes the universe block.
-func initUniverse() {
-	lexinit()
-	typeinit()
-	lexinit1()
-}
-
-// lexinit initializes known symbols and the basic types.
-func lexinit() {
-	for _, s := range basicTypes {
-		etype := s.etype
-		if int(etype) >= len(Types) {
-			Fatalf("lexinit: %s bad etype", s.name)
-		}
-		s2 := Pkglookup(s.name, builtinpkg)
-		t := Types[etype]
-		if t == nil {
-			t = typ(etype)
-			t.Sym = s2
-			if etype != TANY && etype != TSTRING {
-				dowidth(t)
-			}
-			Types[etype] = t
-		}
-		s2.Def = typenod(t)
-		s2.Def.Name = new(Name)
-	}
-
-	for _, s := range builtinFuncs {
-		// TODO(marvin): Fix Node.EType type union.
-		s2 := Pkglookup(s.name, builtinpkg)
-		s2.Def = nod(ONAME, nil, nil)
-		s2.Def.Sym = s2
-		s2.Def.Etype = EType(s.op)
-	}
-
-	for _, s := range unsafeFuncs {
-		s2 := Pkglookup(s.name, unsafepkg)
-		s2.Def = nod(ONAME, nil, nil)
-		s2.Def.Sym = s2
-		s2.Def.Etype = EType(s.op)
-	}
-
-	idealstring = typ(TSTRING)
-	idealbool = typ(TBOOL)
-	Types[TANY] = typ(TANY)
-
-	s := Pkglookup("true", builtinpkg)
-	s.Def = nodbool(true)
-	s.Def.Sym = lookup("true")
-	s.Def.Name = new(Name)
-	s.Def.Type = idealbool
-
-	s = Pkglookup("false", builtinpkg)
-	s.Def = nodbool(false)
-	s.Def.Sym = lookup("false")
-	s.Def.Name = new(Name)
-	s.Def.Type = idealbool
-
-	s = lookup("_")
-	s.Block = -100
-	s.Def = nod(ONAME, nil, nil)
-	s.Def.Sym = s
-	Types[TBLANK] = typ(TBLANK)
-	s.Def.Type = Types[TBLANK]
-	nblank = s.Def
-
-	s = Pkglookup("_", builtinpkg)
-	s.Block = -100
-	s.Def = nod(ONAME, nil, nil)
-	s.Def.Sym = s
-	Types[TBLANK] = typ(TBLANK)
-	s.Def.Type = Types[TBLANK]
-
-	Types[TNIL] = typ(TNIL)
-	s = Pkglookup("nil", builtinpkg)
-	var v Val
-	v.U = new(NilVal)
-	s.Def = nodlit(v)
-	s.Def.Sym = s
-	s.Def.Name = new(Name)
-
-	s = Pkglookup("iota", builtinpkg)
-	s.Def = nod(OIOTA, nil, nil)
-	s.Def.Sym = s
-	s.Def.Name = new(Name)
-}
-
-func typeinit() {
-	if Widthptr == 0 {
-		Fatalf("typeinit before betypeinit")
-	}
-
-	for et := EType(0); et < NTYPE; et++ {
-		simtype[et] = et
-	}
-
-	Types[TPTR32] = typ(TPTR32)
-	dowidth(Types[TPTR32])
-
-	Types[TPTR64] = typ(TPTR64)
-	dowidth(Types[TPTR64])
-
-	t := typ(TUNSAFEPTR)
-	Types[TUNSAFEPTR] = t
-	t.Sym = Pkglookup("Pointer", unsafepkg)
-	t.Sym.Def = typenod(t)
-	t.Sym.Def.Name = new(Name)
-	dowidth(Types[TUNSAFEPTR])
-
-	Tptr = TPTR32
-	if Widthptr == 8 {
-		Tptr = TPTR64
-	}
-
-	for et := TINT8; et <= TUINT64; et++ {
-		isInt[et] = true
-	}
-	isInt[TINT] = true
-	isInt[TUINT] = true
-	isInt[TUINTPTR] = true
-
-	isFloat[TFLOAT32] = true
-	isFloat[TFLOAT64] = true
-
-	isComplex[TCOMPLEX64] = true
-	isComplex[TCOMPLEX128] = true
-
-	isforw[TFORW] = true
-
-	// initialize okfor
-	for et := EType(0); et < NTYPE; et++ {
-		if isInt[et] || et == TIDEAL {
-			okforeq[et] = true
-			okforcmp[et] = true
-			okforarith[et] = true
-			okforadd[et] = true
-			okforand[et] = true
-			okforconst[et] = true
-			issimple[et] = true
-			minintval[et] = new(Mpint)
-			maxintval[et] = new(Mpint)
-		}
-
-		if isFloat[et] {
-			okforeq[et] = true
-			okforcmp[et] = true
-			okforadd[et] = true
-			okforarith[et] = true
-			okforconst[et] = true
-			issimple[et] = true
-			minfltval[et] = newMpflt()
-			maxfltval[et] = newMpflt()
-		}
-
-		if isComplex[et] {
-			okforeq[et] = true
-			okforadd[et] = true
-			okforarith[et] = true
-			okforconst[et] = true
-			issimple[et] = true
-		}
-	}
-
-	issimple[TBOOL] = true
-
-	okforadd[TSTRING] = true
-
-	okforbool[TBOOL] = true
-
-	okforcap[TARRAY] = true
-	okforcap[TCHAN] = true
-	okforcap[TSLICE] = true
-
-	okforconst[TBOOL] = true
-	okforconst[TSTRING] = true
-
-	okforlen[TARRAY] = true
-	okforlen[TCHAN] = true
-	okforlen[TMAP] = true
-	okforlen[TSLICE] = true
-	okforlen[TSTRING] = true
-
-	okforeq[TPTR32] = true
-	okforeq[TPTR64] = true
-	okforeq[TUNSAFEPTR] = true
-	okforeq[TINTER] = true
-	okforeq[TCHAN] = true
-	okforeq[TSTRING] = true
-	okforeq[TBOOL] = true
-	okforeq[TMAP] = true    // nil only; refined in typecheck
-	okforeq[TFUNC] = true   // nil only; refined in typecheck
-	okforeq[TSLICE] = true  // nil only; refined in typecheck
-	okforeq[TARRAY] = true  // only if element type is comparable; refined in typecheck
-	okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
-
-	okforcmp[TSTRING] = true
-
-	var i int
-	for i = 0; i < len(okfor); i++ {
-		okfor[i] = okfornone[:]
-	}
-
-	// binary
-	okfor[OADD] = okforadd[:]
-
-	okfor[OAND] = okforand[:]
-	okfor[OANDAND] = okforbool[:]
-	okfor[OANDNOT] = okforand[:]
-	okfor[ODIV] = okforarith[:]
-	okfor[OEQ] = okforeq[:]
-	okfor[OGE] = okforcmp[:]
-	okfor[OGT] = okforcmp[:]
-	okfor[OLE] = okforcmp[:]
-	okfor[OLT] = okforcmp[:]
-	okfor[OMOD] = okforand[:]
-	okfor[OHMUL] = okforarith[:]
-	okfor[OMUL] = okforarith[:]
-	okfor[ONE] = okforeq[:]
-	okfor[OOR] = okforand[:]
-	okfor[OOROR] = okforbool[:]
-	okfor[OSUB] = okforarith[:]
-	okfor[OXOR] = okforand[:]
-	okfor[OLSH] = okforand[:]
-	okfor[ORSH] = okforand[:]
-
-	// unary
-	okfor[OCOM] = okforand[:]
-
-	okfor[OMINUS] = okforarith[:]
-	okfor[ONOT] = okforbool[:]
-	okfor[OPLUS] = okforarith[:]
-
-	// special
-	okfor[OCAP] = okforcap[:]
-
-	okfor[OLEN] = okforlen[:]
-
-	// comparison
-	iscmp[OLT] = true
-
-	iscmp[OGT] = true
-	iscmp[OGE] = true
-	iscmp[OLE] = true
-	iscmp[OEQ] = true
-	iscmp[ONE] = true
-
-	maxintval[TINT8].SetString("0x7f")
-	minintval[TINT8].SetString("-0x80")
-	maxintval[TINT16].SetString("0x7fff")
-	minintval[TINT16].SetString("-0x8000")
-	maxintval[TINT32].SetString("0x7fffffff")
-	minintval[TINT32].SetString("-0x80000000")
-	maxintval[TINT64].SetString("0x7fffffffffffffff")
-	minintval[TINT64].SetString("-0x8000000000000000")
-
-	maxintval[TUINT8].SetString("0xff")
-	maxintval[TUINT16].SetString("0xffff")
-	maxintval[TUINT32].SetString("0xffffffff")
-	maxintval[TUINT64].SetString("0xffffffffffffffff")
-
-	// f is valid float if min < f < max.  (min and max are not themselves valid.)
-	maxfltval[TFLOAT32].SetString("33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
-	minfltval[TFLOAT32].SetString("-33554431p103")
-	maxfltval[TFLOAT64].SetString("18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
-	minfltval[TFLOAT64].SetString("-18014398509481983p970")
-
-	maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
-	minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
-	maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
-	minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
-
-	// for walk to use in error messages
-	Types[TFUNC] = functype(nil, nil, nil)
-
-	// types used in front end
-	// types[TNIL] got set early in lexinit
-	Types[TIDEAL] = typ(TIDEAL)
-
-	Types[TINTER] = typ(TINTER)
-
-	// simple aliases
-	simtype[TMAP] = Tptr
-
-	simtype[TCHAN] = Tptr
-	simtype[TFUNC] = Tptr
-	simtype[TUNSAFEPTR] = Tptr
-
-	array_array = int(Rnd(0, int64(Widthptr)))
-	array_nel = int(Rnd(int64(array_array)+int64(Widthptr), int64(Widthint)))
-	array_cap = int(Rnd(int64(array_nel)+int64(Widthint), int64(Widthint)))
-	sizeof_Array = int(Rnd(int64(array_cap)+int64(Widthint), int64(Widthptr)))
-
-	// string is same as slice wo the cap
-	sizeof_String = int(Rnd(int64(array_nel)+int64(Widthint), int64(Widthptr)))
-
-	dowidth(Types[TSTRING])
-	dowidth(idealstring)
-
-	itable = typPtr(Types[TUINT8])
-}
-
-func makeErrorInterface() *Type {
-	field := newField()
-	field.Type = Types[TSTRING]
-	f := functypefield(fakethisfield(), nil, []*Field{field})
-
-	field = newField()
-	field.Sym = lookup("Error")
-	field.Type = f
-
-	t := typ(TINTER)
-	t.SetFields([]*Field{field})
-	return t
-}
-
-func lexinit1() {
-	// error type
-	s := Pkglookup("error", builtinpkg)
-	errortype = makeErrorInterface()
-	errortype.Sym = s
-	// TODO: If we can prove that it's safe to set errortype.Orig here
-	// than we don't need the special errortype/errorInterface case in
-	// bexport.go. See also issue #15920.
-	// errortype.Orig = makeErrorInterface()
-	s.Def = typenod(errortype)
-
-	// byte alias
-	s = Pkglookup("byte", builtinpkg)
-	bytetype = typ(TUINT8)
-	bytetype.Sym = s
-	s.Def = typenod(bytetype)
-	s.Def.Name = new(Name)
-
-	// rune alias
-	s = Pkglookup("rune", builtinpkg)
-	runetype = typ(TINT32)
-	runetype.Sym = s
-	s.Def = typenod(runetype)
-	s.Def.Name = new(Name)
-
-	// backend-dependent builtin types (e.g. int).
-	for _, s := range typedefs {
-		s1 := Pkglookup(s.name, builtinpkg)
-
-		sameas := s.sameas32
-		if *s.width == 8 {
-			sameas = s.sameas64
-		}
-
-		simtype[s.etype] = sameas
-		minfltval[s.etype] = minfltval[sameas]
-		maxfltval[s.etype] = maxfltval[sameas]
-		minintval[s.etype] = minintval[sameas]
-		maxintval[s.etype] = maxintval[sameas]
-
-		t := typ(s.etype)
-		t.Sym = s1
-		Types[s.etype] = t
-		s1.Def = typenod(t)
-		s1.Def.Name = new(Name)
-		s1.Origpkg = builtinpkg
-
-		dowidth(t)
-	}
-}
-
-// finishUniverse makes the universe block visible within the current package.
-func finishUniverse() {
-	// Operationally, this is similar to a dot import of builtinpkg, except
-	// that we silently skip symbols that are already declared in the
-	// package block rather than emitting a redeclared symbol error.
-
-	for _, s := range builtinpkg.Syms {
-		if s.Def == nil {
-			continue
-		}
-		s1 := lookup(s.Name)
-		if s1.Def != nil {
-			continue
-		}
-
-		s1.Def = s.Def
-		s1.Block = s.Block
-	}
-
-	nodfp = nod(ONAME, nil, nil)
-	nodfp.Type = Types[TINT32]
-	nodfp.Xoffset = 0
-	nodfp.Class = PPARAM
-	nodfp.Sym = lookup(".fp")
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/unsafe.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/unsafe.go
deleted file mode 100644
index 1b0e3e2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/unsafe.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/unsafe.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/unsafe.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-// evalunsafe evaluates a package unsafe operation and returns the result.
-func evalunsafe(n *Node) int64 {
-	switch n.Op {
-	case OALIGNOF, OSIZEOF:
-		n.Left = typecheck(n.Left, Erv)
-		n.Left = defaultlit(n.Left, nil)
-		tr := n.Left.Type
-		if tr == nil {
-			yyerror("invalid expression %v", n)
-			return 0
-		}
-		dowidth(tr)
-		if n.Op == OALIGNOF {
-			return int64(tr.Align)
-		}
-		return tr.Width
-
-	case OOFFSETOF:
-		// must be a selector.
-		if n.Left.Op != OXDOT {
-			yyerror("invalid expression %v", n)
-			return 0
-		}
-
-		// Remember base of selector to find it back after dot insertion.
-		// Since r->left may be mutated by typechecking, check it explicitly
-		// first to track it correctly.
-		n.Left.Left = typecheck(n.Left.Left, Erv)
-		base := n.Left.Left
-
-		n.Left = typecheck(n.Left, Erv)
-		switch n.Left.Op {
-		case ODOT, ODOTPTR:
-			break
-		case OCALLPART:
-			yyerror("invalid expression %v: argument is a method value", n)
-			return 0
-		default:
-			yyerror("invalid expression %v", n)
-			return 0
-		}
-
-		// Sum offsets for dots until we reach base.
-		var v int64
-		for r := n.Left; r != base; r = r.Left {
-			switch r.Op {
-			case ODOTPTR:
-				// For Offsetof(s.f), s may itself be a pointer,
-				// but accessing f must not otherwise involve
-				// indirection via embedded pointer types.
-				if r.Left != base {
-					yyerror("invalid expression %v: selector implies indirection of embedded %v", n, r.Left)
-					return 0
-				}
-				fallthrough
-			case ODOT:
-				v += r.Xoffset
-			default:
-				Dump("unsafenmagic", n.Left)
-				Fatalf("impossible %#v node after dot insertion", r.Op)
-			}
-		}
-		return v
-	}
-
-	Fatalf("unexpected op %v", n.Op)
-	return 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/util.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/util.go
deleted file mode 100644
index a4fb26d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/util.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/util.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/util.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"os"
-	"runtime"
-	"runtime/pprof"
-)
-
-func (n *Node) Line() string {
-	return Ctxt.LineHist.LineString(int(n.Lineno))
-}
-
-var atExitFuncs []func()
-
-func atExit(f func()) {
-	atExitFuncs = append(atExitFuncs, f)
-}
-
-func Exit(code int) {
-	for i := len(atExitFuncs) - 1; i >= 0; i-- {
-		f := atExitFuncs[i]
-		atExitFuncs = atExitFuncs[:i]
-		f()
-	}
-	os.Exit(code)
-}
-
-var (
-	cpuprofile     string
-	memprofile     string
-	memprofilerate int64
-	traceprofile   string
-	traceHandler   func(string)
-)
-
-func startProfile() {
-	if cpuprofile != "" {
-		f, err := os.Create(cpuprofile)
-		if err != nil {
-			Fatalf("%v", err)
-		}
-		if err := pprof.StartCPUProfile(f); err != nil {
-			Fatalf("%v", err)
-		}
-		atExit(pprof.StopCPUProfile)
-	}
-	if memprofile != "" {
-		if memprofilerate != 0 {
-			runtime.MemProfileRate = int(memprofilerate)
-		}
-		f, err := os.Create(memprofile)
-		if err != nil {
-			Fatalf("%v", err)
-		}
-		atExit(func() {
-			runtime.GC() // profile all outstanding allocations
-			if err := pprof.WriteHeapProfile(f); err != nil {
-				Fatalf("%v", err)
-			}
-		})
-	}
-	if traceprofile != "" && traceHandler != nil {
-		traceHandler(traceprofile)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/walk.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/walk.go
deleted file mode 100644
index 4c0b05e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/gc/walk.go
+++ /dev/null
@@ -1,4080 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/walk.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/gc/walk.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gc
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"strings"
-)
-
-// The constant is known to runtime.
-const (
-	tmpstringbufsize = 32
-)
-
-func walk(fn *Node) {
-	Curfn = fn
-
-	if Debug['W'] != 0 {
-		s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym)
-		dumplist(s, Curfn.Nbody)
-	}
-
-	lno := lineno
-
-	// Final typecheck for any unused variables.
-	for i, ln := range fn.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class == PAUTO || ln.Class == PAUTOHEAP) {
-			ln = typecheck(ln, Erv|Easgn)
-			fn.Func.Dcl[i] = ln
-		}
-	}
-
-	// Propagate the used flag for typeswitch variables up to the NONAME in it's definition.
-	for _, ln := range fn.Func.Dcl {
-		if ln.Op == ONAME && (ln.Class == PAUTO || ln.Class == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Used {
-			ln.Name.Defn.Left.Used = true
-		}
-	}
-
-	for _, ln := range fn.Func.Dcl {
-		if ln.Op != ONAME || (ln.Class != PAUTO && ln.Class != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Used {
-			continue
-		}
-		if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
-			if defn.Left.Used {
-				continue
-			}
-			lineno = defn.Left.Lineno
-			yyerror("%v declared and not used", ln.Sym)
-			defn.Left.Used = true // suppress repeats
-		} else {
-			lineno = ln.Lineno
-			yyerror("%v declared and not used", ln.Sym)
-		}
-	}
-
-	lineno = lno
-	if nerrors != 0 {
-		return
-	}
-	walkstmtlist(Curfn.Nbody.Slice())
-	if Debug['W'] != 0 {
-		s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
-		dumplist(s, Curfn.Nbody)
-	}
-
-	heapmoves()
-	if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
-		s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
-		dumplist(s, Curfn.Func.Enter)
-	}
-}
-
-func walkstmtlist(s []*Node) {
-	for i := range s {
-		s[i] = walkstmt(s[i])
-	}
-}
-
-func samelist(a, b []*Node) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i, n := range a {
-		if n != b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func paramoutheap(fn *Node) bool {
-	for _, ln := range fn.Func.Dcl {
-		switch ln.Class {
-		case PPARAMOUT:
-			if ln.isParamStackCopy() || ln.Addrtaken {
-				return true
-			}
-
-		case PAUTO:
-			// stop early - parameters are over
-			return false
-		}
-	}
-
-	return false
-}
-
-// adds "adjust" to all the argument locations for the call n.
-// n must be a defer or go node that has already been walked.
-func adjustargs(n *Node, adjust int) {
-	var arg *Node
-	var lhs *Node
-
-	callfunc := n.Left
-	for _, arg = range callfunc.List.Slice() {
-		if arg.Op != OAS {
-			yyerror("call arg not assignment")
-		}
-		lhs = arg.Left
-		if lhs.Op == ONAME {
-			// This is a temporary introduced by reorder1.
-			// The real store to the stack appears later in the arg list.
-			continue
-		}
-
-		if lhs.Op != OINDREGSP {
-			yyerror("call argument store does not use OINDREGSP")
-		}
-
-		// can't really check this in machine-indep code.
-		//if(lhs->val.u.reg != D_SP)
-		//      yyerror("call arg assign not indreg(SP)");
-		lhs.Xoffset += int64(adjust)
-	}
-}
-
-// The result of walkstmt MUST be assigned back to n, e.g.
-// 	n.Left = walkstmt(n.Left)
-func walkstmt(n *Node) *Node {
-	if n == nil {
-		return n
-	}
-	if n.IsStatic { // don't walk, generated by anylit.
-		return n
-	}
-
-	setlineno(n)
-
-	walkstmtlist(n.Ninit.Slice())
-
-	switch n.Op {
-	default:
-		if n.Op == ONAME {
-			yyerror("%v is not a top level statement", n.Sym)
-		} else {
-			yyerror("%v is not a top level statement", n.Op)
-		}
-		Dump("nottop", n)
-
-	case OAS,
-		OASOP,
-		OAS2,
-		OAS2DOTTYPE,
-		OAS2RECV,
-		OAS2FUNC,
-		OAS2MAPR,
-		OCLOSE,
-		OCOPY,
-		OCALLMETH,
-		OCALLINTER,
-		OCALL,
-		OCALLFUNC,
-		ODELETE,
-		OSEND,
-		OPRINT,
-		OPRINTN,
-		OPANIC,
-		OEMPTY,
-		ORECOVER,
-		OGETG:
-		if n.Typecheck == 0 {
-			Fatalf("missing typecheck: %+v", n)
-		}
-		wascopy := n.Op == OCOPY
-		init := n.Ninit
-		n.Ninit.Set(nil)
-		n = walkexpr(n, &init)
-		n = addinit(n, init.Slice())
-		if wascopy && n.Op == OCONVNOP {
-			n.Op = OEMPTY // don't leave plain values as statements.
-		}
-
-	// special case for a receive where we throw away
-	// the value received.
-	case ORECV:
-		if n.Typecheck == 0 {
-			Fatalf("missing typecheck: %+v", n)
-		}
-		init := n.Ninit
-		n.Ninit.Set(nil)
-
-		n.Left = walkexpr(n.Left, &init)
-		n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, typename(n.Left.Type), n.Left, nodnil())
-		n = walkexpr(n, &init)
-
-		n = addinit(n, init.Slice())
-
-	case OBREAK,
-		OCONTINUE,
-		OFALL,
-		OGOTO,
-		OLABEL,
-		ODCLCONST,
-		ODCLTYPE,
-		OCHECKNIL,
-		OVARKILL,
-		OVARLIVE:
-		break
-
-	case ODCL:
-		v := n.Left
-		if v.Class == PAUTOHEAP {
-			if compiling_runtime {
-				yyerror("%v escapes to heap, not allowed in runtime.", v)
-			}
-			if prealloc[v] == nil {
-				prealloc[v] = callnew(v.Type)
-			}
-			nn := nod(OAS, v.Name.Heapaddr, prealloc[v])
-			nn.Colas = true
-			nn = typecheck(nn, Etop)
-			return walkstmt(nn)
-		}
-
-	case OBLOCK:
-		walkstmtlist(n.List.Slice())
-
-	case OXCASE:
-		yyerror("case statement out of place")
-		n.Op = OCASE
-		fallthrough
-
-	case OCASE:
-		n.Right = walkstmt(n.Right)
-
-	case ODEFER:
-		hasdefer = true
-		switch n.Left.Op {
-		case OPRINT, OPRINTN:
-			n.Left = walkprintfunc(n.Left, &n.Ninit)
-
-		case OCOPY:
-			n.Left = copyany(n.Left, &n.Ninit, true)
-
-		default:
-			n.Left = walkexpr(n.Left, &n.Ninit)
-		}
-
-		// make room for size & fn arguments.
-		adjustargs(n, 2*Widthptr)
-
-	case OFOR:
-		if n.Left != nil {
-			walkstmtlist(n.Left.Ninit.Slice())
-			init := n.Left.Ninit
-			n.Left.Ninit.Set(nil)
-			n.Left = walkexpr(n.Left, &init)
-			n.Left = addinit(n.Left, init.Slice())
-		}
-
-		n.Right = walkstmt(n.Right)
-		walkstmtlist(n.Nbody.Slice())
-
-	case OIF:
-		n.Left = walkexpr(n.Left, &n.Ninit)
-		walkstmtlist(n.Nbody.Slice())
-		walkstmtlist(n.Rlist.Slice())
-
-	case OPROC:
-		switch n.Left.Op {
-		case OPRINT, OPRINTN:
-			n.Left = walkprintfunc(n.Left, &n.Ninit)
-
-		case OCOPY:
-			n.Left = copyany(n.Left, &n.Ninit, true)
-
-		default:
-			n.Left = walkexpr(n.Left, &n.Ninit)
-		}
-
-		// make room for size & fn arguments.
-		adjustargs(n, 2*Widthptr)
-
-	case ORETURN:
-		walkexprlist(n.List.Slice(), &n.Ninit)
-		if n.List.Len() == 0 {
-			break
-		}
-		if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
-			// assign to the function out parameters,
-			// so that reorder3 can fix up conflicts
-			var rl []*Node
-
-			var cl Class
-			for _, ln := range Curfn.Func.Dcl {
-				cl = ln.Class
-				if cl == PAUTO || cl == PAUTOHEAP {
-					break
-				}
-				if cl == PPARAMOUT {
-					if ln.isParamStackCopy() {
-						ln = walkexpr(typecheck(nod(OIND, ln.Name.Heapaddr, nil), Erv), nil)
-					}
-					rl = append(rl, ln)
-				}
-			}
-
-			if got, want := n.List.Len(), len(rl); got != want {
-				// order should have rewritten multi-value function calls
-				// with explicit OAS2FUNC nodes.
-				Fatalf("expected %v return arguments, have %v", want, got)
-			}
-
-			if samelist(rl, n.List.Slice()) {
-				// special return in disguise
-				n.List.Set(nil)
-
-				break
-			}
-
-			// move function calls out, to make reorder3's job easier.
-			walkexprlistsafe(n.List.Slice(), &n.Ninit)
-
-			ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
-			n.List.Set(reorder3(ll))
-			ls := n.List.Slice()
-			for i, n := range ls {
-				ls[i] = applywritebarrier(n)
-			}
-			break
-		}
-
-		ll := ascompatte(n.Op, nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit)
-		n.List.Set(ll)
-
-	case ORETJMP:
-		break
-
-	case OSELECT:
-		walkselect(n)
-
-	case OSWITCH:
-		walkswitch(n)
-
-	case ORANGE:
-		walkrange(n)
-
-	case OXFALL:
-		yyerror("fallthrough statement out of place")
-		n.Op = OFALL
-	}
-
-	if n.Op == ONAME {
-		Fatalf("walkstmt ended up with name: %+v", n)
-	}
-	return n
-}
-
-func isSmallMakeSlice(n *Node) bool {
-	if n.Op != OMAKESLICE {
-		return false
-	}
-	l := n.Left
-	r := n.Right
-	if r == nil {
-		r = l
-	}
-	t := n.Type
-
-	return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width)
-}
-
-// walk the whole tree of the body of an
-// expression or simple statement.
-// the types expressions are calculated.
-// compile-time constants are evaluated.
-// complex side effects like statements are appended to init
-func walkexprlist(s []*Node, init *Nodes) {
-	for i := range s {
-		s[i] = walkexpr(s[i], init)
-	}
-}
-
-func walkexprlistsafe(s []*Node, init *Nodes) {
-	for i, n := range s {
-		s[i] = safeexpr(n, init)
-		s[i] = walkexpr(s[i], init)
-	}
-}
-
-func walkexprlistcheap(s []*Node, init *Nodes) {
-	for i, n := range s {
-		s[i] = cheapexpr(n, init)
-		s[i] = walkexpr(s[i], init)
-	}
-}
-
-// Build name of function for interface conversion.
-// Not all names are possible
-// (e.g., we'll never generate convE2E or convE2I or convI2E).
-func convFuncName(from, to *Type) string {
-	tkind := to.iet()
-	switch from.iet() {
-	case 'I':
-		switch tkind {
-		case 'I':
-			return "convI2I"
-		}
-	case 'T':
-		switch tkind {
-		case 'E':
-			return "convT2E"
-		case 'I':
-			return "convT2I"
-		}
-	}
-	Fatalf("unknown conv func %c2%c", from.iet(), to.iet())
-	panic("unreachable")
-}
-
-// The result of walkexpr MUST be assigned back to n, e.g.
-// 	n.Left = walkexpr(n.Left, init)
-func walkexpr(n *Node, init *Nodes) *Node {
-	if n == nil {
-		return n
-	}
-
-	if init == &n.Ninit {
-		// not okay to use n->ninit when walking n,
-		// because we might replace n with some other node
-		// and would lose the init list.
-		Fatalf("walkexpr init == &n->ninit")
-	}
-
-	if n.Ninit.Len() != 0 {
-		walkstmtlist(n.Ninit.Slice())
-		init.AppendNodes(&n.Ninit)
-	}
-
-	lno := setlineno(n)
-
-	if Debug['w'] > 1 {
-		Dump("walk-before", n)
-	}
-
-	if n.Typecheck != 1 {
-		Fatalf("missed typecheck: %+v", n)
-	}
-
-	if n.Op == ONAME && n.Class == PAUTOHEAP {
-		nn := nod(OIND, n.Name.Heapaddr, nil)
-		nn = typecheck(nn, Erv)
-		nn = walkexpr(nn, init)
-		nn.Left.NonNil = true
-		return nn
-	}
-
-opswitch:
-	switch n.Op {
-	default:
-		Dump("walk", n)
-		Fatalf("walkexpr: switch 1 unknown op %+S", n)
-
-	case OTYPE,
-		ONONAME,
-		OINDREGSP,
-		OEMPTY,
-		OGETG:
-
-	case ONOT,
-		OMINUS,
-		OPLUS,
-		OCOM,
-		OREAL,
-		OIMAG,
-		ODOTMETH,
-		ODOTINTER:
-		n.Left = walkexpr(n.Left, init)
-
-	case OIND:
-		n.Left = walkexpr(n.Left, init)
-
-	case ODOT:
-		usefield(n)
-		n.Left = walkexpr(n.Left, init)
-
-	case ODOTPTR:
-		usefield(n)
-		if n.Op == ODOTPTR && n.Left.Type.Elem().Width == 0 {
-			// No actual copy will be generated, so emit an explicit nil check.
-			n.Left = cheapexpr(n.Left, init)
-
-			checknil(n.Left, init)
-		}
-
-		n.Left = walkexpr(n.Left, init)
-
-	case OEFACE:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-	case OSPTR, OITAB, OIDATA:
-		n.Left = walkexpr(n.Left, init)
-
-	case OLEN, OCAP:
-		n.Left = walkexpr(n.Left, init)
-
-		// replace len(*[10]int) with 10.
-		// delayed until now to preserve side effects.
-		t := n.Left.Type
-
-		if t.IsPtr() {
-			t = t.Elem()
-		}
-		if t.IsArray() {
-			safeexpr(n.Left, init)
-			Nodconst(n, n.Type, t.NumElem())
-			n.Typecheck = 1
-		}
-
-	case OLSH, ORSH:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-		t := n.Left.Type
-		n.Bounded = bounded(n.Right, 8*t.Width)
-		if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
-			Warn("shift bounds check elided")
-		}
-
-		// Use results from call expression as arguments for complex.
-	case OAND,
-		OSUB,
-		OHMUL,
-		OLT,
-		OLE,
-		OGE,
-		OGT,
-		OADD,
-		OCOMPLEX,
-		OLROT:
-		if n.Op == OCOMPLEX && n.Left == nil && n.Right == nil {
-			n.Left = n.List.First()
-			n.Right = n.List.Second()
-		}
-
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-	case OOR, OXOR:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-		n = walkrotate(n)
-
-	case OEQ, ONE:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-		// Disable safemode while compiling this code: the code we
-		// generate internally can refer to unsafe.Pointer.
-		// In this case it can happen if we need to generate an ==
-		// for a struct containing a reflect.Value, which itself has
-		// an unexported field of type unsafe.Pointer.
-		old_safemode := safemode
-		safemode = false
-		n = walkcompare(n, init)
-		safemode = old_safemode
-
-	case OANDAND, OOROR:
-		n.Left = walkexpr(n.Left, init)
-
-		// cannot put side effects from n.Right on init,
-		// because they cannot run before n.Left is checked.
-		// save elsewhere and store on the eventual n.Right.
-		var ll Nodes
-
-		n.Right = walkexpr(n.Right, &ll)
-		n.Right = addinit(n.Right, ll.Slice())
-		n = walkinrange(n, init)
-
-	case OPRINT, OPRINTN:
-		walkexprlist(n.List.Slice(), init)
-		n = walkprint(n, init)
-
-	case OPANIC:
-		n = mkcall("gopanic", nil, init, n.Left)
-
-	case ORECOVER:
-		n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
-
-	case OLITERAL:
-		n.Addable = true
-
-	case OCLOSUREVAR, OCFUNC:
-		n.Addable = true
-
-	case ONAME:
-		n.Addable = true
-
-	case OCALLINTER:
-		usemethod(n)
-		t := n.Left.Type
-		if n.List.Len() != 0 && n.List.First().Op == OAS {
-			break
-		}
-		n.Left = walkexpr(n.Left, init)
-		walkexprlist(n.List.Slice(), init)
-		ll := ascompatte(n.Op, n, n.Isddd, t.Params(), n.List.Slice(), 0, init)
-		n.List.Set(reorder1(ll))
-
-	case OCALLFUNC:
-		if n.Left.Op == OCLOSURE {
-			// Transform direct call of a closure to call of a normal function.
-			// transformclosure already did all preparation work.
-
-			// Prepend captured variables to argument list.
-			n.List.Prepend(n.Left.Func.Enter.Slice()...)
-
-			n.Left.Func.Enter.Set(nil)
-
-			// Replace OCLOSURE with ONAME/PFUNC.
-			n.Left = n.Left.Func.Closure.Func.Nname
-
-			// Update type of OCALLFUNC node.
-			// Output arguments had not changed, but their offsets could.
-			if n.Left.Type.Results().NumFields() == 1 {
-				n.Type = n.Left.Type.Results().Field(0).Type
-			} else {
-				n.Type = n.Left.Type.Results()
-			}
-		}
-
-		t := n.Left.Type
-		if n.List.Len() != 0 && n.List.First().Op == OAS {
-			break
-		}
-
-		n.Left = walkexpr(n.Left, init)
-		walkexprlist(n.List.Slice(), init)
-
-		if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" &&
-			(n.Left.Sym.Pkg.Path == "math" || n.Left.Sym.Pkg == localpkg && myimportpath == "math") {
-			if Thearch.LinkArch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) {
-				n.Op = OSQRT
-				n.Left = n.List.First()
-				n.List.Set(nil)
-				break opswitch
-			}
-		}
-
-		ll := ascompatte(n.Op, n, n.Isddd, t.Params(), n.List.Slice(), 0, init)
-		n.List.Set(reorder1(ll))
-
-	case OCALLMETH:
-		t := n.Left.Type
-		if n.List.Len() != 0 && n.List.First().Op == OAS {
-			break
-		}
-		n.Left = walkexpr(n.Left, init)
-		walkexprlist(n.List.Slice(), init)
-		ll := ascompatte(n.Op, n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init)
-		lr := ascompatte(n.Op, n, n.Isddd, t.Params(), n.List.Slice(), 0, init)
-		ll = append(ll, lr...)
-		n.Left.Left = nil
-		ullmancalc(n.Left)
-		n.List.Set(reorder1(ll))
-
-	case OAS:
-		init.AppendNodes(&n.Ninit)
-
-		n.Left = walkexpr(n.Left, init)
-		n.Left = safeexpr(n.Left, init)
-
-		if oaslit(n, init) {
-			break
-		}
-
-		if n.Right == nil {
-			// TODO(austin): Check all "implicit zeroing"
-			break
-		}
-
-		if !instrumenting && iszero(n.Right) && !needwritebarrier(n.Left, n.Right) {
-			break
-		}
-
-		switch n.Right.Op {
-		default:
-			n.Right = walkexpr(n.Right, init)
-
-		case ORECV:
-			// x = <-c; n.Left is x, n.Right.Left is c.
-			// orderstmt made sure x is addressable.
-			n.Right.Left = walkexpr(n.Right.Left, init)
-
-			n1 := nod(OADDR, n.Left, nil)
-			r := n.Right.Left // the channel
-			n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
-			n = walkexpr(n, init)
-			break opswitch
-
-		case OAPPEND:
-			// x = append(...)
-			r := n.Right
-			if r.Type.Elem().NotInHeap {
-				yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem())
-			}
-			if r.Isddd {
-				r = appendslice(r, init) // also works for append(slice, string).
-			} else {
-				r = walkappend(r, init, n)
-			}
-			n.Right = r
-			if r.Op == OAPPEND {
-				// Left in place for back end.
-				// Do not add a new write barrier.
-				break opswitch
-			}
-			// Otherwise, lowered for race detector.
-			// Treat as ordinary assignment.
-		}
-
-		if n.Left != nil && n.Right != nil {
-			static := n.IsStatic
-			n = convas(n, init)
-			n.IsStatic = static
-			n = applywritebarrier(n)
-		}
-
-	case OAS2:
-		init.AppendNodes(&n.Ninit)
-		walkexprlistsafe(n.List.Slice(), init)
-		walkexprlistsafe(n.Rlist.Slice(), init)
-		ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
-		ll = reorder3(ll)
-		for i, n := range ll {
-			ll[i] = applywritebarrier(n)
-		}
-		n = liststmt(ll)
-
-	// a,b,... = fn()
-	case OAS2FUNC:
-		init.AppendNodes(&n.Ninit)
-
-		r := n.Rlist.First()
-		walkexprlistsafe(n.List.Slice(), init)
-		r = walkexpr(r, init)
-
-		if isIntrinsicCall(r) {
-			n.Rlist.Set1(r)
-			break
-		}
-		init.Append(r)
-
-		ll := ascompatet(n.Op, n.List, r.Type)
-		for i, n := range ll {
-			ll[i] = applywritebarrier(n)
-		}
-		n = liststmt(ll)
-
-	// x, y = <-c
-	// orderstmt made sure x is addressable.
-	case OAS2RECV:
-		init.AppendNodes(&n.Ninit)
-
-		r := n.Rlist.First()
-		walkexprlistsafe(n.List.Slice(), init)
-		r.Left = walkexpr(r.Left, init)
-		var n1 *Node
-		if isblank(n.List.First()) {
-			n1 = nodnil()
-		} else {
-			n1 = nod(OADDR, n.List.First(), nil)
-		}
-		n1.Etype = 1 // addr does not escape
-		fn := chanfn("chanrecv2", 2, r.Left.Type)
-		ok := n.List.Second()
-		call := mkcall1(fn, ok.Type, init, typename(r.Left.Type), r.Left, n1)
-		n = nod(OAS, ok, call)
-		n = typecheck(n, Etop)
-
-		// a,b = m[i];
-	case OAS2MAPR:
-		init.AppendNodes(&n.Ninit)
-
-		r := n.Rlist.First()
-		walkexprlistsafe(n.List.Slice(), init)
-		r.Left = walkexpr(r.Left, init)
-		r.Right = walkexpr(r.Right, init)
-		t := r.Left.Type
-		p := ""
-		if t.Val().Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
-			switch algtype(t.Key()) {
-			case AMEM32:
-				p = "mapaccess2_fast32"
-			case AMEM64:
-				p = "mapaccess2_fast64"
-			case ASTRING:
-				p = "mapaccess2_faststr"
-			}
-		}
-
-		var key *Node
-		if p != "" {
-			// fast versions take key by value
-			key = r.Right
-		} else {
-			// standard version takes key by reference
-			// orderexpr made sure key is addressable.
-			key = nod(OADDR, r.Right, nil)
-
-			p = "mapaccess2"
-		}
-
-		// from:
-		//   a,b = m[i]
-		// to:
-		//   var,b = mapaccess2*(t, m, i)
-		//   a = *var
-		a := n.List.First()
-
-		if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero
-			fn := mapfn(p, t)
-			r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
-		} else {
-			fn := mapfn("mapaccess2_fat", t)
-			z := zeroaddr(w)
-			r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
-		}
-
-		// mapaccess2* returns a typed bool, but due to spec changes,
-		// the boolean result of i.(T) is now untyped so we make it the
-		// same type as the variable on the lhs.
-		if ok := n.List.Second(); !isblank(ok) && ok.Type.IsBoolean() {
-			r.Type.Field(1).Type = ok.Type
-		}
-		n.Rlist.Set1(r)
-		n.Op = OAS2FUNC
-
-		// don't generate a = *var if a is _
-		if !isblank(a) {
-			var_ := temp(ptrto(t.Val()))
-			var_.Typecheck = 1
-			var_.NonNil = true // mapaccess always returns a non-nil pointer
-			n.List.SetIndex(0, var_)
-			n = walkexpr(n, init)
-			init.Append(n)
-			n = nod(OAS, a, nod(OIND, var_, nil))
-		}
-
-		n = typecheck(n, Etop)
-		n = walkexpr(n, init)
-
-	case ODELETE:
-		init.AppendNodes(&n.Ninit)
-		map_ := n.List.First()
-		key := n.List.Second()
-		map_ = walkexpr(map_, init)
-		key = walkexpr(key, init)
-
-		// orderstmt made sure key is addressable.
-		key = nod(OADDR, key, nil)
-
-		t := map_.Type
-		n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
-
-	case OAS2DOTTYPE:
-		walkexprlistsafe(n.List.Slice(), init)
-		e := n.Rlist.First() // i.(T)
-		e.Left = walkexpr(e.Left, init)
-
-	case ODOTTYPE, ODOTTYPE2:
-		n.Left = walkexpr(n.Left, init)
-
-	case OCONVIFACE:
-		n.Left = walkexpr(n.Left, init)
-
-		// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
-		if isdirectiface(n.Left.Type) {
-			var t *Node
-			if n.Type.IsEmptyInterface() {
-				t = typename(n.Left.Type)
-			} else {
-				t = itabname(n.Left.Type, n.Type)
-			}
-			l := nod(OEFACE, t, n.Left)
-			l.Type = n.Type
-			l.Typecheck = n.Typecheck
-			n = l
-			break
-		}
-		// Optimize convT2{E,I} when T is not pointer-shaped.
-		// We make the interface by initializing a stack temporary to
-		// the value we want to put in the interface, then using the address of
-		// that stack temporary for the interface data word.
-		if !n.Left.Type.IsInterface() && n.Esc == EscNone && n.Left.Type.Width <= 1024 {
-			tmp := temp(n.Left.Type)
-			init.Append(typecheck(nod(OAS, tmp, n.Left), Etop))
-			var t *Node
-			if n.Type.IsEmptyInterface() {
-				t = typename(n.Left.Type)
-			} else {
-				t = itabname(n.Left.Type, n.Type)
-			}
-			l := nod(OEFACE, t, typecheck(nod(OADDR, tmp, nil), Erv))
-			l.Type = n.Type
-			l.Typecheck = n.Typecheck
-			n = l
-			break
-		}
-
-		// Implement interface to empty interface conversion.
-		// tmp = i.itab
-		// if tmp != nil {
-		//    tmp = tmp.type
-		// }
-		// e = iface{tmp, i.data}
-		if n.Type.IsEmptyInterface() && n.Left.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
-			// Evaluate the input interface.
-			c := temp(n.Left.Type)
-			init.Append(nod(OAS, c, n.Left))
-
-			// Get the itab out of the interface.
-			tmp := temp(ptrto(Types[TUINT8]))
-			init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv)))
-
-			// Get the type out of the itab.
-			nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), Erv), nil)
-			nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
-			init.Append(nif)
-
-			// Build the result.
-			e := nod(OEFACE, tmp, ifaceData(c, ptrto(Types[TUINT8])))
-			e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE.
-			e.Typecheck = 1
-			n = e
-			break
-		}
-
-		var ll []*Node
-		if n.Type.IsEmptyInterface() {
-			if !n.Left.Type.IsInterface() {
-				ll = append(ll, typename(n.Left.Type))
-			}
-		} else {
-			if n.Left.Type.IsInterface() {
-				ll = append(ll, typename(n.Type))
-			} else {
-				ll = append(ll, itabname(n.Left.Type, n.Type))
-			}
-		}
-
-		if n.Left.Type.IsInterface() {
-			ll = append(ll, n.Left)
-		} else {
-			// regular types are passed by reference to avoid C vararg calls
-			// orderexpr arranged for n.Left to be a temporary for all
-			// the conversions it could see. comparison of an interface
-			// with a non-interface, especially in a switch on interface value
-			// with non-interface cases, is not visible to orderstmt, so we
-			// have to fall back on allocating a temp here.
-			if islvalue(n.Left) {
-				ll = append(ll, nod(OADDR, n.Left, nil))
-			} else {
-				ll = append(ll, nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
-			}
-			dowidth(n.Left.Type)
-		}
-
-		fn := syslook(convFuncName(n.Left.Type, n.Type))
-		fn = substArgTypes(fn, n.Left.Type, n.Type)
-		dowidth(fn.Type)
-		n = nod(OCALL, fn, nil)
-		n.List.Set(ll)
-		n = typecheck(n, Erv)
-		n = walkexpr(n, init)
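
The OCONVIFACE case above boxes pointer-shaped values with a plain two-word (type, data) copy instead of calling convT2E/convT2I. An illustrative, hedged sketch of that effect from ordinary user code, using testing.AllocsPerRun; on the gc toolchain this typically reports 0 allocations, though the exact figure is an implementation detail rather than a language guarantee:

package main

import (
	"fmt"
	"testing"
)

func main() {
	p := new(int)
	var sink interface{}
	// Boxing a pointer-shaped value (*int) into an interface is a two-word copy,
	// so no per-iteration allocation is expected.
	allocs := testing.AllocsPerRun(1000, func() { sink = p })
	fmt.Println("allocations per boxing:", allocs) // typically 0
	_ = sink
}
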
-
-	case OCONV, OCONVNOP:
-		if Thearch.LinkArch.Family == sys.ARM || Thearch.LinkArch.Family == sys.MIPS {
-			if n.Left.Type.IsFloat() {
-				if n.Type.Etype == TINT64 {
-					n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					break
-				}
-
-				if n.Type.Etype == TUINT64 {
-					n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					break
-				}
-			}
-
-			if n.Type.IsFloat() {
-				if n.Left.Type.Etype == TINT64 {
-					n = conv(mkcall("int64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TINT64])), n.Type)
-					break
-				}
-
-				if n.Left.Type.Etype == TUINT64 {
-					n = conv(mkcall("uint64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT64])), n.Type)
-					break
-				}
-			}
-		}
-
-		if Thearch.LinkArch.Family == sys.I386 {
-			if n.Left.Type.IsFloat() {
-				if n.Type.Etype == TINT64 {
-					n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					break
-				}
-
-				if n.Type.Etype == TUINT64 {
-					n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					break
-				}
-				if n.Type.Etype == TUINT32 || n.Type.Etype == TUINT || n.Type.Etype == TUINTPTR {
-					n = mkcall("float64touint32", n.Type, init, conv(n.Left, Types[TFLOAT64]))
-					break
-				}
-			}
-			if n.Type.IsFloat() {
-				if n.Left.Type.Etype == TINT64 {
-					n = conv(mkcall("int64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TINT64])), n.Type)
-					break
-				}
-
-				if n.Left.Type.Etype == TUINT64 {
-					n = conv(mkcall("uint64tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT64])), n.Type)
-					break
-				}
-				if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINT || n.Left.Type.Etype == TUINTPTR {
-					n = conv(mkcall("uint32tofloat64", Types[TFLOAT64], init, conv(n.Left, Types[TUINT32])), n.Type)
-					break
-				}
-			}
-		}
-
-		n.Left = walkexpr(n.Left, init)
-
-	case OANDNOT:
-		n.Left = walkexpr(n.Left, init)
-		n.Op = OAND
-		n.Right = nod(OCOM, n.Right, nil)
-		n.Right = typecheck(n.Right, Erv)
-		n.Right = walkexpr(n.Right, init)
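
The OANDNOT case rewrites a &^ b into a & ^b (an AND with the complement). A minimal check of that identity (illustrative only):

package main

import "fmt"

func main() {
	a, b := uint8(0xF3), uint8(0x0F)
	// a &^ b (AND NOT) equals a & ^b (AND with the bitwise complement).
	fmt.Printf("%#x %#x\n", a&^b, a & ^b) // 0xf0 0xf0
	fmt.Println(a&^b == a & ^b)           // true
}
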
-
-	case OMUL:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-		n = walkmul(n, init)
-
-	case ODIV, OMOD:
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-
-		// rewrite complex div into function call.
-		et := n.Left.Type.Etype
-
-		if isComplex[et] && n.Op == ODIV {
-			t := n.Type
-			n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
-			n = conv(n, t)
-			break
-		}
-
-		// Nothing to do for float divisions.
-		if isFloat[et] {
-			break
-		}
-
-		// Try rewriting as shifts or magic multiplies.
-		n = walkdiv(n, init)
-
-		// rewrite 64-bit div and mod into function calls
-		// on 32-bit architectures.
-		switch n.Op {
-		case OMOD, ODIV:
-			if Widthreg >= 8 || (et != TUINT64 && et != TINT64) {
-				break opswitch
-			}
-			var fn string
-			if et == TINT64 {
-				fn = "int64"
-			} else {
-				fn = "uint64"
-			}
-			if n.Op == ODIV {
-				fn += "div"
-			} else {
-				fn += "mod"
-			}
-			n = mkcall(fn, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
-		}
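
The ODIV/OMOD case first tries shifts and magic multiplies (walkdiv) and only falls back to runtime calls for 64-bit operands on 32-bit targets. A small sketch of the power-of-two identities the shift rewrite relies on for unsigned operands:

package main

import "fmt"

func main() {
	x := uint32(12345)
	// For unsigned values, division and modulus by a power of two
	// reduce to a shift and a mask.
	fmt.Println(x/8 == x>>3) // true
	fmt.Println(x%8 == x&7)  // true
}
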
-
-	case OINDEX:
-		n.Left = walkexpr(n.Left, init)
-
-		// save the original node for bounds checking elision.
-		// If it was an ODIV/OMOD, walk might rewrite it.
-		r := n.Right
-
-		n.Right = walkexpr(n.Right, init)
-
-		// if range of type cannot exceed static array bound,
-		// disable bounds check.
-		if n.Bounded {
-			break
-		}
-		t := n.Left.Type
-		if t != nil && t.IsPtr() {
-			t = t.Elem()
-		}
-		if t.IsArray() {
-			n.Bounded = bounded(r, t.NumElem())
-			if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
-				Warn("index bounds check elided")
-			}
-			if smallintconst(n.Right) && !n.Bounded {
-				yyerror("index out of bounds")
-			}
-		} else if Isconst(n.Left, CTSTR) {
-			n.Bounded = bounded(r, int64(len(n.Left.Val().U.(string))))
-			if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
-				Warn("index bounds check elided")
-			}
-			if smallintconst(n.Right) && !n.Bounded {
-				yyerror("index out of bounds")
-			}
-		}
-
-		if Isconst(n.Right, CTINT) {
-			if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
-				yyerror("index out of bounds")
-			}
-		}
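
The OINDEX case elides the bounds check when the index provably fits the static array bound and reports constant out-of-range indices at compile time. A hedged, source-level sketch (masking is one way an index becomes provably bounded; whether the check is actually removed is a compiler decision, reported under the -m diagnostics as "index bounds check elided"):

package main

import "fmt"

func main() {
	var a [4]int
	i := 7
	// i&3 is always in [0,3], so the bounds check on a[i&3] can be elided.
	a[i&3] = 42
	fmt.Println(a) // [0 0 0 42]

	// A constant index that is out of range is a compile-time error:
	// _ = a[4] // "index out of bounds"
}
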
-
-	case OINDEXMAP:
-		// Replace m[k] with *map{access1,assign}(maptype, m, &k)
-		n.Left = walkexpr(n.Left, init)
-		n.Right = walkexpr(n.Right, init)
-		map_ := n.Left
-		key := n.Right
-		t := map_.Type
-		if n.Etype == 1 {
-			// This m[k] expression is on the left-hand side of an assignment.
-			// orderexpr made sure key is addressable.
-			key = nod(OADDR, key, nil)
-			n = mkcall1(mapfn("mapassign", t), nil, init, typename(t), map_, key)
-		} else {
-			// m[k] is not the target of an assignment.
-			p := ""
-			if t.Val().Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
-				switch algtype(t.Key()) {
-				case AMEM32:
-					p = "mapaccess1_fast32"
-				case AMEM64:
-					p = "mapaccess1_fast64"
-				case ASTRING:
-					p = "mapaccess1_faststr"
-				}
-			}
-
-			if p == "" {
-				// standard version takes key by reference.
-				// orderexpr made sure key is addressable.
-				key = nod(OADDR, key, nil)
-				p = "mapaccess1"
-			}
-
-			if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero
-				n = mkcall1(mapfn(p, t), ptrto(t.Val()), init, typename(t), map_, key)
-			} else {
-				p = "mapaccess1_fat"
-				z := zeroaddr(w)
-				n = mkcall1(mapfn(p, t), ptrto(t.Val()), init, typename(t), map_, key, z)
-			}
-		}
-		n.Type = ptrto(t.Val())
-		n.NonNil = true // mapaccess1* and mapassign always return non-nil pointers.
-		n = nod(OIND, n, nil)
-		n.Type = t.Val()
-		n.Typecheck = 1
-
-	case ORECV:
-		Fatalf("walkexpr ORECV") // should see inside OAS only
-
-	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
-		n.Left = walkexpr(n.Left, init)
-		low, high, max := n.SliceBounds()
-		low = walkexpr(low, init)
-		if low != nil && iszero(low) {
-			// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
-			low = nil
-		}
-		high = walkexpr(high, init)
-		max = walkexpr(max, init)
-		n.SetSliceBounds(low, high, max)
-		if n.Op.IsSlice3() {
-			if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
-				// Reduce x[i:j:cap(x)] to x[i:j].
-				if n.Op == OSLICE3 {
-					n.Op = OSLICE
-				} else {
-					n.Op = OSLICEARR
-				}
-				n = reduceSlice(n)
-			}
-		} else {
-			n = reduceSlice(n)
-		}
-
-	case OADDR:
-		n.Left = walkexpr(n.Left, init)
-
-	case ONEW:
-		if n.Esc == EscNone {
-			if n.Type.Elem().Width >= 1<<16 {
-				Fatalf("large ONEW with EscNone: %v", n)
-			}
-			r := temp(n.Type.Elem())
-			r = nod(OAS, r, nil) // zero temp
-			r = typecheck(r, Etop)
-			init.Append(r)
-			r = nod(OADDR, r.Left, nil)
-			r = typecheck(r, Erv)
-			n = r
-		} else {
-			n = callnew(n.Type.Elem())
-		}
-
-	case OCMPSTR:
-		// s + "badgerbadgerbadger" == "badgerbadgerbadger"
-		if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) {
-			// TODO(marvin): Fix Node.EType type union.
-			r := nod(Op(n.Etype), nod(OLEN, n.Left.List.First(), nil), nodintconst(0))
-			r = typecheck(r, Erv)
-			r = walkexpr(r, init)
-			r.Type = n.Type
-			n = r
-			break
-		}
-
-		// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
-		var cs, ncs *Node // const string, non-const string
-		switch {
-		case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
-			// ignore; will be constant evaluated
-		case Isconst(n.Left, CTSTR):
-			cs = n.Left
-			ncs = n.Right
-		case Isconst(n.Right, CTSTR):
-			cs = n.Right
-			ncs = n.Left
-		}
-		if cs != nil {
-			cmp := Op(n.Etype)
-			// maxRewriteLen was chosen empirically.
-			// It is the value that minimizes cmd/go file size
-			// across most architectures.
-			// See the commit description for CL 26758 for details.
-			maxRewriteLen := 6
-			var and Op
-			switch cmp {
-			case OEQ:
-				and = OANDAND
-			case ONE:
-				and = OOROR
-			default:
-				// Don't do byte-wise comparisons for <, <=, etc.
-				// They're fairly complicated.
-				// Length-only checks are ok, though.
-				maxRewriteLen = 0
-			}
-			if s := cs.Val().U.(string); len(s) <= maxRewriteLen {
-				if len(s) > 0 {
-					ncs = safeexpr(ncs, init)
-				}
-				// TODO(marvin): Fix Node.EType type union.
-				r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
-				for i := 0; i < len(s); i++ {
-					cb := nodintconst(int64(s[i]))
-					ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
-					r = nod(and, r, nod(cmp, ncb, cb))
-				}
-				r = typecheck(r, Erv)
-				r = walkexpr(r, init)
-				r.Type = n.Type
-				n = r
-				break
-			}
-		}
-
-		var r *Node
-		// TODO(marvin): Fix Node.EType type union.
-		if Op(n.Etype) == OEQ || Op(n.Etype) == ONE {
-			// prepare for rewrite below
-			n.Left = cheapexpr(n.Left, init)
-			n.Right = cheapexpr(n.Right, init)
-
-			r = mkcall("eqstring", Types[TBOOL], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
-
-			// quick check of len before full compare for == or !=
-			// eqstring assumes that the lengths are equal
-			// TODO(marvin): Fix Node.EType type union.
-			if Op(n.Etype) == OEQ {
-				// len(left) == len(right) && eqstring(left, right)
-				r = nod(OANDAND, nod(OEQ, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r)
-			} else {
-				// len(left) != len(right) || !eqstring(left, right)
-				r = nod(ONOT, r, nil)
-				r = nod(OOROR, nod(ONE, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r)
-			}
-
-			r = typecheck(r, Erv)
-			r = walkexpr(r, nil)
-		} else {
-			// sys_cmpstring(s1, s2) :: 0
-			r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
-			// TODO(marvin): Fix Node.EType type union.
-			r = nod(Op(n.Etype), r, nodintconst(0))
-		}
-
-		r = typecheck(r, Erv)
-		if !n.Type.IsBoolean() {
-			Fatalf("cmp %v", n.Type)
-		}
-		r.Type = n.Type
-		n = r
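
For equality against a short constant string (at most maxRewriteLen bytes), OCMPSTR emits a length check plus byte-wise comparisons rather than a call to eqstring. The rewrite is equivalent to this hand-expanded form (illustrative sketch, not the generated code):

package main

import "fmt"

func main() {
	s := "go"
	// s == "go" becomes roughly: len(s) == 2 && s[0] == 'g' && s[1] == 'o'
	fmt.Println(s == "go", len(s) == 2 && s[0] == 'g' && s[1] == 'o') // true true
}
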
-
-	case OADDSTR:
-		n = addstr(n, init)
-
-	case OAPPEND:
-		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
-		Fatalf("append outside assignment")
-
-	case OCOPY:
-		n = copyany(n, init, instrumenting && !compiling_runtime)
-
-		// cannot use chanfn - closechan takes any, not chan any
-	case OCLOSE:
-		fn := syslook("closechan")
-
-		fn = substArgTypes(fn, n.Left.Type)
-		n = mkcall1(fn, nil, init, n.Left)
-
-	case OMAKECHAN:
-		n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
-
-	case OMAKEMAP:
-		t := n.Type
-
-		a := nodnil() // hmap buffer
-		r := nodnil() // bucket buffer
-		if n.Esc == EscNone {
-			// Allocate hmap buffer on stack.
-			var_ := temp(hmap(t))
-
-			a = nod(OAS, var_, nil) // zero temp
-			a = typecheck(a, Etop)
-			init.Append(a)
-			a = nod(OADDR, var_, nil)
-
-			// Allocate one bucket on stack.
-			// Maximum key/value size is 128 bytes; larger objects
-			// are stored with an indirection. So max bucket size is 2048+eps.
-			var_ = temp(mapbucket(t))
-
-			r = nod(OAS, var_, nil) // zero temp
-			r = typecheck(r, Etop)
-			init.Append(r)
-			r = nod(OADDR, var_, nil)
-		}
-
-		fn := syslook("makemap")
-		fn = substArgTypes(fn, hmap(t), mapbucket(t), t.Key(), t.Val())
-		n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
-
-	case OMAKESLICE:
-		l := n.Left
-		r := n.Right
-		if r == nil {
-			r = safeexpr(l, init)
-			l = r
-		}
-		t := n.Type
-		if n.Esc == EscNone {
-			if !isSmallMakeSlice(n) {
-				Fatalf("non-small OMAKESLICE with EscNone: %v", n)
-			}
-			// var arr [r]T
-			// n = arr[:l]
-			t = typArray(t.Elem(), nonnegintconst(r)) // [r]T
-			var_ := temp(t)
-			a := nod(OAS, var_, nil) // zero temp
-			a = typecheck(a, Etop)
-			init.Append(a)
-			r := nod(OSLICE, var_, nil) // arr[:l]
-			r.SetSliceBounds(nil, l, nil)
-			r = conv(r, n.Type) // in case n.Type is named.
-			r = typecheck(r, Erv)
-			r = walkexpr(r, init)
-			n = r
-		} else {
-			// n escapes; set up a call to makeslice.
-			// When len and cap fit into int, use makeslice instead of
-			// makeslice64; makeslice is faster and shorter on 32-bit platforms.
-
-			if t.Elem().NotInHeap {
-				yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
-			}
-
-			len, cap := l, r
-
-			fnname := "makeslice64"
-			argtype := Types[TINT64]
-
-			// typechecking guarantees that TIDEAL len/cap are positive and fit in an int.
-			// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
-			// will be handled by the negative range checks in makeslice during runtime.
-			if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
-				(cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
-				fnname = "makeslice"
-				argtype = Types[TINT]
-			}
-
-			fn := syslook(fnname)
-			fn = substArgTypes(fn, t.Elem()) // any-1
-			n = mkcall1(fn, t, init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
-		}
-
-	case ORUNESTR:
-		a := nodnil()
-		if n.Esc == EscNone {
-			t := typArray(Types[TUINT8], 4)
-			var_ := temp(t)
-			a = nod(OADDR, var_, nil)
-		}
-
-		// intstring(*[4]byte, rune)
-		n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
-
-	case OARRAYBYTESTR:
-		a := nodnil()
-		if n.Esc == EscNone {
-			// Create temporary buffer for string on stack.
-			t := typArray(Types[TUINT8], tmpstringbufsize)
-
-			a = nod(OADDR, temp(t), nil)
-		}
-
-		// slicebytetostring(*[32]byte, []byte) string;
-		n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
-
-		// slicebytetostringtmp([]byte) string;
-	case OARRAYBYTESTRTMP:
-		n.Left = walkexpr(n.Left, init)
-
-		if !instrumenting {
-			// Let the backend handle OARRAYBYTESTRTMP directly
-			// to avoid a function call to slicebytetostringtmp.
-			break
-		}
-
-		n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
-
-		// slicerunetostring(*[32]byte, []rune) string;
-	case OARRAYRUNESTR:
-		a := nodnil()
-
-		if n.Esc == EscNone {
-			// Create temporary buffer for string on stack.
-			t := typArray(Types[TUINT8], tmpstringbufsize)
-
-			a = nod(OADDR, temp(t), nil)
-		}
-
-		n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
-
-		// stringtoslicebyte(*[32]byte, string) []byte;
-	case OSTRARRAYBYTE:
-		a := nodnil()
-
-		if n.Esc == EscNone {
-			// Create temporary buffer for slice on stack.
-			t := typArray(Types[TUINT8], tmpstringbufsize)
-
-			a = nod(OADDR, temp(t), nil)
-		}
-
-		n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
-
-	case OSTRARRAYBYTETMP:
-		// []byte(string) conversion that creates a slice
-		// referring to the actual string bytes.
-		// This conversion is handled later by the backend and
-		// is only for use by internal compiler optimizations
-		// that know that the slice won't be mutated.
-		// The only such case today is:
-		// for i, c := range []byte(string)
-		n.Left = walkexpr(n.Left, init)
-
-		// stringtoslicerune(*[32]rune, string) []rune
-	case OSTRARRAYRUNE:
-		a := nodnil()
-
-		if n.Esc == EscNone {
-			// Create temporary buffer for slice on stack.
-			t := typArray(Types[TINT32], tmpstringbufsize)
-
-			a = nod(OADDR, temp(t), nil)
-		}
-
-		n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
-
-		// ifaceeq(i1 any-1, i2 any-2) (ret bool);
-	case OCMPIFACE:
-		if !eqtype(n.Left.Type, n.Right.Type) {
-			Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type)
-		}
-		var fn *Node
-		if n.Left.Type.IsEmptyInterface() {
-			fn = syslook("efaceeq")
-		} else {
-			fn = syslook("ifaceeq")
-		}
-
-		n.Right = cheapexpr(n.Right, init)
-		n.Left = cheapexpr(n.Left, init)
-		fn = substArgTypes(fn, n.Right.Type, n.Left.Type)
-		r := mkcall1(fn, n.Type, init, n.Left, n.Right)
-		// TODO(marvin): Fix Node.EType type union.
-		if Op(n.Etype) == ONE {
-			r = nod(ONOT, r, nil)
-		}
-
-		// check itable/type before full compare.
-		// TODO(marvin): Fix Node.EType type union.
-		if Op(n.Etype) == OEQ {
-			r = nod(OANDAND, nod(OEQ, nod(OITAB, n.Left, nil), nod(OITAB, n.Right, nil)), r)
-		} else {
-			r = nod(OOROR, nod(ONE, nod(OITAB, n.Left, nil), nod(OITAB, n.Right, nil)), r)
-		}
-		r = typecheck(r, Erv)
-		r = walkexpr(r, init)
-		r.Type = n.Type
-		n = r
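
OCMPIFACE compares the itab/type words first and calls efaceeq/ifaceeq on the data only when they match. At the language level, two interface values are equal only when both the dynamic types and the values are equal:

package main

import "fmt"

func main() {
	var x, y interface{} = 1, 1
	var z interface{} = int64(1)
	fmt.Println(x == y) // true: same dynamic type (int) and equal value
	fmt.Println(x == z) // false: type words differ, so the data is never compared
}
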
-
-	case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
-		if isStaticCompositeLiteral(n) {
-			// n can be directly represented in the read-only data section.
-			// Make direct reference to the static data. See issue 12841.
-			vstat := staticname(n.Type)
-			vstat.Name.Readonly = true
-			fixedlit(inInitFunction, initKindStatic, n, vstat, init)
-			n = vstat
-			n = typecheck(n, Erv)
-			break
-		}
-		var_ := temp(n.Type)
-		anylit(n, var_, init)
-		n = var_
-
-	case OSEND:
-		n1 := n.Right
-		n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
-		n1 = walkexpr(n1, init)
-		n1 = nod(OADDR, n1, nil)
-		n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, typename(n.Left.Type), n.Left, n1)
-
-	case OCLOSURE:
-		n = walkclosure(n, init)
-
-	case OCALLPART:
-		n = walkpartialcall(n, init)
-	}
-
-	// Expressions that are constant at run time but not
-	// considered const by the language spec are not turned into
-	// constants until walk. For example, if n is y%1 == 0, the
-	// walk of y%1 may have replaced it by 0.
-	// Check whether n with its updated args is itself now a constant.
-	t := n.Type
-
-	evconst(n)
-	n.Type = t
-	if n.Op == OLITERAL {
-		n = typecheck(n, Erv)
-	}
-
-	ullmancalc(n)
-
-	if Debug['w'] != 0 && n != nil {
-		Dump("walk", n)
-	}
-
-	lineno = lno
-	return n
-}
-
-// TODO(josharian): combine this with its caller and simplify
-func reduceSlice(n *Node) *Node {
-	low, high, max := n.SliceBounds()
-	if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
-		// Reduce x[i:len(x)] to x[i:].
-		high = nil
-	}
-	n.SetSliceBounds(low, high, max)
-	if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
-		// Reduce x[:] to x.
-		if Debug_slice > 0 {
-			Warn("slice: omit slice operation")
-		}
-		return n.Left
-	}
-	return n
-}
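
reduceSlice drops redundant bounds: x[i:len(x)] becomes x[i:], and x[:] becomes x itself. The reductions preserve the slice header exactly, as this small check illustrates (a sketch, not the compiler's own test):

package main

import "fmt"

func main() {
	x := []int{1, 2, 3, 4}
	y := x[:]        // reducible to x: same base pointer, length, and capacity
	z := x[1:len(x)] // reducible to x[1:]
	fmt.Println(&y[0] == &x[0], len(y) == len(x), cap(y) == cap(x)) // true true true
	fmt.Println(&z[0] == &x[1], len(z) == len(x)-1)                 // true true
}
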
-
-func ascompatee1(op Op, l *Node, r *Node, init *Nodes) *Node {
-	// convas will turn map assigns into function calls,
-	// making it impossible for reorder3 to work.
-	n := nod(OAS, l, r)
-
-	if l.Op == OINDEXMAP {
-		return n
-	}
-
-	return convas(n, init)
-}
-
-func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
-	// check assign expression list to
-	// an expression list. called in
-	//	expr-list = expr-list
-
-	// ensure order of evaluation for function calls
-	for i := range nl {
-		nl[i] = safeexpr(nl[i], init)
-	}
-	for i1 := range nr {
-		nr[i1] = safeexpr(nr[i1], init)
-	}
-
-	var nn []*Node
-	i := 0
-	for ; i < len(nl); i++ {
-		if i >= len(nr) {
-			break
-		}
-		// Do not generate 'x = x' during return. See issue 4014.
-		if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
-			continue
-		}
-		nn = append(nn, ascompatee1(op, nl[i], nr[i], init))
-	}
-
-	// cannot happen: caller checked that lists had same length
-	if i < len(nl) || i < len(nr) {
-		var nln, nrn Nodes
-		nln.Set(nl)
-		nrn.Set(nr)
-		yyerror("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.Func.Nname.Sym.Name)
-	}
-	return nn
-}
-
-// fncall reports whether assigning an rv of type rt to the lv l
-// implies a function call, either to evaluate the lv or to convert
-// between the types.
-func fncall(l *Node, rt *Type) bool {
-	if l.Ullman >= UINF || l.Op == OINDEXMAP {
-		return true
-	}
-	var r Node
-	if needwritebarrier(l, &r) {
-		return true
-	}
-	if eqtype(l.Type, rt) {
-		return false
-	}
-	return true
-}
-
-// check assign type list to
-// an expression list. called in
-//	expr-list = func()
-func ascompatet(op Op, nl Nodes, nr *Type) []*Node {
-	r, saver := iterFields(nr)
-
-	var nn, mm Nodes
-	var ullmanOverflow bool
-	var i int
-	for i = 0; i < nl.Len(); i++ {
-		if r == nil {
-			break
-		}
-		l := nl.Index(i)
-		if isblank(l) {
-			r = saver.Next()
-			continue
-		}
-
-		// any lv that causes a fn call must be
-		// deferred until all the return arguments
-		// have been pulled from the output arguments
-		if fncall(l, r.Type) {
-			tmp := temp(r.Type)
-			tmp = typecheck(tmp, Erv)
-			a := nod(OAS, l, tmp)
-			a = convas(a, &mm)
-			mm.Append(a)
-			l = tmp
-		}
-
-		a := nod(OAS, l, nodarg(r, 0))
-		a = convas(a, &nn)
-		ullmancalc(a)
-		if a.Ullman >= UINF {
-			Dump("ascompatet ucount", a)
-			ullmanOverflow = true
-		}
-
-		nn.Append(a)
-		r = saver.Next()
-	}
-
-	if i < nl.Len() || r != nil {
-		yyerror("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
-	}
-
-	if ullmanOverflow {
-		Fatalf("ascompatet: too many function calls evaluating parameters")
-	}
-	return append(nn.Slice(), mm.Slice()...)
-}
-
-// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(lr0, nn []*Node, l *Field, fp int, init *Nodes, ddd *Node) []*Node {
-	esc := uint16(EscUnknown)
-	if ddd != nil {
-		esc = ddd.Esc
-	}
-
-	tslice := typSlice(l.Type.Elem())
-
-	var n *Node
-	if len(lr0) == 0 {
-		n = nodnil()
-		n.Type = tslice
-	} else {
-		n = nod(OCOMPLIT, nil, typenod(tslice))
-		if ddd != nil && prealloc[ddd] != nil {
-			prealloc[n] = prealloc[ddd] // temporary to use
-		}
-		n.List.Set(lr0)
-		n.Esc = esc
-		n = typecheck(n, Erv)
-		if n.Type == nil {
-			Fatalf("mkdotargslice: typecheck failed")
-		}
-		n = walkexpr(n, init)
-	}
-
-	a := nod(OAS, nodarg(l, fp), n)
-	nn = append(nn, convas(a, init))
-	return nn
-}
-
-// helpers for shape errors
-func dumptypes(nl *Type, what string) string {
-	s := ""
-	for _, l := range nl.Fields().Slice() {
-		if s != "" {
-			s += ", "
-		}
-		s += fldconv(l, 0)
-	}
-	if s == "" {
-		s = fmt.Sprintf("[no arguments %s]", what)
-	}
-	return s
-}
-
-func dumpnodetypes(l []*Node, what string) string {
-	s := ""
-	for _, r := range l {
-		if s != "" {
-			s += ", "
-		}
-		s += r.Type.String()
-	}
-	if s == "" {
-		s = fmt.Sprintf("[no arguments %s]", what)
-	}
-	return s
-}
-
-// check assign expression list to
-// a type list. called in
-//	return expr-list
-//	func(expr-list)
-func ascompatte(op Op, call *Node, isddd bool, nl *Type, lr []*Node, fp int, init *Nodes) []*Node {
-	lr0 := lr
-	l, savel := iterFields(nl)
-	var r *Node
-	if len(lr) > 0 {
-		r = lr[0]
-	}
-	var nn []*Node
-
-	// f(g()) where g has multiple return values
-	if r != nil && len(lr) <= 1 && r.Type.IsFuncArgStruct() {
-		// optimization - can do block copy
-		if eqtypenoname(r.Type, nl) {
-			arg := nodarg(nl, fp)
-			r = nod(OCONVNOP, r, nil)
-			r.Type = arg.Type
-			nn = []*Node{convas(nod(OAS, arg, r), init)}
-			goto ret
-		}
-
-		// conversions involved.
-		// copy into temporaries.
-		var alist []*Node
-
-		for _, l := range r.Type.Fields().Slice() {
-			tmp := temp(l.Type)
-			alist = append(alist, tmp)
-		}
-
-		a := nod(OAS2, nil, nil)
-		a.List.Set(alist)
-		a.Rlist.Set(lr)
-		a = typecheck(a, Etop)
-		a = walkstmt(a)
-		init.Append(a)
-		lr = alist
-		r = lr[0]
-		l, savel = iterFields(nl)
-	}
-
-	for {
-		if l != nil && l.Isddd {
-			// the ddd parameter must be last
-			ll := savel.Next()
-
-			if ll != nil {
-				yyerror("... must be last argument")
-			}
-
-			// special case --
-			// if we are assigning a single ddd
-			// argument to a ddd parameter, it is
-			// passed through unencapsulated
-			if r != nil && len(lr) <= 1 && isddd && eqtype(l.Type, r.Type) {
-				a := nod(OAS, nodarg(l, fp), r)
-				a = convas(a, init)
-				nn = append(nn, a)
-				break
-			}
-
-			// normal case -- make a slice of all
-			// remaining arguments and pass it to
-			// the ddd parameter.
-			nn = mkdotargslice(lr, nn, l, fp, init, call.Right)
-
-			break
-		}
-
-		if l == nil || r == nil {
-			if l != nil || r != nil {
-				l1 := dumptypes(nl, "expected")
-				l2 := dumpnodetypes(lr0, "given")
-				if l != nil {
-					yyerror("not enough arguments to %v\n\t%s\n\t%s", op, l1, l2)
-				} else {
-					yyerror("too many arguments to %v\n\t%s\n\t%s", op, l1, l2)
-				}
-			}
-
-			break
-		}
-
-		a := nod(OAS, nodarg(l, fp), r)
-		a = convas(a, init)
-		nn = append(nn, a)
-
-		l = savel.Next()
-		r = nil
-		lr = lr[1:]
-		if len(lr) > 0 {
-			r = lr[0]
-		}
-	}
-
-ret:
-	for _, n := range nn {
-		n.Typecheck = 1
-	}
-	return nn
-}
-
-// generate code for print
-func walkprint(nn *Node, init *Nodes) *Node {
-	var r *Node
-	var n *Node
-	var on *Node
-	var t *Type
-	var et EType
-
-	op := nn.Op
-	all := nn.List
-	var calls []*Node
-	notfirst := false
-
-	// Hoist all the argument evaluation up before the lock.
-	walkexprlistcheap(all.Slice(), init)
-
-	calls = append(calls, mkcall("printlock", nil, init))
-	for i1, n1 := range all.Slice() {
-		if notfirst {
-			calls = append(calls, mkcall("printsp", nil, init))
-		}
-
-		notfirst = op == OPRINTN
-
-		n = n1
-		if n.Op == OLITERAL {
-			switch n.Val().Ctype() {
-			case CTRUNE:
-				n = defaultlit(n, runetype)
-
-			case CTINT:
-				n = defaultlit(n, Types[TINT64])
-
-			case CTFLT:
-				n = defaultlit(n, Types[TFLOAT64])
-			}
-		}
-
-		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
-			n = defaultlit(n, Types[TINT64])
-		}
-		n = defaultlit(n, nil)
-		all.SetIndex(i1, n)
-		if n.Type == nil || n.Type.Etype == TFORW {
-			continue
-		}
-
-		t = n.Type
-		et = n.Type.Etype
-		if n.Type.IsInterface() {
-			if n.Type.IsEmptyInterface() {
-				on = syslook("printeface")
-			} else {
-				on = syslook("printiface")
-			}
-			on = substArgTypes(on, n.Type) // any-1
-		} else if n.Type.IsPtr() || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
-			on = syslook("printpointer")
-			on = substArgTypes(on, n.Type) // any-1
-		} else if n.Type.IsSlice() {
-			on = syslook("printslice")
-			on = substArgTypes(on, n.Type) // any-1
-		} else if isInt[et] {
-			if et == TUINT64 {
-				if (t.Sym.Pkg == Runtimepkg || compiling_runtime) && t.Sym.Name == "hex" {
-					on = syslook("printhex")
-				} else {
-					on = syslook("printuint")
-				}
-			} else {
-				on = syslook("printint")
-			}
-		} else if isFloat[et] {
-			on = syslook("printfloat")
-		} else if isComplex[et] {
-			on = syslook("printcomplex")
-		} else if et == TBOOL {
-			on = syslook("printbool")
-		} else if et == TSTRING {
-			on = syslook("printstring")
-		} else {
-			badtype(OPRINT, n.Type, nil)
-			continue
-		}
-
-		t = on.Type.Params().Field(0).Type
-
-		if !eqtype(t, n.Type) {
-			n = nod(OCONV, n, nil)
-			n.Type = t
-		}
-
-		r = nod(OCALL, on, nil)
-		r.List.Append(n)
-		calls = append(calls, r)
-	}
-
-	if op == OPRINTN {
-		calls = append(calls, mkcall("printnl", nil, nil))
-	}
-
-	calls = append(calls, mkcall("printunlock", nil, init))
-
-	typecheckslice(calls, Etop)
-	walkexprlist(calls, init)
-
-	r = nod(OEMPTY, nil, nil)
-	r = typecheck(r, Etop)
-	r = walkexpr(r, init)
-	r.Ninit.Set(calls)
-	return r
-}
-
-func callnew(t *Type) *Node {
-	if t.NotInHeap {
-		yyerror("%v is go:notinheap; heap allocation disallowed", t)
-	}
-	dowidth(t)
-	fn := syslook("newobject")
-	fn = substArgTypes(fn, t)
-	v := mkcall1(fn, ptrto(t), nil, typename(t))
-	v.NonNil = true
-	return v
-}
-
-func iscallret(n *Node) bool {
-	n = outervalue(n)
-	return n.Op == OINDREGSP
-}
-
-func isstack(n *Node) bool {
-	n = outervalue(n)
-
-	// If n is *autotmp and autotmp = &foo, replace n with foo.
-	// We introduce such temps when initializing struct literals.
-	if n.Op == OIND && n.Left.Op == ONAME && n.Left.IsAutoTmp() {
-		defn := n.Left.Name.Defn
-		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
-			n = defn.Right.Left
-		}
-	}
-
-	switch n.Op {
-	case OINDREGSP:
-		return true
-
-	case ONAME:
-		switch n.Class {
-		case PAUTO, PPARAM, PPARAMOUT:
-			return true
-		}
-	}
-
-	return false
-}
-
-// Do we need a write barrier for the assignment l = r?
-func needwritebarrier(l *Node, r *Node) bool {
-	if !use_writebarrier {
-		return false
-	}
-
-	if l == nil || isblank(l) {
-		return false
-	}
-
-	// No write barrier for write of non-pointers.
-	dowidth(l.Type)
-
-	if !haspointers(l.Type) {
-		return false
-	}
-
-	// No write barrier for write to stack.
-	if isstack(l) {
-		return false
-	}
-
-	// No write barrier if this is a pointer to a go:notinheap
-	// type, since the write barrier's inheap(ptr) check will fail.
-	if l.Type.IsPtr() && l.Type.Elem().NotInHeap {
-		return false
-	}
-
-	// Implicit zeroing is still zeroing, so it needs write
-	// barriers. In practice, these are all to stack variables
-	// (even if isstack isn't smart enough to figure that out), so
-	// they'll be eliminated by the backend.
-	if r == nil {
-		return true
-	}
-
-	// Ignore no-op conversions when making decision.
-	// Ensures that xp = unsafe.Pointer(&x) is treated
-	// the same as xp = &x.
-	for r.Op == OCONVNOP {
-		r = r.Left
-	}
-
-	// TODO: We can eliminate write barriers if we know *both* the
-	// current and new content of the slot must already be shaded.
-	// We know a pointer is shaded if it's nil, or points to
-	// static data, a global (variable or function), or the stack.
-	// The nil optimization could be particularly useful for
-	// writes to just-allocated objects. Unfortunately, knowing
-	// the "current" value of the slot requires flow analysis.
-
-	// No write barrier for storing address of stack values,
-	// which are guaranteed only to be written to the stack.
-	if r.Op == OADDR && isstack(r.Left) {
-		return false
-	}
-
-	// Otherwise, be conservative and use write barrier.
-	return true
-}
-
-// TODO(rsc): Perhaps componentgen should run before this.
-
-func applywritebarrier(n *Node) *Node {
-	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
-		if Debug_wb > 1 {
-			Warnl(n.Lineno, "marking %v for barrier", n.Left)
-		}
-		n.Op = OASWB
-		return n
-	}
-	return n
-}
-
-func convas(n *Node, init *Nodes) *Node {
-	if n.Op != OAS {
-		Fatalf("convas: not OAS %v", n.Op)
-	}
-
-	n.Typecheck = 1
-
-	var lt *Type
-	var rt *Type
-	if n.Left == nil || n.Right == nil {
-		goto out
-	}
-
-	lt = n.Left.Type
-	rt = n.Right.Type
-	if lt == nil || rt == nil {
-		goto out
-	}
-
-	if isblank(n.Left) {
-		n.Right = defaultlit(n.Right, nil)
-		goto out
-	}
-
-	if !eqtype(lt, rt) {
-		n.Right = assignconv(n.Right, lt, "assignment")
-		n.Right = walkexpr(n.Right, init)
-	}
-
-out:
-	ullmancalc(n)
-	return n
-}
-
-// from ascompat[te]
-// evaluating actual function arguments.
-//	f(a,b)
-// if there is exactly one function expr,
-// then it is done first. otherwise must
-// make temp variables
-func reorder1(all []*Node) []*Node {
-	c := 0 // function calls
-	t := 0 // total parameters
-
-	for _, n := range all {
-		t++
-		ullmancalc(n)
-		if n.Ullman >= UINF {
-			c++
-		}
-	}
-
-	if c == 0 || t == 1 {
-		return all
-	}
-
-	var g []*Node // fncalls assigned to tempnames
-	var f *Node   // last fncall assigned to stack
-	var r []*Node // non fncalls and tempnames assigned to stack
-	d := 0
-	var a *Node
-	for _, n := range all {
-		if n.Ullman < UINF {
-			r = append(r, n)
-			continue
-		}
-
-		d++
-		if d == c {
-			f = n
-			continue
-		}
-
-		// make assignment of fncall to tempname
-		a = temp(n.Right.Type)
-
-		a = nod(OAS, a, n.Right)
-		g = append(g, a)
-
-		// put normal arg assignment on list
-		// with fncall replaced by tempname
-		n.Right = a.Left
-
-		r = append(r, n)
-	}
-
-	if f != nil {
-		g = append(g, f)
-	}
-	return append(g, r...)
-}
-
-// from ascompat[ee]
-//	a,b = c,d
-// simultaneous assignment. there cannot
-// be later use of an earlier lvalue.
-//
-// function calls have been removed.
-func reorder3(all []*Node) []*Node {
-	var l *Node
-
-	// If a needed expression may be affected by an
-	// earlier assignment, make an early copy of that
-	// expression and use the copy instead.
-	var early []*Node
-
-	var mapinit Nodes
-	for i, n := range all {
-		l = n.Left
-
-		// Save subexpressions needed on left side.
-		// Drill through non-dereferences.
-		for {
-			if l.Op == ODOT || l.Op == OPAREN {
-				l = l.Left
-				continue
-			}
-
-			if l.Op == OINDEX && l.Left.Type.IsArray() {
-				l.Right = reorder3save(l.Right, all, i, &early)
-				l = l.Left
-				continue
-			}
-
-			break
-		}
-
-		switch l.Op {
-		default:
-			Fatalf("reorder3 unexpected lvalue %#v", l.Op)
-
-		case ONAME:
-			break
-
-		case OINDEX, OINDEXMAP:
-			l.Left = reorder3save(l.Left, all, i, &early)
-			l.Right = reorder3save(l.Right, all, i, &early)
-			if l.Op == OINDEXMAP {
-				all[i] = convas(all[i], &mapinit)
-			}
-
-		case OIND, ODOTPTR:
-			l.Left = reorder3save(l.Left, all, i, &early)
-		}
-
-		// Save expression on right side.
-		all[i].Right = reorder3save(all[i].Right, all, i, &early)
-	}
-
-	early = append(mapinit.Slice(), early...)
-	return append(early, all...)
-}
-
-// if the evaluation of *np would be affected by the
-// assignments in all up to but not including the ith assignment,
-// copy into a temporary during *early and
-// replace *np with that temp.
-// The result of reorder3save MUST be assigned back to n, e.g.
-// 	n.Left = reorder3save(n.Left, all, i, early)
-func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
-	if !aliased(n, all, i) {
-		return n
-	}
-
-	q := temp(n.Type)
-	q = nod(OAS, q, n)
-	q = typecheck(q, Etop)
-	*early = append(*early, q)
-	return q.Left
-}
-
-// what's the outer value that a write to n affects?
-// outer value means containing struct or array.
-func outervalue(n *Node) *Node {
-	for {
-		if n.Op == OXDOT {
-			Fatalf("OXDOT in walk")
-		}
-		if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
-			n = n.Left
-			continue
-		}
-
-		if n.Op == OINDEX && n.Left.Type != nil && n.Left.Type.IsArray() {
-			n = n.Left
-			continue
-		}
-
-		break
-	}
-
-	return n
-}
-
-// Is it possible that the computation of n might be
-// affected by writes in as up to but not including the ith element?
-func aliased(n *Node, all []*Node, i int) bool {
-	if n == nil {
-		return false
-	}
-
-	// Treat all fields of a struct as referring to the whole struct.
-	// We could do better but we would have to keep track of the fields.
-	for n.Op == ODOT {
-		n = n.Left
-	}
-
-	// Look for obvious aliasing: a variable being assigned
-	// during the all list and appearing in n.
-	// Also record whether there are any writes to main memory.
-	// Also record whether there are any writes to variables
-	// whose addresses have been taken.
-	memwrite := 0
-
-	varwrite := 0
-	var a *Node
-	for _, an := range all[:i] {
-		a = outervalue(an.Left)
-
-		for a.Op == ODOT {
-			a = a.Left
-		}
-
-		if a.Op != ONAME {
-			memwrite = 1
-			continue
-		}
-
-		switch n.Class {
-		default:
-			varwrite = 1
-			continue
-
-		case PAUTO, PPARAM, PPARAMOUT:
-			if n.Addrtaken {
-				varwrite = 1
-				continue
-			}
-
-			if vmatch2(a, n) {
-				// Direct hit.
-				return true
-			}
-		}
-	}
-
-	// The variables being written do not appear in n.
-	// However, n might refer to computed addresses
-	// that are being written.
-
-	// If no computed addresses are affected by the writes, no aliasing.
-	if memwrite == 0 && varwrite == 0 {
-		return false
-	}
-
-	// If n does not refer to computed addresses
-	// (that is, if n only refers to variables whose addresses
-	// have not been taken), no aliasing.
-	if varexpr(n) {
-		return false
-	}
-
-	// Otherwise, both the writes and n refer to computed memory addresses.
-	// Assume that they might conflict.
-	return true
-}
-
-// does the evaluation of n only refer to variables
-// whose addresses have not been taken?
-// (and no other memory)
-func varexpr(n *Node) bool {
-	if n == nil {
-		return true
-	}
-
-	switch n.Op {
-	case OLITERAL:
-		return true
-
-	case ONAME:
-		switch n.Class {
-		case PAUTO, PPARAM, PPARAMOUT:
-			if !n.Addrtaken {
-				return true
-			}
-		}
-
-		return false
-
-	case OADD,
-		OSUB,
-		OOR,
-		OXOR,
-		OMUL,
-		ODIV,
-		OMOD,
-		OLSH,
-		ORSH,
-		OAND,
-		OANDNOT,
-		OPLUS,
-		OMINUS,
-		OCOM,
-		OPAREN,
-		OANDAND,
-		OOROR,
-		OCONV,
-		OCONVNOP,
-		OCONVIFACE,
-		ODOTTYPE:
-		return varexpr(n.Left) && varexpr(n.Right)
-
-	case ODOT: // but not ODOTPTR
-		// Should have been handled in aliased.
-		Fatalf("varexpr unexpected ODOT")
-	}
-
-	// Be conservative.
-	return false
-}
-
-// is the name l mentioned in r?
-func vmatch2(l *Node, r *Node) bool {
-	if r == nil {
-		return false
-	}
-	switch r.Op {
-	// match each right given left
-	case ONAME:
-		return l == r
-
-	case OLITERAL:
-		return false
-	}
-
-	if vmatch2(l, r.Left) {
-		return true
-	}
-	if vmatch2(l, r.Right) {
-		return true
-	}
-	for _, n := range r.List.Slice() {
-		if vmatch2(l, n) {
-			return true
-		}
-	}
-	return false
-}
-
-// is any name mentioned in l also mentioned in r?
-// called by sinit.go
-func vmatch1(l *Node, r *Node) bool {
-	// isolate all left sides
-	if l == nil || r == nil {
-		return false
-	}
-	switch l.Op {
-	case ONAME:
-		switch l.Class {
-		case PPARAM, PAUTO:
-			break
-
-		// assignment to non-stack variable
-		// must be delayed if right has function calls.
-		default:
-			if r.Ullman >= UINF {
-				return true
-			}
-		}
-
-		return vmatch2(l, r)
-
-	case OLITERAL:
-		return false
-	}
-
-	if vmatch1(l.Left, r) {
-		return true
-	}
-	if vmatch1(l.Right, r) {
-		return true
-	}
-	for _, n := range l.List.Slice() {
-		if vmatch1(n, r) {
-			return true
-		}
-	}
-	return false
-}
-
-// paramstoheap returns code to allocate memory for heap-escaped parameters
-// and to copy non-result parameters' values from the stack.
-// When given the result parameters, it also produces code to zero their
-// stack memory on entry, since the garbage collector assumes results are live.
-func paramstoheap(params *Type) []*Node {
-	var nn []*Node
-	for _, t := range params.Fields().Slice() {
-		// For precise stacks, the garbage collector assumes results
-		// are always live, so zero them always.
-		if params.StructType().Funarg == FunargResults {
-			// Defer might stop a panic and show the
-			// return values as they exist at the time of panic.
-			// Make sure to zero them on entry to the function.
-			nn = append(nn, nod(OAS, nodarg(t, 1), nil))
-		}
-
-		v := t.Nname
-		if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
-			v = nil
-		}
-		if v == nil {
-			continue
-		}
-
-		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
-			nn = append(nn, walkstmt(nod(ODCL, v, nil)))
-			if stackcopy.Class == PPARAM {
-				nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), Etop)))
-			}
-		}
-	}
-
-	return nn
-}
-
-// returnsfromheap returns code to copy values for heap-escaped parameters
-// back to the stack.
-func returnsfromheap(params *Type) []*Node {
-	var nn []*Node
-	for _, t := range params.Fields().Slice() {
-		v := t.Nname
-		if v == nil {
-			continue
-		}
-		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class == PPARAMOUT {
-			nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), Etop)))
-		}
-	}
-
-	return nn
-}
-
-// heapmoves generates code to handle migrating heap-escaped parameters
-// between the stack and the heap. The generated code is added to Curfn's
-// Enter and Exit lists.
-func heapmoves() {
-	lno := lineno
-	lineno = Curfn.Lineno
-	nn := paramstoheap(Curfn.Type.Recvs())
-	nn = append(nn, paramstoheap(Curfn.Type.Params())...)
-	nn = append(nn, paramstoheap(Curfn.Type.Results())...)
-	Curfn.Func.Enter.Append(nn...)
-	lineno = Curfn.Func.Endlineno
-	Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
-	lineno = lno
-}
-
-func vmkcall(fn *Node, t *Type, init *Nodes, va []*Node) *Node {
-	if fn.Type == nil || fn.Type.Etype != TFUNC {
-		Fatalf("mkcall %v %v", fn, fn.Type)
-	}
-
-	n := fn.Type.Params().NumFields()
-
-	r := nod(OCALL, fn, nil)
-	r.List.Set(va[:n])
-	if fn.Type.Results().NumFields() > 0 {
-		r = typecheck(r, Erv|Efnstruct)
-	} else {
-		r = typecheck(r, Etop)
-	}
-	r = walkexpr(r, init)
-	r.Type = t
-	return r
-}
-
-func mkcall(name string, t *Type, init *Nodes, args ...*Node) *Node {
-	return vmkcall(syslook(name), t, init, args)
-}
-
-func mkcall1(fn *Node, t *Type, init *Nodes, args ...*Node) *Node {
-	return vmkcall(fn, t, init, args)
-}
-
-func conv(n *Node, t *Type) *Node {
-	if eqtype(n.Type, t) {
-		return n
-	}
-	n = nod(OCONV, n, nil)
-	n.Type = t
-	n = typecheck(n, Erv)
-	return n
-}
-
-func chanfn(name string, n int, t *Type) *Node {
-	if !t.IsChan() {
-		Fatalf("chanfn %v", t)
-	}
-	fn := syslook(name)
-	switch n {
-	default:
-		Fatalf("chanfn %d", n)
-	case 1:
-		fn = substArgTypes(fn, t.Elem())
-	case 2:
-		fn = substArgTypes(fn, t.Elem(), t.Elem())
-	}
-	return fn
-}
-
-func mapfn(name string, t *Type) *Node {
-	if !t.IsMap() {
-		Fatalf("mapfn %v", t)
-	}
-	fn := syslook(name)
-	fn = substArgTypes(fn, t.Key(), t.Val(), t.Key(), t.Val())
-	return fn
-}
-
-func mapfndel(name string, t *Type) *Node {
-	if !t.IsMap() {
-		Fatalf("mapfn %v", t)
-	}
-	fn := syslook(name)
-	fn = substArgTypes(fn, t.Key(), t.Val(), t.Key())
-	return fn
-}
-
-func writebarrierfn(name string, l *Type, r *Type) *Node {
-	fn := syslook(name)
-	fn = substArgTypes(fn, l, r)
-	return fn
-}
-
-func addstr(n *Node, init *Nodes) *Node {
-	// orderexpr rewrote OADDSTR to have a list of strings.
-	c := n.List.Len()
-
-	if c < 2 {
-		yyerror("addstr count %d too small", c)
-	}
-
-	buf := nodnil()
-	if n.Esc == EscNone {
-		sz := int64(0)
-		for _, n1 := range n.List.Slice() {
-			if n1.Op == OLITERAL {
-				sz += int64(len(n1.Val().U.(string)))
-			}
-		}
-
-		// Don't allocate the buffer if the result won't fit.
-		if sz < tmpstringbufsize {
-			// Create temporary buffer for result string on stack.
-			t := typArray(Types[TUINT8], tmpstringbufsize)
-
-			buf = nod(OADDR, temp(t), nil)
-		}
-	}
-
-	// build list of string arguments
-	args := []*Node{buf}
-	for _, n2 := range n.List.Slice() {
-		args = append(args, conv(n2, Types[TSTRING]))
-	}
-
-	var fn string
-	if c <= 5 {
-		// small numbers of strings use direct runtime helpers.
-		// note: orderexpr knows this cutoff too.
-		fn = fmt.Sprintf("concatstring%d", c)
-	} else {
-		// large numbers of strings are passed to the runtime as a slice.
-		fn = "concatstrings"
-
-		t := typSlice(Types[TSTRING])
-		slice := nod(OCOMPLIT, nil, typenod(t))
-		if prealloc[n] != nil {
-			prealloc[slice] = prealloc[n]
-		}
-		slice.List.Set(args[1:]) // skip buf arg
-		args = []*Node{buf, slice}
-		slice.Esc = EscNone
-	}
-
-	cat := syslook(fn)
-	r := nod(OCALL, cat, nil)
-	r.List.Set(args)
-	r = typecheck(r, Erv)
-	r = walkexpr(r, init)
-	r.Type = n.Type
-
-	return r
-}
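
addstr lowers an n-way concatenation to a single concatstringN or concatstrings call so the result is built with one buffer. A rough user-level analogue of that strategy (the runtime version writes into the string storage directly and may use a stack buffer for small, non-escaping results, so it avoids the final copy shown here):

package main

import "fmt"

// concatStrings mimics the concatstrings approach: size the buffer once,
// then copy each operand into it.
func concatStrings(parts []string) string {
	n := 0
	for _, s := range parts {
		n += len(s)
	}
	buf := make([]byte, 0, n)
	for _, s := range parts {
		buf = append(buf, s...)
	}
	return string(buf) // extra copy; the runtime writes the string in place
}

func main() {
	fmt.Println(concatStrings([]string{"con", "cat", "str", "ings"})) // concatstrings
}
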
-
-// expand append(l1, l2...) to
-//   init {
-//     s := l1
-//     n := len(s) + len(l2)
-//     // Compare as uint so growslice can panic on overflow.
-//     if uint(n) > uint(cap(s)) {
-//       s = growslice(s, n)
-//     }
-//     s = s[:n]
-//     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-//   }
-//   s
-//
-// l2 is allowed to be a string.
-func appendslice(n *Node, init *Nodes) *Node {
-	walkexprlistsafe(n.List.Slice(), init)
-
-	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
-	// and n are name or literal, but those may index the slice we're
-	// modifying here. Fix explicitly.
-	ls := n.List.Slice()
-	for i1, n1 := range ls {
-		ls[i1] = cheapexpr(n1, init)
-	}
-
-	l1 := n.List.First()
-	l2 := n.List.Second()
-
-	var l []*Node
-
-	// var s []T
-	s := temp(l1.Type)
-	l = append(l, nod(OAS, s, l1)) // s = l1
-
-	// n := len(s) + len(l2)
-	nn := temp(Types[TINT])
-	l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
-
-	// if uint(n) > uint(cap(s))
-	nif := nod(OIF, nil, nil)
-	nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil))
-	nif.Left.Left.Type = Types[TUINT]
-	nif.Left.Right.Type = Types[TUINT]
-
-	// instantiate growslice(Type*, []any, int) []any
-	fn := syslook("growslice")
-	fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())
-
-	// s = growslice(T, s, n)
-	nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn)))
-	l = append(l, nif)
-
-	// s = s[:n]
-	nt := nod(OSLICE, s, nil)
-	nt.SetSliceBounds(nil, nn, nil)
-	nt.Etype = 1
-	l = append(l, nod(OAS, s, nt))
-
-	if haspointers(l1.Type.Elem()) {
-		// copy(s[len(l1):], l2)
-		nptr1 := nod(OSLICE, s, nil)
-		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
-		nptr1.Etype = 1
-		nptr2 := l2
-		fn := syslook("typedslicecopy")
-		fn = substArgTypes(fn, l1.Type, l2.Type)
-		var ln Nodes
-		ln.Set(l)
-		nt := mkcall1(fn, Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2)
-		l = append(ln.Slice(), nt)
-	} else if instrumenting && !compiling_runtime {
-		// rely on runtime to instrument copy.
-		// copy(s[len(l1):], l2)
-		nptr1 := nod(OSLICE, s, nil)
-		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
-		nptr1.Etype = 1
-		nptr2 := l2
-		var fn *Node
-		if l2.Type.IsString() {
-			fn = syslook("slicestringcopy")
-		} else {
-			fn = syslook("slicecopy")
-		}
-		fn = substArgTypes(fn, l1.Type, l2.Type)
-		var ln Nodes
-		ln.Set(l)
-		nt := mkcall1(fn, Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width))
-		l = append(ln.Slice(), nt)
-	} else {
-		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-		nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
-		nptr1.Bounded = true
-
-		nptr1 = nod(OADDR, nptr1, nil)
-
-		nptr2 := nod(OSPTR, l2, nil)
-
-		fn := syslook("memmove")
-		fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())
-
-		var ln Nodes
-		ln.Set(l)
-		nwid := cheapexpr(conv(nod(OLEN, l2, nil), Types[TUINTPTR]), &ln)
-
-		nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width))
-		nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid)
-		l = append(ln.Slice(), nt)
-	}
-
-	typecheckslice(l, Etop)
-	walkstmtlist(l)
-	init.Append(l...)
-	return s
-}
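
appendslice calls growslice only when the combined length exceeds the current capacity; otherwise the destination's backing array is reused in place. That conditional growth is observable from ordinary code (relying only on append's documented reallocation behavior):

package main

import "fmt"

func main() {
	s := make([]int, 2, 4)
	t := append(s, 9)       // fits within cap(s): backing array reused, no growslice
	u := append(t, 8, 7, 6) // exceeds cap: growslice allocates a new array
	fmt.Println(&t[0] == &s[0]) // true
	fmt.Println(&u[0] == &s[0]) // false
}
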
-
-// Rewrite append(src, x, y, z) so that any side effects in
-// x, y, z (including runtime panics) are evaluated in
-// initialization statements before the append.
-// For normal code generation, stop there and leave the
-// rest to cgen_append.
-//
-// For race detector, expand append(src, a [, b]* ) to
-//
-//   init {
-//     s := src
-//     const argc = len(args) - 1
-//     if cap(s) - len(s) < argc {
-//	    s = growslice(s, len(s)+argc)
-//     }
-//     n := len(s)
-//     s = s[:n+argc]
-//     s[n] = a
-//     s[n+1] = b
-//     ...
-//   }
-//   s
-func walkappend(n *Node, init *Nodes, dst *Node) *Node {
-	if !samesafeexpr(dst, n.List.First()) {
-		n.List.SetIndex(0, safeexpr(n.List.Index(0), init))
-		n.List.SetIndex(0, walkexpr(n.List.Index(0), init))
-	}
-	walkexprlistsafe(n.List.Slice()[1:], init)
-
-	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
-	// and n are name or literal, but those may index the slice we're
-	// modifying here. Fix explicitly.
-	// Using cheapexpr also makes sure that the evaluation
-	// of all arguments (and especially any panics) happen
-	// before we begin to modify the slice in a visible way.
-	ls := n.List.Slice()[1:]
-	for i, n := range ls {
-		ls[i] = cheapexpr(n, init)
-	}
-
-	nsrc := n.List.First()
-
-	argc := n.List.Len() - 1
-	if argc < 1 {
-		return nsrc
-	}
-
-	// General case, with no function calls left as arguments.
-	// Leave for gen, except that instrumentation requires old form.
-	if !instrumenting || compiling_runtime {
-		return n
-	}
-
-	var l []*Node
-
-	ns := temp(nsrc.Type)
-	l = append(l, nod(OAS, ns, nsrc)) // s = src
-
-	na := nodintconst(int64(argc)) // const argc
-	nx := nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
-	nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
-
-	fn := syslook("growslice") //   growslice(<type>, old []T, mincap int) (ret []T)
-	fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
-
-	nx.Nbody.Set1(nod(OAS, ns,
-		mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
-			nod(OADD, nod(OLEN, ns, nil), na))))
-
-	l = append(l, nx)
-
-	nn := temp(Types[TINT])
-	l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
-
-	nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
-	nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
-	nx.Etype = 1
-	l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
-
-	ls = n.List.Slice()[1:]
-	for i, n := range ls {
-		nx = nod(OINDEX, ns, nn) // s[n] ...
-		nx.Bounded = true
-		l = append(l, nod(OAS, nx, n)) // s[n] = arg
-		if i+1 < len(ls) {
-			l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
-		}
-	}
-
-	typecheckslice(l, Etop)
-	walkstmtlist(l)
-	init.Append(l...)
-	return ns
-}
-
-// Lower copy(a, b) to a memmove call or a runtime call.
-//
-// init {
-//   n := len(a)
-//   if n > len(b) { n = len(b) }
-//   memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
-// }
-// n;
-//
-// Also works if b is a string.
-//
-func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
-	if haspointers(n.Left.Type.Elem()) {
-		fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
-		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right)
-	}
-
-	if runtimecall {
-		var fn *Node
-		if n.Right.Type.IsString() {
-			fn = syslook("slicestringcopy")
-		} else {
-			fn = syslook("slicecopy")
-		}
-		fn = substArgTypes(fn, n.Left.Type, n.Right.Type)
-		return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width))
-	}
-
-	n.Left = walkexpr(n.Left, init)
-	n.Right = walkexpr(n.Right, init)
-	nl := temp(n.Left.Type)
-	nr := temp(n.Right.Type)
-	var l []*Node
-	l = append(l, nod(OAS, nl, n.Left))
-	l = append(l, nod(OAS, nr, n.Right))
-
-	nfrm := nod(OSPTR, nr, nil)
-	nto := nod(OSPTR, nl, nil)
-
-	nlen := temp(Types[TINT])
-
-	// n = len(to)
-	l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
-
-	// if n > len(frm) { n = len(frm) }
-	nif := nod(OIF, nil, nil)
-
-	nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
-	nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
-	l = append(l, nif)
-
-	// Call memmove.
-	fn := syslook("memmove")
-
-	fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
-	nwid := temp(Types[TUINTPTR])
-	l = append(l, nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
-	nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
-	l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
-
-	typecheckslice(l, Etop)
-	walkstmtlist(l)
-	init.Append(l...)
-	return nlen
-}
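
copyany clamps the element count to the shorter of the two slices before issuing a single memmove, which matches copy's user-visible contract:

package main

import "fmt"

func main() {
	dst := make([]byte, 3)
	src := []byte("hello")
	n := copy(dst, src) // n = min(len(dst), len(src)); one memmove of n bytes
	fmt.Println(n, string(dst)) // 3 hel
}
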
-
-func eqfor(t *Type, needsize *int) *Node {
-	// Should only arrive here with large memory or
-	// a struct/array containing a non-memory field/element.
-	// Small memory is handled inline, and single non-memory
-	// is handled during type check (OCMPSTR etc).
-	switch a, _ := algtype1(t); a {
-	case AMEM:
-		n := syslook("memequal")
-		n = substArgTypes(n, t, t)
-		*needsize = 1
-		return n
-	case ASPECIAL:
-		sym := typesymprefix(".eq", t)
-		n := newname(sym)
-		n.Class = PFUNC
-		ntype := nod(OTFUNC, nil, nil)
-		ntype.List.Append(nod(ODCLFIELD, nil, typenod(ptrto(t))))
-		ntype.List.Append(nod(ODCLFIELD, nil, typenod(ptrto(t))))
-		ntype.Rlist.Append(nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
-		ntype = typecheck(ntype, Etype)
-		n.Type = ntype.Type
-		*needsize = 0
-		return n
-	}
-	Fatalf("eqfor %v", t)
-	return nil
-}
-
-// The result of walkcompare MUST be assigned back to n, e.g.
-// 	n.Left = walkcompare(n.Left, init)
-func walkcompare(n *Node, init *Nodes) *Node {
-	// Given interface value l and concrete value r, rewrite
-	//   l == r
-	// into types-equal && data-equal.
-	// This is efficient, avoids allocations, and avoids runtime calls.
-	var l, r *Node
-	if n.Left.Type.IsInterface() && !n.Right.Type.IsInterface() {
-		l = n.Left
-		r = n.Right
-	} else if !n.Left.Type.IsInterface() && n.Right.Type.IsInterface() {
-		l = n.Right
-		r = n.Left
-	}
-
-	if l != nil {
-		// Handle both == and !=.
-		eq := n.Op
-		var andor Op
-		if eq == OEQ {
-			andor = OANDAND
-		} else {
-			andor = OOROR
-		}
-		// Check for types equal.
-		// For empty interface, this is:
-		//   l.tab == type(r)
-		// For non-empty interface, this is:
-		//   l.tab != nil && l.tab._type == type(r)
-		var eqtype *Node
-		tab := nod(OITAB, l, nil)
-		rtyp := typename(r.Type)
-		if l.Type.IsEmptyInterface() {
-			tab.Type = ptrto(Types[TUINT8])
-			tab.Typecheck = 1
-			eqtype = nod(eq, tab, rtyp)
-		} else {
-			nonnil := nod(brcom(eq), nodnil(), tab)
-			match := nod(eq, itabType(tab), rtyp)
-			eqtype = nod(andor, nonnil, match)
-		}
-		// Check for data equal.
-		eqdata := nod(eq, ifaceData(l, r.Type), r)
-		// Put it all together.
-		expr := nod(andor, eqtype, eqdata)
-		n = finishcompare(n, expr, init)
-		return n
-	}
-
-	// Must be comparison of array or struct.
-	// Otherwise back end handles it.
-	// While we're here, decide whether to
-	// inline or call an eq alg.
-	t := n.Left.Type
-	var inline bool
-	switch t.Etype {
-	default:
-		return n
-	case TARRAY:
-		inline = t.NumElem() <= 1 || (t.NumElem() <= 4 && issimple[t.Elem().Etype])
-	case TSTRUCT:
-		inline = t.NumFields() <= 4
-	}
-
-	cmpl := n.Left
-	for cmpl != nil && cmpl.Op == OCONVNOP {
-		cmpl = cmpl.Left
-	}
-	cmpr := n.Right
-	for cmpr != nil && cmpr.Op == OCONVNOP {
-		cmpr = cmpr.Left
-	}
-
-	// Chose not to inline. Call equality function directly.
-	if !inline {
-		if !islvalue(cmpl) || !islvalue(cmpr) {
-			Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
-		}
-
-		// eq algs take pointers
-		pl := temp(ptrto(t))
-		al := nod(OAS, pl, nod(OADDR, cmpl, nil))
-		al.Right.Etype = 1 // addr does not escape
-		al = typecheck(al, Etop)
-		init.Append(al)
-
-		pr := temp(ptrto(t))
-		ar := nod(OAS, pr, nod(OADDR, cmpr, nil))
-		ar.Right.Etype = 1 // addr does not escape
-		ar = typecheck(ar, Etop)
-		init.Append(ar)
-
-		var needsize int
-		call := nod(OCALL, eqfor(t, &needsize), nil)
-		call.List.Append(pl)
-		call.List.Append(pr)
-		if needsize != 0 {
-			call.List.Append(nodintconst(t.Width))
-		}
-		res := call
-		if n.Op != OEQ {
-			res = nod(ONOT, res, nil)
-		}
-		n = finishcompare(n, res, init)
-		return n
-	}
-
-	// inline: build boolean expression comparing element by element
-	andor := OANDAND
-	if n.Op == ONE {
-		andor = OOROR
-	}
-	var expr *Node
-	compare := func(el, er *Node) {
-		a := nod(n.Op, el, er)
-		if expr == nil {
-			expr = a
-		} else {
-			expr = nod(andor, expr, a)
-		}
-	}
-	cmpl = safeexpr(cmpl, init)
-	cmpr = safeexpr(cmpr, init)
-	if t.IsStruct() {
-		for _, f := range t.Fields().Slice() {
-			sym := f.Sym
-			if isblanksym(sym) {
-				continue
-			}
-			compare(
-				nodSym(OXDOT, cmpl, sym),
-				nodSym(OXDOT, cmpr, sym),
-			)
-		}
-	} else {
-		for i := 0; int64(i) < t.NumElem(); i++ {
-			compare(
-				nod(OINDEX, cmpl, nodintconst(int64(i))),
-				nod(OINDEX, cmpr, nodintconst(int64(i))),
-			)
-		}
-	}
-	if expr == nil {
-		expr = nodbool(n.Op == OEQ)
-	}
-	n = finishcompare(n, expr, init)
-	return n
-}
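
For small structs and arrays, walkcompare inlines equality as a field-by-field (or element-by-element) chain joined with && (or || for !=). The inlined form is equivalent to the hand-written expansion:

package main

import "fmt"

type point struct{ X, Y int }

func main() {
	a := point{1, 2}
	b := point{1, 2}
	// a == b expands roughly to a.X == b.X && a.Y == b.Y.
	fmt.Println(a == b, a.X == b.X && a.Y == b.Y) // true true
}
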
-
-// The result of finishcompare MUST be assigned back to n, e.g.
-// 	n.Left = finishcompare(n.Left, r, init)
-func finishcompare(n, r *Node, init *Nodes) *Node {
-	// Use nn here to avoid passing r to typecheck.
-	nn := r
-	nn = typecheck(nn, Erv)
-	nn = walkexpr(nn, init)
-	r = nn
-	if r.Type != n.Type {
-		r = nod(OCONVNOP, r, nil)
-		r.Type = n.Type
-		r.Typecheck = 1
-		nn = r
-	}
-	return nn
-}
-
-func samecheap(a *Node, b *Node) bool {
-	var ar *Node
-	var br *Node
-	for a != nil && b != nil && a.Op == b.Op {
-		switch a.Op {
-		default:
-			return false
-
-		case ONAME:
-			return a == b
-
-		case ODOT, ODOTPTR:
-			if a.Sym != b.Sym {
-				return false
-			}
-
-		case OINDEX:
-			ar = a.Right
-			br = b.Right
-			if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || ar.Val().U.(*Mpint).Cmp(br.Val().U.(*Mpint)) != 0 {
-				return false
-			}
-		}
-
-		a = a.Left
-		b = b.Left
-	}
-
-	return false
-}
-
-// The result of walkrotate MUST be assigned back to n, e.g.
-// 	n.Left = walkrotate(n.Left)
-func walkrotate(n *Node) *Node {
-	if Thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.PPC64) {
-		return n
-	}
-
-	// Want << | >> or >> | << or << ^ >> or >> ^ << on unsigned value.
-	l := n.Left
-
-	r := n.Right
-	if (n.Op != OOR && n.Op != OXOR) || (l.Op != OLSH && l.Op != ORSH) || (r.Op != OLSH && r.Op != ORSH) || n.Type == nil || n.Type.IsSigned() || l.Op == r.Op {
-		return n
-	}
-
-	// Want same, side effect-free expression on lhs of both shifts.
-	if !samecheap(l.Left, r.Left) {
-		return n
-	}
-
-	// Constants adding to width?
-	w := int(l.Type.Width * 8)
-
-	if Thearch.LinkArch.Family == sys.S390X && w != 32 && w != 64 {
-		// only supports 32-bit and 64-bit rotates
-		return n
-	}
-
-	if smallintconst(l.Right) && smallintconst(r.Right) {
-		sl := int(l.Right.Int64())
-		if sl >= 0 {
-			sr := int(r.Right.Int64())
-			if sr >= 0 && sl+sr == w {
-				// Rewrite left shift half to left rotate.
-				if l.Op == OLSH {
-					n = l
-				} else {
-					n = r
-				}
-				n.Op = OLROT
-
-				// Remove rotate 0 and rotate w.
-				s := int(n.Right.Int64())
-
-				if s == 0 || s == w {
-					n = n.Left
-				}
-				return n
-			}
-		}
-		return n
-	}
-
-	// TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
-	return n
-}
-
-// isIntOrdering reports whether n is a <, ≤, >, or ≥ ordering between integers.
-func (n *Node) isIntOrdering() bool {
-	switch n.Op {
-	case OLE, OLT, OGE, OGT:
-	default:
-		return false
-	}
-	return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
-}
-
-// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
-// n must be an OANDAND or OOROR node.
-// The result of walkinrange MUST be assigned back to n, e.g.
-// 	n.Left = walkinrange(n.Left)
-func walkinrange(n *Node, init *Nodes) *Node {
-	// We are looking for something equivalent to a opl b OP b opr c, where:
-	// * a, b, and c have integer type
-	// * b is side-effect-free
-	// * opl and opr are each < or ≤
-	// * OP is &&
-	l := n.Left
-	r := n.Right
-	if !l.isIntOrdering() || !r.isIntOrdering() {
-		return n
-	}
-
-	// Find b, if it exists, and rename appropriately.
-	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
-	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
-	a, opl, b := l.Left, l.Op, l.Right
-	x, opr, c := r.Left, r.Op, r.Right
-	for i := 0; ; i++ {
-		if samesafeexpr(b, x) {
-			break
-		}
-		if i == 3 {
-			// Tried all permutations and couldn't find an appropriate b == x.
-			return n
-		}
-		if i&1 == 0 {
-			a, opl, b = b, brrev(opl), a
-		} else {
-			x, opr, c = c, brrev(opr), x
-		}
-	}
-
-	// If n.Op is ||, apply de Morgan.
-	// Negate the internal ops now; we'll negate the top level op at the end.
-	// Henceforth assume &&.
-	negateResult := n.Op == OOROR
-	if negateResult {
-		opl = brcom(opl)
-		opr = brcom(opr)
-	}
-
-	cmpdir := func(o Op) int {
-		switch o {
-		case OLE, OLT:
-			return -1
-		case OGE, OGT:
-			return +1
-		}
-		Fatalf("walkinrange cmpdir %v", o)
-		return 0
-	}
-	if cmpdir(opl) != cmpdir(opr) {
-		// Not a range check; something like b < a && b < c.
-		return n
-	}
-
-	switch opl {
-	case OGE, OGT:
-		// We have something like a > b && b ≥ c.
-		// Switch and reverse ops and rename constants,
-		// to make it look like a ≤ b && b < c.
-		a, c = c, a
-		opl, opr = brrev(opr), brrev(opl)
-	}
-
-	// We must ensure that c-a is non-negative.
-	// For now, require a and c to be constants.
-	// In the future, we could also support a == 0 and c == len/cap(...).
-	// Unfortunately, by this point, most len/cap expressions have been
-	// stored into temporary variables.
-	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
-		return n
-	}
-
-	if opl == OLT {
-		// We have a < b && ...
-		// We need a ≤ b && ... to safely use unsigned comparison tricks.
-		// If a is not the maximum constant for b's type,
-		// we can increment a and switch to ≤.
-		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
-			return n
-		}
-		a = nodintconst(a.Int64() + 1)
-		opl = OLE
-	}
-
-	bound := c.Int64() - a.Int64()
-	if bound < 0 {
-		// Bad news. Something like 5 <= x && x < 3.
-		// Rare in practice, and we still need to generate side-effects,
-		// so just leave it alone.
-		return n
-	}
-
-	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
-	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
-	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
-	// which is equivalent to uint(b-a) < uint(c-a).
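-	// For example, for an int x, the check 4 <= x && x < 10
-	// becomes the single comparison uint(x-4) < 6.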
-	ut := b.Type.toUnsigned()
-	lhs := conv(nod(OSUB, b, a), ut)
-	rhs := nodintconst(bound)
-	if negateResult {
-		// Negate top level.
-		opr = brcom(opr)
-	}
-	cmp := nod(opr, lhs, rhs)
-	cmp.Lineno = n.Lineno
-	cmp = addinit(cmp, l.Ninit.Slice())
-	cmp = addinit(cmp, r.Ninit.Slice())
-	// Typecheck the AST rooted at cmp...
-	cmp = typecheck(cmp, Erv)
-	// ...but then reset cmp's type to match n's type.
-	cmp.Type = n.Type
-	cmp = walkexpr(cmp, init)
-	return cmp
-}
-
-// walkmul rewrites integer multiplication by powers of two as shifts.
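-// For example, x*8 becomes x<<3 and x*(-8) becomes -(x<<3).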
-// The result of walkmul MUST be assigned back to n, e.g.
-// 	n.Left = walkmul(n.Left, init)
-func walkmul(n *Node, init *Nodes) *Node {
-	if !n.Type.IsInteger() {
-		return n
-	}
-
-	var nr *Node
-	var nl *Node
-	if n.Right.Op == OLITERAL {
-		nl = n.Left
-		nr = n.Right
-	} else if n.Left.Op == OLITERAL {
-		nl = n.Right
-		nr = n.Left
-	} else {
-		return n
-	}
-
-	neg := 0
-
-	// x*0 is 0 (and side effects of x).
-	var pow int
-	var w int
-	if nr.Int64() == 0 {
-		cheapexpr(nl, init)
-		Nodconst(n, n.Type, 0)
-		goto ret
-	}
-
-	// nr is a constant.
-	pow = powtwo(nr)
-
-	if pow < 0 {
-		return n
-	}
-	if pow >= 1000 {
-		// negative power of 2, like -16
-		neg = 1
-
-		pow -= 1000
-	}
-
-	w = int(nl.Type.Width * 8)
-	if pow+1 >= w { // too big, shouldn't happen
-		return n
-	}
-
-	nl = cheapexpr(nl, init)
-
-	if pow == 0 {
-		// x*1 is x
-		n = nl
-
-		goto ret
-	}
-
-	n = nod(OLSH, nl, nodintconst(int64(pow)))
-
-ret:
-	if neg != 0 {
-		n = nod(OMINUS, n, nil)
-	}
-
-	n = typecheck(n, Erv)
-	n = walkexpr(n, init)
-	return n
-}
-
-// walkdiv rewrites division by a constant as less expensive
-// operations.
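-// For example, for an unsigned x, x/8 becomes x>>3 and x%8 becomes x&7;
-// for narrower integer types, division by other constants becomes a
-// multiplication by a precomputed magic constant (Hacker's Delight, chapter 10).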
-// The result of walkdiv MUST be assigned back to n, e.g.
-// 	n.Left = walkdiv(n.Left, init)
-func walkdiv(n *Node, init *Nodes) *Node {
-
-	if n.Right.Op != OLITERAL {
-		return n
-	}
-
-	// nr is a constant.
-	nl := cheapexpr(n.Left, init)
-
-	nr := n.Right
-
-	// special cases of mod/div
-	// by a constant
-	w := int(nl.Type.Width * 8)
-
-	s := 0            // 1 if nr is negative.
-	pow := powtwo(nr) // if >= 0, nr is 1<<pow
-	if pow >= 1000 {
-		// negative power of 2
-		s = 1
-
-		pow -= 1000
-	}
-
-	if pow+1 >= w {
-		// divisor too large.
-		return n
-	}
-
-	if pow < 0 {
-		// try to do division by multiplying by (2^w)/d
-		// see hacker's delight chapter 10
-		// TODO: support 64-bit magic multiply here.
-		var m Magic
-		m.W = w
-
-		if nl.Type.IsSigned() {
-			m.Sd = nr.Int64()
-			smagic(&m)
-		} else {
-			m.Ud = uint64(nr.Int64())
-			umagic(&m)
-		}
-
-		if m.Bad != 0 {
-			return n
-		}
-
-		// We have a quick division method so use it
-		// for modulo too.
-		if n.Op == OMOD {
-			// rewrite as A%B = A - (A/B*B).
-			n1 := nod(ODIV, nl, nr)
-
-			n2 := nod(OMUL, n1, nr)
-			n = nod(OSUB, nl, n2)
-			goto ret
-		}
-
-		switch simtype[nl.Type.Etype] {
-		default:
-			return n
-
-			// n1 = nl * magic >> w (HMUL)
-		case TUINT8, TUINT16, TUINT32:
-			var nc Node
-
-			Nodconst(&nc, nl.Type, int64(m.Um))
-			n1 := nod(OHMUL, nl, &nc)
-			n1 = typecheck(n1, Erv)
-			if m.Ua != 0 {
-				// Select a Go type with (at least) twice the width.
-				var twide *Type
-				switch simtype[nl.Type.Etype] {
-				default:
-					return n
-
-				case TUINT8, TUINT16:
-					twide = Types[TUINT32]
-
-				case TUINT32:
-					twide = Types[TUINT64]
-
-				case TINT8, TINT16:
-					twide = Types[TINT32]
-
-				case TINT32:
-					twide = Types[TINT64]
-				}
-
-				// add numerator (might overflow).
-				// n2 = (n1 + nl)
-				n2 := nod(OADD, conv(n1, twide), conv(nl, twide))
-
-				// shift by m.s
-				var nc Node
-
-				Nodconst(&nc, Types[TUINT], int64(m.S))
-				n = conv(nod(ORSH, n2, &nc), nl.Type)
-			} else {
-				// n = n1 >> m.s
-				var nc Node
-
-				Nodconst(&nc, Types[TUINT], int64(m.S))
-				n = nod(ORSH, n1, &nc)
-			}
-
-			// n1 = nl * magic >> w
-		case TINT8, TINT16, TINT32:
-			var nc Node
-
-			Nodconst(&nc, nl.Type, m.Sm)
-			n1 := nod(OHMUL, nl, &nc)
-			n1 = typecheck(n1, Erv)
-			if m.Sm < 0 {
-				// add the numerator.
-				n1 = nod(OADD, n1, nl)
-			}
-
-			// shift by m.s
-			var ns Node
-
-			Nodconst(&ns, Types[TUINT], int64(m.S))
-			n2 := conv(nod(ORSH, n1, &ns), nl.Type)
-
-			// add 1 iff nl is negative.
-			var nneg Node
-
-			Nodconst(&nneg, Types[TUINT], int64(w)-1)
-			n3 := nod(ORSH, nl, &nneg) // n3 = -1 iff nl is negative.
-			n = nod(OSUB, n2, n3)
-
-			// apply sign.
-			if m.Sd < 0 {
-				n = nod(OMINUS, n, nil)
-			}
-		}
-
-		goto ret
-	}
-
-	switch pow {
-	case 0:
-		if n.Op == OMOD {
-			// nl % 1 is zero.
-			Nodconst(n, n.Type, 0)
-		} else if s != 0 {
-			// divide by -1
-			n.Op = OMINUS
-
-			n.Right = nil
-		} else {
-			// divide by 1
-			n = nl
-		}
-
-	default:
-		if n.Type.IsSigned() {
-			if n.Op == OMOD {
-				// signed modulo 2^pow is like ANDing
-				// with the last pow bits, but if nl < 0,
-				// nl & (2^pow-1) is (nl+1)%2^pow - 1.
-				var nc Node
-
-				Nodconst(&nc, Types[simtype[TUINT]], int64(w)-1)
-				n1 := nod(ORSH, nl, &nc) // n1 = -1 iff nl < 0.
-				if pow == 1 {
-					n1 = typecheck(n1, Erv)
-					n1 = cheapexpr(n1, init)
-
-					// n = (nl+ε)&1 -ε where ε=1 iff nl<0.
-					n2 := nod(OSUB, nl, n1)
-
-					var nc Node
-					Nodconst(&nc, nl.Type, 1)
-					n3 := nod(OAND, n2, &nc)
-					n = nod(OADD, n3, n1)
-				} else {
-					// n = (nl+ε)&(nr-1) - ε where ε=2^pow-1 iff nl<0.
-					var nc Node
-
-					Nodconst(&nc, nl.Type, (1<<uint(pow))-1)
-					n2 := nod(OAND, n1, &nc) // n2 = 2^pow-1 iff nl<0.
-					n2 = typecheck(n2, Erv)
-					n2 = cheapexpr(n2, init)
-
-					n3 := nod(OADD, nl, n2)
-					n4 := nod(OAND, n3, &nc)
-					n = nod(OSUB, n4, n2)
-				}
-
-				break
-			} else {
-				// arithmetic right shift does not give the correct rounding.
-				// if nl >= 0, nl >> pow == nl / nr
-				// if nl < 0, we want to add 2^pow-1 first.
-				var nc Node
-
-				Nodconst(&nc, Types[simtype[TUINT]], int64(w)-1)
-				n1 := nod(ORSH, nl, &nc) // n1 = -1 iff nl < 0.
-				if pow == 1 {
-					// nl+1 is nl-(-1)
-					n.Left = nod(OSUB, nl, n1)
-				} else {
-					// Do a logical right shift on -1 to keep pow bits.
-					var nc Node
-
-					Nodconst(&nc, Types[simtype[TUINT]], int64(w)-int64(pow))
-					n2 := nod(ORSH, conv(n1, nl.Type.toUnsigned()), &nc)
-					n.Left = nod(OADD, nl, conv(n2, nl.Type))
-				}
-
-				// n = (nl + 2^pow-1) >> pow
-				n.Op = ORSH
-
-				var n2 Node
-				Nodconst(&n2, Types[simtype[TUINT]], int64(pow))
-				n.Right = &n2
-				n.Typecheck = 0
-			}
-
-			if s != 0 {
-				n = nod(OMINUS, n, nil)
-			}
-			break
-		}
-
-		var nc Node
-		if n.Op == OMOD {
-			// n = nl & (nr-1)
-			n.Op = OAND
-
-			Nodconst(&nc, nl.Type, nr.Int64()-1)
-		} else {
-			// n = nl >> pow
-			n.Op = ORSH
-
-			Nodconst(&nc, Types[simtype[TUINT]], int64(pow))
-		}
-
-		n.Typecheck = 0
-		n.Right = &nc
-	}
-
-	goto ret
-
-ret:
-	n = typecheck(n, Erv)
-	n = walkexpr(n, init)
-	return n
-}
-
-// bounded reports whether integer n must be in range [0, max).
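-// For example, x&7 is bounded by 8, as is x%8 for an unsigned x.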
-func bounded(n *Node, max int64) bool {
-	if n.Type == nil || !n.Type.IsInteger() {
-		return false
-	}
-
-	sign := n.Type.IsSigned()
-	bits := int32(8 * n.Type.Width)
-
-	if smallintconst(n) {
-		v := n.Int64()
-		return 0 <= v && v < max
-	}
-
-	switch n.Op {
-	case OAND:
-		v := int64(-1)
-		if smallintconst(n.Left) {
-			v = n.Left.Int64()
-		} else if smallintconst(n.Right) {
-			v = n.Right.Int64()
-		}
-
-		if 0 <= v && v < max {
-			return true
-		}
-
-	case OMOD:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64()
-			if 0 <= v && v <= max {
-				return true
-			}
-		}
-
-	case ODIV:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64()
-			for bits > 0 && v >= 2 {
-				bits--
-				v >>= 1
-			}
-		}
-
-	case ORSH:
-		if !sign && smallintconst(n.Right) {
-			v := n.Right.Int64()
-			if v > int64(bits) {
-				return true
-			}
-			bits -= int32(v)
-		}
-	}
-
-	if !sign && bits <= 62 && 1<<uint(bits) <= max {
-		return true
-	}
-
-	return false
-}
-
-// usemethod checks interface method calls for uses of reflect.Type.Method or MethodByName.
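-// For example, a call t.Method(0) or t.MethodByName("Foo") on a reflect.Type
-// is detected here (by signature shape) and marks the enclosing function.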
-func usemethod(n *Node) {
-	t := n.Left.Type
-
-	// Looking for either of:
-	//	Method(int) reflect.Method
-	//	MethodByName(string) (reflect.Method, bool)
-	//
-	// TODO(crawshaw): improve precision of match by working out
-	//                 how to check the method name.
-	if n := t.Params().NumFields(); n != 1 {
-		return
-	}
-	if n := t.Results().NumFields(); n != 1 && n != 2 {
-		return
-	}
-	p0 := t.Params().Field(0)
-	res0 := t.Results().Field(0)
-	var res1 *Field
-	if t.Results().NumFields() == 2 {
-		res1 = t.Results().Field(1)
-	}
-
-	if res1 == nil {
-		if p0.Type.Etype != TINT {
-			return
-		}
-	} else {
-		if !p0.Type.IsString() {
-			return
-		}
-		if !res1.Type.IsBoolean() {
-			return
-		}
-	}
-	if res0.Type.String() != "reflect.Method" {
-		return
-	}
-
-	Curfn.Func.ReflectMethod = true
-}
-
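-// usefield records the use of a struct field whose tag contains go:"track",
-// for example
-//	type T struct {
-//		F int `go:"track"`
-//	}
-// by adding the field's symbol to the current function's FieldTrack set.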
-func usefield(n *Node) {
-	if obj.Fieldtrack_enabled == 0 {
-		return
-	}
-
-	switch n.Op {
-	default:
-		Fatalf("usefield %v", n.Op)
-
-	case ODOT, ODOTPTR:
-		break
-	}
-	if n.Sym == nil {
-		// No field name.  This DOTPTR was built by the compiler for access
-		// to runtime data structures.  Ignore.
-		return
-	}
-
-	t := n.Left.Type
-	if t.IsPtr() {
-		t = t.Elem()
-	}
-	field := dotField[typeSym{t.Orig, n.Sym}]
-	if field == nil {
-		Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
-	}
-	if !strings.Contains(field.Note, "go:\"track\"") {
-		return
-	}
-
-	outer := n.Left.Type
-	if outer.IsPtr() {
-		outer = outer.Elem()
-	}
-	if outer.Sym == nil {
-		yyerror("tracked field must be in named struct type")
-	}
-	if !exportname(field.Sym.Name) {
-		yyerror("tracked field must be exported (upper case)")
-	}
-
-	sym := tracksym(outer, field)
-	if Curfn.Func.FieldTrack == nil {
-		Curfn.Func.FieldTrack = make(map[*Sym]struct{})
-	}
-	Curfn.Func.FieldTrack[sym] = struct{}{}
-}
-
-func candiscardlist(l Nodes) bool {
-	for _, n := range l.Slice() {
-		if !candiscard(n) {
-			return false
-		}
-	}
-	return true
-}
-
-func candiscard(n *Node) bool {
-	if n == nil {
-		return true
-	}
-
-	switch n.Op {
-	default:
-		return false
-
-		// Discardable as long as the subpieces are.
-	case ONAME,
-		ONONAME,
-		OTYPE,
-		OPACK,
-		OLITERAL,
-		OADD,
-		OSUB,
-		OOR,
-		OXOR,
-		OADDSTR,
-		OADDR,
-		OANDAND,
-		OARRAYBYTESTR,
-		OARRAYRUNESTR,
-		OSTRARRAYBYTE,
-		OSTRARRAYRUNE,
-		OCAP,
-		OCMPIFACE,
-		OCMPSTR,
-		OCOMPLIT,
-		OMAPLIT,
-		OSTRUCTLIT,
-		OARRAYLIT,
-		OSLICELIT,
-		OPTRLIT,
-		OCONV,
-		OCONVIFACE,
-		OCONVNOP,
-		ODOT,
-		OEQ,
-		ONE,
-		OLT,
-		OLE,
-		OGT,
-		OGE,
-		OKEY,
-		OSTRUCTKEY,
-		OLEN,
-		OMUL,
-		OLSH,
-		ORSH,
-		OAND,
-		OANDNOT,
-		ONEW,
-		ONOT,
-		OCOM,
-		OPLUS,
-		OMINUS,
-		OOROR,
-		OPAREN,
-		ORUNESTR,
-		OREAL,
-		OIMAG,
-		OCOMPLEX:
-		break
-
-		// Discardable as long as we know it's not division by zero.
-	case ODIV, OMOD:
-		if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
-			break
-		}
-		if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
-			break
-		}
-		return false
-
-		// Discardable as long as we know it won't fail because of a bad size.
-	case OMAKECHAN, OMAKEMAP:
-		if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
-			break
-		}
-		return false
-
-		// Difficult to tell what sizes are okay.
-	case OMAKESLICE:
-		return false
-	}
-
-	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
-		return false
-	}
-
-	return true
-}
-
-// rewrite
-//	print(x, y, z)
-// into
-//	func(a1, a2, a3) {
-//		print(a1, a2, a3)
-//	}(x, y, z)
-// and same for println.
-
-var walkprintfunc_prgen int
-
-// The result of walkprintfunc MUST be assigned back to n, e.g.
-// 	n.Left = walkprintfunc(n.Left, init)
-func walkprintfunc(n *Node, init *Nodes) *Node {
-	if n.Ninit.Len() != 0 {
-		walkstmtlist(n.Ninit.Slice())
-		init.AppendNodes(&n.Ninit)
-	}
-
-	t := nod(OTFUNC, nil, nil)
-	num := 0
-	var printargs []*Node
-	var a *Node
-	var buf string
-	for _, n1 := range n.List.Slice() {
-		buf = fmt.Sprintf("a%d", num)
-		num++
-		a = nod(ODCLFIELD, newname(lookup(buf)), typenod(n1.Type))
-		t.List.Append(a)
-		printargs = append(printargs, a.Left)
-	}
-
-	fn := nod(ODCLFUNC, nil, nil)
-	walkprintfunc_prgen++
-	buf = fmt.Sprintf("print·%d", walkprintfunc_prgen)
-	fn.Func.Nname = newname(lookup(buf))
-	fn.Func.Nname.Name.Defn = fn
-	fn.Func.Nname.Name.Param.Ntype = t
-	declare(fn.Func.Nname, PFUNC)
-
-	oldfn := Curfn
-	Curfn = nil
-	funchdr(fn)
-
-	a = nod(n.Op, nil, nil)
-	a.List.Set(printargs)
-	a = typecheck(a, Etop)
-	a = walkstmt(a)
-
-	fn.Nbody.Set1(a)
-
-	funcbody(fn)
-
-	fn = typecheck(fn, Etop)
-	typecheckslice(fn.Nbody.Slice(), Etop)
-	xtop = append(xtop, fn)
-	Curfn = oldfn
-
-	a = nod(OCALL, nil, nil)
-	a.Left = fn.Func.Nname
-	a.List.Set(n.List.Slice())
-	a = typecheck(a, Etop)
-	a = walkexpr(a, init)
-	return a
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/galign.go
deleted file mode 100644
index de44558..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/galign.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/galign.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &mips.Linkmips
-	if obj.GOARCH == "mipsle" {
-		gc.Thearch.LinkArch = &mips.Linkmipsle
-	}
-	gc.Thearch.REGSP = mips.REGSP
-	gc.Thearch.MAXWIDTH = (1 << 31) - 1
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/ggen.go
deleted file mode 100644
index 70a7b4f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/ggen.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/ggen.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-
-	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi)
-}
-
-// TODO(mips): implement DUFFZERO
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
-
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = gc.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
-		}
-	} else {
-		//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
-		//	ADD 	$(FIXED_FRAME+frame+lo-4), SP, r1
-		//	ADD 	$cnt, r1, r2
-		// loop:
-		//	MOVW	R0, (Widthptr)r1
-		//	ADD 	$Widthptr, r1
-		//	BNE		r1, r2, loop
-		p = gc.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-4, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = gc.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
-		p.Reg = mips.REGRT1
-		p = gc.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
-		p1 := p
-		p = gc.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
-		p = gc.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
-		p.Reg = mips.REGRT2
-		gc.Patch(p, p1)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(mips.ANOR)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = mips.REG_R0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = mips.REG_R0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/prog.go
deleted file mode 100644
index e4c0a98..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/prog.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/prog.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-const (
-	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
-	RightRdwr uint32 = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about the instructions
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-//
-// The table is formatted for 8-space tabs.
-var progtable = [mips.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the MIPS opcode.
-	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
-
-	// Integer
-	mips.AADD & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AADDU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUB & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AAND & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AOR & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AXOR & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ANOR & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AMUL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.AMULU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.ADIV & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.ADIVU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.ASLL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASRA & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASRL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASGT & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASGTU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-
-	mips.ACLZ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	mips.ACLO & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-
-	// Floating point.
-	mips.AADDF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AADDD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AMULF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AMULD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ADIVF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ADIVD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AABSF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-	mips.AABSD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	mips.ANEGF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-	mips.ANEGD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	mips.ACMPEQF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-	mips.ACMPEQD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	mips.ACMPGTF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-	mips.ACMPGTD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	mips.ACMPGEF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-	mips.ACMPGED & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	mips.AMOVFD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVDF & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVFW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVWF & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVDW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVWD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.ATRUNCFW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.ATRUNCDW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	mips.ASQRTF & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-	mips.ASQRTD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-
-	// Moves
-	mips.AMOVB & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVBU & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVH & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVHU & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-
-	// Conditional moves
-	mips.ACMOVN & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
-	mips.ACMOVZ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
-	mips.ACMOVT & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | RightRdwr},
-	mips.ACMOVF & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | RightRdwr},
-
-	// Conditional trap
-	mips.ATEQ & obj.AMask: {Flags: gc.SizeL | gc.RegRead | gc.RightRead},
-	mips.ATNE & obj.AMask: {Flags: gc.SizeL | gc.RegRead | gc.RightRead},
-
-	// Atomic
-	mips.ASYNC & obj.AMask: {Flags: gc.OK},
-	mips.ALL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
-	mips.ASC & obj.AMask:   {Flags: gc.SizeL | LeftRdwr | gc.RightRead},
-
-	// Jumps
-	mips.AJMP & obj.AMask:  {Flags: gc.Jump | gc.Break},
-	mips.AJAL & obj.AMask:  {Flags: gc.Call},
-	mips.ABEQ & obj.AMask:  {Flags: gc.Cjmp},
-	mips.ABNE & obj.AMask:  {Flags: gc.Cjmp},
-	mips.ABGEZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABLTZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABGTZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABLEZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABFPF & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABFPT & obj.AMask: {Flags: gc.Cjmp},
-	mips.ARET & obj.AMask:  {Flags: gc.Break},
-	obj.ADUFFZERO:          {Flags: gc.Call},
-	obj.ADUFFCOPY:          {Flags: gc.Call},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-
-	if info.Flags == 0 {
-		gc.Fatalf("proginfo: unknown instruction %v", p)
-	}
-
-	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
-		info.Flags &^= gc.RegRead
-		info.Flags |= gc.RightRead
-	}
-
-	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
-		info.Flags &^= gc.LeftRead
-		info.Flags |= gc.LeftAddr
-	}
-
-	if p.As == mips.AMUL && p.To.Reg != 0 {
-		info.Flags |= gc.RightWrite
-	}
-
-	return info
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/ssa.go
deleted file mode 100644
index e097fb1..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips/ssa.go
+++ /dev/null
@@ -1,910 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips
-
-import (
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-// isFPreg returns whether r is an FP register
-func isFPreg(r int16) bool {
-	return mips.REG_F0 <= r && r <= mips.REG_F31
-}
-
-// isHILO returns whether r is HI or LO register
-func isHILO(r int16) bool {
-	return r == mips.REG_HI || r == mips.REG_LO
-}
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type, r int16) obj.As {
-	if isFPreg(r) {
-		if t.Size() == 4 { // float32 or int32
-			return mips.AMOVF
-		} else { // float64 or int64
-			return mips.AMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return mips.AMOVB
-			} else {
-				return mips.AMOVBU
-			}
-		case 2:
-			if t.IsSigned() {
-				return mips.AMOVH
-			} else {
-				return mips.AMOVHU
-			}
-		case 4:
-			return mips.AMOVW
-		}
-	}
-	panic("bad load type")
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type, r int16) obj.As {
-	if isFPreg(r) {
-		if t.Size() == 4 { // float32 or int32
-			return mips.AMOVF
-		} else { // float64 or int64
-			return mips.AMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			return mips.AMOVB
-		case 2:
-			return mips.AMOVH
-		case 4:
-			return mips.AMOVW
-		}
-	}
-	panic("bad store type")
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
-		// nothing to do
-	case ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.OpCopy, ssa.OpMIPSMOVWconvert, ssa.OpMIPSMOVWreg:
-		t := v.Type
-		if t.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x == y {
-			return
-		}
-		as := mips.AMOVW
-		if isFPreg(x) && isFPreg(y) {
-			as = mips.AMOVF
-			if t.Size() == 8 {
-				as = mips.AMOVD
-			}
-		}
-
-		p := gc.Prog(as)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = y
-		if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
-			// cannot move between special registers, use TMP as intermediate
-			p.To.Reg = mips.REGTMP
-			p = gc.Prog(mips.AMOVW)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = mips.REGTMP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = y
-		}
-	case ssa.OpMIPSMOVWnop:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		// nothing to do
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		r := v.Reg()
-		p := gc.Prog(loadByType(v.Type, r))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		if isHILO(r) {
-			// cannot directly load, load to TMP and move
-			p.To.Reg = mips.REGTMP
-			p = gc.Prog(mips.AMOVW)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = mips.REGTMP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		}
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		r := v.Args[0].Reg()
-		if isHILO(r) {
-			// cannot directly store, move to TMP and store
-			p := gc.Prog(mips.AMOVW)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = r
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = mips.REGTMP
-			r = mips.REGTMP
-		}
-		p := gc.Prog(storeByType(v.Type, r))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpMIPSADD,
-		ssa.OpMIPSSUB,
-		ssa.OpMIPSAND,
-		ssa.OpMIPSOR,
-		ssa.OpMIPSXOR,
-		ssa.OpMIPSNOR,
-		ssa.OpMIPSSLL,
-		ssa.OpMIPSSRL,
-		ssa.OpMIPSSRA,
-		ssa.OpMIPSADDF,
-		ssa.OpMIPSADDD,
-		ssa.OpMIPSSUBF,
-		ssa.OpMIPSSUBD,
-		ssa.OpMIPSMULF,
-		ssa.OpMIPSMULD,
-		ssa.OpMIPSDIVF,
-		ssa.OpMIPSDIVD,
-		ssa.OpMIPSMUL:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSSGT,
-		ssa.OpMIPSSGTU:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSSGTzero,
-		ssa.OpMIPSSGTUzero:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSADDconst,
-		ssa.OpMIPSSUBconst,
-		ssa.OpMIPSANDconst,
-		ssa.OpMIPSORconst,
-		ssa.OpMIPSXORconst,
-		ssa.OpMIPSNORconst,
-		ssa.OpMIPSSLLconst,
-		ssa.OpMIPSSRLconst,
-		ssa.OpMIPSSRAconst,
-		ssa.OpMIPSSGTconst,
-		ssa.OpMIPSSGTUconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSMULT,
-		ssa.OpMIPSMULTU,
-		ssa.OpMIPSDIV,
-		ssa.OpMIPSDIVU:
-		// result in hi,lo
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[0].Reg()
-	case ssa.OpMIPSMOVWconst:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		if isFPreg(r) || isHILO(r) {
-			// cannot move into FP or special registers, use TMP as intermediate
-			p.To.Reg = mips.REGTMP
-			p = gc.Prog(mips.AMOVW)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = mips.REGTMP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		}
-	case ssa.OpMIPSMOVFconst,
-		ssa.OpMIPSMOVDconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSCMOVZ:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSCMOVZzero:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSCMPEQF,
-		ssa.OpMIPSCMPEQD,
-		ssa.OpMIPSCMPGEF,
-		ssa.OpMIPSCMPGED,
-		ssa.OpMIPSCMPGTF,
-		ssa.OpMIPSCMPGTD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-	case ssa.OpMIPSMOVWaddr:
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_ADDR
-		var wantreg string
-		// MOVW $sym+off(base), R
-		// the assembler expands it as the following:
-		// - base is SP: add constant offset to SP (R29)
-		//               when constant is large, tmp register (R23) may be used
-		// - base is SB: load external address with relocation
-		switch v.Aux.(type) {
-		default:
-			v.Fatalf("aux is of unknown type %T", v.Aux)
-		case *ssa.ExternSymbol:
-			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *ssa.ArgSymbol, *ssa.AutoSymbol:
-			wantreg = "SP"
-			gc.AddAux(&p.From, v)
-		case nil:
-			// No sym, just MOVW $off(SP), R
-			wantreg = "SP"
-			p.From.Reg = mips.REGSP
-			p.From.Offset = v.AuxInt
-		}
-		if reg := v.Args[0].RegName(); reg != wantreg {
-			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
-		}
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSMOVBload,
-		ssa.OpMIPSMOVBUload,
-		ssa.OpMIPSMOVHload,
-		ssa.OpMIPSMOVHUload,
-		ssa.OpMIPSMOVWload,
-		ssa.OpMIPSMOVFload,
-		ssa.OpMIPSMOVDload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSMOVBstore,
-		ssa.OpMIPSMOVHstore,
-		ssa.OpMIPSMOVWstore,
-		ssa.OpMIPSMOVFstore,
-		ssa.OpMIPSMOVDstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpMIPSMOVBstorezero,
-		ssa.OpMIPSMOVHstorezero,
-		ssa.OpMIPSMOVWstorezero:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpMIPSMOVBreg,
-		ssa.OpMIPSMOVBUreg,
-		ssa.OpMIPSMOVHreg,
-		ssa.OpMIPSMOVHUreg:
-		a := v.Args[0]
-		for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPSMOVWreg || a.Op == ssa.OpMIPSMOVWnop {
-			a = a.Args[0]
-		}
-		if a.Op == ssa.OpLoadReg {
-			t := a.Type
-			switch {
-			case v.Op == ssa.OpMIPSMOVBreg && t.Size() == 1 && t.IsSigned(),
-				v.Op == ssa.OpMIPSMOVBUreg && t.Size() == 1 && !t.IsSigned(),
-				v.Op == ssa.OpMIPSMOVHreg && t.Size() == 2 && t.IsSigned(),
-				v.Op == ssa.OpMIPSMOVHUreg && t.Size() == 2 && !t.IsSigned():
-				// arg is a proper-typed load, already zero/sign-extended, don't extend again
-				if v.Reg() == v.Args[0].Reg() {
-					return
-				}
-				p := gc.Prog(mips.AMOVW)
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = v.Args[0].Reg()
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = v.Reg()
-				return
-			default:
-			}
-		}
-		fallthrough
-	case ssa.OpMIPSMOVWF,
-		ssa.OpMIPSMOVWD,
-		ssa.OpMIPSTRUNCFW,
-		ssa.OpMIPSTRUNCDW,
-		ssa.OpMIPSMOVFD,
-		ssa.OpMIPSMOVDF,
-		ssa.OpMIPSNEGF,
-		ssa.OpMIPSNEGD,
-		ssa.OpMIPSSQRTD,
-		ssa.OpMIPSCLZ:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSNEG:
-		// SUB from REGZERO
-		p := gc.Prog(mips.ASUBU)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPSLoweredZero:
-		// SUBU	$4, R1
-		// MOVW	R0, 4(R1)
-		// ADDU	$4, R1
-		// BNE	Rarg1, R1, -2(PC)
-		// arg1 is the address of the last element to zero
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = mips.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = mips.AMOVH
-		default:
-			sz = 1
-			mov = mips.AMOVB
-		}
-		p := gc.Prog(mips.ASUBU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REG_R1
-		p2 := gc.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = mips.REGZERO
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = mips.REG_R1
-		p2.To.Offset = sz
-		p3 := gc.Prog(mips.AADDU)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = mips.REG_R1
-		p4 := gc.Prog(mips.ABNE)
-		p4.From.Type = obj.TYPE_REG
-		p4.From.Reg = v.Args[1].Reg()
-		p4.Reg = mips.REG_R1
-		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p2)
-	case ssa.OpMIPSLoweredMove:
-		// SUBU	$4, R1
-		// MOVW	4(R1), Rtmp
-		// MOVW	Rtmp, (R2)
-		// ADDU	$4, R1
-		// ADDU	$4, R2
-		// BNE	Rarg2, R1, -4(PC)
-		// arg2 is the address of the last element of src
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = mips.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = mips.AMOVH
-		default:
-			sz = 1
-			mov = mips.AMOVB
-		}
-		p := gc.Prog(mips.ASUBU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REG_R1
-		p2 := gc.Prog(mov)
-		p2.From.Type = obj.TYPE_MEM
-		p2.From.Reg = mips.REG_R1
-		p2.From.Offset = sz
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = mips.REGTMP
-		p3 := gc.Prog(mov)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = mips.REGTMP
-		p3.To.Type = obj.TYPE_MEM
-		p3.To.Reg = mips.REG_R2
-		p4 := gc.Prog(mips.AADDU)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = sz
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = mips.REG_R1
-		p5 := gc.Prog(mips.AADDU)
-		p5.From.Type = obj.TYPE_CONST
-		p5.From.Offset = sz
-		p5.To.Type = obj.TYPE_REG
-		p5.To.Reg = mips.REG_R2
-		p6 := gc.Prog(mips.ABNE)
-		p6.From.Type = obj.TYPE_REG
-		p6.From.Reg = v.Args[2].Reg()
-		p6.Reg = mips.REG_R1
-		p6.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p6, p2)
-	case ssa.OpMIPSCALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSCALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSCALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSCALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSCALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSLoweredAtomicLoad:
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-
-		gc.Prog(mips.ASYNC)
-	case ssa.OpMIPSLoweredAtomicStore:
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-
-		gc.Prog(mips.ASYNC)
-	case ssa.OpMIPSLoweredAtomicStorezero:
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-
-		gc.Prog(mips.ASYNC)
-	case ssa.OpMIPSLoweredAtomicExchange:
-		// SYNC
-		// MOVW Rarg1, Rtmp
-		// LL	(Rarg0), Rout
-		// SC	Rtmp, (Rarg0)
-		// BEQ	Rtmp, -3(PC)
-		// SYNC
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REGTMP
-
-		p1 := gc.Prog(mips.ALL)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Reg = v.Args[0].Reg()
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = v.Reg0()
-
-		p2 := gc.Prog(mips.ASC)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = mips.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := gc.Prog(mips.ABEQ)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = mips.REGTMP
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-
-		gc.Prog(mips.ASYNC)
-	case ssa.OpMIPSLoweredAtomicAdd:
-		// SYNC
-		// LL	(Rarg0), Rout
-		// ADDU Rarg1, Rout, Rtmp
-		// SC	Rtmp, (Rarg0)
-		// BEQ	Rtmp, -3(PC)
-		// SYNC
-		// ADDU Rarg1, Rout
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.ALL)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-
-		p1 := gc.Prog(mips.AADDU)
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = v.Args[1].Reg()
-		p1.Reg = v.Reg0()
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = mips.REGTMP
-
-		p2 := gc.Prog(mips.ASC)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = mips.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := gc.Prog(mips.ABEQ)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = mips.REGTMP
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-
-		gc.Prog(mips.ASYNC)
-
-		p4 := gc.Prog(mips.AADDU)
-		p4.From.Type = obj.TYPE_REG
-		p4.From.Reg = v.Args[1].Reg()
-		p4.Reg = v.Reg0()
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = v.Reg0()
-
-	case ssa.OpMIPSLoweredAtomicAddconst:
-		// SYNC
-		// LL	(Rarg0), Rout
-		// ADDU $auxInt, Rout, Rtmp
-		// SC	Rtmp, (Rarg0)
-		// BEQ	Rtmp, -3(PC)
-		// SYNC
-		// ADDU $auxInt, Rout
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.ALL)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-
-		p1 := gc.Prog(mips.AADDU)
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = v.AuxInt
-		p1.Reg = v.Reg0()
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = mips.REGTMP
-
-		p2 := gc.Prog(mips.ASC)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = mips.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := gc.Prog(mips.ABEQ)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = mips.REGTMP
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-
-		gc.Prog(mips.ASYNC)
-
-		p4 := gc.Prog(mips.AADDU)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = v.AuxInt
-		p4.Reg = v.Reg0()
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = v.Reg0()
-
-	case ssa.OpMIPSLoweredAtomicAnd,
-		ssa.OpMIPSLoweredAtomicOr:
-		// SYNC
-		// LL	(Rarg0), Rtmp
-		// AND/OR	Rarg1, Rtmp
-		// SC	Rtmp, (Rarg0)
-		// BEQ	Rtmp, -3(PC)
-		// SYNC
-		gc.Prog(mips.ASYNC)
-
-		p := gc.Prog(mips.ALL)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REGTMP
-
-		p1 := gc.Prog(v.Op.Asm())
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = v.Args[1].Reg()
-		p1.Reg = mips.REGTMP
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = mips.REGTMP
-
-		p2 := gc.Prog(mips.ASC)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = mips.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-
-		p3 := gc.Prog(mips.ABEQ)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = mips.REGTMP
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-
-		gc.Prog(mips.ASYNC)
-
-	case ssa.OpMIPSLoweredAtomicCas:
-		// MOVW $0, Rout
-		// SYNC
-		// LL	(Rarg0), Rtmp
-		// BNE	Rtmp, Rarg1, 4(PC)
-		// MOVW Rarg2, Rout
-		// SC	Rout, (Rarg0)
-		// BEQ	Rout, -4(PC)
-		// SYNC
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-
-		gc.Prog(mips.ASYNC)
-
-		p1 := gc.Prog(mips.ALL)
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Reg = v.Args[0].Reg()
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = mips.REGTMP
-
-		p2 := gc.Prog(mips.ABNE)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = v.Args[1].Reg()
-		p2.Reg = mips.REGTMP
-		p2.To.Type = obj.TYPE_BRANCH
-
-		p3 := gc.Prog(mips.AMOVW)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = v.Args[2].Reg()
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = v.Reg0()
-
-		p4 := gc.Prog(mips.ASC)
-		p4.From.Type = obj.TYPE_REG
-		p4.From.Reg = v.Reg0()
-		p4.To.Type = obj.TYPE_MEM
-		p4.To.Reg = v.Args[0].Reg()
-
-		p5 := gc.Prog(mips.ABEQ)
-		p5.From.Type = obj.TYPE_REG
-		p5.From.Reg = v.Reg0()
-		p5.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p5, p1)
-
-		gc.Prog(mips.ASYNC)
-
-		p6 := gc.Prog(obj.ANOP)
-		gc.Patch(p2, p6)
-
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpMIPSLoweredNilCheck:
-		// Issue a load which will fault if arg is nil.
-		p := gc.Prog(mips.AMOVB)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REGTMP
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.OpMIPSFPFlagTrue,
-		ssa.OpMIPSFPFlagFalse:
-		// MOVW		$1, r
-		// CMOVF	R0, r
-
-		cmov := mips.ACMOVF
-		if v.Op == ssa.OpMIPSFPFlagFalse {
-			cmov = mips.ACMOVT
-		}
-		p := gc.Prog(mips.AMOVW)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		p1 := gc.Prog(cmov)
-		p1.From.Type = obj.TYPE_REG
-		p1.From.Reg = mips.REGZERO
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = v.Reg()
-
-	case ssa.OpMIPSLoweredGetClosurePtr:
-		// Closure pointer is R22 (mips.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
-
-var blockJump = map[ssa.BlockKind]struct {
-	asm, invasm obj.As
-}{
-	ssa.BlockMIPSEQ:  {mips.ABEQ, mips.ABNE},
-	ssa.BlockMIPSNE:  {mips.ABNE, mips.ABEQ},
-	ssa.BlockMIPSLTZ: {mips.ABLTZ, mips.ABGEZ},
-	ssa.BlockMIPSGEZ: {mips.ABGEZ, mips.ABLTZ},
-	ssa.BlockMIPSLEZ: {mips.ABLEZ, mips.ABGTZ},
-	ssa.BlockMIPSGTZ: {mips.ABGTZ, mips.ABLEZ},
-	ssa.BlockMIPSFPT: {mips.ABFPT, mips.ABFPF},
-	ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockDefer:
-		// defer returns in R1:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(mips.ABNE)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.Reg = mips.REG_R1
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.ARET)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-	case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
-		ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
-		ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
-		ssa.BlockMIPSFPT, ssa.BlockMIPSFPF:
-		jmp := blockJump[b.Kind]
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-		if !b.Control.Type.IsFlags() {
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = b.Control.Reg()
-		}
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/galign.go
deleted file mode 100644
index 3b36519..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/galign.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/galign.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &mips.Linkmips64
-	if obj.GOARCH == "mips64le" {
-		gc.Thearch.LinkArch = &mips.Linkmips64le
-	}
-	gc.Thearch.REGSP = mips.REGSP
-	gc.Thearch.MAXWIDTH = 1 << 50
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/ggen.go
deleted file mode 100644
index f63159e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/ggen.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/ggen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-
-	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi)
-}
-
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = gc.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+frame+lo+i)
-		}
-	} else if cnt <= int64(128*gc.Widthptr) {
-		p = gc.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		gc.Naddr(&p.To, gc.Sysfunc("duffzero"))
-		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
-	} else {
-		//	ADDV	$(8+frame+lo-8), SP, r1
-		//	ADDV	$cnt, r1, r2
-		// loop:
-		//	MOVV	R0, (Widthptr)r1
-		//	ADDV	$Widthptr, r1
-		//	BNE		r1, r2, loop
-		p = gc.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
-		p.Reg = mips.REGSP
-		p = gc.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
-		p.Reg = mips.REGRT1
-		p = gc.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
-		p1 := p
-		p = gc.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
-		p = gc.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
-		p.Reg = mips.REGRT2
-		gc.Patch(p, p1)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(mips.ANOR)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = mips.REG_R0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = mips.REG_R0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/prog.go
deleted file mode 100644
index 75a2d15..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/prog.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/prog.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-const (
-	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
-	RightRdwr uint32 = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about instruction
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-//
-// The table is formatted for 8-space tabs.
-var progtable = [mips.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the MIPS opcode.
-	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
-
-	// Integer
-	mips.AADD & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AADDU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AADDV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AADDVU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUB & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBVU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AAND & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AOR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AXOR & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ANOR & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AMUL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.AMULU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.AMULV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	mips.AMULVU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	mips.ADIV & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.ADIVU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.ADIVV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	mips.ADIVVU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	mips.AREM & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.AREMU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead},
-	mips.AREMV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	mips.AREMVU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead},
-	mips.ASLL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASLLV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASRA & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASRAV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASRL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASRLV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASGT & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASGTU & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-
-	// Floating point.
-	mips.AADDF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AADDD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ASUBD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AMULF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AMULD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ADIVF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.ADIVD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	mips.AABSF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-	mips.AABSD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	mips.ANEGF & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-	mips.ANEGD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	mips.ACMPEQF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-	mips.ACMPEQD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	mips.ACMPGTF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-	mips.ACMPGTD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	mips.ACMPGEF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead},
-	mips.ACMPGED & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead},
-	mips.AMOVFD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVDF & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVFW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVWF & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVDW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVWD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVFV & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVVF & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVDV & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.AMOVVD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.ATRUNCFW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.ATRUNCDW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.ATRUNCFV & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	mips.ATRUNCDV & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// Moves
-	mips.AMOVB & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVBU & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVH & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVHU & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVV & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	mips.AMOVF & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	mips.AMOVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// Jumps
-	mips.AJMP & obj.AMask:  {Flags: gc.Jump | gc.Break},
-	mips.AJAL & obj.AMask:  {Flags: gc.Call},
-	mips.ABEQ & obj.AMask:  {Flags: gc.Cjmp},
-	mips.ABNE & obj.AMask:  {Flags: gc.Cjmp},
-	mips.ABGEZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABLTZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABGTZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABLEZ & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABFPF & obj.AMask: {Flags: gc.Cjmp},
-	mips.ABFPT & obj.AMask: {Flags: gc.Cjmp},
-	mips.ARET & obj.AMask:  {Flags: gc.Break},
-	obj.ADUFFZERO:          {Flags: gc.Call},
-	obj.ADUFFCOPY:          {Flags: gc.Call},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("proginfo: unknown instruction %v", p)
-	}
-
-	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
-		info.Flags &^= gc.RegRead
-		info.Flags |= gc.RightRead /*CanRegRead |*/
-	}
-
-	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
-		info.Flags &^= gc.LeftRead
-		info.Flags |= gc.LeftAddr
-	}
-
-	return info
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/ssa.go
deleted file mode 100644
index 1ff53ed..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/mips64/ssa.go
+++ /dev/null
@@ -1,675 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/mips64/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips64
-
-import (
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/mips"
-)
-
-// isFPreg returns whether r is an FP register
-func isFPreg(r int16) bool {
-	return mips.REG_F0 <= r && r <= mips.REG_F31
-}
-
-// isHILO returns whether r is HI or LO register
-func isHILO(r int16) bool {
-	return r == mips.REG_HI || r == mips.REG_LO
-}
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type, r int16) obj.As {
-	if isFPreg(r) {
-		if t.Size() == 4 { // float32 or int32
-			return mips.AMOVF
-		} else { // float64 or int64
-			return mips.AMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return mips.AMOVB
-			} else {
-				return mips.AMOVBU
-			}
-		case 2:
-			if t.IsSigned() {
-				return mips.AMOVH
-			} else {
-				return mips.AMOVHU
-			}
-		case 4:
-			if t.IsSigned() {
-				return mips.AMOVW
-			} else {
-				return mips.AMOVWU
-			}
-		case 8:
-			return mips.AMOVV
-		}
-	}
-	panic("bad load type")
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type, r int16) obj.As {
-	if isFPreg(r) {
-		if t.Size() == 4 { // float32 or int32
-			return mips.AMOVF
-		} else { // float64 or int64
-			return mips.AMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			return mips.AMOVB
-		case 2:
-			return mips.AMOVH
-		case 4:
-			return mips.AMOVW
-		case 8:
-			return mips.AMOVV
-		}
-	}
-	panic("bad store type")
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
-		// nothing to do
-	case ssa.OpCopy, ssa.OpMIPS64MOVVconvert, ssa.OpMIPS64MOVVreg:
-		if v.Type.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x == y {
-			return
-		}
-		as := mips.AMOVV
-		if isFPreg(x) && isFPreg(y) {
-			as = mips.AMOVD
-		}
-		p := gc.Prog(as)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = y
-		if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
-			// cannot move between special registers, use TMP as intermediate
-			p.To.Reg = mips.REGTMP
-			p = gc.Prog(mips.AMOVV)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = mips.REGTMP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = y
-		}
-	case ssa.OpMIPS64MOVVnop:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		// nothing to do
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		r := v.Reg()
-		p := gc.Prog(loadByType(v.Type, r))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		if isHILO(r) {
-			// cannot directly load, load to TMP and move
-			p.To.Reg = mips.REGTMP
-			p = gc.Prog(mips.AMOVV)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = mips.REGTMP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		}
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		r := v.Args[0].Reg()
-		if isHILO(r) {
-			// cannot directly store, move to TMP and store
-			p := gc.Prog(mips.AMOVV)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = r
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = mips.REGTMP
-			r = mips.REGTMP
-		}
-		p := gc.Prog(storeByType(v.Type, r))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpMIPS64ADDV,
-		ssa.OpMIPS64SUBV,
-		ssa.OpMIPS64AND,
-		ssa.OpMIPS64OR,
-		ssa.OpMIPS64XOR,
-		ssa.OpMIPS64NOR,
-		ssa.OpMIPS64SLLV,
-		ssa.OpMIPS64SRLV,
-		ssa.OpMIPS64SRAV,
-		ssa.OpMIPS64ADDF,
-		ssa.OpMIPS64ADDD,
-		ssa.OpMIPS64SUBF,
-		ssa.OpMIPS64SUBD,
-		ssa.OpMIPS64MULF,
-		ssa.OpMIPS64MULD,
-		ssa.OpMIPS64DIVF,
-		ssa.OpMIPS64DIVD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64SGT,
-		ssa.OpMIPS64SGTU:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64ADDVconst,
-		ssa.OpMIPS64SUBVconst,
-		ssa.OpMIPS64ANDconst,
-		ssa.OpMIPS64ORconst,
-		ssa.OpMIPS64XORconst,
-		ssa.OpMIPS64NORconst,
-		ssa.OpMIPS64SLLVconst,
-		ssa.OpMIPS64SRLVconst,
-		ssa.OpMIPS64SRAVconst,
-		ssa.OpMIPS64SGTconst,
-		ssa.OpMIPS64SGTUconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64MULV,
-		ssa.OpMIPS64MULVU,
-		ssa.OpMIPS64DIVV,
-		ssa.OpMIPS64DIVVU:
-		// result in hi,lo
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[0].Reg()
-	case ssa.OpMIPS64MOVVconst:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		if isFPreg(r) || isHILO(r) {
-			// cannot move into FP or special registers, use TMP as intermediate
-			p.To.Reg = mips.REGTMP
-			p = gc.Prog(mips.AMOVV)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = mips.REGTMP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		}
-	case ssa.OpMIPS64MOVFconst,
-		ssa.OpMIPS64MOVDconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64CMPEQF,
-		ssa.OpMIPS64CMPEQD,
-		ssa.OpMIPS64CMPGEF,
-		ssa.OpMIPS64CMPGED,
-		ssa.OpMIPS64CMPGTF,
-		ssa.OpMIPS64CMPGTD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = v.Args[1].Reg()
-	case ssa.OpMIPS64MOVVaddr:
-		p := gc.Prog(mips.AMOVV)
-		p.From.Type = obj.TYPE_ADDR
-		var wantreg string
-		// MOVV $sym+off(base), R
-		// the assembler expands it as the following:
-		// - base is SP: add constant offset to SP (R29)
-		//               when constant is large, tmp register (R23) may be used
-		// - base is SB: load external address with relocation
-		switch v.Aux.(type) {
-		default:
-			v.Fatalf("aux is of unknown type %T", v.Aux)
-		case *ssa.ExternSymbol:
-			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *ssa.ArgSymbol, *ssa.AutoSymbol:
-			wantreg = "SP"
-			gc.AddAux(&p.From, v)
-		case nil:
-			// No sym, just MOVV $off(SP), R
-			wantreg = "SP"
-			p.From.Reg = mips.REGSP
-			p.From.Offset = v.AuxInt
-		}
-		if reg := v.Args[0].RegName(); reg != wantreg {
-			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
-		}
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64MOVBload,
-		ssa.OpMIPS64MOVBUload,
-		ssa.OpMIPS64MOVHload,
-		ssa.OpMIPS64MOVHUload,
-		ssa.OpMIPS64MOVWload,
-		ssa.OpMIPS64MOVWUload,
-		ssa.OpMIPS64MOVVload,
-		ssa.OpMIPS64MOVFload,
-		ssa.OpMIPS64MOVDload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64MOVBstore,
-		ssa.OpMIPS64MOVHstore,
-		ssa.OpMIPS64MOVWstore,
-		ssa.OpMIPS64MOVVstore,
-		ssa.OpMIPS64MOVFstore,
-		ssa.OpMIPS64MOVDstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpMIPS64MOVBstorezero,
-		ssa.OpMIPS64MOVHstorezero,
-		ssa.OpMIPS64MOVWstorezero,
-		ssa.OpMIPS64MOVVstorezero:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpMIPS64MOVBreg,
-		ssa.OpMIPS64MOVBUreg,
-		ssa.OpMIPS64MOVHreg,
-		ssa.OpMIPS64MOVHUreg,
-		ssa.OpMIPS64MOVWreg,
-		ssa.OpMIPS64MOVWUreg:
-		a := v.Args[0]
-		for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg {
-			a = a.Args[0]
-		}
-		if a.Op == ssa.OpLoadReg {
-			t := a.Type
-			switch {
-			case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
-				v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
-				v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(),
-				v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
-				v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(),
-				v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned():
-				// arg is a proper-typed load, already zero/sign-extended, don't extend again
-				if v.Reg() == v.Args[0].Reg() {
-					return
-				}
-				p := gc.Prog(mips.AMOVV)
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = v.Args[0].Reg()
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = v.Reg()
-				return
-			default:
-			}
-		}
-		fallthrough
-	case ssa.OpMIPS64MOVWF,
-		ssa.OpMIPS64MOVWD,
-		ssa.OpMIPS64TRUNCFW,
-		ssa.OpMIPS64TRUNCDW,
-		ssa.OpMIPS64MOVVF,
-		ssa.OpMIPS64MOVVD,
-		ssa.OpMIPS64TRUNCFV,
-		ssa.OpMIPS64TRUNCDV,
-		ssa.OpMIPS64MOVFD,
-		ssa.OpMIPS64MOVDF,
-		ssa.OpMIPS64NEGF,
-		ssa.OpMIPS64NEGD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64NEGV:
-		// SUB from REGZERO
-		p := gc.Prog(mips.ASUBVU)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpMIPS64DUFFZERO:
-		// runtime.duffzero expects start address - 8 in R1
-		p := gc.Prog(mips.ASUBVU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 8
-		p.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REG_R1
-		p = gc.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-	case ssa.OpMIPS64LoweredZero:
-		// SUBV	$8, R1
-		// MOVV	R0, 8(R1)
-		// ADDV	$8, R1
-		// BNE	Rarg1, R1, -2(PC)
-		// arg1 is the address of the last element to zero
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%8 == 0:
-			sz = 8
-			mov = mips.AMOVV
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = mips.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = mips.AMOVH
-		default:
-			sz = 1
-			mov = mips.AMOVB
-		}
-		p := gc.Prog(mips.ASUBVU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REG_R1
-		p2 := gc.Prog(mov)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = mips.REGZERO
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = mips.REG_R1
-		p2.To.Offset = sz
-		p3 := gc.Prog(mips.AADDVU)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = sz
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = mips.REG_R1
-		p4 := gc.Prog(mips.ABNE)
-		p4.From.Type = obj.TYPE_REG
-		p4.From.Reg = v.Args[1].Reg()
-		p4.Reg = mips.REG_R1
-		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p2)
-	case ssa.OpMIPS64LoweredMove:
-		// SUBV	$8, R1
-		// MOVV	8(R1), Rtmp
-		// MOVV	Rtmp, (R2)
-		// ADDV	$8, R1
-		// ADDV	$8, R2
-		// BNE	Rarg2, R1, -4(PC)
-		// arg2 is the address of the last element of src
-		var sz int64
-		var mov obj.As
-		switch {
-		case v.AuxInt%8 == 0:
-			sz = 8
-			mov = mips.AMOVV
-		case v.AuxInt%4 == 0:
-			sz = 4
-			mov = mips.AMOVW
-		case v.AuxInt%2 == 0:
-			sz = 2
-			mov = mips.AMOVH
-		default:
-			sz = 1
-			mov = mips.AMOVB
-		}
-		p := gc.Prog(mips.ASUBVU)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REG_R1
-		p2 := gc.Prog(mov)
-		p2.From.Type = obj.TYPE_MEM
-		p2.From.Reg = mips.REG_R1
-		p2.From.Offset = sz
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = mips.REGTMP
-		p3 := gc.Prog(mov)
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = mips.REGTMP
-		p3.To.Type = obj.TYPE_MEM
-		p3.To.Reg = mips.REG_R2
-		p4 := gc.Prog(mips.AADDVU)
-		p4.From.Type = obj.TYPE_CONST
-		p4.From.Offset = sz
-		p4.To.Type = obj.TYPE_REG
-		p4.To.Reg = mips.REG_R1
-		p5 := gc.Prog(mips.AADDVU)
-		p5.From.Type = obj.TYPE_CONST
-		p5.From.Offset = sz
-		p5.To.Type = obj.TYPE_REG
-		p5.To.Reg = mips.REG_R2
-		p6 := gc.Prog(mips.ABNE)
-		p6.From.Type = obj.TYPE_REG
-		p6.From.Reg = v.Args[2].Reg()
-		p6.Reg = mips.REG_R1
-		p6.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p6, p2)
-	case ssa.OpMIPS64CALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64CALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64CALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64LoweredNilCheck:
-		// Issue a load which will fault if arg is nil.
-		p := gc.Prog(mips.AMOVB)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = mips.REGTMP
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpMIPS64FPFlagTrue,
-		ssa.OpMIPS64FPFlagFalse:
-		// MOVV	$0, r
-		// BFPF	2(PC)
-		// MOVV	$1, r
-		branch := mips.ABFPF
-		if v.Op == ssa.OpMIPS64FPFlagFalse {
-			branch = mips.ABFPT
-		}
-		p := gc.Prog(mips.AMOVV)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		p2 := gc.Prog(branch)
-		p2.To.Type = obj.TYPE_BRANCH
-		p3 := gc.Prog(mips.AMOVV)
-		p3.From.Type = obj.TYPE_CONST
-		p3.From.Offset = 1
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = v.Reg()
-		p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land
-		gc.Patch(p2, p4)
-	case ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.OpMIPS64LoweredGetClosurePtr:
-		// Closure pointer is R22 (mips.REGCTXT).
-		gc.CheckLoweredGetClosurePtr(v)
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
-
-var blockJump = map[ssa.BlockKind]struct {
-	asm, invasm obj.As
-}{
-	ssa.BlockMIPS64EQ:  {mips.ABEQ, mips.ABNE},
-	ssa.BlockMIPS64NE:  {mips.ABNE, mips.ABEQ},
-	ssa.BlockMIPS64LTZ: {mips.ABLTZ, mips.ABGEZ},
-	ssa.BlockMIPS64GEZ: {mips.ABGEZ, mips.ABLTZ},
-	ssa.BlockMIPS64LEZ: {mips.ABLEZ, mips.ABGTZ},
-	ssa.BlockMIPS64GTZ: {mips.ABGTZ, mips.ABLEZ},
-	ssa.BlockMIPS64FPT: {mips.ABFPT, mips.ABFPF},
-	ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockDefer:
-		// defer returns in R1:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(mips.ABNE)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = mips.REGZERO
-		p.Reg = mips.REG_R1
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.ARET)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-	case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE,
-		ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ,
-		ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ,
-		ssa.BlockMIPS64FPT, ssa.BlockMIPS64FPF:
-		jmp := blockJump[b.Kind]
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-		if !b.Control.Type.IsFlags() {
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = b.Control.Reg()
-		}
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/galign.go
deleted file mode 100644
index 07554ec..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/galign.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/galign.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/ppc64"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &ppc64.Linkppc64
-	if obj.GOARCH == "ppc64le" {
-		gc.Thearch.LinkArch = &ppc64.Linkppc64le
-	}
-	gc.Thearch.REGSP = ppc64.REGSP
-	gc.Thearch.MAXWIDTH = 1 << 50
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = ssaMarkMoves
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-
-	initvariants()
-	initproginfo()
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/ggen.go
deleted file mode 100644
index ea3944b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/ggen.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/ggen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/ppc64"
-)
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-
-	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi)
-}
-
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-	if cnt < int64(4*gc.Widthptr) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
-			p = gc.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
-		}
-	} else if cnt <= int64(128*gc.Widthptr) {
-		p = gc.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
-		p.Reg = ppc64.REGSP
-		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
-		gc.Naddr(&p.To, gc.Sysfunc("duffzero"))
-		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
-	} else {
-		p = gc.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = gc.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
-		p.Reg = ppc64.REGSP
-		p = gc.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
-		p = gc.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
-		p.Reg = ppc64.REGRT1
-		p = gc.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
-		p1 := p
-		p = gc.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
-		p = gc.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, p1)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(ppc64.AOR)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = ppc64.REG_R0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = ppc64.REG_R0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/opt.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/opt.go
deleted file mode 100644
index cc735d9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/opt.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/opt.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/opt.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-// Many Power ISA arithmetic and logical instructions come in four
-// standard variants. These bits let us map between variants.
-const (
-	V_CC = 1 << 0 // xCC (affect CR field 0 flags)
-	V_V  = 1 << 1 // xV (affect SO and OV flags)
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/prog.go
deleted file mode 100644
index 1e2cfe3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/prog.go
+++ /dev/null
@@ -1,307 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/prog.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/ppc64"
-)
-
-const (
-	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
-	RightRdwr uint32 = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about instruction
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-//
-// The table is formatted for 8-space tabs.
-var progtable = [ppc64.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the Power opcode.
-	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
-
-	// Integer
-	ppc64.AADD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AADDC & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASUB & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AADDME & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ANEG & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AAND & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AANDN & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AOR & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AORN & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AXOR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AEQV & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULLD & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULLW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULHD & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULHDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULHW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AMULHWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ADIVD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ADIVDU & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ADIVW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ADIVWU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASLD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASRD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASRAD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASLW & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASRW & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ASRAW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.ACMP & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
-	ppc64.ACMPU & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
-	ppc64.ACMPW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	ppc64.ACMPWU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	ppc64.ATD & obj.AMask:     {Flags: gc.SizeQ | gc.RightRead},
-
-	// Floating point.
-	ppc64.AFADD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFADDS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFSUB & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFSUBS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFMUL & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFMULS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFDIV & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFDIVS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCTIDZ & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCTIWZ & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCFID & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCFIDU & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	ppc64.AFCMPU & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	ppc64.AFRSP & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	ppc64.AFSQRT & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	ppc64.AFNEG & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-
-	// Moves
-	ppc64.AMOVB & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVBU & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
-	ppc64.AMOVBZ & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVH & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVHU & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
-	ppc64.AMOVHZ & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-
-	ppc64.AISEL & obj.AMask: {Flags: gc.SizeQ | gc.RegRead | gc.From3Read | gc.RightWrite},
-
-	// there is no AMOVWU.
-	ppc64.AMOVWZU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc},
-	ppc64.AMOVWZ & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AMOVD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	ppc64.AMOVDU & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc},
-	ppc64.AFMOVS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AFMOVSX & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AFMOVSZ & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	ppc64.AFMOVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// Jumps
-	ppc64.ABR & obj.AMask:  {Flags: gc.Jump | gc.Break},
-	ppc64.ABL & obj.AMask:  {Flags: gc.Call},
-	ppc64.ABVS & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABVC & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABEQ & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABNE & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABGE & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABLT & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABGT & obj.AMask: {Flags: gc.Cjmp},
-	ppc64.ABLE & obj.AMask: {Flags: gc.Cjmp},
-	obj.ARET:               {Flags: gc.Break},
-	obj.ADUFFZERO:          {Flags: gc.Call},
-	obj.ADUFFCOPY:          {Flags: gc.Call},
-}
-
-func initproginfo() {
-	var addvariant = []int{V_CC, V_V, V_CC | V_V}
-
-	// Perform one-time expansion of instructions in progtable to
-	// their CC, V, and VCC variants
-	for i := range progtable {
-		as := obj.As(i)
-		if progtable[as].Flags == 0 {
-			continue
-		}
-		variant := as2variant(as)
-		for i := range addvariant {
-			as2 := variant2as(as, variant|addvariant[i])
-			if as2 != 0 && progtable[as2&obj.AMask].Flags == 0 {
-				progtable[as2&obj.AMask] = progtable[as]
-			}
-		}
-	}
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("proginfo: unknown instruction %v", p)
-	}
-
-	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
-		info.Flags &^= gc.RegRead
-		info.Flags |= gc.RightRead /*CanRegRead |*/
-	}
-
-	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
-		info.Flags &^= gc.LeftRead
-		info.Flags |= gc.LeftAddr
-	}
-
-	return info
-}
-
-// Instruction variants table, populated by initvariants via Main.
-// The index is the base form of the instruction, masked by obj.AMask.
-// The 4 values are the unmasked base form, then the unmasked CC, V,
-// and VCC variants, respectively.
-var varianttable = [ppc64.ALAST & obj.AMask][4]obj.As{}
-
-func initvariant(as obj.As, variants ...obj.As) {
-	vv := &varianttable[as&obj.AMask]
-	vv[0] = as
-	for i, v := range variants {
-		vv[i+1] = v
-	}
-}
-
-func initvariants() {
-	initvariant(ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC)
-	initvariant(ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC)
-	initvariant(ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC)
-	initvariant(ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC)
-	initvariant(ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC)
-	initvariant(ppc64.AAND, ppc64.AANDCC)
-	initvariant(ppc64.AANDN, ppc64.AANDNCC)
-	initvariant(ppc64.ACNTLZD, ppc64.ACNTLZDCC)
-	initvariant(ppc64.ACNTLZW, ppc64.ACNTLZWCC)
-	initvariant(ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC)
-	initvariant(ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC)
-	initvariant(ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC)
-	initvariant(ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC)
-	initvariant(ppc64.AEQV, ppc64.AEQVCC)
-	initvariant(ppc64.AEXTSB, ppc64.AEXTSBCC)
-	initvariant(ppc64.AEXTSH, ppc64.AEXTSHCC)
-	initvariant(ppc64.AEXTSW, ppc64.AEXTSWCC)
-	initvariant(ppc64.AFABS, ppc64.AFABSCC)
-	initvariant(ppc64.AFADD, ppc64.AFADDCC)
-	initvariant(ppc64.AFADDS, ppc64.AFADDSCC)
-	initvariant(ppc64.AFCFID, ppc64.AFCFIDCC)
-	initvariant(ppc64.AFCFIDU, ppc64.AFCFIDUCC)
-	initvariant(ppc64.AFCTID, ppc64.AFCTIDCC)
-	initvariant(ppc64.AFCTIDZ, ppc64.AFCTIDZCC)
-	initvariant(ppc64.AFCTIW, ppc64.AFCTIWCC)
-	initvariant(ppc64.AFCTIWZ, ppc64.AFCTIWZCC)
-	initvariant(ppc64.AFDIV, ppc64.AFDIVCC)
-	initvariant(ppc64.AFDIVS, ppc64.AFDIVSCC)
-	initvariant(ppc64.AFMADD, ppc64.AFMADDCC)
-	initvariant(ppc64.AFMADDS, ppc64.AFMADDSCC)
-	initvariant(ppc64.AFMOVD, ppc64.AFMOVDCC)
-	initvariant(ppc64.AFMSUB, ppc64.AFMSUBCC)
-	initvariant(ppc64.AFMSUBS, ppc64.AFMSUBSCC)
-	initvariant(ppc64.AFMUL, ppc64.AFMULCC)
-	initvariant(ppc64.AFMULS, ppc64.AFMULSCC)
-	initvariant(ppc64.AFNABS, ppc64.AFNABSCC)
-	initvariant(ppc64.AFNEG, ppc64.AFNEGCC)
-	initvariant(ppc64.AFNMADD, ppc64.AFNMADDCC)
-	initvariant(ppc64.AFNMADDS, ppc64.AFNMADDSCC)
-	initvariant(ppc64.AFNMSUB, ppc64.AFNMSUBCC)
-	initvariant(ppc64.AFNMSUBS, ppc64.AFNMSUBSCC)
-	initvariant(ppc64.AFRES, ppc64.AFRESCC)
-	initvariant(ppc64.AFRSP, ppc64.AFRSPCC)
-	initvariant(ppc64.AFRSQRTE, ppc64.AFRSQRTECC)
-	initvariant(ppc64.AFSEL, ppc64.AFSELCC)
-	initvariant(ppc64.AFSQRT, ppc64.AFSQRTCC)
-	initvariant(ppc64.AFSQRTS, ppc64.AFSQRTSCC)
-	initvariant(ppc64.AFSUB, ppc64.AFSUBCC)
-	initvariant(ppc64.AFSUBS, ppc64.AFSUBSCC)
-	initvariant(ppc64.AMTFSB0, ppc64.AMTFSB0CC)
-	initvariant(ppc64.AMTFSB1, ppc64.AMTFSB1CC)
-	initvariant(ppc64.AMULHD, ppc64.AMULHDCC)
-	initvariant(ppc64.AMULHDU, ppc64.AMULHDUCC)
-	initvariant(ppc64.AMULHW, ppc64.AMULHWCC)
-	initvariant(ppc64.AMULHWU, ppc64.AMULHWUCC)
-	initvariant(ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC)
-	initvariant(ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC)
-	initvariant(ppc64.ANAND, ppc64.ANANDCC)
-	initvariant(ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC)
-	initvariant(ppc64.ANOR, ppc64.ANORCC)
-	initvariant(ppc64.AOR, ppc64.AORCC)
-	initvariant(ppc64.AORN, ppc64.AORNCC)
-	initvariant(ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC)
-	initvariant(ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC)
-	initvariant(ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC)
-	initvariant(ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC)
-	initvariant(ppc64.ARLDC, ppc64.ARLDCCC)
-	initvariant(ppc64.ARLDCL, ppc64.ARLDCLCC)
-	initvariant(ppc64.ARLDCR, ppc64.ARLDCRCC)
-	initvariant(ppc64.ARLDMI, ppc64.ARLDMICC)
-	initvariant(ppc64.ARLWMI, ppc64.ARLWMICC)
-	initvariant(ppc64.ARLWNM, ppc64.ARLWNMCC)
-	initvariant(ppc64.ASLD, ppc64.ASLDCC)
-	initvariant(ppc64.ASLW, ppc64.ASLWCC)
-	initvariant(ppc64.ASRAD, ppc64.ASRADCC)
-	initvariant(ppc64.ASRAW, ppc64.ASRAWCC)
-	initvariant(ppc64.ASRD, ppc64.ASRDCC)
-	initvariant(ppc64.ASRW, ppc64.ASRWCC)
-	initvariant(ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC)
-	initvariant(ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC)
-	initvariant(ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC)
-	initvariant(ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC)
-	initvariant(ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC)
-	initvariant(ppc64.AXOR, ppc64.AXORCC)
-
-	for i := range varianttable {
-		vv := &varianttable[i]
-		if vv[0] == 0 {
-			// Instruction has no variants
-			varianttable[i][0] = obj.As(i)
-			continue
-		}
-
-		// Copy base form to other variants
-		if vv[0]&obj.AMask == obj.As(i) {
-			for _, v := range vv {
-				if v != 0 {
-					varianttable[v&obj.AMask] = varianttable[i]
-				}
-			}
-		}
-	}
-}
-
-// as2variant returns the variant (V_*) flags of instruction as.
-func as2variant(as obj.As) int {
-	for i, v := range varianttable[as&obj.AMask] {
-		if v&obj.AMask == as&obj.AMask {
-			return i
-		}
-	}
-	gc.Fatalf("as2variant: instruction %v is not a variant of itself", as&obj.AMask)
-	return 0
-}
-
-// variant2as returns the instruction as with the given variant (V_*) flags.
-// If no such variant exists, this returns 0.
-func variant2as(as obj.As, flags int) obj.As {
-	return varianttable[as&obj.AMask][flags]
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/ssa.go
deleted file mode 100644
index cf05b4d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ppc64/ssa.go
+++ /dev/null
@@ -1,941 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ppc64/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/ppc64"
-	"math"
-)
-
-var condOps = map[ssa.Op]obj.As{
-	ssa.OpPPC64Equal:        ppc64.ABEQ,
-	ssa.OpPPC64NotEqual:     ppc64.ABNE,
-	ssa.OpPPC64LessThan:     ppc64.ABLT,
-	ssa.OpPPC64GreaterEqual: ppc64.ABGE,
-	ssa.OpPPC64GreaterThan:  ppc64.ABGT,
-	ssa.OpPPC64LessEqual:    ppc64.ABLE,
-
-	ssa.OpPPC64FLessThan:     ppc64.ABLT, // 1 branch for FCMP
-	ssa.OpPPC64FGreaterThan:  ppc64.ABGT, // 1 branch for FCMP
-	ssa.OpPPC64FLessEqual:    ppc64.ABLT, // 2 branches for FCMP <=, second is BEQ
-	ssa.OpPPC64FGreaterEqual: ppc64.ABGT, // 2 branches for FCMP >=, second is BEQ
-}
-
-// iselOp encodes mapping of comparison operations onto ISEL operands
-type iselOp struct {
-	cond        int64
-	valueIfCond int // if cond is true, the value to return (0 or 1)
-}
-
-// Input registers to ISEL used for comparison. Index 0 is zero, 1 is (will be) 1
-var iselRegs = [2]int16{ppc64.REG_R0, ppc64.REGTMP}
-
-var iselOps = map[ssa.Op]iselOp{
-	ssa.OpPPC64Equal:         iselOp{cond: ppc64.C_COND_EQ, valueIfCond: 1},
-	ssa.OpPPC64NotEqual:      iselOp{cond: ppc64.C_COND_EQ, valueIfCond: 0},
-	ssa.OpPPC64LessThan:      iselOp{cond: ppc64.C_COND_LT, valueIfCond: 1},
-	ssa.OpPPC64GreaterEqual:  iselOp{cond: ppc64.C_COND_LT, valueIfCond: 0},
-	ssa.OpPPC64GreaterThan:   iselOp{cond: ppc64.C_COND_GT, valueIfCond: 1},
-	ssa.OpPPC64LessEqual:     iselOp{cond: ppc64.C_COND_GT, valueIfCond: 0},
-	ssa.OpPPC64FLessThan:     iselOp{cond: ppc64.C_COND_LT, valueIfCond: 1},
-	ssa.OpPPC64FGreaterThan:  iselOp{cond: ppc64.C_COND_GT, valueIfCond: 1},
-	ssa.OpPPC64FLessEqual:    iselOp{cond: ppc64.C_COND_LT, valueIfCond: 1}, // 2 comparisons, 2nd is EQ
-	ssa.OpPPC64FGreaterEqual: iselOp{cond: ppc64.C_COND_GT, valueIfCond: 1}, // 2 comparisons, 2nd is EQ
-}
-
-// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
-	//	flive := b.FlagsLiveAtEnd
-	//	if b.Control != nil && b.Control.Type.IsFlags() {
-	//		flive = true
-	//	}
-	//	for i := len(b.Values) - 1; i >= 0; i-- {
-	//		v := b.Values[i]
-	//		if flive && (v.Op == v.Op == ssa.OpPPC64MOVDconst) {
-	//			// The "mark" is any non-nil Aux value.
-	//			v.Aux = v
-	//		}
-	//		if v.Type.IsFlags() {
-	//			flive = false
-	//		}
-	//		for _, a := range v.Args {
-	//			if a.Type.IsFlags() {
-	//				flive = true
-	//			}
-	//		}
-	//	}
-}
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return ppc64.AFMOVS
-		case 8:
-			return ppc64.AFMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return ppc64.AMOVB
-			} else {
-				return ppc64.AMOVBZ
-			}
-		case 2:
-			if t.IsSigned() {
-				return ppc64.AMOVH
-			} else {
-				return ppc64.AMOVHZ
-			}
-		case 4:
-			if t.IsSigned() {
-				return ppc64.AMOVW
-			} else {
-				return ppc64.AMOVWZ
-			}
-		case 8:
-			return ppc64.AMOVD
-		}
-	}
-	panic("bad load type")
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return ppc64.AFMOVS
-		case 8:
-			return ppc64.AFMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			return ppc64.AMOVB
-		case 2:
-			return ppc64.AMOVH
-		case 4:
-			return ppc64.AMOVW
-		case 8:
-			return ppc64.AMOVD
-		}
-	}
-	panic("bad store type")
-}
-
-func ssaGenISEL(v *ssa.Value, cr int64, r1, r2 int16) {
-	r := v.Reg()
-	p := gc.Prog(ppc64.AISEL)
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = r
-	p.Reg = r1
-	p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r2}
-	p.From.Type = obj.TYPE_CONST
-	p.From.Offset = cr
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
-		// nothing to do
-
-	case ssa.OpCopy, ssa.OpPPC64MOVDconvert:
-		t := v.Type
-		if t.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x != y {
-			rt := obj.TYPE_REG
-			op := ppc64.AMOVD
-
-			if t.IsFloat() {
-				op = ppc64.AFMOVD
-			}
-			p := gc.Prog(op)
-			p.From.Type = rt
-			p.From.Reg = x
-			p.To.Type = rt
-			p.To.Reg = y
-		}
-
-	case ssa.OpPPC64Xf2i64:
-		{
-			x := v.Args[0].Reg()
-			y := v.Reg()
-			p := gc.Prog(ppc64.AFMOVD)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = x
-			s.AddrScratch(&p.To)
-			p = gc.Prog(ppc64.AMOVD)
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = y
-			s.AddrScratch(&p.From)
-		}
-	case ssa.OpPPC64Xi2f64:
-		{
-			x := v.Args[0].Reg()
-			y := v.Reg()
-			p := gc.Prog(ppc64.AMOVD)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = x
-			s.AddrScratch(&p.To)
-			p = gc.Prog(ppc64.AFMOVD)
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = y
-			s.AddrScratch(&p.From)
-		}
-
-	case ssa.OpPPC64LoweredGetClosurePtr:
-		// Closure pointer is R11 (already)
-		gc.CheckLoweredGetClosurePtr(v)
-
-	case ssa.OpLoadReg:
-		loadOp := loadByType(v.Type)
-		p := gc.Prog(loadOp)
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpStoreReg:
-		storeOp := storeByType(v.Type)
-		p := gc.Prog(storeOp)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
-
-	case ssa.OpPPC64DIVD:
-		// For now,
-		//
-		// cmp arg1, -1
-		// be  ahead
-		// v = arg0 / arg1
-		// b over
-		// ahead: v = - arg0
-		// over: nop
-		r := v.Reg()
-		r0 := v.Args[0].Reg()
-		r1 := v.Args[1].Reg()
-
-		p := gc.Prog(ppc64.ACMP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r1
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = -1
-
-		pbahead := gc.Prog(ppc64.ABEQ)
-		pbahead.To.Type = obj.TYPE_BRANCH
-
-		p = gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r1
-		p.Reg = r0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-		pbover := gc.Prog(obj.AJMP)
-		pbover.To.Type = obj.TYPE_BRANCH
-
-		p = gc.Prog(ppc64.ANEG)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r0
-		gc.Patch(pbahead, p)
-
-		p = gc.Prog(obj.ANOP)
-		gc.Patch(pbover, p)
-
-	case ssa.OpPPC64DIVW:
-		// word-width version of above
-		r := v.Reg()
-		r0 := v.Args[0].Reg()
-		r1 := v.Args[1].Reg()
-
-		p := gc.Prog(ppc64.ACMPW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r1
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = -1
-
-		pbahead := gc.Prog(ppc64.ABEQ)
-		pbahead.To.Type = obj.TYPE_BRANCH
-
-		p = gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r1
-		p.Reg = r0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-		pbover := gc.Prog(obj.AJMP)
-		pbover.To.Type = obj.TYPE_BRANCH
-
-		p = gc.Prog(ppc64.ANEG)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r0
-		gc.Patch(pbahead, p)
-
-		p = gc.Prog(obj.ANOP)
-		gc.Patch(pbover, p)
-
-	case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
-		ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
-		ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
-		ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU,
-		ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS,
-		ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64XOR, ssa.OpPPC64EQV:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r2
-		p.Reg = r1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.OpPPC64MaskIfNotCarry:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = ppc64.REGZERO
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.OpPPC64ADDconstForCarry:
-		r1 := v.Args[0].Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.Reg = r1
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect.
-
-	case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-
-	case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
-		ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst:
-		p := gc.Prog(v.Op.Asm())
-		p.Reg = v.Args[0].Reg()
-
-		if v.Aux != nil {
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = gc.AuxOffset(v)
-		} else {
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = v.AuxInt
-		}
-
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpPPC64ANDCCconst:
-		p := gc.Prog(v.Op.Asm())
-		p.Reg = v.Args[0].Reg()
-
-		if v.Aux != nil {
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = gc.AuxOffset(v)
-		} else {
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = v.AuxInt
-		}
-
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGTMP // discard result
-
-	case ssa.OpPPC64MOVDaddr:
-		p := gc.Prog(ppc64.AMOVD)
-		p.From.Type = obj.TYPE_ADDR
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-		var wantreg string
-		// Suspect comment, copied from ARM code
-		// MOVD $sym+off(base), R
-		// the assembler expands it as the following:
-		// - base is SP: add constant offset to SP
-		//               when constant is large, tmp register (R11) may be used
-		// - base is SB: load external address from constant pool (use relocation)
-		switch v.Aux.(type) {
-		default:
-			v.Fatalf("aux is of unknown type %T", v.Aux)
-		case *ssa.ExternSymbol:
-			wantreg = "SB"
-			gc.AddAux(&p.From, v)
-		case *ssa.ArgSymbol, *ssa.AutoSymbol:
-			wantreg = "SP"
-			gc.AddAux(&p.From, v)
-		case nil:
-			// No sym, just MOVD $off(SP), R
-			wantreg = "SP"
-			p.From.Reg = ppc64.REGSP
-			p.From.Offset = v.AuxInt
-		}
-		if reg := v.Args[0].RegName(); reg != wantreg {
-			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
-		}
-
-	case ssa.OpPPC64MOVDconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[1].Reg()
-
-	case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = v.AuxInt
-
-	case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
-		// Shift in register to required size
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Reg = v.Reg()
-		p.To.Type = obj.TYPE_REG
-
-	case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = ppc64.REGZERO
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-
-	case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-
-	case ssa.OpPPC64Equal,
-		ssa.OpPPC64NotEqual,
-		ssa.OpPPC64LessThan,
-		ssa.OpPPC64FLessThan,
-		ssa.OpPPC64LessEqual,
-		ssa.OpPPC64GreaterThan,
-		ssa.OpPPC64FGreaterThan,
-		ssa.OpPPC64GreaterEqual:
-
-		// On Power7 or later, can use isel instruction:
-		// for a < b, a > b, a = b:
-		//   rtmp := 1
-		//   isel rt,rtmp,r0,cond // rt is target in ppc asm
-
-		// for  a >= b, a <= b, a != b:
-		//   rtmp := 1
-		//   isel rt,0,rtmp,!cond // rt is target in ppc asm
-
-		if v.Block.Func.Config.OldArch {
-			p := gc.Prog(ppc64.AMOVD)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 1
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = v.Reg()
-
-			pb := gc.Prog(condOps[v.Op])
-			pb.To.Type = obj.TYPE_BRANCH
-
-			p = gc.Prog(ppc64.AMOVD)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 0
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = v.Reg()
-
-			p = gc.Prog(obj.ANOP)
-			gc.Patch(pb, p)
-			break
-		}
-		// Modern PPC uses ISEL
-		p := gc.Prog(ppc64.AMOVD)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = iselRegs[1]
-		iop := iselOps[v.Op]
-		ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
-
-	case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion
-		ssa.OpPPC64FGreaterEqual:
-
-		if v.Block.Func.Config.OldArch {
-			p := gc.Prog(ppc64.AMOVW)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 1
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = v.Reg()
-
-			pb0 := gc.Prog(condOps[v.Op])
-			pb0.To.Type = obj.TYPE_BRANCH
-			pb1 := gc.Prog(ppc64.ABEQ)
-			pb1.To.Type = obj.TYPE_BRANCH
-
-			p = gc.Prog(ppc64.AMOVW)
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 0
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = v.Reg()
-
-			p = gc.Prog(obj.ANOP)
-			gc.Patch(pb0, p)
-			gc.Patch(pb1, p)
-			break
-		}
-		// Modern PPC uses ISEL
-		p := gc.Prog(ppc64.AMOVD)
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = 1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = iselRegs[1]
-		iop := iselOps[v.Op]
-		ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
-		ssaGenISEL(v, ppc64.C_COND_EQ, iselRegs[1], v.Reg())
-
-	case ssa.OpPPC64LoweredZero:
-		// Similar to how this is done on ARM,
-		// except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off
-		// not store-and-increment.
-		// Therefore R3 should be dest-align
-		// and arg1 should be dest+size-align
-		// HOWEVER, the input dest address cannot be dest-align because
-		// that does not necessarily address valid memory and it's not
-		// known how that might be optimized.  Therefore, correct it in
-		// the expansion:
-		//
-		// ADD    -8,R3,R3
-		// MOVDU  R0, 8(R3)
-		// CMP	  R3, Rarg1
-		// BL	  -2(PC)
-		// arg1 is the address of the last element to zero
-		// auxint is alignment
-		var sz int64
-		var movu obj.As
-		switch {
-		case v.AuxInt%8 == 0:
-			sz = 8
-			movu = ppc64.AMOVDU
-		case v.AuxInt%4 == 0:
-			sz = 4
-			movu = ppc64.AMOVWZU // MOVWU instruction not implemented
-		case v.AuxInt%2 == 0:
-			sz = 2
-			movu = ppc64.AMOVHU
-		default:
-			sz = 1
-			movu = ppc64.AMOVBU
-		}
-
-		p := gc.Prog(ppc64.AADD)
-		p.Reg = v.Args[0].Reg()
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = -sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-
-		p = gc.Prog(movu)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = ppc64.REG_R0
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Offset = sz
-
-		p2 := gc.Prog(ppc64.ACMPU)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = v.Args[0].Reg()
-		p2.To.Reg = v.Args[1].Reg()
-		p2.To.Type = obj.TYPE_REG
-
-		p3 := gc.Prog(ppc64.ABLT)
-		p3.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p3, p)
-
-	case ssa.OpPPC64LoweredMove:
-		// Similar to how this is done on ARM,
-		// except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off,
-		// not store-and-increment.
-		// Inputs must be valid pointers to memory,
-		// so adjust arg0 and arg1 as part of the expansion.
-		// arg2 should be src+size-align,
-		//
-		// ADD    -8,R3,R3
-		// ADD    -8,R4,R4
-		// MOVDU	8(R4), Rtmp
-		// MOVDU 	Rtmp, 8(R3)
-		// CMP	R4, Rarg2
-		// BL	-3(PC)
-		// arg2 is the address of the last element of src
-		// auxint is alignment
-		var sz int64
-		var movu obj.As
-		switch {
-		case v.AuxInt%8 == 0:
-			sz = 8
-			movu = ppc64.AMOVDU
-		case v.AuxInt%4 == 0:
-			sz = 4
-			movu = ppc64.AMOVWZU // MOVWU instruction not implemented
-		case v.AuxInt%2 == 0:
-			sz = 2
-			movu = ppc64.AMOVHU
-		default:
-			sz = 1
-			movu = ppc64.AMOVBU
-		}
-
-		p := gc.Prog(ppc64.AADD)
-		p.Reg = v.Args[0].Reg()
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = -sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-
-		p = gc.Prog(ppc64.AADD)
-		p.Reg = v.Args[1].Reg()
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = -sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[1].Reg()
-
-		p = gc.Prog(movu)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		p.From.Offset = sz
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGTMP
-
-		p2 := gc.Prog(movu)
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = ppc64.REGTMP
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = v.Args[0].Reg()
-		p2.To.Offset = sz
-
-		p3 := gc.Prog(ppc64.ACMPU)
-		p3.From.Reg = v.Args[1].Reg()
-		p3.From.Type = obj.TYPE_REG
-		p3.To.Reg = v.Args[2].Reg()
-		p3.To.Type = obj.TYPE_REG
-
-		p4 := gc.Prog(ppc64.ABLT)
-		p4.To.Type = obj.TYPE_BRANCH
-		gc.Patch(p4, p)
-
-	case ssa.OpPPC64CALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert two actual hardware NOPs that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			// PPC64 is unusual because TWO nops are required
-			// (see gc/cgen.go, gc/plive.go -- copy of comment below)
-			//
-			// On ppc64, when compiling Go into position
-			// independent code on ppc64le we insert an
-			// instruction to reload the TOC pointer from the
-			// stack as well. See the long comment near
-			// jmpdefer in runtime/asm_ppc64.s for why.
-			// If the MOVD is not needed, insert a hardware NOP
-			// so that the same number of instructions are used
-			// on ppc64 in both shared and non-shared modes.
-			ginsnop()
-			if gc.Ctxt.Flag_shared {
-				p := gc.Prog(ppc64.AMOVD)
-				p.From.Type = obj.TYPE_MEM
-				p.From.Offset = 24
-				p.From.Reg = ppc64.REGSP
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = ppc64.REG_R2
-			} else {
-				ginsnop()
-			}
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-
-	case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
-		p := gc.Prog(ppc64.AMOVD)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REG_CTR
-
-		if gc.Ctxt.Flag_shared && p.From.Reg != ppc64.REG_R12 {
-			// Make sure function pointer is in R12 as well when
-			// compiling Go into PIC.
-			// TODO(mwhudson): it would obviously be better to
-			// change the register allocation to put the value in
-			// R12 already, but I don't know how to do that.
-			// TODO: We have the technology now to implement TODO above.
-			q := gc.Prog(ppc64.AMOVD)
-			q.From = p.From
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = ppc64.REG_R12
-		}
-
-		pp := gc.Prog(obj.ACALL)
-		pp.To.Type = obj.TYPE_REG
-		pp.To.Reg = ppc64.REG_CTR
-
-		if gc.Ctxt.Flag_shared {
-			// When compiling Go into PIC, the function we just
-			// called via pointer might have been implemented in
-			// a separate module and so overwritten the TOC
-			// pointer in R2; reload it.
-			q := gc.Prog(ppc64.AMOVD)
-			q.From.Type = obj.TYPE_MEM
-			q.From.Offset = 24
-			q.From.Reg = ppc64.REGSP
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = ppc64.REG_R2
-		}
-
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-
-	case ssa.OpPPC64CALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpPPC64CALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-
-	case ssa.OpPPC64LoweredNilCheck:
-		// Issue a load which will fault if arg is nil.
-		p := gc.Prog(ppc64.AMOVBZ)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REGTMP
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-
-	case ssa.OpPPC64InvertFlags:
-		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
-		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
-
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
-
-var blockJump = [...]struct {
-	asm, invasm     obj.As
-	asmeq, invasmun bool
-}{
-	ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false},
-	ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false},
-
-	ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false},
-	ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false},
-	ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false},
-	ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false},
-
-	// TODO: need to work FP comparisons into block jumps
-	ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGE, false, false},
-	ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, true}, // GE = GT or EQ; !GE = LT or UN
-	ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // LE = LT or EQ; !LE = GT or UN
-	ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-
-	case ssa.BlockDefer:
-		// defer returns in R3:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(ppc64.ACMP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = ppc64.REG_R3
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = ppc64.REG_R0
-
-		p = gc.Prog(ppc64.ABNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.AJMP)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-
-	case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
-		ssa.BlockPPC64LT, ssa.BlockPPC64GE,
-		ssa.BlockPPC64LE, ssa.BlockPPC64GT,
-		ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
-		ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
-		jmp := blockJump[b.Kind]
-		likely := b.Likely
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			likely *= -1
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-			if jmp.invasmun {
-				// TODO: The second branch is probably predict-not-taken since it is for FP unordered
-				q := gc.Prog(ppc64.ABVS)
-				q.To.Type = obj.TYPE_BRANCH
-				s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-			}
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			if jmp.asmeq {
-				q := gc.Prog(ppc64.ABEQ)
-				q.To.Type = obj.TYPE_BRANCH
-				s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
-			}
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			if jmp.asmeq {
-				q := gc.Prog(ppc64.ABEQ)
-				q.To.Type = obj.TYPE_BRANCH
-				s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
-			}
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-
-		// liblink reorders the instruction stream as it sees fit.
-		// Pass along what we know so liblink can make use of it.
-		// TODO: Once we've fully switched to SSA,
-		// make liblink leave our output alone.
-		//switch likely {
-		//case ssa.BranchUnlikely:
-		//	p.From.Type = obj.TYPE_CONST
-		//	p.From.Offset = 0
-		//case ssa.BranchLikely:
-		//	p.From.Type = obj.TYPE_CONST
-		//	p.From.Offset = 1
-		//}
-
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
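
The blockJump table above notes why the ordered floating-point blocks (FGE, FLE) need a second BEQ branch: an unordered (NaN) comparison must not satisfy either condition, so GE cannot be expressed as "not LT". As a reference point only, here is a minimal standalone Go sketch of that truth table; floatGE is an illustrative helper written for this note, not code from the compiler.

package main

import (
	"fmt"
	"math"
)

// floatGE mirrors how BlockPPC64FGE is lowered above: the primary branch
// tests GT and a second BEQ covers EQ, because the unordered (NaN) case
// must fall through to the false side.
func floatGE(a, b float64) bool {
	if math.IsNaN(a) || math.IsNaN(b) { // unordered result
		return false
	}
	return a > b || a == b
}

func main() {
	fmt.Println(floatGE(2, 1), floatGE(1, 1), floatGE(1, 2)) // true true false
	fmt.Println(floatGE(math.NaN(), 1))                      // false
}
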
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/galign.go
deleted file mode 100644
index eb8cfed..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/galign.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/galign.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj/s390x"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &s390x.Links390x
-	gc.Thearch.REGSP = s390x.REGSP
-	gc.Thearch.MAXWIDTH = 1 << 50
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = ssaMarkMoves
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/ggen.go
deleted file mode 100644
index b17dfb5..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/ggen.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/ggen.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/s390x"
-)
-
-// clearLoopCutoff is the (somewhat arbitrary) value above which it is better
-// to have a loop of clear instructions (e.g. XCs) rather than just generating
-// multiple instructions (i.e. loop unrolling).
-// Must be between 256 and 4096.
-const clearLoopCutoff = 1024
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-
-	// iterate through declarations - they are sorted in decreasing xoffset order.
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi)
-}
-
-// zerorange clears the stack in the given range.
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-
-	// Adjust the frame to account for LR.
-	frame += gc.Ctxt.FixedFrameSize()
-	offset := frame + lo
-	reg := int16(s390x.REGSP)
-
-	// If the offset cannot fit in a 12-bit unsigned displacement then we
-	// need to create a copy of the stack pointer that we can adjust.
-	// We also need to do this if we are going to loop.
-	if offset < 0 || offset > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
-		p = gc.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset, obj.TYPE_REG, s390x.REGRT1, 0)
-		p.Reg = int16(s390x.REGSP)
-		reg = s390x.REGRT1
-		offset = 0
-	}
-
-	// Generate a loop of large clears.
-	if cnt > clearLoopCutoff {
-		n := cnt - (cnt % 256)
-		end := int16(s390x.REGRT2)
-		p = gc.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset+n, obj.TYPE_REG, end, 0)
-		p.Reg = reg
-		p = gc.Appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
-		p.From3 = new(obj.Addr)
-		p.From3.Type = obj.TYPE_CONST
-		p.From3.Offset = 256
-		pl := p
-		p = gc.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
-		p = gc.Appendpp(p, s390x.ACMP, obj.TYPE_REG, reg, 0, obj.TYPE_REG, end, 0)
-		p = gc.Appendpp(p, s390x.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
-		gc.Patch(p, pl)
-
-		cnt -= n
-	}
-
-	// Generate remaining clear instructions without a loop.
-	for cnt > 0 {
-		n := cnt
-
-		// Can clear at most 256 bytes per instruction.
-		if n > 256 {
-			n = 256
-		}
-
-		switch n {
-		// Handle very small clears with move instructions.
-		case 8, 4, 2, 1:
-			ins := s390x.AMOVB
-			switch n {
-			case 8:
-				ins = s390x.AMOVD
-			case 4:
-				ins = s390x.AMOVW
-			case 2:
-				ins = s390x.AMOVH
-			}
-			p = gc.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, offset)
-
-		// Handle clears that would require multiple move instructions with XC.
-		default:
-			p = gc.Appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
-			p.From3 = new(obj.Addr)
-			p.From3.Type = obj.TYPE_CONST
-			p.From3.Offset = n
-		}
-
-		cnt -= n
-		offset += n
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(s390x.AOR)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = int16(s390x.REG_R0)
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = int16(s390x.REG_R0)
-}
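
The zerorange function above splits a large clear into a loop of 256-byte XC instructions (once the byte count exceeds clearLoopCutoff) followed by straight-line clears of at most 256 bytes each. A rough standalone Go sketch of just that chunking decision follows; clearChunks is a hypothetical helper written for illustration and only models the sizes, not the emitted instructions.

package main

import "fmt"

const clearLoopCutoff = 1024 // same cutoff as in the deleted ggen.go above

// clearChunks reports how many bytes the loop of 256-byte clears would cover
// and the sizes of the remaining straight-line clears, mirroring zerorange.
func clearChunks(cnt int64) (loopBytes int64, tail []int64) {
	if cnt > clearLoopCutoff {
		loopBytes = cnt - cnt%256 // 256 bytes cleared per loop iteration
		cnt -= loopBytes
	}
	for cnt > 0 {
		n := cnt
		if n > 256 {
			n = 256
		}
		tail = append(tail, n) // sizes 8/4/2/1 use MOV, larger ones XC
		cnt -= n
	}
	return loopBytes, tail
}

func main() {
	fmt.Println(clearChunks(1500)) // 1280 [220]
	fmt.Println(clearChunks(600))  // 0 [256 256 88]
}
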
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/prog.go
deleted file mode 100644
index 56f080a..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/prog.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/prog.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/s390x"
-)
-
-// This table gives the basic information about instructions
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-var progtable = [s390x.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE & obj.AMask:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT & obj.AMask:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA & obj.AMask: {Flags: gc.Pseudo},
-	obj.APCDATA & obj.AMask:   {Flags: gc.Pseudo},
-	obj.AUNDEF & obj.AMask:    {Flags: gc.Break},
-	obj.AUSEFIELD & obj.AMask: {Flags: gc.OK},
-	obj.AVARDEF & obj.AMask:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL & obj.AMask:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE & obj.AMask:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations.
-	obj.ANOP & obj.AMask: {Flags: gc.LeftRead | gc.RightWrite},
-
-	// Integer
-	s390x.AADD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASUB & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASUBE & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AADDW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASUBW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ANEG & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ANEGW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AAND & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AANDW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AOR & obj.AMask:     {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AORW & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AXOR & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AXORW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMULLD & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMULLW & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMULHD & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMULHDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ADIVD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ADIVDU & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ADIVW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ADIVWU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASLD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASLW & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASRD & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASRW & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASRAD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ASRAW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ARLL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ARLLG & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.ACMP & obj.AMask:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
-	s390x.ACMPU & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
-	s390x.ACMPW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	s390x.ACMPWU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
-	s390x.AMODD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMODDU & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMODW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AMODWU & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFLOGR & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite},
-
-	// Floating point.
-	s390x.AFADD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFADDS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFSUB & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFSUBS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFMUL & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFMULS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFDIV & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFDIVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
-	s390x.AFCMPU & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	s390x.ACEBR & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
-	s390x.ALEDBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ALDEBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.AFSQRT & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	s390x.AFNEG & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
-	s390x.AFNEGS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite},
-
-	// Conversions
-	s390x.ACEFBRA & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACDFBRA & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACEGBRA & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACDGBRA & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACFEBRA & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACFDBRA & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACGEBRA & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACGDBRA & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACELFBR & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACDLFBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACELGBR & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACDLGBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACLFEBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACLFDBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACLGEBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-	s390x.ACLGDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
-
-	// Moves
-	s390x.AMOVB & obj.AMask:   {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVBZ & obj.AMask:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVH & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVHZ & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVW & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVWZ & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVD & obj.AMask:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVHBR & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVWBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AMOVDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AFMOVS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
-	s390x.AFMOVD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVDEQ & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVDGE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVDGT & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVDLE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVDLT & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-	s390x.AMOVDNE & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// Storage operations
-	s390x.AMVC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
-	s390x.ACLC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightRead | gc.RightAddr},
-	s390x.AXC & obj.AMask:  {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
-	s390x.AOC & obj.AMask:  {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
-	s390x.ANC & obj.AMask:  {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
-
-	// Jumps
-	s390x.ABR & obj.AMask:      {Flags: gc.Jump | gc.Break},
-	s390x.ABL & obj.AMask:      {Flags: gc.Call},
-	s390x.ABEQ & obj.AMask:     {Flags: gc.Cjmp},
-	s390x.ABNE & obj.AMask:     {Flags: gc.Cjmp},
-	s390x.ABGE & obj.AMask:     {Flags: gc.Cjmp},
-	s390x.ABLT & obj.AMask:     {Flags: gc.Cjmp},
-	s390x.ABGT & obj.AMask:     {Flags: gc.Cjmp},
-	s390x.ABLE & obj.AMask:     {Flags: gc.Cjmp},
-	s390x.ABLEU & obj.AMask:    {Flags: gc.Cjmp},
-	s390x.ABLTU & obj.AMask:    {Flags: gc.Cjmp},
-	s390x.ACMPBEQ & obj.AMask:  {Flags: gc.Cjmp},
-	s390x.ACMPBNE & obj.AMask:  {Flags: gc.Cjmp},
-	s390x.ACMPBGE & obj.AMask:  {Flags: gc.Cjmp},
-	s390x.ACMPBLT & obj.AMask:  {Flags: gc.Cjmp},
-	s390x.ACMPBGT & obj.AMask:  {Flags: gc.Cjmp},
-	s390x.ACMPBLE & obj.AMask:  {Flags: gc.Cjmp},
-	s390x.ACMPUBEQ & obj.AMask: {Flags: gc.Cjmp},
-	s390x.ACMPUBNE & obj.AMask: {Flags: gc.Cjmp},
-	s390x.ACMPUBGE & obj.AMask: {Flags: gc.Cjmp},
-	s390x.ACMPUBLT & obj.AMask: {Flags: gc.Cjmp},
-	s390x.ACMPUBGT & obj.AMask: {Flags: gc.Cjmp},
-	s390x.ACMPUBLE & obj.AMask: {Flags: gc.Cjmp},
-
-	// Atomic
-	s390x.ACS & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.LeftWrite | gc.RegRead | gc.RightRead | gc.RightWrite},
-	s390x.ACSG & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.LeftWrite | gc.RegRead | gc.RightRead | gc.RightWrite},
-	s390x.ALAA & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightRead | gc.RightWrite},
-	s390x.ALAAG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead | gc.RightWrite},
-
-	// Macros
-	s390x.ACLEAR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite},
-
-	// Load/store multiple
-	s390x.ASTMG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite},
-	s390x.ASTMY & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightAddr | gc.RightWrite},
-	s390x.ALMG & obj.AMask:  {Flags: gc.SizeQ | gc.LeftAddr | gc.LeftRead | gc.RightWrite},
-	s390x.ALMY & obj.AMask:  {Flags: gc.SizeL | gc.LeftAddr | gc.LeftRead | gc.RightWrite},
-
-	obj.ARET & obj.AMask: {Flags: gc.Break},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("proginfo: unknown instruction %v", p)
-	}
-
-	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
-		info.Flags &^= gc.RegRead
-		info.Flags |= gc.RightRead /*CanRegRead |*/
-	}
-
-	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
-		info.Flags &^= gc.LeftRead
-		info.Flags |= gc.LeftAddr
-	}
-
-	return info
-}
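
proginfo above adjusts the table entry with Go's bit-clear operator: when an instruction has no auxiliary register set, RegRead is dropped and RightRead is set instead. A tiny self-contained sketch of that flag juggling follows; the flag constants here are stand-ins chosen for the example, not the real gc.ProgInfo values.

package main

import "fmt"

const (
	LeftRead = 1 << iota
	RegRead
	RightRead
	RightWrite
)

func main() {
	flags := LeftRead | RegRead | RightWrite
	flags &^= RegRead  // &^ clears the RegRead bit
	flags |= RightRead // ...and marks the right operand as read instead
	fmt.Printf("%04b\n", flags) // 1101
}
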
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/ssa.go
deleted file mode 100644
index aecca9c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/s390x/ssa.go
+++ /dev/null
@@ -1,865 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/s390x/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-import (
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/s390x"
-)
-
-// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
-	flive := b.FlagsLiveAtEnd
-	if b.Control != nil && b.Control.Type.IsFlags() {
-		flive = true
-	}
-	for i := len(b.Values) - 1; i >= 0; i-- {
-		v := b.Values[i]
-		if flive && v.Op == ssa.OpS390XMOVDconst {
-			// The "mark" is any non-nil Aux value.
-			v.Aux = v
-		}
-		if v.Type.IsFlags() {
-			flive = false
-		}
-		for _, a := range v.Args {
-			if a.Type.IsFlags() {
-				flive = true
-			}
-		}
-	}
-}
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return s390x.AFMOVS
-		case 8:
-			return s390x.AFMOVD
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return s390x.AMOVB
-			} else {
-				return s390x.AMOVBZ
-			}
-		case 2:
-			if t.IsSigned() {
-				return s390x.AMOVH
-			} else {
-				return s390x.AMOVHZ
-			}
-		case 4:
-			if t.IsSigned() {
-				return s390x.AMOVW
-			} else {
-				return s390x.AMOVWZ
-			}
-		case 8:
-			return s390x.AMOVD
-		}
-	}
-	panic("bad load type")
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
-	width := t.Size()
-	if t.IsFloat() {
-		switch width {
-		case 4:
-			return s390x.AFMOVS
-		case 8:
-			return s390x.AFMOVD
-		}
-	} else {
-		switch width {
-		case 1:
-			return s390x.AMOVB
-		case 2:
-			return s390x.AMOVH
-		case 4:
-			return s390x.AMOVW
-		case 8:
-			return s390x.AMOVD
-		}
-	}
-	panic("bad store type")
-}
-
-// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		return s390x.AFMOVD
-	} else {
-		switch t.Size() {
-		case 1:
-			if t.IsSigned() {
-				return s390x.AMOVB
-			} else {
-				return s390x.AMOVBZ
-			}
-		case 2:
-			if t.IsSigned() {
-				return s390x.AMOVH
-			} else {
-				return s390x.AMOVHZ
-			}
-		case 4:
-			if t.IsSigned() {
-				return s390x.AMOVW
-			} else {
-				return s390x.AMOVWZ
-			}
-		case 8:
-			return s390x.AMOVD
-		}
-	}
-	panic("bad load type")
-}
-
-// opregreg emits instructions for
-//     dest := dest(To) op src(From)
-// and also returns the created obj.Prog so it
-// may be further adjusted (offset, scale, etc).
-func opregreg(op obj.As, dest, src int16) *obj.Prog {
-	p := gc.Prog(op)
-	p.From.Type = obj.TYPE_REG
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = dest
-	p.From.Reg = src
-	return p
-}
-
-// opregregimm emits instructions for
-//	dest := src(From) op off
-// and also returns the created obj.Prog so it
-// may be further adjusted (offset, scale, etc).
-func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog {
-	p := gc.Prog(op)
-	p.From.Type = obj.TYPE_CONST
-	p.From.Offset = off
-	p.Reg = src
-	p.To.Reg = dest
-	p.To.Type = obj.TYPE_REG
-	return p
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-	switch v.Op {
-	case ssa.OpS390XSLD, ssa.OpS390XSLW,
-		ssa.OpS390XSRD, ssa.OpS390XSRW,
-		ssa.OpS390XSRAD, ssa.OpS390XSRAW:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		if r2 == s390x.REG_R0 {
-			v.Fatalf("cannot use R0 as shift value %s", v.LongString())
-		}
-		p := opregreg(v.Op.Asm(), r, r2)
-		if r != r1 {
-			p.Reg = r1
-		}
-	case ssa.OpS390XADD, ssa.OpS390XADDW,
-		ssa.OpS390XSUB, ssa.OpS390XSUBW,
-		ssa.OpS390XAND, ssa.OpS390XANDW,
-		ssa.OpS390XOR, ssa.OpS390XORW,
-		ssa.OpS390XXOR, ssa.OpS390XXORW:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		p := opregreg(v.Op.Asm(), r, r2)
-		if r != r1 {
-			p.Reg = r1
-		}
-	// 2-address opcode arithmetic
-	case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
-		ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
-		ssa.OpS390XFADDS, ssa.OpS390XFADD, ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
-		ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		opregreg(v.Op.Asm(), r, v.Args[1].Reg())
-	case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
-		ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
-		ssa.OpS390XMODD, ssa.OpS390XMODW,
-		ssa.OpS390XMODDU, ssa.OpS390XMODWU:
-
-		// TODO(mundaym): use the temp registers every time like x86 does with AX?
-		dividend := v.Args[0].Reg()
-		divisor := v.Args[1].Reg()
-
-		// The CPU faults upon signed overflow, which occurs when the most
-		// negative int is divided by -1.
-		var j *obj.Prog
-		if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
-			v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
-
-			var c *obj.Prog
-			c = gc.Prog(s390x.ACMP)
-			j = gc.Prog(s390x.ABEQ)
-
-			c.From.Type = obj.TYPE_REG
-			c.From.Reg = divisor
-			c.To.Type = obj.TYPE_CONST
-			c.To.Offset = -1
-
-			j.To.Type = obj.TYPE_BRANCH
-
-		}
-
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = divisor
-		p.Reg = 0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = dividend
-
-		// signed division, rest of the check for -1 case
-		if j != nil {
-			j2 := gc.Prog(s390x.ABR)
-			j2.To.Type = obj.TYPE_BRANCH
-
-			var n *obj.Prog
-			if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
-				// n * -1 = -n
-				n = gc.Prog(s390x.ANEG)
-				n.To.Type = obj.TYPE_REG
-				n.To.Reg = dividend
-			} else {
-				// n % -1 == 0
-				n = gc.Prog(s390x.AXOR)
-				n.From.Type = obj.TYPE_REG
-				n.From.Reg = dividend
-				n.To.Type = obj.TYPE_REG
-				n.To.Reg = dividend
-			}
-
-			j.To.Val = n
-			j2.To.Val = s.Pc()
-		}
-	case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
-		opregregimm(v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
-	case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
-		ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
-		ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
-		ssa.OpS390XORconst, ssa.OpS390XORWconst,
-		ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
-		ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
-		ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
-		ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		if r != r1 {
-			p.Reg = r1
-		}
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpS390XMOVDaddridx:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		p := gc.Prog(s390x.AMOVD)
-		p.From.Scale = 1
-		if i == s390x.REGSP {
-			r, i = i, r
-		}
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Reg = r
-		p.From.Index = i
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpS390XMOVDaddr:
-		p := gc.Prog(s390x.AMOVD)
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
-		opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
-	case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
-		opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
-	case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = v.AuxInt
-	case ssa.OpS390XMOVDconst:
-		x := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x
-	case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
-		x := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x
-	case ssa.OpS390XADDWload, ssa.OpS390XADDload,
-		ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
-		ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
-		ssa.OpS390XANDWload, ssa.OpS390XANDload,
-		ssa.OpS390XORWload, ssa.OpS390XORload,
-		ssa.OpS390XXORWload, ssa.OpS390XXORload:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpS390XMOVDload,
-		ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
-		ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
-		ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
-		ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, ssa.OpS390XMOVDloadidx,
-		ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
-		ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		if i == s390x.REGSP {
-			r, i = i, r
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r
-		p.From.Scale = 1
-		p.From.Index = i
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
-		ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
-		ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
-		ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
-		ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		if i == s390x.REGSP {
-			r, i = i, r
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = r
-		p.To.Scale = 1
-		p.To.Index = i
-		gc.AddAux(&p.To, v)
-	case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		sc := v.AuxValAndOff()
-		p.From.Offset = sc.Val()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
-		ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
-		ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
-		ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
-		ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
-		ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
-		opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
-	case ssa.OpS390XCLEAR:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		sc := v.AuxValAndOff()
-		p.From.Offset = sc.Val()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.OpCopy, ssa.OpS390XMOVDconvert:
-		if v.Type.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x != y {
-			opregreg(moveByType(v.Type), y, x)
-		}
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(storeByType(v.Type))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.OpS390XLoweredGetClosurePtr:
-		// Closure pointer is R12 (already)
-		gc.CheckLoweredGetClosurePtr(v)
-	case ssa.OpS390XLoweredGetG:
-		r := v.Reg()
-		p := gc.Prog(s390x.AMOVD)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = s390x.REGG
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpS390XCALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XCALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XCALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XCALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XCALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
-		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpS390XNOT, ssa.OpS390XNOTW:
-		v.Fatalf("NOT/NOTW generated %s", v.LongString())
-	case ssa.OpS390XMOVDEQ, ssa.OpS390XMOVDNE,
-		ssa.OpS390XMOVDLT, ssa.OpS390XMOVDLE,
-		ssa.OpS390XMOVDGT, ssa.OpS390XMOVDGE,
-		ssa.OpS390XMOVDGTnoinv, ssa.OpS390XMOVDGEnoinv:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.OpS390XFSQRT:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpSP, ssa.OpSB:
-		// nothing to do
-	case ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.OpS390XInvertFlags:
-		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT:
-		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
-	case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
-		v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
-	case ssa.OpS390XLoweredNilCheck:
-		// Issue a load which will fault if the input is nil.
-		p := gc.Prog(s390x.AMOVBZ)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = s390x.REGTMP
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.OpS390XMVC:
-		vo := v.AuxValAndOff()
-		p := gc.Prog(s390x.AMVC)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[1].Reg()
-		p.From.Offset = vo.Off()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Offset = vo.Off()
-		p.From3 = new(obj.Addr)
-		p.From3.Type = obj.TYPE_CONST
-		p.From3.Offset = vo.Val()
-	case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
-		ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
-		for i := 2; i < len(v.Args)-1; i++ {
-			if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
-				v.Fatalf("invalid store multiple %s", v.LongString())
-			}
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.Reg = v.Args[len(v.Args)-2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpS390XLoweredMove:
-		// Inputs must be valid pointers to memory,
-		// so adjust arg0 and arg1 as part of the expansion.
-		// arg2 should be src+size,
-		//
-		// mvc: MVC  $256, 0(R2), 0(R1)
-		//      MOVD $256(R1), R1
-		//      MOVD $256(R2), R2
-		//      CMP  R2, Rarg2
-		//      BNE  mvc
-		//      MVC  $rem, 0(R2), 0(R1) // if rem > 0
-		// arg2 is the last address to move in the loop + 256
-		mvc := gc.Prog(s390x.AMVC)
-		mvc.From.Type = obj.TYPE_MEM
-		mvc.From.Reg = v.Args[1].Reg()
-		mvc.To.Type = obj.TYPE_MEM
-		mvc.To.Reg = v.Args[0].Reg()
-		mvc.From3 = new(obj.Addr)
-		mvc.From3.Type = obj.TYPE_CONST
-		mvc.From3.Offset = 256
-
-		for i := 0; i < 2; i++ {
-			movd := gc.Prog(s390x.AMOVD)
-			movd.From.Type = obj.TYPE_ADDR
-			movd.From.Reg = v.Args[i].Reg()
-			movd.From.Offset = 256
-			movd.To.Type = obj.TYPE_REG
-			movd.To.Reg = v.Args[i].Reg()
-		}
-
-		cmpu := gc.Prog(s390x.ACMPU)
-		cmpu.From.Reg = v.Args[1].Reg()
-		cmpu.From.Type = obj.TYPE_REG
-		cmpu.To.Reg = v.Args[2].Reg()
-		cmpu.To.Type = obj.TYPE_REG
-
-		bne := gc.Prog(s390x.ABLT)
-		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, mvc)
-
-		if v.AuxInt > 0 {
-			mvc := gc.Prog(s390x.AMVC)
-			mvc.From.Type = obj.TYPE_MEM
-			mvc.From.Reg = v.Args[1].Reg()
-			mvc.To.Type = obj.TYPE_MEM
-			mvc.To.Reg = v.Args[0].Reg()
-			mvc.From3 = new(obj.Addr)
-			mvc.From3.Type = obj.TYPE_CONST
-			mvc.From3.Offset = v.AuxInt
-		}
-	case ssa.OpS390XLoweredZero:
-		// Input must be a valid pointer to memory,
-		// so adjust arg0 as part of the expansion.
-		// arg1 should be src+size,
-		//
-		// clear: CLEAR $256, 0(R1)
-		//        MOVD  $256(R1), R1
-		//        CMP   R1, Rarg1
-		//        BNE   clear
-		//        CLEAR $rem, 0(R1) // if rem > 0
-		// arg1 is the last address to zero in the loop + 256
-		clear := gc.Prog(s390x.ACLEAR)
-		clear.From.Type = obj.TYPE_CONST
-		clear.From.Offset = 256
-		clear.To.Type = obj.TYPE_MEM
-		clear.To.Reg = v.Args[0].Reg()
-
-		movd := gc.Prog(s390x.AMOVD)
-		movd.From.Type = obj.TYPE_ADDR
-		movd.From.Reg = v.Args[0].Reg()
-		movd.From.Offset = 256
-		movd.To.Type = obj.TYPE_REG
-		movd.To.Reg = v.Args[0].Reg()
-
-		cmpu := gc.Prog(s390x.ACMPU)
-		cmpu.From.Reg = v.Args[0].Reg()
-		cmpu.From.Type = obj.TYPE_REG
-		cmpu.To.Reg = v.Args[1].Reg()
-		cmpu.To.Type = obj.TYPE_REG
-
-		bne := gc.Prog(s390x.ABLT)
-		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, clear)
-
-		if v.AuxInt > 0 {
-			clear := gc.Prog(s390x.ACLEAR)
-			clear.From.Type = obj.TYPE_CONST
-			clear.From.Offset = v.AuxInt
-			clear.To.Type = obj.TYPE_MEM
-			clear.To.Reg = v.Args[0].Reg()
-		}
-	case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg0()
-	case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpS390XLAA, ssa.OpS390XLAAG:
-		p := gc.Prog(v.Op.Asm())
-		p.Reg = v.Reg0()
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
-		// Convert the flags output of CS{,G} into a bool.
-		//    CS{,G} arg1, arg2, arg0
-		//    MOVD   $0, ret
-		//    BNE    2(PC)
-		//    MOVD   $1, ret
-		//    NOP (so the BNE has somewhere to land)
-
-		// CS{,G} arg1, arg2, arg0
-		cs := gc.Prog(v.Op.Asm())
-		cs.From.Type = obj.TYPE_REG
-		cs.From.Reg = v.Args[1].Reg() // old
-		cs.Reg = v.Args[2].Reg()      // new
-		cs.To.Type = obj.TYPE_MEM
-		cs.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&cs.To, v)
-
-		// MOVD $0, ret
-		movd := gc.Prog(s390x.AMOVD)
-		movd.From.Type = obj.TYPE_CONST
-		movd.From.Offset = 0
-		movd.To.Type = obj.TYPE_REG
-		movd.To.Reg = v.Reg0()
-
-		// BNE 2(PC)
-		bne := gc.Prog(s390x.ABNE)
-		bne.To.Type = obj.TYPE_BRANCH
-
-		// MOVD $1, ret
-		movd = gc.Prog(s390x.AMOVD)
-		movd.From.Type = obj.TYPE_CONST
-		movd.From.Offset = 1
-		movd.To.Type = obj.TYPE_REG
-		movd.To.Reg = v.Reg0()
-
-		// NOP (so the BNE has somewhere to land)
-		nop := gc.Prog(obj.ANOP)
-		gc.Patch(bne, nop)
-	case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
-		// Loop until the CS{,G} succeeds.
-		//     MOV{WZ,D} arg0, ret
-		// cs: CS{,G}    ret, arg1, arg0
-		//     BNE       cs
-
-		// MOV{WZ,D} arg0, ret
-		load := gc.Prog(loadByType(v.Type.FieldType(0)))
-		load.From.Type = obj.TYPE_MEM
-		load.From.Reg = v.Args[0].Reg()
-		load.To.Type = obj.TYPE_REG
-		load.To.Reg = v.Reg0()
-		gc.AddAux(&load.From, v)
-
-		// CS{,G} ret, arg1, arg0
-		cs := gc.Prog(v.Op.Asm())
-		cs.From.Type = obj.TYPE_REG
-		cs.From.Reg = v.Reg0()   // old
-		cs.Reg = v.Args[1].Reg() // new
-		cs.To.Type = obj.TYPE_MEM
-		cs.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&cs.To, v)
-
-		// BNE cs
-		bne := gc.Prog(s390x.ABNE)
-		bne.To.Type = obj.TYPE_BRANCH
-		gc.Patch(bne, cs)
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
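
The LoweredAtomicExchange cases above build an exchange out of a load plus a compare-and-swap retry loop (BNE back to the CS). A rough, architecture-independent sketch of that semantics, using Go's sync/atomic rather than the CS/CSG instructions emitted here, and not part of the original file:

package main

import (
	"fmt"
	"sync/atomic"
)

// exchange swaps new into *addr and returns the previous value, built from
// compare-and-swap the same way the CS{,G} loop is built: load, attempt a
// CAS against that loaded value, and retry on failure.
func exchange(addr *uint64, new uint64) uint64 {
	for {
		old := atomic.LoadUint64(addr)
		if atomic.CompareAndSwapUint64(addr, old, new) {
			return old
		}
	}
}

func main() {
	var x uint64 = 7
	fmt.Println(exchange(&x, 42), x) // prints: 7 42
}
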
-
-var blockJump = [...]struct {
-	asm, invasm obj.As
-}{
-	ssa.BlockS390XEQ:  {s390x.ABEQ, s390x.ABNE},
-	ssa.BlockS390XNE:  {s390x.ABNE, s390x.ABEQ},
-	ssa.BlockS390XLT:  {s390x.ABLT, s390x.ABGE},
-	ssa.BlockS390XGE:  {s390x.ABGE, s390x.ABLT},
-	ssa.BlockS390XLE:  {s390x.ABLE, s390x.ABGT},
-	ssa.BlockS390XGT:  {s390x.ABGT, s390x.ABLE},
-	ssa.BlockS390XGTF: {s390x.ABGT, s390x.ABLEU},
-	ssa.BlockS390XGEF: {s390x.ABGE, s390x.ABLTU},
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(s390x.ABR)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockDefer:
-		// defer returns in R3:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(s390x.ACMPW)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = s390x.REG_R3
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = 0
-		p = gc.Prog(s390x.ABNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(s390x.ABR)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-	case ssa.BlockRetJmp:
-		p := gc.Prog(s390x.ABR)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-	case ssa.BlockS390XEQ, ssa.BlockS390XNE,
-		ssa.BlockS390XLT, ssa.BlockS390XGE,
-		ssa.BlockS390XLE, ssa.BlockS390XGT,
-		ssa.BlockS390XGEF, ssa.BlockS390XGTF:
-		jmp := blockJump[b.Kind]
-		likely := b.Likely
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			likely *= -1
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(s390x.ABR)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
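
The conditional-block cases in ssaGenBlock above pick between a branch opcode and its inverse depending on which successor is laid out next. A small, self-contained sketch of that selection logic, with illustrative names rather than the compiler's actual types:

package main

import "fmt"

type jump struct{ asm, invasm string }

// pickBranch returns the opcode to emit and the index of the successor it
// targets. Falling through to succ0 means the condition must be inverted so
// the explicit jump goes to succ1; falling through to succ1 keeps the opcode
// and jumps to succ0; otherwise a conditional jump to succ0 plus an
// unconditional BR to succ1 are both needed (signalled here by target -1).
func pickBranch(j jump, next, succ0, succ1 string) (op string, target int) {
	switch next {
	case succ0:
		return j.invasm, 1
	case succ1:
		return j.asm, 0
	default:
		return j.asm, -1
	}
}

func main() {
	eq := jump{asm: "BEQ", invasm: "BNE"}
	op, target := pickBranch(eq, "b2", "b2", "b3")
	fmt.Println(op, target) // prints: BNE 1 (fall through to b2, branch to b3 on the inverse condition)
}
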
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/TODO b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/TODO
deleted file mode 100644
index fe90ef9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/TODO
+++ /dev/null
@@ -1,49 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/TODO
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/TODO:1
-This is a list of things that need to be worked on.  It will hopefully
-be complete soon.
-
-Correctness
------------
-- Debugging info (check & fix as much as we can)
-
-Optimizations (better compiled code)
-------------------------------------
-- Reduce register pressure in scheduler
-- More strength reduction: multiply -> shift/add combos (Worth doing?)
-- Add a value range propagation pass (for bounds elim & bitwidth reduction)
-- Make dead store pass inter-block
-- If there are a lot of MOVQ $0, ..., then load
-  0 into a register and use the register as the source instead.
-- Allow arrays of length 1 (or longer, with all constant indexes?) to be SSAable.
-- If strings are being passed around without being interpreted (ptr
-  and len fields being accessed) pass them in xmm registers?
-  Same for interfaces?
-- Non-constant rotate detection.
-- Do 0 <= x && x < n with one unsigned compare
-- nil-check removal in indexed load/store case:
-    lea    (%rdx,%rax,1),%rcx
-    test   %al,(%rcx)           // nil check
-    mov    (%rdx,%rax,1),%cl    // load to same address
-- any pointer generated by unsafe arithmetic must be non-nil?
-  (Of course that may not be true in general, but it is for all uses
-   in the runtime, and we can play games with unsafe.)
-
-Optimizations (better compiler)
--------------------------------
-- Handle signed division overflow and sign extension earlier
-
-Regalloc
---------
-- Make less arch-dependent
-- Handle 2-address instructions
-- Make liveness analysis non-quadratic
-
-Future/other
-------------
-- Start another architecture (arm?)
-- 64-bit ops on 32-bit machines
-- Should we get rid of named types in favor of underlying types during SSA generation?
-- Infrastructure for enabling/disabling/configuring passes
-- Modify logging for at least pass=1, to be Warnl compatible
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/block.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/block.go
deleted file mode 100644
index d9551f6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/block.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/block.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/block.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "fmt"
-
-// Block represents a basic block in the control flow graph of a function.
-type Block struct {
-	// A unique identifier for the block. The system will attempt to allocate
-	// these IDs densely, but no guarantees.
-	ID ID
-
-	// Line number for block's control operation
-	Line int32
-
-	// The kind of block this is.
-	Kind BlockKind
-
-	// Likely direction for branches.
-	// If BranchLikely, Succs[0] is the most likely branch taken.
-	// If BranchUnlikely, Succs[1] is the most likely branch taken.
-	// Ignored if len(Succs) < 2.
-	// Fatal if not BranchUnknown and len(Succs) > 2.
-	Likely BranchPrediction
-
-	// After flagalloc, records whether flags are live at the end of the block.
-	FlagsLiveAtEnd bool
-
-	// Subsequent blocks, if any. The number and order depend on the block kind.
-	Succs []Edge
-
-	// Inverse of successors.
-	// The order is significant to Phi nodes in the block.
-	// TODO: predecessors is a pain to maintain. Can we somehow order phi
-	// arguments by block id and have this field computed explicitly when needed?
-	Preds []Edge
-
-	// A value that determines how the block is exited. Its value depends on the kind
-	// of the block. For instance, a BlockIf has a boolean control value and BlockExit
-	// has a memory control value.
-	Control *Value
-
-	// Auxiliary info for the block. Its value depends on the Kind.
-	Aux interface{}
-
-	// The unordered set of Values that define the operation of this block.
-	// The list must include the control value, if any. (TODO: need this last condition?)
-	// After the scheduling pass, this list is ordered.
-	Values []*Value
-
-	// The containing function
-	Func *Func
-
-	// Storage for Succs, Preds, and Values
-	succstorage [2]Edge
-	predstorage [4]Edge
-	valstorage  [9]*Value
-}
-
-// Edge represents a CFG edge.
-// Example edges for b branching to either c or d.
-// (c and d have other predecessors.)
-//   b.Succs = [{c,3}, {d,1}]
-//   c.Preds = [?, ?, ?, {b,0}]
-//   d.Preds = [?, {b,1}, ?]
-// These indexes allow us to edit the CFG in constant time.
-// In addition, it informs phi ops in degenerate cases like:
-// b:
-//    if k then c else c
-// c:
-//    v = Phi(x, y)
-// Then the indexes tell you whether x is chosen from
-// the if or else branch from b.
-//   b.Succs = [{c,0},{c,1}]
-//   c.Preds = [{b,0},{b,1}]
-// means x is chosen if k is true.
-type Edge struct {
-	// block edge goes to (in a Succs list) or from (in a Preds list)
-	b *Block
-	// index of reverse edge.  Invariant:
-	//   e := x.Succs[idx]
-	//   e.b.Preds[e.i] = Edge{x,idx}
-	// and similarly for predecessors.
-	i int
-}
-
-func (e Edge) Block() *Block {
-	return e.b
-}
-func (e Edge) Index() int {
-	return e.i
-}
-
-//     kind           control    successors
-//   ------------------------------------------
-//     Exit        return mem                []
-//    Plain               nil            [next]
-//       If   a boolean Value      [then, else]
-//    Defer               mem  [nopanic, panic]  (control opcode should be OpDeferCall)
-type BlockKind int8
-
-// short form print
-func (b *Block) String() string {
-	return fmt.Sprintf("b%d", b.ID)
-}
-
-// long form print
-func (b *Block) LongString() string {
-	s := b.Kind.String()
-	if b.Aux != nil {
-		s += fmt.Sprintf(" %s", b.Aux)
-	}
-	if b.Control != nil {
-		s += fmt.Sprintf(" %s", b.Control)
-	}
-	if len(b.Succs) > 0 {
-		s += " ->"
-		for _, c := range b.Succs {
-			s += " " + c.b.String()
-		}
-	}
-	switch b.Likely {
-	case BranchUnlikely:
-		s += " (unlikely)"
-	case BranchLikely:
-		s += " (likely)"
-	}
-	return s
-}
-
-func (b *Block) SetControl(v *Value) {
-	if w := b.Control; w != nil {
-		w.Uses--
-	}
-	b.Control = v
-	if v != nil {
-		v.Uses++
-	}
-}
-
-// AddEdgeTo adds an edge from block b to block c. Used during building of the
-// SSA graph; do not use on an already-completed SSA graph.
-func (b *Block) AddEdgeTo(c *Block) {
-	i := len(b.Succs)
-	j := len(c.Preds)
-	b.Succs = append(b.Succs, Edge{c, j})
-	c.Preds = append(c.Preds, Edge{b, i})
-	b.Func.invalidateCFG()
-}
-
-// removePred removes the ith input edge from b.
-// It is the responsibility of the caller to remove
-// the corresponding successor edge.
-func (b *Block) removePred(i int) {
-	n := len(b.Preds) - 1
-	if i != n {
-		e := b.Preds[n]
-		b.Preds[i] = e
-		// Update the other end of the edge we moved.
-		e.b.Succs[e.i].i = i
-	}
-	b.Preds[n] = Edge{}
-	b.Preds = b.Preds[:n]
-	b.Func.invalidateCFG()
-}
-
-// removeSucc removes the ith output edge from b.
-// It is the responsibility of the caller to remove
-// the corresponding predecessor edge.
-func (b *Block) removeSucc(i int) {
-	n := len(b.Succs) - 1
-	if i != n {
-		e := b.Succs[n]
-		b.Succs[i] = e
-		// Update the other end of the edge we moved.
-		e.b.Preds[e.i].i = i
-	}
-	b.Succs[n] = Edge{}
-	b.Succs = b.Succs[:n]
-	b.Func.invalidateCFG()
-}
-
-func (b *Block) swapSuccessors() {
-	if len(b.Succs) != 2 {
-		b.Fatalf("swapSuccessors with len(Succs)=%d", len(b.Succs))
-	}
-	e0 := b.Succs[0]
-	e1 := b.Succs[1]
-	b.Succs[0] = e1
-	b.Succs[1] = e0
-	e0.b.Preds[e0.i].i = 1
-	e1.b.Preds[e1.i].i = 0
-	b.Likely *= -1
-}
-
-func (b *Block) Logf(msg string, args ...interface{})   { b.Func.Logf(msg, args...) }
-func (b *Block) Log() bool                              { return b.Func.Log() }
-func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) }
-
-type BranchPrediction int8
-
-const (
-	BranchUnlikely = BranchPrediction(-1)
-	BranchUnknown  = BranchPrediction(0)
-	BranchLikely   = BranchPrediction(+1)
-)
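
The Edge documentation above relies on an invariant: for e := x.Succs[idx], the reverse entry e.b.Preds[e.i] must be Edge{x, idx}. A minimal standalone sketch, with simplified stand-in types rather than the real ssa package, that builds edges the same way AddEdgeTo does and checks that invariant:

package main

import "fmt"

type block struct {
	name  string
	succs []edge
	preds []edge
}

type edge struct {
	b *block // destination (in succs) or source (in preds)
	i int    // index of the reverse edge in the other block's list
}

// addEdge mirrors AddEdgeTo: append to both lists and record the index of
// the reverse entry so later CFG edits stay constant time.
func addEdge(b, c *block) {
	i, j := len(b.succs), len(c.preds)
	b.succs = append(b.succs, edge{c, j})
	c.preds = append(c.preds, edge{b, i})
}

// checkCrossLinks verifies the invariant for one block's successor list.
func checkCrossLinks(b *block) error {
	for idx, e := range b.succs {
		if back := e.b.preds[e.i]; back.b != b || back.i != idx {
			return fmt.Errorf("edge %s->%s not cross-linked", b.name, e.b.name)
		}
	}
	return nil
}

func main() {
	b, c, d := &block{name: "b"}, &block{name: "c"}, &block{name: "d"}
	addEdge(b, c)
	addEdge(b, d)
	fmt.Println(checkCrossLinks(b)) // prints: <nil>
}
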
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/check.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/check.go
deleted file mode 100644
index 91d5145..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/check.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/check.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/check.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// checkFunc checks invariants of f.
-func checkFunc(f *Func) {
-	blockMark := make([]bool, f.NumBlocks())
-	valueMark := make([]bool, f.NumValues())
-
-	for _, b := range f.Blocks {
-		if blockMark[b.ID] {
-			f.Fatalf("block %s appears twice in %s!", b, f.Name)
-		}
-		blockMark[b.ID] = true
-		if b.Func != f {
-			f.Fatalf("%s.Func=%s, want %s", b, b.Func.Name, f.Name)
-		}
-
-		for i, e := range b.Preds {
-			if se := e.b.Succs[e.i]; se.b != b || se.i != i {
-				f.Fatalf("block pred/succ not crosslinked correctly %d:%s %d:%s", i, b, se.i, se.b)
-			}
-		}
-		for i, e := range b.Succs {
-			if pe := e.b.Preds[e.i]; pe.b != b || pe.i != i {
-				f.Fatalf("block succ/pred not crosslinked correctly %d:%s %d:%s", i, b, pe.i, pe.b)
-			}
-		}
-
-		switch b.Kind {
-		case BlockExit:
-			if len(b.Succs) != 0 {
-				f.Fatalf("exit block %s has successors", b)
-			}
-			if b.Control == nil {
-				f.Fatalf("exit block %s has no control value", b)
-			}
-			if !b.Control.Type.IsMemory() {
-				f.Fatalf("exit block %s has non-memory control value %s", b, b.Control.LongString())
-			}
-		case BlockRet:
-			if len(b.Succs) != 0 {
-				f.Fatalf("ret block %s has successors", b)
-			}
-			if b.Control == nil {
-				f.Fatalf("ret block %s has nil control", b)
-			}
-			if !b.Control.Type.IsMemory() {
-				f.Fatalf("ret block %s has non-memory control value %s", b, b.Control.LongString())
-			}
-		case BlockRetJmp:
-			if len(b.Succs) != 0 {
-				f.Fatalf("retjmp block %s len(Succs)==%d, want 0", b, len(b.Succs))
-			}
-			if b.Control == nil {
-				f.Fatalf("retjmp block %s has nil control", b)
-			}
-			if !b.Control.Type.IsMemory() {
-				f.Fatalf("retjmp block %s has non-memory control value %s", b, b.Control.LongString())
-			}
-			if b.Aux == nil {
-				f.Fatalf("retjmp block %s has nil Aux field", b)
-			}
-		case BlockPlain:
-			if len(b.Succs) != 1 {
-				f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs))
-			}
-			if b.Control != nil {
-				f.Fatalf("plain block %s has non-nil control %s", b, b.Control.LongString())
-			}
-		case BlockIf:
-			if len(b.Succs) != 2 {
-				f.Fatalf("if block %s len(Succs)==%d, want 2", b, len(b.Succs))
-			}
-			if b.Control == nil {
-				f.Fatalf("if block %s has no control value", b)
-			}
-			if !b.Control.Type.IsBoolean() {
-				f.Fatalf("if block %s has non-bool control value %s", b, b.Control.LongString())
-			}
-		case BlockDefer:
-			if len(b.Succs) != 2 {
-				f.Fatalf("defer block %s len(Succs)==%d, want 2", b, len(b.Succs))
-			}
-			if b.Control == nil {
-				f.Fatalf("defer block %s has no control value", b)
-			}
-			if !b.Control.Type.IsMemory() {
-				f.Fatalf("defer block %s has non-memory control value %s", b, b.Control.LongString())
-			}
-		case BlockFirst:
-			if len(b.Succs) != 2 {
-				f.Fatalf("plain/dead block %s len(Succs)==%d, want 2", b, len(b.Succs))
-			}
-			if b.Control != nil {
-				f.Fatalf("plain/dead block %s has a control value", b)
-			}
-		}
-		if len(b.Succs) > 2 && b.Likely != BranchUnknown {
-			f.Fatalf("likeliness prediction %d for block %s with %d successors", b.Likely, b, len(b.Succs))
-		}
-
-		for _, v := range b.Values {
-			// Check to make sure argument count makes sense (argLen of -1 indicates
-			// variable length args)
-			nArgs := opcodeTable[v.Op].argLen
-			if nArgs != -1 && int32(len(v.Args)) != nArgs {
-				f.Fatalf("value %s has %d args, expected %d", v.LongString(),
-					len(v.Args), nArgs)
-			}
-
-			// Check to make sure aux values make sense.
-			canHaveAux := false
-			canHaveAuxInt := false
-			switch opcodeTable[v.Op].auxType {
-			case auxNone:
-			case auxBool:
-				if v.AuxInt < 0 || v.AuxInt > 1 {
-					f.Fatalf("bad bool AuxInt value for %v", v)
-				}
-				canHaveAuxInt = true
-			case auxInt8:
-				if v.AuxInt != int64(int8(v.AuxInt)) {
-					f.Fatalf("bad int8 AuxInt value for %v", v)
-				}
-				canHaveAuxInt = true
-			case auxInt16:
-				if v.AuxInt != int64(int16(v.AuxInt)) {
-					f.Fatalf("bad int16 AuxInt value for %v", v)
-				}
-				canHaveAuxInt = true
-			case auxInt32:
-				if v.AuxInt != int64(int32(v.AuxInt)) {
-					f.Fatalf("bad int32 AuxInt value for %v", v)
-				}
-				canHaveAuxInt = true
-			case auxInt64, auxFloat64:
-				canHaveAuxInt = true
-			case auxInt128:
-				// AuxInt must be zero, so leave canHaveAuxInt set to false.
-			case auxFloat32:
-				canHaveAuxInt = true
-				if !isExactFloat32(v) {
-					f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
-				}
-			case auxSizeAndAlign:
-				canHaveAuxInt = true
-			case auxString, auxSym:
-				canHaveAux = true
-			case auxSymOff, auxSymValAndOff, auxSymSizeAndAlign:
-				canHaveAuxInt = true
-				canHaveAux = true
-			case auxSymInt32:
-				if v.AuxInt != int64(int32(v.AuxInt)) {
-					f.Fatalf("bad int32 AuxInt value for %v", v)
-				}
-				canHaveAuxInt = true
-				canHaveAux = true
-			default:
-				f.Fatalf("unknown aux type for %s", v.Op)
-			}
-			if !canHaveAux && v.Aux != nil {
-				f.Fatalf("value %s has an Aux value %v but shouldn't", v.LongString(), v.Aux)
-			}
-			if !canHaveAuxInt && v.AuxInt != 0 {
-				f.Fatalf("value %s has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt)
-			}
-
-			for i, arg := range v.Args {
-				if arg == nil {
-					f.Fatalf("value %s has nil arg", v.LongString())
-				}
-				if v.Op != OpPhi {
-					// For non-Phi ops, memory args must be last, if present
-					if arg.Type.IsMemory() && i != len(v.Args)-1 {
-						f.Fatalf("value %s has non-final memory arg (%d < %d)", v.LongString(), i, len(v.Args)-1)
-					}
-				}
-			}
-
-			if valueMark[v.ID] {
-				f.Fatalf("value %s appears twice!", v.LongString())
-			}
-			valueMark[v.ID] = true
-
-			if v.Block != b {
-				f.Fatalf("%s.block != %s", v, b)
-			}
-			if v.Op == OpPhi && len(v.Args) != len(b.Preds) {
-				f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b)
-			}
-
-			if v.Op == OpAddr {
-				if len(v.Args) == 0 {
-					f.Fatalf("no args for OpAddr %s", v.LongString())
-				}
-				if v.Args[0].Op != OpSP && v.Args[0].Op != OpSB {
-					f.Fatalf("bad arg to OpAddr %v", v)
-				}
-			}
-
-			// TODO: check for cycles in values
-			// TODO: check type
-		}
-	}
-
-	// Check to make sure all Blocks referenced are in the function.
-	if !blockMark[f.Entry.ID] {
-		f.Fatalf("entry block %v is missing", f.Entry)
-	}
-	for _, b := range f.Blocks {
-		for _, c := range b.Preds {
-			if !blockMark[c.b.ID] {
-				f.Fatalf("predecessor block %v for %v is missing", c, b)
-			}
-		}
-		for _, c := range b.Succs {
-			if !blockMark[c.b.ID] {
-				f.Fatalf("successor block %v for %v is missing", c, b)
-			}
-		}
-	}
-
-	if len(f.Entry.Preds) > 0 {
-		f.Fatalf("entry block %s of %s has predecessor(s) %v", f.Entry, f.Name, f.Entry.Preds)
-	}
-
-	// Check to make sure all Values referenced are in the function.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for i, a := range v.Args {
-				if !valueMark[a.ID] {
-					f.Fatalf("%v, arg %d of %s, is missing", a, i, v.LongString())
-				}
-			}
-		}
-		if b.Control != nil && !valueMark[b.Control.ID] {
-			f.Fatalf("control value for %s is missing: %v", b, b.Control)
-		}
-	}
-	for b := f.freeBlocks; b != nil; b = b.succstorage[0].b {
-		if blockMark[b.ID] {
-			f.Fatalf("used block b%d in free list", b.ID)
-		}
-	}
-	for v := f.freeValues; v != nil; v = v.argstorage[0] {
-		if valueMark[v.ID] {
-			f.Fatalf("used value v%d in free list", v.ID)
-		}
-	}
-
-	// Check to make sure all args dominate uses.
-	if f.RegAlloc == nil {
-		// Note: regalloc introduces non-dominating args.
-		// See TODO in regalloc.go.
-		sdom := f.sdom()
-		for _, b := range f.Blocks {
-			for _, v := range b.Values {
-				for i, arg := range v.Args {
-					x := arg.Block
-					y := b
-					if v.Op == OpPhi {
-						y = b.Preds[i].b
-					}
-					if !domCheck(f, sdom, x, y) {
-						f.Fatalf("arg %d of value %s does not dominate, arg=%s", i, v.LongString(), arg.LongString())
-					}
-				}
-			}
-			if b.Control != nil && !domCheck(f, sdom, b.Control.Block, b) {
-				f.Fatalf("control value %s for %s doesn't dominate", b.Control, b)
-			}
-		}
-	}
-
-	// Check use counts
-	uses := make([]int32, f.NumValues())
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for _, a := range v.Args {
-				uses[a.ID]++
-			}
-		}
-		if b.Control != nil {
-			uses[b.Control.ID]++
-		}
-	}
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Uses != uses[v.ID] {
-				f.Fatalf("%s has %d uses, but has Uses=%d", v, uses[v.ID], v.Uses)
-			}
-		}
-	}
-}
-
-// domCheck reports whether x dominates y (including x==y).
-func domCheck(f *Func, sdom SparseTree, x, y *Block) bool {
-	if !sdom.isAncestorEq(f.Entry, y) {
-		// unreachable - ignore
-		return true
-	}
-	return sdom.isAncestorEq(x, y)
-}
-
-// isExactFloat32 reports whether v has an AuxInt that can be exactly represented as a float32.
-func isExactFloat32(v *Value) bool {
-	return v.AuxFloat() == float64(float32(v.AuxFloat()))
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/checkbce.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/checkbce.go
deleted file mode 100644
index c6fec7f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/checkbce.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/checkbce.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/checkbce.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// checkbce prints all bounds checks that are present in the function.
-// Useful to find regressions. checkbce is only activated with the
-// corresponding debug options, so it's off by default.
-// See test/checkbce.go
-func checkbce(f *Func) {
-	if f.pass.debug <= 0 {
-		return
-	}
-
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
-				f.Config.Warnl(v.Line, "Found %v", v.Op)
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/compile.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/compile.go
deleted file mode 100644
index b0cd9bc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/compile.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/compile.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/compile.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"log"
-	"os"
-	"regexp"
-	"runtime"
-	"strings"
-	"time"
-)
-
-// Compile is the main entry point for this package.
-// Compile modifies f so that on return:
-//   · all Values in f map to 0 or 1 assembly instructions of the target architecture
-//   · the order of f.Blocks is the order to emit the Blocks
-//   · the order of b.Values is the order to emit the Values in each Block
-//   · f has a non-nil regAlloc field
-func Compile(f *Func) {
-	// TODO: debugging - set flags to control verbosity of compiler,
-	// which phases to dump IR before/after, etc.
-	if f.Log() {
-		f.Logf("compiling %s\n", f.Name)
-	}
-
-	// hook to print function & phase if panic happens
-	phaseName := "init"
-	defer func() {
-		if phaseName != "" {
-			err := recover()
-			stack := make([]byte, 16384)
-			n := runtime.Stack(stack, false)
-			stack = stack[:n]
-			f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack)
-		}
-	}()
-
-	// Run all the passes
-	printFunc(f)
-	f.Config.HTML.WriteFunc("start", f)
-	if BuildDump != "" && BuildDump == f.Name {
-		f.dumpFile("build")
-	}
-	if checkEnabled {
-		checkFunc(f)
-	}
-	const logMemStats = false
-	for _, p := range passes {
-		if !f.Config.optimize && !p.required || p.disabled {
-			continue
-		}
-		f.pass = &p
-		phaseName = p.name
-		if f.Log() {
-			f.Logf("  pass %s begin\n", p.name)
-		}
-		// TODO: capture logging during this pass, add it to the HTML
-		var mStart runtime.MemStats
-		if logMemStats || p.mem {
-			runtime.ReadMemStats(&mStart)
-		}
-
-		tStart := time.Now()
-		p.fn(f)
-		tEnd := time.Now()
-
-		// Need something less crude than "Log the whole intermediate result".
-		if f.Log() || f.Config.HTML != nil {
-			time := tEnd.Sub(tStart).Nanoseconds()
-			var stats string
-			if logMemStats {
-				var mEnd runtime.MemStats
-				runtime.ReadMemStats(&mEnd)
-				nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
-				nAllocs := mEnd.Mallocs - mStart.Mallocs
-				stats = fmt.Sprintf("[%d ns %d allocs %d bytes]", time, nAllocs, nBytes)
-			} else {
-				stats = fmt.Sprintf("[%d ns]", time)
-			}
-
-			f.Logf("  pass %s end %s\n", p.name, stats)
-			printFunc(f)
-			f.Config.HTML.WriteFunc(fmt.Sprintf("after %s <span class=\"stats\">%s</span>", phaseName, stats), f)
-		}
-		if p.time || p.mem {
-			// Surround timing information w/ enough context to allow comparisons.
-			time := tEnd.Sub(tStart).Nanoseconds()
-			if p.time {
-				f.LogStat("TIME(ns)", time)
-			}
-			if p.mem {
-				var mEnd runtime.MemStats
-				runtime.ReadMemStats(&mEnd)
-				nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
-				nAllocs := mEnd.Mallocs - mStart.Mallocs
-				f.LogStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs)
-			}
-		}
-		if p.dump != nil && p.dump[f.Name] {
-			// Dump function to appropriately named file
-			f.dumpFile(phaseName)
-		}
-		if checkEnabled {
-			checkFunc(f)
-		}
-	}
-
-	// Squash error printing defer
-	phaseName = ""
-}
-
-// TODO: should be a config field
-var dumpFileSeq int
-
-// dumpFile creates a file from the phase name and function name
-// Dumping is done to files to avoid buffering huge strings before
-// output.
-func (f *Func) dumpFile(phaseName string) {
-	dumpFileSeq++
-	fname := fmt.Sprintf("%s__%s_%d.dump", phaseName, f.Name, dumpFileSeq)
-	fname = strings.Replace(fname, " ", "_", -1)
-	fname = strings.Replace(fname, "/", "_", -1)
-	fname = strings.Replace(fname, ":", "_", -1)
-
-	fi, err := os.Create(fname)
-	if err != nil {
-		f.Config.Warnl(0, "Unable to create after-phase dump file %s", fname)
-		return
-	}
-
-	p := stringFuncPrinter{w: fi}
-	fprintFunc(p, f)
-	fi.Close()
-}
-
-type pass struct {
-	name     string
-	fn       func(*Func)
-	required bool
-	disabled bool
-	time     bool            // report time to run pass
-	mem      bool            // report mem stats to run pass
-	stats    int             // pass reports own "stats" (e.g., branches removed)
-	debug    int             // pass performs some debugging. =1 should be in error-testing-friendly Warnl format.
-	test     int             // pass-specific ad-hoc option, perhaps useful in development
-	dump     map[string]bool // dump if function name matches
-}
-
-func (p *pass) addDump(s string) {
-	if p.dump == nil {
-		p.dump = make(map[string]bool)
-	}
-	p.dump[s] = true
-}
-
-// Run consistency checker between each phase
-var checkEnabled = false
-
-// Debug output
-var IntrinsicsDebug int
-var IntrinsicsDisable bool
-
-var BuildDebug int
-var BuildTest int
-var BuildStats int
-var BuildDump string // name of function to dump after initial build of ssa
-
-// PhaseOption sets the specified flag in the specified ssa phase,
-// returning empty string if this was successful or a string explaining
-// the error if it was not.
-// A version of the phase name with "_" replaced by " " is also checked for a match.
-// If the phase name begins with a '~' then the rest of the underscores-replaced-with-blanks
-// version is used as a regular expression to match the phase name(s).
-//
-// Special cases that have turned out to be useful:
-//  ssa/check/on enables checking after each phase
-//  ssa/all/time enables time reporting for all phases
-//
-// See gc/lex.go for dissection of the option string.
-// Example uses:
-//
-// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash
-//
-// BOOT_GO_GCFLAGS=-d='ssa/~^.*scc$/off' GO_GCFLAGS='-d=ssa/~^.*scc$/off' ./make.bash
-//
-func PhaseOption(phase, flag string, val int, valString string) string {
-	if phase == "help" {
-		lastcr := 0
-		phasenames := "check, all, build, intrinsics"
-		for _, p := range passes {
-			pn := strings.Replace(p.name, " ", "_", -1)
-			if len(pn)+len(phasenames)-lastcr > 70 {
-				phasenames += "\n"
-				lastcr = len(phasenames)
-				phasenames += pn
-			} else {
-				phasenames += ", " + pn
-			}
-		}
-		return "" +
-			`GcFlag -d=ssa/<phase>/<flag>[=<value>]|[:<function_name>]
-<phase> is one of:
-` + phasenames + `
-<flag> is one of on, off, debug, mem, time, test, stats, dump
-<value> defaults to 1
-<function_name> is required for "dump", specifies name of function to dump after <phase>
-Except for dump, output is directed to standard out; dump appears in a file.
-Phase "all" supports flags "time", "mem", and "dump".
-Phases "intrinsics" supports flags "on", "off", and "debug".
-Interpretation of the "debug" value depends on the phase.
-Dump files are named <phase>__<function_name>_<seq>.dump.
-`
-	}
-
-	if phase == "check" && flag == "on" {
-		checkEnabled = val != 0
-		return ""
-	}
-	if phase == "check" && flag == "off" {
-		checkEnabled = val == 0
-		return ""
-	}
-
-	alltime := false
-	allmem := false
-	alldump := false
-	if phase == "all" {
-		if flag == "time" {
-			alltime = val != 0
-		} else if flag == "mem" {
-			allmem = val != 0
-		} else if flag == "dump" {
-			alldump = val != 0
-			if alldump {
-				BuildDump = valString
-			}
-		} else {
-			return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
-		}
-	}
-
-	if phase == "intrinsics" {
-		switch flag {
-		case "on":
-			IntrinsicsDisable = val == 0
-		case "off":
-			IntrinsicsDisable = val != 0
-		case "debug":
-			IntrinsicsDebug = val
-		default:
-			return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
-		}
-		return ""
-	}
-	if phase == "build" {
-		switch flag {
-		case "debug":
-			BuildDebug = val
-		case "test":
-			BuildTest = val
-		case "stats":
-			BuildStats = val
-		case "dump":
-			BuildDump = valString
-		default:
-			return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
-		}
-		return ""
-	}
-
-	underphase := strings.Replace(phase, "_", " ", -1)
-	var re *regexp.Regexp
-	if phase[0] == '~' {
-		r, ok := regexp.Compile(underphase[1:])
-		if ok != nil {
-			return fmt.Sprintf("Error %s in regexp for phase %s, flag %s", ok.Error(), phase, flag)
-		}
-		re = r
-	}
-	matchedOne := false
-	for i, p := range passes {
-		if phase == "all" {
-			p.time = alltime
-			p.mem = allmem
-			if alldump {
-				p.addDump(valString)
-			}
-			passes[i] = p
-			matchedOne = true
-		} else if p.name == phase || p.name == underphase || re != nil && re.MatchString(p.name) {
-			switch flag {
-			case "on":
-				p.disabled = val == 0
-			case "off":
-				p.disabled = val != 0
-			case "time":
-				p.time = val != 0
-			case "mem":
-				p.mem = val != 0
-			case "debug":
-				p.debug = val
-			case "stats":
-				p.stats = val
-			case "test":
-				p.test = val
-			case "dump":
-				p.addDump(valString)
-			default:
-				return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
-			}
-			if p.disabled && p.required {
-				return fmt.Sprintf("Cannot disable required SSA phase %s using -d=ssa/%s debug option", phase, phase)
-			}
-			passes[i] = p
-			matchedOne = true
-		}
-	}
-	if matchedOne {
-		return ""
-	}
-	return fmt.Sprintf("Did not find a phase matching %s in -d=ssa/... debug option", phase)
-}
-
-// list of passes for the compiler
-var passes = [...]pass{
-	// TODO: combine phielim and copyelim into a single pass?
-	{name: "early phielim", fn: phielim},
-	{name: "early copyelim", fn: copyelim},
-	{name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
-	{name: "short circuit", fn: shortcircuit},
-	{name: "decompose user", fn: decomposeUser, required: true},
-	{name: "opt", fn: opt, required: true},               // TODO: split required rules and optimizing rules
-	{name: "zero arg cse", fn: zcse, required: true},     // required to merge OpSB values
-	{name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt
-	{name: "generic cse", fn: cse},
-	{name: "phiopt", fn: phiopt},
-	{name: "nilcheckelim", fn: nilcheckelim},
-	{name: "prove", fn: prove},
-	{name: "loopbce", fn: loopbce},
-	{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
-	{name: "dec", fn: dec, required: true},
-	{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
-	{name: "generic deadcode", fn: deadcode},
-	{name: "check bce", fn: checkbce},
-	{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
-	{name: "fuse", fn: fuse},
-	{name: "dse", fn: dse},
-	{name: "insert resched checks", fn: insertLoopReschedChecks,
-		disabled: obj.Preemptibleloops_enabled == 0}, // insert resched checks in loops.
-	{name: "tighten", fn: tighten}, // move values closer to their uses
-	{name: "lower", fn: lower, required: true},
-	{name: "lowered cse", fn: cse},
-	{name: "lowered deadcode", fn: deadcode, required: true},
-	{name: "checkLower", fn: checkLower, required: true},
-	{name: "late phielim", fn: phielim},
-	{name: "late copyelim", fn: copyelim},
-	{name: "phi tighten", fn: phiTighten},
-	{name: "late deadcode", fn: deadcode},
-	{name: "critical", fn: critical, required: true}, // remove critical edges
-	{name: "likelyadjust", fn: likelyadjust},
-	{name: "layout", fn: layout, required: true},     // schedule blocks
-	{name: "schedule", fn: schedule, required: true}, // schedule values
-	{name: "late nilcheck", fn: nilcheckelim2},
-	{name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
-	{name: "regalloc", fn: regalloc, required: true},   // allocate int & float registers + stack slots
-	{name: "stackframe", fn: stackframe, required: true},
-	{name: "trim", fn: trim}, // remove empty blocks
-}
-
-// Double-check phase ordering constraints.
-// This code is intended to document the ordering requirements
-// between different phases. It does not override the passes
-// list above.
-type constraint struct {
-	a, b string // a must come before b
-}
-
-var passOrder = [...]constraint{
-	// "insert resched checks" uses mem, better to clean out stores first.
-	{"dse", "insert resched checks"},
-	// insert resched checks adds new blocks containing generic instructions
-	{"insert resched checks", "lower"},
-	{"insert resched checks", "tighten"},
-
-	// prove relies on common-subexpression elimination for maximum benefits.
-	{"generic cse", "prove"},
-	// deadcode after prove to eliminate all new dead blocks.
-	{"prove", "generic deadcode"},
-	// common-subexpression before dead-store elim, so that we recognize
-	// when two address expressions are the same.
-	{"generic cse", "dse"},
-	// cse substantially improves nilcheckelim efficacy
-	{"generic cse", "nilcheckelim"},
-	// allow deadcode to clean up after nilcheckelim
-	{"nilcheckelim", "generic deadcode"},
-	// nilcheckelim generates sequences of plain basic blocks
-	{"nilcheckelim", "fuse"},
-	// nilcheckelim relies on opt to rewrite user nil checks
-	{"opt", "nilcheckelim"},
-	// tighten should happen before lowering to avoid splitting naturally paired instructions such as CMP/SET
-	{"tighten", "lower"},
-	// tighten will be most effective when as many values have been removed as possible
-	{"generic deadcode", "tighten"},
-	{"generic cse", "tighten"},
-	// checkbce needs the values removed
-	{"generic deadcode", "check bce"},
-	// don't run optimization pass until we've decomposed builtin objects
-	{"decompose builtin", "late opt"},
-	// don't layout blocks until critical edges have been removed
-	{"critical", "layout"},
-	// regalloc requires the removal of all critical edges
-	{"critical", "regalloc"},
-	// regalloc requires all the values in a block to be scheduled
-	{"schedule", "regalloc"},
-	// checkLower must run after lowering & subsequent dead code elim
-	{"lower", "checkLower"},
-	{"lowered deadcode", "checkLower"},
-	// late nilcheck needs instructions to be scheduled.
-	{"schedule", "late nilcheck"},
-	// flagalloc needs instructions to be scheduled.
-	{"schedule", "flagalloc"},
-	// regalloc needs flags to be allocated first.
-	{"flagalloc", "regalloc"},
-	// stackframe needs to know about spilled registers.
-	{"regalloc", "stackframe"},
-	// trim needs regalloc to be done first.
-	{"regalloc", "trim"},
-}
-
-func init() {
-	for _, c := range passOrder {
-		a, b := c.a, c.b
-		i := -1
-		j := -1
-		for k, p := range passes {
-			if p.name == a {
-				i = k
-			}
-			if p.name == b {
-				j = k
-			}
-		}
-		if i < 0 {
-			log.Panicf("pass %s not found", a)
-		}
-		if j < 0 {
-			log.Panicf("pass %s not found", b)
-		}
-		if i >= j {
-			log.Panicf("passes %s and %s out of order", a, b)
-		}
-	}
-}
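
The init function above enforces the passOrder constraints against the passes list. A compact, hypothetical restatement of that check, with made-up pass names for the example:

package main

import "fmt"

type constraint struct{ a, b string } // a must come before b

// checkOrder returns an error for the first constraint whose passes are
// missing from the list or appear out of order.
func checkOrder(passes []string, cons []constraint) error {
	pos := make(map[string]int, len(passes))
	for i, p := range passes {
		pos[p] = i
	}
	for _, c := range cons {
		i, oki := pos[c.a]
		j, okj := pos[c.b]
		if !oki || !okj {
			return fmt.Errorf("pass %q or %q not found", c.a, c.b)
		}
		if i >= j {
			return fmt.Errorf("passes %q and %q out of order", c.a, c.b)
		}
	}
	return nil
}

func main() {
	passes := []string{"generic cse", "prove", "generic deadcode"}
	cons := []constraint{{"generic cse", "prove"}, {"prove", "generic deadcode"}}
	fmt.Println(checkOrder(passes, cons)) // prints: <nil>
}
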
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/config.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/config.go
deleted file mode 100644
index 41406c8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/config.go
+++ /dev/null
@@ -1,428 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/config.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/config.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"crypto/sha1"
-	"fmt"
-	"os"
-	"strconv"
-	"strings"
-)
-
-type Config struct {
-	arch            string                     // "amd64", etc.
-	IntSize         int64                      // 4 or 8
-	PtrSize         int64                      // 4 or 8
-	RegSize         int64                      // 4 or 8
-	lowerBlock      func(*Block, *Config) bool // lowering function
-	lowerValue      func(*Value, *Config) bool // lowering function
-	registers       []Register                 // machine registers
-	gpRegMask       regMask                    // general purpose integer register mask
-	fpRegMask       regMask                    // floating point register mask
-	specialRegMask  regMask                    // special register mask
-	FPReg           int8                       // register number of frame pointer, -1 if not used
-	LinkReg         int8                       // register number of link register if it is a general purpose register, -1 if not used
-	hasGReg         bool                       // has hardware g register
-	fe              Frontend                   // callbacks into compiler frontend
-	HTML            *HTMLWriter                // html writer, for debugging
-	ctxt            *obj.Link                  // Generic arch information
-	optimize        bool                       // Do optimization
-	noDuffDevice    bool                       // Don't use Duff's device
-	nacl            bool                       // GOOS=nacl
-	use387          bool                       // GO386=387
-	OldArch         bool                       // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
-	NeedsFpScratch  bool                       // No direct move between GP and FP register sets
-	BigEndian       bool                       //
-	DebugTest       bool                       // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
-	sparsePhiCutoff uint64                     // Sparse phi location algorithm used above this #blocks*#variables score
-	curFunc         *Func
-
-	// TODO: more stuff. Compiler flags of interest, ...
-
-	// Given an environment variable used for debug hash match,
-	// what file (if any) receives the yes/no logging?
-	logfiles map[string]*os.File
-
-	// Storage for low-numbered values and blocks.
-	values [2000]Value
-	blocks [200]Block
-
-	// Reusable stackAllocState.
-	// See stackalloc.go's {new,put}StackAllocState.
-	stackAllocState *stackAllocState
-
-	domblockstore []ID         // scratch space for computing dominators
-	scrSparse     []*sparseSet // scratch sparse sets to be re-used.
-}
-
-type TypeSource interface {
-	TypeBool() Type
-	TypeInt8() Type
-	TypeInt16() Type
-	TypeInt32() Type
-	TypeInt64() Type
-	TypeUInt8() Type
-	TypeUInt16() Type
-	TypeUInt32() Type
-	TypeUInt64() Type
-	TypeInt() Type
-	TypeFloat32() Type
-	TypeFloat64() Type
-	TypeUintptr() Type
-	TypeString() Type
-	TypeBytePtr() Type // TODO: use unsafe.Pointer instead?
-
-	CanSSA(t Type) bool
-}
-
-type Logger interface {
-	// Logf logs a message from the compiler.
-	Logf(string, ...interface{})
-
-	// Log returns true if logging is not a no-op
-	// some logging calls account for more than a few heap allocations.
-	Log() bool
-
-	// Fatal reports a compiler error and exits.
-	Fatalf(line int32, msg string, args ...interface{})
-
-	// Warnl writes compiler messages in the form expected by "errorcheck" tests
-	Warnl(line int32, fmt_ string, args ...interface{})
-
-	// Forwards the Debug flags from gc
-	Debug_checknil() bool
-	Debug_wb() bool
-}
-
-type Frontend interface {
-	TypeSource
-	Logger
-
-	// StringData returns a symbol pointing to the given string's contents.
-	StringData(string) interface{} // returns *gc.Sym
-
-	// Auto returns a Node for an auto variable of the given type.
-	// The SSA compiler uses this function to allocate space for spills.
-	Auto(Type) GCNode
-
-	// Given the name for a compound type, returns the name we should use
-	// for the parts of that compound type.
-	SplitString(LocalSlot) (LocalSlot, LocalSlot)
-	SplitInterface(LocalSlot) (LocalSlot, LocalSlot)
-	SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot)
-	SplitComplex(LocalSlot) (LocalSlot, LocalSlot)
-	SplitStruct(LocalSlot, int) LocalSlot
-	SplitArray(LocalSlot) LocalSlot              // array must be length 1
-	SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
-
-	// Line returns a string describing the given line number.
-	Line(int32) string
-
-	// AllocFrame assigns frame offsets to all live auto variables.
-	AllocFrame(f *Func)
-
-	// Syslook returns a symbol of the runtime function/variable with the
-	// given name.
-	Syslook(string) interface{} // returns *gc.Sym
-}
-
-// interface used to hold *gc.Node. We'd use *gc.Node directly but
-// that would lead to an import cycle.
-type GCNode interface {
-	Typ() Type
-	String() string
-}
-
-// NewConfig returns a new configuration object for the given architecture.
-func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config {
-	c := &Config{arch: arch, fe: fe}
-	switch arch {
-	case "amd64":
-		c.IntSize = 8
-		c.PtrSize = 8
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockAMD64
-		c.lowerValue = rewriteValueAMD64
-		c.registers = registersAMD64[:]
-		c.gpRegMask = gpRegMaskAMD64
-		c.fpRegMask = fpRegMaskAMD64
-		c.FPReg = framepointerRegAMD64
-		c.LinkReg = linkRegAMD64
-		c.hasGReg = false
-	case "amd64p32":
-		c.IntSize = 4
-		c.PtrSize = 4
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockAMD64
-		c.lowerValue = rewriteValueAMD64
-		c.registers = registersAMD64[:]
-		c.gpRegMask = gpRegMaskAMD64
-		c.fpRegMask = fpRegMaskAMD64
-		c.FPReg = framepointerRegAMD64
-		c.LinkReg = linkRegAMD64
-		c.hasGReg = false
-		c.noDuffDevice = true
-	case "386":
-		c.IntSize = 4
-		c.PtrSize = 4
-		c.RegSize = 4
-		c.lowerBlock = rewriteBlock386
-		c.lowerValue = rewriteValue386
-		c.registers = registers386[:]
-		c.gpRegMask = gpRegMask386
-		c.fpRegMask = fpRegMask386
-		c.FPReg = framepointerReg386
-		c.LinkReg = linkReg386
-		c.hasGReg = false
-	case "arm":
-		c.IntSize = 4
-		c.PtrSize = 4
-		c.RegSize = 4
-		c.lowerBlock = rewriteBlockARM
-		c.lowerValue = rewriteValueARM
-		c.registers = registersARM[:]
-		c.gpRegMask = gpRegMaskARM
-		c.fpRegMask = fpRegMaskARM
-		c.FPReg = framepointerRegARM
-		c.LinkReg = linkRegARM
-		c.hasGReg = true
-	case "arm64":
-		c.IntSize = 8
-		c.PtrSize = 8
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockARM64
-		c.lowerValue = rewriteValueARM64
-		c.registers = registersARM64[:]
-		c.gpRegMask = gpRegMaskARM64
-		c.fpRegMask = fpRegMaskARM64
-		c.FPReg = framepointerRegARM64
-		c.LinkReg = linkRegARM64
-		c.hasGReg = true
-		c.noDuffDevice = obj.GOOS == "darwin" // darwin linker cannot handle BR26 reloc with non-zero addend
-	case "ppc64":
-		c.OldArch = true
-		c.BigEndian = true
-		fallthrough
-	case "ppc64le":
-		c.IntSize = 8
-		c.PtrSize = 8
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockPPC64
-		c.lowerValue = rewriteValuePPC64
-		c.registers = registersPPC64[:]
-		c.gpRegMask = gpRegMaskPPC64
-		c.fpRegMask = fpRegMaskPPC64
-		c.FPReg = framepointerRegPPC64
-		c.LinkReg = linkRegPPC64
-		c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
-		c.NeedsFpScratch = true
-		c.hasGReg = true
-	case "mips64":
-		c.BigEndian = true
-		fallthrough
-	case "mips64le":
-		c.IntSize = 8
-		c.PtrSize = 8
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockMIPS64
-		c.lowerValue = rewriteValueMIPS64
-		c.registers = registersMIPS64[:]
-		c.gpRegMask = gpRegMaskMIPS64
-		c.fpRegMask = fpRegMaskMIPS64
-		c.specialRegMask = specialRegMaskMIPS64
-		c.FPReg = framepointerRegMIPS64
-		c.LinkReg = linkRegMIPS64
-		c.hasGReg = true
-	case "s390x":
-		c.IntSize = 8
-		c.PtrSize = 8
-		c.RegSize = 8
-		c.lowerBlock = rewriteBlockS390X
-		c.lowerValue = rewriteValueS390X
-		c.registers = registersS390X[:]
-		c.gpRegMask = gpRegMaskS390X
-		c.fpRegMask = fpRegMaskS390X
-		c.FPReg = framepointerRegS390X
-		c.LinkReg = linkRegS390X
-		c.hasGReg = true
-		c.noDuffDevice = true
-		c.BigEndian = true
-	case "mips":
-		c.BigEndian = true
-		fallthrough
-	case "mipsle":
-		c.IntSize = 4
-		c.PtrSize = 4
-		c.RegSize = 4
-		c.lowerBlock = rewriteBlockMIPS
-		c.lowerValue = rewriteValueMIPS
-		c.registers = registersMIPS[:]
-		c.gpRegMask = gpRegMaskMIPS
-		c.fpRegMask = fpRegMaskMIPS
-		c.specialRegMask = specialRegMaskMIPS
-		c.FPReg = framepointerRegMIPS
-		c.LinkReg = linkRegMIPS
-		c.hasGReg = true
-		c.noDuffDevice = true
-	default:
-		fe.Fatalf(0, "arch %s not implemented", arch)
-	}
-	c.ctxt = ctxt
-	c.optimize = optimize
-	c.nacl = obj.GOOS == "nacl"
-
-	// Don't use Duff's device on Plan 9 AMD64, because floating
-	// point operations are not allowed in note handler.
-	if obj.GOOS == "plan9" && arch == "amd64" {
-		c.noDuffDevice = true
-	}
-
-	if c.nacl {
-		c.noDuffDevice = true // Don't use Duff's device on NaCl
-
-		// runtime call clobber R12 on nacl
-		opcodeTable[OpARMUDIVrtcall].reg.clobbers |= 1 << 12 // R12
-	}
-
-	// Assign IDs to preallocated values/blocks.
-	for i := range c.values {
-		c.values[i].ID = ID(i)
-	}
-	for i := range c.blocks {
-		c.blocks[i].ID = ID(i)
-	}
-
-	c.logfiles = make(map[string]*os.File)
-
-	// cutoff is compared with product of numblocks and numvalues,
-	// if product is smaller than cutoff, use old non-sparse method.
-	// cutoff == 0 implies all sparse.
-	// cutoff == -1 implies none sparse.
-	// Good cutoff values seem to be O(million) depending on constant factor cost of sparse.
-	// TODO: get this from a flag, not an environment variable
-	c.sparsePhiCutoff = 2500000 // 0 for testing. // 2500000 determined with crude experiments w/ make.bash
-	ev := os.Getenv("GO_SSA_PHI_LOC_CUTOFF")
-	if ev != "" {
-		v, err := strconv.ParseInt(ev, 10, 64)
-		if err != nil {
-			fe.Fatalf(0, "Environment variable GO_SSA_PHI_LOC_CUTOFF (value '%s') did not parse as a number", ev)
-		}
-		c.sparsePhiCutoff = uint64(v) // convert -1 to maxint, for never use sparse
-	}
-
-	return c
-}
-
-func (c *Config) Set387(b bool) {
-	c.NeedsFpScratch = b
-	c.use387 = b
-}
-
-func (c *Config) Frontend() Frontend      { return c.fe }
-func (c *Config) SparsePhiCutoff() uint64 { return c.sparsePhiCutoff }
-func (c *Config) Ctxt() *obj.Link         { return c.ctxt }
-
-// NewFunc returns a new, empty function object.
-// Caller must call f.Free() before calling NewFunc again.
-func (c *Config) NewFunc() *Func {
-	// TODO(khr): should this function take name, type, etc. as arguments?
-	if c.curFunc != nil {
-		c.Fatalf(0, "NewFunc called without previous Free")
-	}
-	f := &Func{Config: c, NamedValues: map[LocalSlot][]*Value{}}
-	c.curFunc = f
-	return f
-}
-
-func (c *Config) Logf(msg string, args ...interface{})               { c.fe.Logf(msg, args...) }
-func (c *Config) Log() bool                                          { return c.fe.Log() }
-func (c *Config) Fatalf(line int32, msg string, args ...interface{}) { c.fe.Fatalf(line, msg, args...) }
-func (c *Config) Warnl(line int32, msg string, args ...interface{})  { c.fe.Warnl(line, msg, args...) }
-func (c *Config) Debug_checknil() bool                               { return c.fe.Debug_checknil() }
-func (c *Config) Debug_wb() bool                                     { return c.fe.Debug_wb() }
-
-func (c *Config) logDebugHashMatch(evname, name string) {
-	file := c.logfiles[evname]
-	if file == nil {
-		file = os.Stdout
-		tmpfile := os.Getenv("GSHS_LOGFILE")
-		if tmpfile != "" {
-			var ok error
-			file, ok = os.Create(tmpfile)
-			if ok != nil {
-				c.Fatalf(0, "Could not open hash-testing logfile %s", tmpfile)
-			}
-		}
-		c.logfiles[evname] = file
-	}
-	s := fmt.Sprintf("%s triggered %s\n", evname, name)
-	file.WriteString(s)
-	file.Sync()
-}
-
-// DebugHashMatch returns true if the environment variable evname
-// 1) is empty (a quick special case of 3: the empty string is a
-//    suffix of every hash)
-// 2) is "y" or "Y"
-// 3) has a value that is a suffix of the bitwise sha1 hash of name
-// 4) is nonempty, and for some n the value of the environment variable
-//    fmt.Sprintf("%s%d", evname, n) is a suffix of that hash, provided
-//    that fmt.Sprintf("%s%d", evname, i) is nonempty for all 0 <= i <= n
-// Otherwise it returns false.
-// When true is returned the message
-//  "%s triggered %s\n", evname, name
-// is printed on the file named in environment variable
-//  GSHS_LOGFILE
-// or standard out if that is empty or there is an error
-// opening the file.
-
-func (c *Config) DebugHashMatch(evname, name string) bool {
-	evhash := os.Getenv(evname)
-	if evhash == "" {
-		return true // default behavior with no EV is "on"
-	}
-	if evhash == "y" || evhash == "Y" {
-		c.logDebugHashMatch(evname, name)
-		return true
-	}
-	if evhash == "n" || evhash == "N" {
-		return false
-	}
-	// Check the hash of the name against a partial input hash.
-	// We use this feature to do a binary search to
-	// find a function that is incorrectly compiled.
-	hstr := ""
-	for _, b := range sha1.Sum([]byte(name)) {
-		hstr += fmt.Sprintf("%08b", b)
-	}
-
-	if strings.HasSuffix(hstr, evhash) {
-		c.logDebugHashMatch(evname, name)
-		return true
-	}
-
-	// Iteratively try additional hashes to allow tests for multi-point
-	// failure.
-	for i := 0; true; i++ {
-		ev := fmt.Sprintf("%s%d", evname, i)
-		evv := os.Getenv(ev)
-		if evv == "" {
-			break
-		}
-		if strings.HasSuffix(hstr, evv) {
-			c.logDebugHashMatch(ev, name)
-			return true
-		}
-	}
-	return false
-}
-
-func (c *Config) DebugNameMatch(evname, name string) bool {
-	return os.Getenv(evname) == name
-}
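
Note on the debug hooks removed in this hunk: DebugHashMatch lets a developer binary-search for a miscompiled function by setting an environment variable to a trailing bit pattern of SHA1(function name); only functions whose hash ends in that pattern get the behavior under test. Below is a minimal standalone sketch of that suffix-of-hash check. The function and variable names (hashMatch, EXAMPLE_HASH) are illustrative and not part of the deleted code.

    package main

    import (
    	"crypto/sha1"
    	"fmt"
    	"os"
    	"strings"
    )

    // hashMatch reports whether the value of the environment variable evname
    // is empty, "y"/"Y", or a suffix of the bitwise SHA1 hash of name.
    func hashMatch(evname, name string) bool {
    	ev := os.Getenv(evname)
    	if ev == "" {
    		return true // unset or empty: match everything
    	}
    	if ev == "y" || ev == "Y" {
    		return true
    	}
    	if ev == "n" || ev == "N" {
    		return false
    	}
    	// Render the SHA1 digest of the name as a string of bits,
    	// then check whether the variable's value is a suffix of it.
    	hstr := ""
    	for _, b := range sha1.Sum([]byte(name)) {
    		hstr += fmt.Sprintf("%08b", b)
    	}
    	return strings.HasSuffix(hstr, ev)
    }

    func main() {
    	// With a k-bit value, roughly 1 in 2^k function names match, which
    	// is what makes the bisection for a bad function converge quickly.
    	os.Setenv("EXAMPLE_HASH", "0110")
    	fmt.Println(hashMatch("EXAMPLE_HASH", "main.main"))
    }

Lengthening the suffix by one bit halves the set of matching functions, so the search narrows down to a single miscompiled function in a logarithmic number of runs.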
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/copyelim.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/copyelim.go
deleted file mode 100644
index ce2fcc9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/copyelim.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/copyelim.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/copyelim.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// copyelim removes all uses of OpCopy values from f.
-// A subsequent deadcode pass is needed to actually remove the copies.
-func copyelim(f *Func) {
-	// Modify all values so no arg (including args
-	// of OpCopy) is a copy.
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			copyelimValue(v)
-		}
-	}
-
-	// Update block control values.
-	for _, b := range f.Blocks {
-		if v := b.Control; v != nil && v.Op == OpCopy {
-			b.SetControl(v.Args[0])
-		}
-	}
-
-	// Update named values.
-	for _, name := range f.Names {
-		values := f.NamedValues[name]
-		for i, v := range values {
-			if v.Op == OpCopy {
-				values[i] = v.Args[0]
-			}
-		}
-	}
-}
-
-// copySource returns the (non-copy) op which is the
-// ultimate source of v.  v must be a copy op.
-func copySource(v *Value) *Value {
-	w := v.Args[0]
-
-	// This loop is just:
-	// for w.Op == OpCopy {
-	//     w = w.Args[0]
-	// }
-	// but we take some extra care to make sure we
-	// don't get stuck in an infinite loop.
-	// Infinite copy loops may happen in unreachable code.
-	// (TODO: or can they?  Needs a test.)
-	slow := w
-	var advance bool
-	for w.Op == OpCopy {
-		w = w.Args[0]
-		if w == slow {
-			w.reset(OpUnknown)
-			break
-		}
-		if advance {
-			slow = slow.Args[0]
-		}
-		advance = !advance
-	}
-
-	// The answer is w.  Update all the copies we saw
-	// to point directly to w.  Doing this update makes
-	// sure that we don't end up doing O(n^2) work
-	// for a chain of n copies.
-	for v != w {
-		x := v.Args[0]
-		v.SetArg(0, w)
-		v = x
-	}
-	return w
-}
-
-// copyelimValue ensures that no args of v are copies.
-func copyelimValue(v *Value) {
-	for i, a := range v.Args {
-		if a.Op == OpCopy {
-			v.SetArg(i, copySource(a))
-		}
-	}
-}
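
Note on the pass removed in this hunk: copyelim rewrites every use of an OpCopy value to point at the copy's ultimate source, and copySource chases copy chains with a slow/fast pointer pair so a (theoretically possible) copy cycle in unreachable code cannot hang the compiler. Below is a minimal standalone sketch of that pointer-chasing idea, using an invented node type rather than *ssa.Value.

    package main

    import "fmt"

    type node struct {
    	isCopy bool
    	arg    *node
    	name   string
    }

    // source returns the first non-copy node reachable from v, compressing
    // the chain so later lookups walk at most one hop.
    func source(v *node) *node {
    	w := v
    	slow := v
    	advance := false
    	for w.isCopy {
    		w = w.arg
    		if w == slow {
    			// A copy cycle (only possible in unreachable code); give up.
    			return w
    		}
    		if advance {
    			slow = slow.arg
    		}
    		advance = !advance
    	}
    	// Path compression: point every copy we walked through directly at w.
    	for v != w && v.isCopy {
    		next := v.arg
    		v.arg = w
    		v = next
    	}
    	return w
    }

    func main() {
    	x := &node{name: "x"}
    	c1 := &node{isCopy: true, arg: x}
    	c2 := &node{isCopy: true, arg: c1}
    	fmt.Println(source(c2).name) // x
    }

The slow/fast pair is ordinary cycle detection; the compression loop is what keeps a chain of n copies at O(n) total work instead of O(n^2).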
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/copyelim_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/copyelim_test.go
deleted file mode 100644
index 87fa367..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/copyelim_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/copyelim_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/copyelim_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"testing"
-)
-
-func BenchmarkCopyElim1(b *testing.B)      { benchmarkCopyElim(b, 1) }
-func BenchmarkCopyElim10(b *testing.B)     { benchmarkCopyElim(b, 10) }
-func BenchmarkCopyElim100(b *testing.B)    { benchmarkCopyElim(b, 100) }
-func BenchmarkCopyElim1000(b *testing.B)   { benchmarkCopyElim(b, 1000) }
-func BenchmarkCopyElim10000(b *testing.B)  { benchmarkCopyElim(b, 10000) }
-func BenchmarkCopyElim100000(b *testing.B) { benchmarkCopyElim(b, 100000) }
-
-func benchmarkCopyElim(b *testing.B, n int) {
-	c := testConfig(b)
-
-	values := make([]interface{}, 0, n+2)
-	values = append(values, Valu("mem", OpInitMem, TypeMem, 0, nil))
-	last := "mem"
-	for i := 0; i < n; i++ {
-		name := fmt.Sprintf("copy%d", i)
-		values = append(values, Valu(name, OpCopy, TypeMem, 0, nil, last))
-		last = name
-	}
-	values = append(values, Exit(last))
-	// Reverse the values array so the copies are not already in definition order
-	for i := 0; i < len(values)/2; i++ {
-		values[i], values[len(values)-1-i] = values[len(values)-1-i], values[i]
-	}
-
-	for i := 0; i < b.N; i++ {
-		fun := Fun(c, "entry", Bloc("entry", values...))
-		Copyelim(fun.f)
-		fun.f.Free()
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/critical.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/critical.go
deleted file mode 100644
index 3de2d68..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/critical.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/critical.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/critical.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// critical splits critical edges (those that go from a block with
-// more than one outedge to a block with more than one inedge).
-// Regalloc wants a critical-edge-free CFG so it can implement phi values.
-func critical(f *Func) {
-	// maps from phi arg ID to the new block created for that argument
-	blocks := make([]*Block, f.NumValues())
-	// need to iterate over f.Blocks without range, as we might
-	// need to split critical edges on newly constructed blocks
-	for j := 0; j < len(f.Blocks); j++ {
-		b := f.Blocks[j]
-		if len(b.Preds) <= 1 {
-			continue
-		}
-
-		var phi *Value
-		// Determine whether this block has only a single phi;
-		// that case is easier to handle than the general case
-		// of a block with multiple phi values.
-		for _, v := range b.Values {
-			if v.Op == OpPhi {
-				if phi != nil {
-					phi = nil
-					break
-				}
-				phi = v
-			}
-		}
-
-		// reset our block map
-		if phi != nil {
-			for _, v := range phi.Args {
-				blocks[v.ID] = nil
-			}
-		}
-
-		// split input edges coming from multi-output blocks.
-		for i := 0; i < len(b.Preds); {
-			e := b.Preds[i]
-			p := e.b
-			pi := e.i
-			if p.Kind == BlockPlain {
-				i++
-				continue // only single output block
-			}
-
-			var d *Block         // new block used to remove critical edge
-			reusedBlock := false // if true, then this is not the first use of this block
-			if phi != nil {
-				argID := phi.Args[i].ID
-				// find or record the block that we used to split
-				// critical edges for this argument
-				if d = blocks[argID]; d == nil {
-					// Splitting doesn't necessarily remove the critical edge;
-					// because we iterate over len(f.Blocks) above, the newly
-					// created blocks will be re-examined as well.
-					d = f.NewBlock(BlockPlain)
-					d.Line = p.Line
-					blocks[argID] = d
-					if f.pass.debug > 0 {
-						f.Config.Warnl(p.Line, "split critical edge")
-					}
-				} else {
-					reusedBlock = true
-				}
-			} else {
-				// no existing block, so allocate a new block
-				// to place on the edge
-				d = f.NewBlock(BlockPlain)
-				d.Line = p.Line
-				if f.pass.debug > 0 {
-					f.Config.Warnl(p.Line, "split critical edge")
-				}
-			}
-
-			// if this is not the first argument for the
-			// block, then we need to remove the
-			// corresponding elements from the block
-			// predecessors and phi args
-			if reusedBlock {
-				// Add p->d edge
-				p.Succs[pi] = Edge{d, len(d.Preds)}
-				d.Preds = append(d.Preds, Edge{p, pi})
-
-				// Remove p as a predecessor from b.
-				b.removePred(i)
-
-				// Update corresponding phi args
-				n := len(b.Preds)
-				phi.Args[i].Uses--
-				phi.Args[i] = phi.Args[n]
-				phi.Args[n] = nil
-				phi.Args = phi.Args[:n]
-				// splitting occasionally leads to a phi having
-				// a single argument (occurs with -N)
-				if n == 1 {
-					phi.Op = OpCopy
-				}
-				// Don't increment i in this case because we moved
-				// an unprocessed predecessor down into slot i.
-			} else {
-				// splice it in
-				p.Succs[pi] = Edge{d, 0}
-				b.Preds[i] = Edge{d, 0}
-				d.Preds = append(d.Preds, Edge{p, pi})
-				d.Succs = append(d.Succs, Edge{b, i})
-				i++
-			}
-		}
-	}
-}
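
Note on the pass removed in this hunk: an edge is critical when its source block has more than one successor and its destination has more than one predecessor; critical splits such edges by inserting an empty block so register allocation has somewhere to place per-edge moves for phi values. Below is a minimal sketch of the edge rewrite on an invented toy CFG; the real pass also renumbers Edge indices and patches phi arguments, which is where most of the deleted code goes.

    package main

    import "fmt"

    // block is a toy CFG node, invented for illustration; the real pass
    // works on *ssa.Block.
    type block struct {
    	name  string
    	succs []*block
    	preds []*block
    }

    // splitEdge replaces the edge p->b (stored at p.succs[i] and b.preds[j])
    // with p->d->b, where d is a fresh empty block.
    func splitEdge(p *block, i int, b *block, j int) *block {
    	d := &block{name: p.name + "_to_" + b.name}
    	p.succs[i] = d
    	d.preds = append(d.preds, p)
    	d.succs = append(d.succs, b)
    	b.preds[j] = d
    	return d
    }

    func main() {
    	// p has two successors and b has two predecessors, so p->b is critical.
    	p := &block{name: "p"}
    	b := &block{name: "b"}
    	alt := &block{name: "alt"}
    	p.succs = []*block{b, alt}
    	alt.preds = []*block{p}
    	alt.succs = []*block{b}
    	b.preds = []*block{p, alt}

    	d := splitEdge(p, 0, b, 0)
    	fmt.Printf("%s -> %s -> %s\n", p.name, d.name, d.succs[0].name)
    }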
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/cse.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/cse.go
deleted file mode 100644
index 1ad6eb1..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/cse.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/cse.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/cse.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"sort"
-)
-
-// cse does common-subexpression elimination on the Function.
-// Values are just relinked, nothing is deleted. A subsequent deadcode
-// pass is required to actually remove duplicate expressions.
-func cse(f *Func) {
-	// Two values are equivalent if they satisfy the following definition:
-	// equivalent(v, w):
-	//   v.op == w.op
-	//   v.type == w.type
-	//   v.aux == w.aux
-	//   v.auxint == w.auxint
-	//   len(v.args) == len(w.args)
-	//   v.block == w.block if v.op == OpPhi
-	//   equivalent(v.args[i], w.args[i]) for i in 0..len(v.args)-1
-
-	// The algorithm searches for a partition of f's values into
-	// equivalence classes using the above definition.
-	// It starts with a coarse partition and iteratively refines it
-	// until it reaches a fixed point.
-
-	// Make initial coarse partitions by using a subset of the conditions above.
-	a := make([]*Value, 0, f.NumValues())
-	auxIDs := auxmap{}
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if auxIDs[v.Aux] == 0 {
-				auxIDs[v.Aux] = int32(len(auxIDs)) + 1
-			}
-			if v.Type.IsMemory() {
-				continue // memory values can never cse
-			}
-			if opcodeTable[v.Op].commutative && len(v.Args) == 2 && v.Args[1].ID < v.Args[0].ID {
-				// Order the arguments of binary commutative operations.
-				v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
-			}
-			a = append(a, v)
-		}
-	}
-	partition := partitionValues(a, auxIDs)
-
-	// map from value id back to eqclass id
-	valueEqClass := make([]ID, f.NumValues())
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			// Use negative equivalence class #s for unique values.
-			valueEqClass[v.ID] = -v.ID
-		}
-	}
-	var pNum ID = 1
-	for _, e := range partition {
-		if f.pass.debug > 1 && len(e) > 500 {
-			fmt.Printf("CSE.large partition (%d): ", len(e))
-			for j := 0; j < 3; j++ {
-				fmt.Printf("%s ", e[j].LongString())
-			}
-			fmt.Println()
-		}
-
-		for _, v := range e {
-			valueEqClass[v.ID] = pNum
-		}
-		if f.pass.debug > 2 && len(e) > 1 {
-			fmt.Printf("CSE.partition #%d:", pNum)
-			for _, v := range e {
-				fmt.Printf(" %s", v.String())
-			}
-			fmt.Printf("\n")
-		}
-		pNum++
-	}
-
-	// Split equivalence classes at points where they have
-	// non-equivalent arguments.  Repeat until we can't find any
-	// more splits.
-	var splitPoints []int
-	byArgClass := new(partitionByArgClass) // reusable partitionByArgClass to reduce allocations
-	for {
-		changed := false
-
-		// partition can grow in the loop. By not using a range loop here,
-		// we process new additions as they arrive, avoiding O(n^2) behavior.
-		for i := 0; i < len(partition); i++ {
-			e := partition[i]
-
-			// Sort by eq class of arguments.
-			byArgClass.a = e
-			byArgClass.eqClass = valueEqClass
-			sort.Sort(byArgClass)
-
-			// Find split points.
-			splitPoints = append(splitPoints[:0], 0)
-			for j := 1; j < len(e); j++ {
-				v, w := e[j-1], e[j]
-				eqArgs := true
-				for k, a := range v.Args {
-					b := w.Args[k]
-					if valueEqClass[a.ID] != valueEqClass[b.ID] {
-						eqArgs = false
-						break
-					}
-				}
-				if !eqArgs {
-					splitPoints = append(splitPoints, j)
-				}
-			}
-			if len(splitPoints) == 1 {
-				continue // no splits, leave equivalence class alone.
-			}
-
-			// Move another equivalence class down in place of e.
-			partition[i] = partition[len(partition)-1]
-			partition = partition[:len(partition)-1]
-			i--
-
-			// Add new equivalence classes for the parts of e we found.
-			splitPoints = append(splitPoints, len(e))
-			for j := 0; j < len(splitPoints)-1; j++ {
-				f := e[splitPoints[j]:splitPoints[j+1]]
-				if len(f) == 1 {
-					// Don't add singletons.
-					valueEqClass[f[0].ID] = -f[0].ID
-					continue
-				}
-				for _, v := range f {
-					valueEqClass[v.ID] = pNum
-				}
-				pNum++
-				partition = append(partition, f)
-			}
-			changed = true
-		}
-
-		if !changed {
-			break
-		}
-	}
-
-	sdom := f.sdom()
-
-	// Compute substitutions we would like to do. We substitute v for w
-	// if v and w are in the same equivalence class and v dominates w.
-	rewrite := make([]*Value, f.NumValues())
-	byDom := new(partitionByDom) // reusable partitionByDom to reduce allocs
-	for _, e := range partition {
-		byDom.a = e
-		byDom.sdom = sdom
-		sort.Sort(byDom)
-		for i := 0; i < len(e)-1; i++ {
-			// e is sorted by domorder, so a maximal dominant element is first in the slice
-			v := e[i]
-			if v == nil {
-				continue
-			}
-
-			e[i] = nil
-			// Replace all elements of e which v dominates
-			for j := i + 1; j < len(e); j++ {
-				w := e[j]
-				if w == nil {
-					continue
-				}
-				if sdom.isAncestorEq(v.Block, w.Block) {
-					rewrite[w.ID] = v
-					e[j] = nil
-				} else {
-					// e is sorted by domorder, so v.Block doesn't dominate any subsequent blocks in e
-					break
-				}
-			}
-		}
-	}
-
-	// If we rewrite a tuple generator to a new one in a different block,
-	// copy its selectors to the new generator's block, so the tuple generator
-	// and its selectors stay together.
-	// Be careful not to copy the same selectors more than once (issue 16741).
-	copiedSelects := make(map[ID][]*Value)
-	for _, b := range f.Blocks {
-	out:
-		for _, v := range b.Values {
-			// New values are created when selectors are copied to
-			// a new block. We can safely ignore those new values,
-			// since they have already been copied (issue 17918).
-			if int(v.ID) >= len(rewrite) || rewrite[v.ID] != nil {
-				continue
-			}
-			if v.Op != OpSelect0 && v.Op != OpSelect1 {
-				continue
-			}
-			if !v.Args[0].Type.IsTuple() {
-				f.Fatalf("arg of tuple selector %s is not a tuple: %s", v.String(), v.Args[0].LongString())
-			}
-			t := rewrite[v.Args[0].ID]
-			if t != nil && t.Block != b {
-				// v.Args[0] is tuple generator, CSE'd into a different block as t, v is left behind
-				for _, c := range copiedSelects[t.ID] {
-					if v.Op == c.Op {
-						// an equivalent selector is already copied
-						rewrite[v.ID] = c
-						continue out
-					}
-				}
-				c := v.copyInto(t.Block)
-				rewrite[v.ID] = c
-				copiedSelects[t.ID] = append(copiedSelects[t.ID], c)
-			}
-		}
-	}
-
-	rewrites := int64(0)
-
-	// Apply substitutions
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for i, w := range v.Args {
-				if x := rewrite[w.ID]; x != nil {
-					v.SetArg(i, x)
-					rewrites++
-				}
-			}
-		}
-		if v := b.Control; v != nil {
-			if x := rewrite[v.ID]; x != nil {
-				if v.Op == OpNilCheck {
-					// nilcheck pass will remove the nil checks and log
-					// them appropriately, so don't mess with them here.
-					continue
-				}
-				b.SetControl(x)
-			}
-		}
-	}
-	if f.pass.stats > 0 {
-		f.LogStat("CSE REWRITES", rewrites)
-	}
-}
-
-// An eqclass approximates an equivalence class. During the
-// algorithm it may represent the union of several of the
-// final equivalence classes.
-type eqclass []*Value
-
-// partitionValues partitions the values into equivalence classes
-// based on having all the following features match:
-//  - opcode
-//  - type
-//  - auxint
-//  - aux
-//  - nargs
-//  - block # if a phi op
-//  - first two args' opcodes and auxint
-//  - NOT the first two args' aux; that can break CSE.
-// partitionValues returns a list of equivalence classes, each
-// being a list of *Values sorted by ID. The eqclass slices are
-// backed by the same storage as the input slice.
-// Equivalence classes of size 1 are ignored.
-func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
-	sort.Sort(sortvalues{a, auxIDs})
-
-	var partition []eqclass
-	for len(a) > 0 {
-		v := a[0]
-		j := 1
-		for ; j < len(a); j++ {
-			w := a[j]
-			if cmpVal(v, w, auxIDs) != CMPeq {
-				break
-			}
-		}
-		if j > 1 {
-			partition = append(partition, a[:j])
-		}
-		a = a[j:]
-	}
-
-	return partition
-}
-func lt2Cmp(isLt bool) Cmp {
-	if isLt {
-		return CMPlt
-	}
-	return CMPgt
-}
-
-type auxmap map[interface{}]int32
-
-func cmpVal(v, w *Value, auxIDs auxmap) Cmp {
-	// Try to order these comparisons by cost (cheapest first)
-	if v.Op != w.Op {
-		return lt2Cmp(v.Op < w.Op)
-	}
-	if v.AuxInt != w.AuxInt {
-		return lt2Cmp(v.AuxInt < w.AuxInt)
-	}
-	if len(v.Args) != len(w.Args) {
-		return lt2Cmp(len(v.Args) < len(w.Args))
-	}
-	if v.Op == OpPhi && v.Block != w.Block {
-		return lt2Cmp(v.Block.ID < w.Block.ID)
-	}
-	if v.Type.IsMemory() {
-		// We will never be able to CSE two values
-		// that generate memory.
-		return lt2Cmp(v.ID < w.ID)
-	}
-
-	if tc := v.Type.Compare(w.Type); tc != CMPeq {
-		return tc
-	}
-
-	if v.Aux != w.Aux {
-		if v.Aux == nil {
-			return CMPlt
-		}
-		if w.Aux == nil {
-			return CMPgt
-		}
-		return lt2Cmp(auxIDs[v.Aux] < auxIDs[w.Aux])
-	}
-
-	return CMPeq
-}
-
-// Sort values to make the initial partition.
-type sortvalues struct {
-	a      []*Value // array of values
-	auxIDs auxmap   // aux -> aux ID map
-}
-
-func (sv sortvalues) Len() int      { return len(sv.a) }
-func (sv sortvalues) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
-func (sv sortvalues) Less(i, j int) bool {
-	v := sv.a[i]
-	w := sv.a[j]
-	if cmp := cmpVal(v, w, sv.auxIDs); cmp != CMPeq {
-		return cmp == CMPlt
-	}
-
-	// Sort by value ID last to keep the sort result deterministic.
-	return v.ID < w.ID
-}
-
-type partitionByDom struct {
-	a    []*Value // array of values
-	sdom SparseTree
-}
-
-func (sv partitionByDom) Len() int      { return len(sv.a) }
-func (sv partitionByDom) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
-func (sv partitionByDom) Less(i, j int) bool {
-	v := sv.a[i]
-	w := sv.a[j]
-	return sv.sdom.domorder(v.Block) < sv.sdom.domorder(w.Block)
-}
-
-type partitionByArgClass struct {
-	a       []*Value // array of values
-	eqClass []ID     // equivalence class IDs of values
-}
-
-func (sv partitionByArgClass) Len() int      { return len(sv.a) }
-func (sv partitionByArgClass) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
-func (sv partitionByArgClass) Less(i, j int) bool {
-	v := sv.a[i]
-	w := sv.a[j]
-	for i, a := range v.Args {
-		b := w.Args[i]
-		if sv.eqClass[a.ID] < sv.eqClass[b.ID] {
-			return true
-		}
-		if sv.eqClass[a.ID] > sv.eqClass[b.ID] {
-			return false
-		}
-	}
-	return false
-}
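
Note on the pass removed in this hunk: cse starts from a coarse partition of values (keyed on op, type, aux, auxint, and so on) and repeatedly splits any class whose members have arguments in different classes, until a fixed point; values in the same final class are then merged along the dominator tree. Below is a minimal sketch of that fixed-point refinement over an invented value representation; the real pass additionally sorts classes and handles memory values, tuples, and dominance.

    package main

    import "fmt"

    // val is an invented value representation: an opcode plus the indices of
    // its argument values. The real pass also keys on type, aux and auxint.
    type val struct {
    	op   string
    	args []int
    }

    // cseClasses assigns an equivalence-class number to every value,
    // refining an initial by-opcode partition until it stops changing.
    func cseClasses(vals []val) []int {
    	// Initial coarse partition: by opcode only.
    	class := make([]int, len(vals))
    	byOp := map[string]int{}
    	for i, v := range vals {
    		c, ok := byOp[v.op]
    		if !ok {
    			c = len(byOp)
    			byOp[v.op] = c
    		}
    		class[i] = c
    	}
    	// Refine: re-key every value by (its class, the classes of its args)
    	// and repeat until the partition is stable.
    	for {
    		next := map[string]int{}
    		newClass := make([]int, len(vals))
    		for i, v := range vals {
    			key := fmt.Sprintf("%d", class[i])
    			for _, a := range v.args {
    				key += fmt.Sprintf(",%d", class[a])
    			}
    			c, ok := next[key]
    			if !ok {
    				c = len(next)
    				next[key] = c
    			}
    			newClass[i] = c
    		}
    		changed := false
    		for i := range class {
    			if class[i] != newClass[i] {
    				changed = true
    			}
    		}
    		class = newClass
    		if !changed {
    			return class
    		}
    	}
    }

    func main() {
    	// v0: const 1, v1: const 1, v2: v0+v0, v3: v1+v1, v4: v0+v1
    	vals := []val{
    		{op: "const1"},
    		{op: "const1"},
    		{op: "add", args: []int{0, 0}},
    		{op: "add", args: []int{1, 1}},
    		{op: "add", args: []int{0, 1}},
    	}
    	fmt.Println(cseClasses(vals)) // [0 0 1 1 1]
    }

In the example the two constants fall into one class, so all three adds end up equivalent as well; that transitive sharing is exactly what the refinement loop is after.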
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/cse_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/cse_test.go
deleted file mode 100644
index b574bce..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/cse_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/cse_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/cse_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-type tstAux struct {
-	s string
-}
-
-// This tests for a bug found when partitioning, but not sorting by the Aux value.
-func TestCSEAuxPartitionBug(t *testing.T) {
-	c := testConfig(t)
-	arg1Aux := &tstAux{"arg1-aux"}
-	arg2Aux := &tstAux{"arg2-aux"}
-	arg3Aux := &tstAux{"arg3-aux"}
-
-	// construct lots of values with args that have aux values and place
-	// them in an order that triggers the bug
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sp", OpSP, TypeBytePtr, 0, nil),
-			Valu("r7", OpAdd64, TypeInt64, 0, nil, "arg3", "arg1"),
-			Valu("r1", OpAdd64, TypeInt64, 0, nil, "arg1", "arg2"),
-			Valu("arg1", OpArg, TypeInt64, 0, arg1Aux),
-			Valu("arg2", OpArg, TypeInt64, 0, arg2Aux),
-			Valu("arg3", OpArg, TypeInt64, 0, arg3Aux),
-			Valu("r9", OpAdd64, TypeInt64, 0, nil, "r7", "r8"),
-			Valu("r4", OpAdd64, TypeInt64, 0, nil, "r1", "r2"),
-			Valu("r8", OpAdd64, TypeInt64, 0, nil, "arg3", "arg2"),
-			Valu("r2", OpAdd64, TypeInt64, 0, nil, "arg1", "arg2"),
-			Valu("raddr", OpAddr, TypeInt64Ptr, 0, nil, "sp"),
-			Valu("raddrdef", OpVarDef, TypeMem, 0, nil, "start"),
-			Valu("r6", OpAdd64, TypeInt64, 0, nil, "r4", "r5"),
-			Valu("r3", OpAdd64, TypeInt64, 0, nil, "arg1", "arg2"),
-			Valu("r5", OpAdd64, TypeInt64, 0, nil, "r2", "r3"),
-			Valu("r10", OpAdd64, TypeInt64, 0, nil, "r6", "r9"),
-			Valu("rstore", OpStore, TypeMem, 8, nil, "raddr", "r10", "raddrdef"),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("rstore")))
-
-	CheckFunc(fun.f)
-	cse(fun.f)
-	deadcode(fun.f)
-	CheckFunc(fun.f)
-
-	s1Cnt := 2
-	// r1 == r2 == r3, needs to remove two of this set
-	s2Cnt := 1
-	// r4 == r5, needs to remove one of these
-	for k, v := range fun.values {
-		if v.Op == OpInvalid {
-			switch k {
-			case "r1":
-				fallthrough
-			case "r2":
-				fallthrough
-			case "r3":
-				if s1Cnt == 0 {
-					t.Errorf("cse removed all of r1,r2,r3")
-				}
-				s1Cnt--
-
-			case "r4":
-				fallthrough
-			case "r5":
-				if s2Cnt == 0 {
-					t.Errorf("cse removed all of r4,r5")
-				}
-				s2Cnt--
-			default:
-				t.Errorf("cse removed %s, but shouldn't have", k)
-			}
-		}
-	}
-
-	if s1Cnt != 0 || s2Cnt != 0 {
-		t.Errorf("%d values missed during cse", s1Cnt+s2Cnt)
-	}
-}
-
-// TestZCSE tests the zero arg cse.
-func TestZCSE(t *testing.T) {
-	c := testConfig(t)
-
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sp", OpSP, TypeBytePtr, 0, nil),
-			Valu("sb1", OpSB, TypeBytePtr, 0, nil),
-			Valu("sb2", OpSB, TypeBytePtr, 0, nil),
-			Valu("addr1", OpAddr, TypeInt64Ptr, 0, nil, "sb1"),
-			Valu("addr2", OpAddr, TypeInt64Ptr, 0, nil, "sb2"),
-			Valu("a1ld", OpLoad, TypeInt64, 0, nil, "addr1", "start"),
-			Valu("a2ld", OpLoad, TypeInt64, 0, nil, "addr2", "start"),
-			Valu("c1", OpConst64, TypeInt64, 1, nil),
-			Valu("r1", OpAdd64, TypeInt64, 0, nil, "a1ld", "c1"),
-			Valu("c2", OpConst64, TypeInt64, 1, nil),
-			Valu("r2", OpAdd64, TypeInt64, 0, nil, "a2ld", "c2"),
-			Valu("r3", OpAdd64, TypeInt64, 0, nil, "r1", "r2"),
-			Valu("raddr", OpAddr, TypeInt64Ptr, 0, nil, "sp"),
-			Valu("raddrdef", OpVarDef, TypeMem, 0, nil, "start"),
-			Valu("rstore", OpStore, TypeMem, 8, nil, "raddr", "r3", "raddrdef"),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("rstore")))
-
-	CheckFunc(fun.f)
-	zcse(fun.f)
-	deadcode(fun.f)
-	CheckFunc(fun.f)
-
-	if fun.values["c1"].Op != OpInvalid && fun.values["c2"].Op != OpInvalid {
-		t.Errorf("zcse should have removed c1 or c2")
-	}
-	if fun.values["sb1"].Op != OpInvalid && fun.values["sb2"].Op != OpInvalid {
-		t.Errorf("zcse should have removed sb1 or sb2")
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadcode.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadcode.go
deleted file mode 100644
index eb4bcc0..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadcode.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadcode.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadcode.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// findlive returns the reachable blocks and live values in f.
-func findlive(f *Func) (reachable []bool, live []bool) {
-	reachable = reachableBlocks(f)
-	live = liveValues(f, reachable)
-	return
-}
-
-// reachableBlocks returns the reachable blocks in f.
-func reachableBlocks(f *Func) []bool {
-	reachable := make([]bool, f.NumBlocks())
-	reachable[f.Entry.ID] = true
-	p := []*Block{f.Entry} // stack-like worklist
-	for len(p) > 0 {
-		// Pop a reachable block
-		b := p[len(p)-1]
-		p = p[:len(p)-1]
-		// Mark successors as reachable
-		s := b.Succs
-		if b.Kind == BlockFirst {
-			s = s[:1]
-		}
-		for _, e := range s {
-			c := e.b
-			if !reachable[c.ID] {
-				reachable[c.ID] = true
-				p = append(p, c) // push
-			}
-		}
-	}
-	return reachable
-}
-
-// liveValues returns the live values in f.
-// reachable is a map from block ID to whether the block is reachable.
-func liveValues(f *Func, reachable []bool) []bool {
-	live := make([]bool, f.NumValues())
-
-	// After regalloc, consider all values to be live.
-	// See the comment at the top of regalloc.go and in deadcode for details.
-	if f.RegAlloc != nil {
-		for i := range live {
-			live[i] = true
-		}
-		return live
-	}
-
-	// Find all live values
-	var q []*Value // stack-like worklist of unscanned values
-
-	// Starting set: all control values of reachable blocks are live.
-	// Calls are live (because callee can observe the memory state).
-	for _, b := range f.Blocks {
-		if !reachable[b.ID] {
-			continue
-		}
-		if v := b.Control; v != nil && !live[v.ID] {
-			live[v.ID] = true
-			q = append(q, v)
-		}
-		for _, v := range b.Values {
-			if opcodeTable[v.Op].call && !live[v.ID] {
-				live[v.ID] = true
-				q = append(q, v)
-			}
-			if v.Type.IsVoid() && !live[v.ID] {
-				// The only Void ops are nil checks.  We must keep these.
-				live[v.ID] = true
-				q = append(q, v)
-			}
-		}
-	}
-
-	// Compute transitive closure of live values.
-	for len(q) > 0 {
-		// pop a reachable value
-		v := q[len(q)-1]
-		q = q[:len(q)-1]
-		for i, x := range v.Args {
-			if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] {
-				continue
-			}
-			if !live[x.ID] {
-				live[x.ID] = true
-				q = append(q, x) // push
-			}
-		}
-	}
-
-	return live
-}
-
-// deadcode removes dead code from f.
-func deadcode(f *Func) {
-	// deadcode after regalloc is forbidden for now. Regalloc
-	// doesn't quite generate legal SSA which will lead to some
-	// required moves being eliminated. See the comment at the
-	// top of regalloc.go for details.
-	if f.RegAlloc != nil {
-		f.Fatalf("deadcode after regalloc")
-	}
-
-	// Find reachable blocks.
-	reachable := reachableBlocks(f)
-
-	// Get rid of edges from dead to live code.
-	for _, b := range f.Blocks {
-		if reachable[b.ID] {
-			continue
-		}
-		for i := 0; i < len(b.Succs); {
-			e := b.Succs[i]
-			if reachable[e.b.ID] {
-				b.removeEdge(i)
-			} else {
-				i++
-			}
-		}
-	}
-
-	// Get rid of dead edges from live code.
-	for _, b := range f.Blocks {
-		if !reachable[b.ID] {
-			continue
-		}
-		if b.Kind != BlockFirst {
-			continue
-		}
-		b.removeEdge(1)
-		b.Kind = BlockPlain
-		b.Likely = BranchUnknown
-	}
-
-	// Splice out any copies introduced during dead block removal.
-	copyelim(f)
-
-	// Find live values.
-	live := liveValues(f, reachable)
-
-	// Remove dead & duplicate entries from namedValues map.
-	s := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(s)
-	i := 0
-	for _, name := range f.Names {
-		j := 0
-		s.clear()
-		values := f.NamedValues[name]
-		for _, v := range values {
-			if live[v.ID] && !s.contains(v.ID) {
-				values[j] = v
-				j++
-				s.add(v.ID)
-			}
-		}
-		if j == 0 {
-			delete(f.NamedValues, name)
-		} else {
-			f.Names[i] = name
-			i++
-			for k := len(values) - 1; k >= j; k-- {
-				values[k] = nil
-			}
-			f.NamedValues[name] = values[:j]
-		}
-	}
-	for k := len(f.Names) - 1; k >= i; k-- {
-		f.Names[k] = LocalSlot{}
-	}
-	f.Names = f.Names[:i]
-
-	// Unlink values.
-	for _, b := range f.Blocks {
-		if !reachable[b.ID] {
-			b.SetControl(nil)
-		}
-		for _, v := range b.Values {
-			if !live[v.ID] {
-				v.resetArgs()
-			}
-		}
-	}
-
-	// Remove dead values from blocks' value list. Return dead
-	// values to the allocator.
-	for _, b := range f.Blocks {
-		i := 0
-		for _, v := range b.Values {
-			if live[v.ID] {
-				b.Values[i] = v
-				i++
-			} else {
-				f.freeValue(v)
-			}
-		}
-		// aid GC
-		tail := b.Values[i:]
-		for j := range tail {
-			tail[j] = nil
-		}
-		b.Values = b.Values[:i]
-	}
-
-	// Remove unreachable blocks. Return dead blocks to allocator.
-	i = 0
-	for _, b := range f.Blocks {
-		if reachable[b.ID] {
-			f.Blocks[i] = b
-			i++
-		} else {
-			if len(b.Values) > 0 {
-				b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
-			}
-			f.freeBlock(b)
-		}
-	}
-	// zero remainder to help GC
-	tail := f.Blocks[i:]
-	for j := range tail {
-		tail[j] = nil
-	}
-	f.Blocks = f.Blocks[:i]
-}
-
-// removeEdge removes the i'th outgoing edge from b (and
-// the corresponding incoming edge from b.Succs[i].b).
-func (b *Block) removeEdge(i int) {
-	e := b.Succs[i]
-	c := e.b
-	j := e.i
-
-	// Adjust b.Succs
-	b.removeSucc(i)
-
-	// Adjust c.Preds
-	c.removePred(j)
-
-	// Remove phi args from c's phis.
-	n := len(c.Preds)
-	for _, v := range c.Values {
-		if v.Op != OpPhi {
-			continue
-		}
-		v.Args[j].Uses--
-		v.Args[j] = v.Args[n]
-		v.Args[n] = nil
-		v.Args = v.Args[:n]
-		phielimValue(v)
-		// Note: this is trickier than it looks. Replacing
-		// a Phi with a Copy can in general cause problems because
-		// Phi and Copy don't have exactly the same semantics.
-		// Phi arguments always come from a predecessor block,
-		// whereas copies don't. This matters in loops like:
-		// 1: x = (Phi y)
-		//    y = (Add x 1)
-		//    goto 1
-		// If we replace Phi->Copy, we get
-		// 1: x = (Copy y)
-		//    y = (Add x 1)
-		//    goto 1
-		// (Phi y) refers to the *previous* value of y, whereas
-		// (Copy y) refers to the *current* value of y.
-		// The modified code has a cycle and the scheduler
-		// will barf on it.
-		//
-		// Fortunately, this situation can only happen for dead
-		// code loops. We know the code we're working with is
-		// not dead, so we're ok.
-		// Proof: If we have a potential bad cycle, we have a
-		// situation like this:
-		//   x = (Phi z)
-		//   y = (op1 x ...)
-		//   z = (op2 y ...)
-		// Where opX are not Phi ops. But such a situation
-		// implies a cycle in the dominator graph. In the
-		// example, x.Block dominates y.Block, y.Block dominates
-		// z.Block, and z.Block dominates x.Block (treating
-		// "dominates" as reflexive).  Cycles in the dominator
-		// graph can only happen in an unreachable cycle.
-	}
-}
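
Note on the pass removed in this hunk: deadcode first flood-fills reachability from the entry block with a stack-like worklist, then computes live values the same way starting from block controls and calls, and finally unlinks everything not marked. Below is a minimal sketch of the reachability step over an invented adjacency-list graph rather than *ssa.Block edges.

    package main

    import "fmt"

    // reachable returns, for a graph given as successor index lists,
    // which node indices can be reached from entry.
    func reachable(succs [][]int, entry int) []bool {
    	seen := make([]bool, len(succs))
    	seen[entry] = true
    	work := []int{entry} // stack-like worklist
    	for len(work) > 0 {
    		n := work[len(work)-1] // pop
    		work = work[:len(work)-1]
    		for _, s := range succs[n] {
    			if !seen[s] {
    				seen[s] = true
    				work = append(work, s) // push
    			}
    		}
    	}
    	return seen
    }

    func main() {
    	// 0: entry -> 1; 1: exit; 2: dead self-loop -> {2, 1}
    	succs := [][]int{{1}, {}, {2, 1}}
    	fmt.Println(reachable(succs, 0)) // [true true false]
    }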
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadcode_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadcode_test.go
deleted file mode 100644
index 85a6ce5..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadcode_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadcode_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadcode_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"strconv"
-	"testing"
-)
-
-func TestDeadLoop(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")),
-		// dead loop
-		Bloc("deadblock",
-			// dead value in dead block
-			Valu("deadval", OpConstBool, TypeBool, 1, nil),
-			If("deadval", "deadblock", "exit")))
-
-	CheckFunc(fun.f)
-	Deadcode(fun.f)
-	CheckFunc(fun.f)
-
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["deadblock"] {
-			t.Errorf("dead block not removed")
-		}
-		for _, v := range b.Values {
-			if v == fun.values["deadval"] {
-				t.Errorf("control value of dead block not removed")
-			}
-		}
-	}
-}
-
-func TestDeadValue(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("deadval", OpConst64, TypeInt64, 37, nil),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	Deadcode(fun.f)
-	CheckFunc(fun.f)
-
-	for _, b := range fun.f.Blocks {
-		for _, v := range b.Values {
-			if v == fun.values["deadval"] {
-				t.Errorf("dead value not removed")
-			}
-		}
-	}
-}
-
-func TestNeverTaken(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("cond", OpConstBool, TypeBool, 0, nil),
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			If("cond", "then", "else")),
-		Bloc("then",
-			Goto("exit")),
-		Bloc("else",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	Opt(fun.f)
-	Deadcode(fun.f)
-	CheckFunc(fun.f)
-
-	if fun.blocks["entry"].Kind != BlockPlain {
-		t.Errorf("if(false) not simplified")
-	}
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["then"] {
-			t.Errorf("then block still present")
-		}
-		for _, v := range b.Values {
-			if v == fun.values["cond"] {
-				t.Errorf("constant condition still present")
-			}
-		}
-	}
-
-}
-
-func TestNestedDeadBlocks(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("cond", OpConstBool, TypeBool, 0, nil),
-			If("cond", "b2", "b4")),
-		Bloc("b2",
-			If("cond", "b3", "b4")),
-		Bloc("b3",
-			If("cond", "b3", "b4")),
-		Bloc("b4",
-			If("cond", "b3", "exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	Opt(fun.f)
-	CheckFunc(fun.f)
-	Deadcode(fun.f)
-	CheckFunc(fun.f)
-	if fun.blocks["entry"].Kind != BlockPlain {
-		t.Errorf("if(false) not simplified")
-	}
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["b2"] {
-			t.Errorf("b2 block still present")
-		}
-		if b == fun.blocks["b3"] {
-			t.Errorf("b3 block still present")
-		}
-		for _, v := range b.Values {
-			if v == fun.values["cond"] {
-				t.Errorf("constant condition still present")
-			}
-		}
-	}
-}
-
-func BenchmarkDeadCode(b *testing.B) {
-	for _, n := range [...]int{1, 10, 100, 1000, 10000, 100000, 200000} {
-		b.Run(strconv.Itoa(n), func(b *testing.B) {
-			c := testConfig(b)
-			blocks := make([]bloc, 0, n+2)
-			blocks = append(blocks,
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Goto("exit")))
-			blocks = append(blocks, Bloc("exit", Exit("mem")))
-			for i := 0; i < n; i++ {
-				blocks = append(blocks, Bloc(fmt.Sprintf("dead%d", i), Goto("exit")))
-			}
-			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
-				fun := Fun(c, "entry", blocks...)
-				Deadcode(fun.f)
-				fun.f.Free()
-			}
-		})
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadstore.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadstore.go
deleted file mode 100644
index fb61fc4..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadstore.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadstore.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadstore.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// dse does dead-store elimination on the Function.
-// Dead stores are those which are unconditionally followed by
-// another store to the same location, with no intervening load.
-// This implementation only works within a basic block. TODO: use something more global.
-func dse(f *Func) {
-	var stores []*Value
-	loadUse := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(loadUse)
-	storeUse := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(storeUse)
-	shadowed := newSparseMap(f.NumValues()) // TODO: cache
-	for _, b := range f.Blocks {
-		// Find all the stores in this block. Categorize their uses:
-		//  loadUse contains stores which are used by a subsequent load.
-		//  storeUse contains stores which are used by a subsequent store.
-		loadUse.clear()
-		storeUse.clear()
-		stores = stores[:0]
-		for _, v := range b.Values {
-			if v.Op == OpPhi {
-				// Ignore phis - they will always be first and can't be eliminated
-				continue
-			}
-			if v.Type.IsMemory() {
-				stores = append(stores, v)
-				if v.Op == OpSelect1 {
-					// Use the args of the tuple-generating op.
-					v = v.Args[0]
-				}
-				for _, a := range v.Args {
-					if a.Block == b && a.Type.IsMemory() {
-						storeUse.add(a.ID)
-						if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef && v.Op != OpVarKill {
-							// CALL, DUFFCOPY, etc. are both
-							// reads and writes.
-							loadUse.add(a.ID)
-						}
-					}
-				}
-			} else {
-				for _, a := range v.Args {
-					if a.Block == b && a.Type.IsMemory() {
-						loadUse.add(a.ID)
-					}
-				}
-			}
-		}
-		if len(stores) == 0 {
-			continue
-		}
-
-		// find last store in the block
-		var last *Value
-		for _, v := range stores {
-			if storeUse.contains(v.ID) {
-				continue
-			}
-			if last != nil {
-				b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
-			}
-			last = v
-		}
-		if last == nil {
-			b.Fatalf("no last store found - cycle?")
-		}
-
-		// Walk backwards looking for dead stores. Keep track of shadowed addresses.
-		// An "address" is an SSA Value which encodes both the address and size of
-		// the write. This code will not remove dead stores to the same address
-		// of different types.
-		shadowed.clear()
-		v := last
-
-	walkloop:
-		if loadUse.contains(v.ID) {
-			// Someone might be reading this memory state.
-			// Clear all shadowed addresses.
-			shadowed.clear()
-		}
-		if v.Op == OpStore || v.Op == OpZero {
-			var sz int64
-			if v.Op == OpStore {
-				sz = v.AuxInt
-			} else { // OpZero
-				sz = SizeAndAlign(v.AuxInt).Size()
-			}
-			if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
-				// Modify store into a copy
-				if v.Op == OpStore {
-					// store addr value mem
-					v.SetArgs1(v.Args[2])
-				} else {
-					// zero addr mem
-					typesz := v.Args[0].Type.ElemType().Size()
-					if sz != typesz {
-						f.Fatalf("mismatched zero/store sizes: %d and %d [%s]",
-							sz, typesz, v.LongString())
-					}
-					v.SetArgs1(v.Args[1])
-				}
-				v.Aux = nil
-				v.AuxInt = 0
-				v.Op = OpCopy
-			} else {
-				if sz > 0x7fffffff { // work around sparseMap's int32 value type
-					sz = 0x7fffffff
-				}
-				shadowed.set(v.Args[0].ID, int32(sz), 0)
-			}
-		}
-		// walk to previous store
-		if v.Op == OpPhi {
-			continue // At start of block.  Move on to next block.
-		}
-		for _, a := range v.Args {
-			if a.Block == b && a.Type.IsMemory() {
-				v = a
-				goto walkloop
-			}
-		}
-	}
-}
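
Note on the pass removed in this hunk: dse walks each block's store chain backwards, remembering which addresses (and how many bytes) a later store has already written; an earlier store that is fully shadowed, with no possible load in between, is rewritten into a copy of its memory argument. Below is a minimal sketch of the shadowing bookkeeping over an invented store record; the real pass keys on the address *ssa.Value and walks the memory chain rather than a slice.

    package main

    import "fmt"

    type store struct {
    	addr string // which location is written
    	size int64  // how many bytes
    	load bool   // true if this op may also read memory (call, copy, ...)
    }

    // deadStores returns the indices of stores in block (oldest first)
    // that a later, at-least-as-wide store fully shadows.
    func deadStores(block []store) []int {
    	shadowed := map[string]int64{} // addr -> bytes already written later
    	var dead []int
    	for i := len(block) - 1; i >= 0; i-- {
    		s := block[i]
    		if s.load {
    			// Someone may read this memory state: forget all shadowing.
    			shadowed = map[string]int64{}
    			continue
    		}
    		if sz, ok := shadowed[s.addr]; ok && sz >= s.size {
    			dead = append(dead, i)
    			continue
    		}
    		if s.size > shadowed[s.addr] {
    			shadowed[s.addr] = s.size
    		}
    	}
    	return dead
    }

    func main() {
    	block := []store{
    		{addr: "x", size: 8}, // 0: dead, overwritten by store 2
    		{addr: "y", size: 8}, // 1: live
    		{addr: "x", size: 8}, // 2: live (last write to x)
    	}
    	fmt.Println(deadStores(block)) // [0]
    }

The size comparison mirrors the deleted code's rule that a narrower later store must not shadow a wider earlier one, which is what TestDeadStoreUnsafe above checks.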
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadstore_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadstore_test.go
deleted file mode 100644
index 2a4d969..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/deadstore_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadstore_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/deadstore_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-func TestDeadStore(t *testing.T) {
-	c := testConfig(t)
-	elemType := &TypeImpl{Size_: 1, Name: "testtype"}
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr", Elem_: elemType} // dummy for testing
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Valu("v", OpConstBool, TypeBool, 1, nil),
-			Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
-			Valu("addr2", OpAddr, ptrType, 0, nil, "sb"),
-			Valu("addr3", OpAddr, ptrType, 0, nil, "sb"),
-			Valu("zero1", OpZero, TypeMem, 1, nil, "addr3", "start"),
-			Valu("store1", OpStore, TypeMem, 1, nil, "addr1", "v", "zero1"),
-			Valu("store2", OpStore, TypeMem, 1, nil, "addr2", "v", "store1"),
-			Valu("store3", OpStore, TypeMem, 1, nil, "addr1", "v", "store2"),
-			Valu("store4", OpStore, TypeMem, 1, nil, "addr3", "v", "store3"),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("store3")))
-
-	CheckFunc(fun.f)
-	dse(fun.f)
-	CheckFunc(fun.f)
-
-	v1 := fun.values["store1"]
-	if v1.Op != OpCopy {
-		t.Errorf("dead store not removed")
-	}
-
-	v2 := fun.values["zero1"]
-	if v2.Op != OpCopy {
-		t.Errorf("dead store (zero) not removed")
-	}
-}
-func TestDeadStorePhi(t *testing.T) {
-	// make sure we don't get into an infinite loop with phi values.
-	c := testConfig(t)
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Valu("v", OpConstBool, TypeBool, 1, nil),
-			Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
-			Goto("loop")),
-		Bloc("loop",
-			Valu("phi", OpPhi, TypeMem, 0, nil, "start", "store"),
-			Valu("store", OpStore, TypeMem, 1, nil, "addr", "v", "phi"),
-			If("v", "loop", "exit")),
-		Bloc("exit",
-			Exit("store")))
-
-	CheckFunc(fun.f)
-	dse(fun.f)
-	CheckFunc(fun.f)
-}
-
-func TestDeadStoreTypes(t *testing.T) {
-	// Make sure a narrow store can't shadow a wider one. We test an even
-	// stronger restriction, that one store can't shadow another unless the
-	// types of the address fields are identical (where identicalness is
-	// decided by the CSE pass).
-	c := testConfig(t)
-	t1 := &TypeImpl{Size_: 8, Ptr: true, Name: "t1"}
-	t2 := &TypeImpl{Size_: 4, Ptr: true, Name: "t2"}
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Valu("v", OpConstBool, TypeBool, 1, nil),
-			Valu("addr1", OpAddr, t1, 0, nil, "sb"),
-			Valu("addr2", OpAddr, t2, 0, nil, "sb"),
-			Valu("store1", OpStore, TypeMem, 1, nil, "addr1", "v", "start"),
-			Valu("store2", OpStore, TypeMem, 1, nil, "addr2", "v", "store1"),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("store2")))
-
-	CheckFunc(fun.f)
-	cse(fun.f)
-	dse(fun.f)
-	CheckFunc(fun.f)
-
-	v := fun.values["store1"]
-	if v.Op == OpCopy {
-		t.Errorf("store %s incorrectly removed", v)
-	}
-}
-
-func TestDeadStoreUnsafe(t *testing.T) {
-	// Make sure a narrow store can't shadow a wider one. The test above
-	// covers the case of two different types, but unsafe pointer casting
-	// can get to a point where the size is changed but type unchanged.
-	c := testConfig(t)
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Valu("v", OpConstBool, TypeBool, 1, nil),
-			Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
-			Valu("store1", OpStore, TypeMem, 8, nil, "addr1", "v", "start"),  // store 8 bytes
-			Valu("store2", OpStore, TypeMem, 1, nil, "addr1", "v", "store1"), // store 1 byte
-			Goto("exit")),
-		Bloc("exit",
-			Exit("store2")))
-
-	CheckFunc(fun.f)
-	cse(fun.f)
-	dse(fun.f)
-	CheckFunc(fun.f)
-
-	v := fun.values["store1"]
-	if v.Op == OpCopy {
-		t.Errorf("store %s incorrectly removed", v)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/decompose.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/decompose.go
deleted file mode 100644
index 52d0e25..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/decompose.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/decompose.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/decompose.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// decompose converts phi ops on compound builtin types into phi
-// ops on simple types.
-// (The remaining compound ops are decomposed with rewrite rules.)
-func decomposeBuiltIn(f *Func) {
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				continue
-			}
-			decomposeBuiltInPhi(v)
-		}
-	}
-
-	// Split up named values into their components.
-	// NOTE: the component values we are making are dead at this point.
-	// We must do the opt pass before any deadcode elimination or we will
-	// lose the name->value correspondence.
-	var newNames []LocalSlot
-	for _, name := range f.Names {
-		t := name.Type
-		switch {
-		case t.IsInteger() && t.Size() == 8 && f.Config.IntSize == 4:
-			var elemType Type
-			if t.IsSigned() {
-				elemType = f.Config.fe.TypeInt32()
-			} else {
-				elemType = f.Config.fe.TypeUInt32()
-			}
-			hiName, loName := f.Config.fe.SplitInt64(name)
-			newNames = append(newNames, hiName, loName)
-			for _, v := range f.NamedValues[name] {
-				hi := v.Block.NewValue1(v.Line, OpInt64Hi, elemType, v)
-				lo := v.Block.NewValue1(v.Line, OpInt64Lo, f.Config.fe.TypeUInt32(), v)
-				f.NamedValues[hiName] = append(f.NamedValues[hiName], hi)
-				f.NamedValues[loName] = append(f.NamedValues[loName], lo)
-			}
-			delete(f.NamedValues, name)
-		case t.IsComplex():
-			var elemType Type
-			if t.Size() == 16 {
-				elemType = f.Config.fe.TypeFloat64()
-			} else {
-				elemType = f.Config.fe.TypeFloat32()
-			}
-			rName, iName := f.Config.fe.SplitComplex(name)
-			newNames = append(newNames, rName, iName)
-			for _, v := range f.NamedValues[name] {
-				r := v.Block.NewValue1(v.Line, OpComplexReal, elemType, v)
-				i := v.Block.NewValue1(v.Line, OpComplexImag, elemType, v)
-				f.NamedValues[rName] = append(f.NamedValues[rName], r)
-				f.NamedValues[iName] = append(f.NamedValues[iName], i)
-			}
-			delete(f.NamedValues, name)
-		case t.IsString():
-			ptrType := f.Config.fe.TypeBytePtr()
-			lenType := f.Config.fe.TypeInt()
-			ptrName, lenName := f.Config.fe.SplitString(name)
-			newNames = append(newNames, ptrName, lenName)
-			for _, v := range f.NamedValues[name] {
-				ptr := v.Block.NewValue1(v.Line, OpStringPtr, ptrType, v)
-				len := v.Block.NewValue1(v.Line, OpStringLen, lenType, v)
-				f.NamedValues[ptrName] = append(f.NamedValues[ptrName], ptr)
-				f.NamedValues[lenName] = append(f.NamedValues[lenName], len)
-			}
-			delete(f.NamedValues, name)
-		case t.IsSlice():
-			ptrType := f.Config.fe.TypeBytePtr()
-			lenType := f.Config.fe.TypeInt()
-			ptrName, lenName, capName := f.Config.fe.SplitSlice(name)
-			newNames = append(newNames, ptrName, lenName, capName)
-			for _, v := range f.NamedValues[name] {
-				ptr := v.Block.NewValue1(v.Line, OpSlicePtr, ptrType, v)
-				len := v.Block.NewValue1(v.Line, OpSliceLen, lenType, v)
-				cap := v.Block.NewValue1(v.Line, OpSliceCap, lenType, v)
-				f.NamedValues[ptrName] = append(f.NamedValues[ptrName], ptr)
-				f.NamedValues[lenName] = append(f.NamedValues[lenName], len)
-				f.NamedValues[capName] = append(f.NamedValues[capName], cap)
-			}
-			delete(f.NamedValues, name)
-		case t.IsInterface():
-			ptrType := f.Config.fe.TypeBytePtr()
-			typeName, dataName := f.Config.fe.SplitInterface(name)
-			newNames = append(newNames, typeName, dataName)
-			for _, v := range f.NamedValues[name] {
-				typ := v.Block.NewValue1(v.Line, OpITab, ptrType, v)
-				data := v.Block.NewValue1(v.Line, OpIData, ptrType, v)
-				f.NamedValues[typeName] = append(f.NamedValues[typeName], typ)
-				f.NamedValues[dataName] = append(f.NamedValues[dataName], data)
-			}
-			delete(f.NamedValues, name)
-		case t.IsFloat():
-			// floats are never decomposed, even ones bigger than IntSize
-		case t.Size() > f.Config.IntSize:
-			f.Fatalf("undecomposed named type %v %v", name, t)
-		default:
-			newNames = append(newNames, name)
-		}
-	}
-	f.Names = newNames
-}
-
-func decomposeBuiltInPhi(v *Value) {
-	switch {
-	case v.Type.IsInteger() && v.Type.Size() == 8 && v.Block.Func.Config.IntSize == 4:
-		if v.Block.Func.Config.arch == "amd64p32" {
-			// Even though ints are 32 bits, we have 64-bit ops.
-			break
-		}
-		decomposeInt64Phi(v)
-	case v.Type.IsComplex():
-		decomposeComplexPhi(v)
-	case v.Type.IsString():
-		decomposeStringPhi(v)
-	case v.Type.IsSlice():
-		decomposeSlicePhi(v)
-	case v.Type.IsInterface():
-		decomposeInterfacePhi(v)
-	case v.Type.IsFloat():
-		// floats are never decomposed, even ones bigger than IntSize
-	case v.Type.Size() > v.Block.Func.Config.IntSize:
-		v.Fatalf("undecomposed type %s", v.Type)
-	}
-}
-
-func decomposeStringPhi(v *Value) {
-	fe := v.Block.Func.Config.fe
-	ptrType := fe.TypeBytePtr()
-	lenType := fe.TypeInt()
-
-	ptr := v.Block.NewValue0(v.Line, OpPhi, ptrType)
-	len := v.Block.NewValue0(v.Line, OpPhi, lenType)
-	for _, a := range v.Args {
-		ptr.AddArg(a.Block.NewValue1(v.Line, OpStringPtr, ptrType, a))
-		len.AddArg(a.Block.NewValue1(v.Line, OpStringLen, lenType, a))
-	}
-	v.reset(OpStringMake)
-	v.AddArg(ptr)
-	v.AddArg(len)
-}
-
-func decomposeSlicePhi(v *Value) {
-	fe := v.Block.Func.Config.fe
-	ptrType := fe.TypeBytePtr()
-	lenType := fe.TypeInt()
-
-	ptr := v.Block.NewValue0(v.Line, OpPhi, ptrType)
-	len := v.Block.NewValue0(v.Line, OpPhi, lenType)
-	cap := v.Block.NewValue0(v.Line, OpPhi, lenType)
-	for _, a := range v.Args {
-		ptr.AddArg(a.Block.NewValue1(v.Line, OpSlicePtr, ptrType, a))
-		len.AddArg(a.Block.NewValue1(v.Line, OpSliceLen, lenType, a))
-		cap.AddArg(a.Block.NewValue1(v.Line, OpSliceCap, lenType, a))
-	}
-	v.reset(OpSliceMake)
-	v.AddArg(ptr)
-	v.AddArg(len)
-	v.AddArg(cap)
-}
-
-func decomposeInt64Phi(v *Value) {
-	fe := v.Block.Func.Config.fe
-	var partType Type
-	if v.Type.IsSigned() {
-		partType = fe.TypeInt32()
-	} else {
-		partType = fe.TypeUInt32()
-	}
-
-	hi := v.Block.NewValue0(v.Line, OpPhi, partType)
-	lo := v.Block.NewValue0(v.Line, OpPhi, fe.TypeUInt32())
-	for _, a := range v.Args {
-		hi.AddArg(a.Block.NewValue1(v.Line, OpInt64Hi, partType, a))
-		lo.AddArg(a.Block.NewValue1(v.Line, OpInt64Lo, fe.TypeUInt32(), a))
-	}
-	v.reset(OpInt64Make)
-	v.AddArg(hi)
-	v.AddArg(lo)
-}
-
-func decomposeComplexPhi(v *Value) {
-	fe := v.Block.Func.Config.fe
-	var partType Type
-	switch z := v.Type.Size(); z {
-	case 8:
-		partType = fe.TypeFloat32()
-	case 16:
-		partType = fe.TypeFloat64()
-	default:
-		v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
-	}
-
-	real := v.Block.NewValue0(v.Line, OpPhi, partType)
-	imag := v.Block.NewValue0(v.Line, OpPhi, partType)
-	for _, a := range v.Args {
-		real.AddArg(a.Block.NewValue1(v.Line, OpComplexReal, partType, a))
-		imag.AddArg(a.Block.NewValue1(v.Line, OpComplexImag, partType, a))
-	}
-	v.reset(OpComplexMake)
-	v.AddArg(real)
-	v.AddArg(imag)
-}
-
-func decomposeInterfacePhi(v *Value) {
-	ptrType := v.Block.Func.Config.fe.TypeBytePtr()
-
-	itab := v.Block.NewValue0(v.Line, OpPhi, ptrType)
-	data := v.Block.NewValue0(v.Line, OpPhi, ptrType)
-	for _, a := range v.Args {
-		itab.AddArg(a.Block.NewValue1(v.Line, OpITab, ptrType, a))
-		data.AddArg(a.Block.NewValue1(v.Line, OpIData, ptrType, a))
-	}
-	v.reset(OpIMake)
-	v.AddArg(itab)
-	v.AddArg(data)
-}
-
-func decomposeUser(f *Func) {
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				continue
-			}
-			decomposeUserPhi(v)
-		}
-	}
-	// Split up named values into their components.
-	// NOTE: the component values we are making are dead at this point.
-	// We must do the opt pass before any deadcode elimination or we will
-	// lose the name->value correspondence.
-	i := 0
-	var fnames []LocalSlot
-	var newNames []LocalSlot
-	for _, name := range f.Names {
-		t := name.Type
-		switch {
-		case t.IsStruct():
-			n := t.NumFields()
-			fnames = fnames[:0]
-			for i := 0; i < n; i++ {
-				fnames = append(fnames, f.Config.fe.SplitStruct(name, i))
-			}
-			for _, v := range f.NamedValues[name] {
-				for i := 0; i < n; i++ {
-					x := v.Block.NewValue1I(v.Line, OpStructSelect, t.FieldType(i), int64(i), v)
-					f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], x)
-				}
-			}
-			delete(f.NamedValues, name)
-			newNames = append(newNames, fnames...)
-		case t.IsArray():
-			if t.NumElem() == 0 {
-				// TODO(khr): Not sure what to do here.  Probably nothing.
-				// Names for empty arrays aren't important.
-				break
-			}
-			if t.NumElem() != 1 {
-				f.Fatalf("array not of size 1")
-			}
-			elemName := f.Config.fe.SplitArray(name)
-			for _, v := range f.NamedValues[name] {
-				e := v.Block.NewValue1I(v.Line, OpArraySelect, t.ElemType(), 0, v)
-				f.NamedValues[elemName] = append(f.NamedValues[elemName], e)
-			}
-
-		default:
-			f.Names[i] = name
-			i++
-		}
-	}
-	f.Names = f.Names[:i]
-	f.Names = append(f.Names, newNames...)
-}
-
-func decomposeUserPhi(v *Value) {
-	switch {
-	case v.Type.IsStruct():
-		decomposeStructPhi(v)
-	case v.Type.IsArray():
-		decomposeArrayPhi(v)
-	}
-}
-
-// decomposeStructPhi replaces phi-of-struct with structmake(phi-for-each-field),
-// and then recursively decomposes the phis for each field.
-func decomposeStructPhi(v *Value) {
-	t := v.Type
-	n := t.NumFields()
-	var fields [MaxStruct]*Value
-	for i := 0; i < n; i++ {
-		fields[i] = v.Block.NewValue0(v.Line, OpPhi, t.FieldType(i))
-	}
-	for _, a := range v.Args {
-		for i := 0; i < n; i++ {
-			fields[i].AddArg(a.Block.NewValue1I(v.Line, OpStructSelect, t.FieldType(i), int64(i), a))
-		}
-	}
-	v.reset(StructMakeOp(n))
-	v.AddArgs(fields[:n]...)
-
-	// Recursively decompose phis for each field.
-	for _, f := range fields[:n] {
-		decomposeUserPhi(f)
-	}
-}
-
-// decomposeArrayPhi replaces phi-of-array with arraymake(phi-of-array-element),
-// and then recursively decomposes the element phi.
-func decomposeArrayPhi(v *Value) {
-	t := v.Type
-	if t.NumElem() == 0 {
-		v.reset(OpArrayMake0)
-		return
-	}
-	if t.NumElem() != 1 {
-		v.Fatalf("SSAable array must have no more than 1 element")
-	}
-	elem := v.Block.NewValue0(v.Line, OpPhi, t.ElemType())
-	for _, a := range v.Args {
-		elem.AddArg(a.Block.NewValue1I(v.Line, OpArraySelect, t.ElemType(), 0, a))
-	}
-	v.reset(OpArrayMake1)
-	v.AddArg(elem)
-
-	// Recursively decompose elem phi.
-	decomposeUserPhi(elem)
-}
-
-// MaxStruct is the maximum number of fields a struct
-// can have and still be SSAable.
-const MaxStruct = 4
-
-// StructMakeOp returns the opcode to construct a struct with the
-// given number of fields.
-func StructMakeOp(nf int) Op {
-	switch nf {
-	case 0:
-		return OpStructMake0
-	case 1:
-		return OpStructMake1
-	case 2:
-		return OpStructMake2
-	case 3:
-		return OpStructMake3
-	case 4:
-		return OpStructMake4
-	}
-	panic("too many fields in an SSAable struct")
-}
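
The decomposeStructPhi pass above rewrites a phi over an SSAable struct into a StructMake whose arguments are new per-field phis. Below is a minimal standalone sketch of that shape; toyValue and decomposeStructPhi2 are made-up stand-ins for the ssa package's *Value and the real pass, kept just concrete enough to run.

package main

import "fmt"

// toyValue is a hypothetical stand-in for ssa.Value: just an op name and args.
type toyValue struct {
	op   string
	args []*toyValue
}

// decomposeStructPhi2 mirrors decomposeStructPhi for a 2-field struct:
// phi(s1, s2) becomes StructMake2(phi(s1.f0, s2.f0), phi(s1.f1, s2.f1)).
func decomposeStructPhi2(phi *toyValue) *toyValue {
	field0 := &toyValue{op: "Phi"}
	field1 := &toyValue{op: "Phi"}
	for _, a := range phi.args {
		field0.args = append(field0.args, &toyValue{op: "StructSelect[0]", args: []*toyValue{a}})
		field1.args = append(field1.args, &toyValue{op: "StructSelect[1]", args: []*toyValue{a}})
	}
	return &toyValue{op: "StructMake2", args: []*toyValue{field0, field1}}
}

func main() {
	s1 := &toyValue{op: "struct from pred 1"}
	s2 := &toyValue{op: "struct from pred 2"}
	phi := &toyValue{op: "Phi", args: []*toyValue{s1, s2}}
	out := decomposeStructPhi2(phi)
	fmt.Println(out.op, out.args[0].op, out.args[1].op) // StructMake2 Phi Phi
}
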
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/dom.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/dom.go
deleted file mode 100644
index bf1c998..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/dom.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/dom.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/dom.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// mark values
-type markKind uint8
-
-const (
-	notFound    markKind = 0 // block has not been discovered yet
-	notExplored markKind = 1 // discovered and in queue, outedges not processed yet
-	explored    markKind = 2 // discovered and in queue, outedges processed
-	done        markKind = 3 // all done, in output ordering
-)
-
-// This file contains code to compute the dominator tree
-// of a control-flow graph.
-
-// postorder computes a postorder traversal ordering for the
-// basic blocks in f. Unreachable blocks will not appear.
-func postorder(f *Func) []*Block {
-	return postorderWithNumbering(f, []int32{})
-}
-func postorderWithNumbering(f *Func, ponums []int32) []*Block {
-	mark := make([]markKind, f.NumBlocks())
-
-	// result ordering
-	var order []*Block
-
-	// stack of blocks
-	var s []*Block
-	s = append(s, f.Entry)
-	mark[f.Entry.ID] = notExplored
-	for len(s) > 0 {
-		b := s[len(s)-1]
-		switch mark[b.ID] {
-		case explored:
-			// Children have all been visited. Pop & output block.
-			s = s[:len(s)-1]
-			mark[b.ID] = done
-			if len(ponums) > 0 {
-				ponums[b.ID] = int32(len(order))
-			}
-			order = append(order, b)
-		case notExplored:
-			// Children have not been visited yet. Mark as explored
-			// and queue any children we haven't seen yet.
-			mark[b.ID] = explored
-			for _, e := range b.Succs {
-				c := e.b
-				if mark[c.ID] == notFound {
-					mark[c.ID] = notExplored
-					s = append(s, c)
-				}
-			}
-		default:
-			b.Fatalf("bad stack state %v %d", b, mark[b.ID])
-		}
-	}
-	return order
-}
-
-type linkedBlocks func(*Block) []Edge
-
-const nscratchslices = 7
-
-// experimentally, functions with 512 or fewer blocks account
-// for 75% of memory (size) allocation for dominator computation
-// in make.bash.
-const minscratchblocks = 512
-
-func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID) {
-	tot := maxBlockID * nscratchslices
-	scratch := cfg.domblockstore
-	if len(scratch) < tot {
-		// req = max(1.5*tot, nscratchslices*minscratchblocks)
-		// 50% padding allows for graph growth in later phases.
-		req := (tot * 3) >> 1
-		if req < nscratchslices*minscratchblocks {
-			req = nscratchslices * minscratchblocks
-		}
-		scratch = make([]ID, req)
-		cfg.domblockstore = scratch
-	} else {
-		// Clear as much of scratch as we will (re)use
-		scratch = scratch[0:tot]
-		for i := range scratch {
-			scratch[i] = 0
-		}
-	}
-
-	a = scratch[0*maxBlockID : 1*maxBlockID]
-	b = scratch[1*maxBlockID : 2*maxBlockID]
-	c = scratch[2*maxBlockID : 3*maxBlockID]
-	d = scratch[3*maxBlockID : 4*maxBlockID]
-	e = scratch[4*maxBlockID : 5*maxBlockID]
-	f = scratch[5*maxBlockID : 6*maxBlockID]
-	g = scratch[6*maxBlockID : 7*maxBlockID]
-
-	return
-}
-
-func dominators(f *Func) []*Block {
-	preds := func(b *Block) []Edge { return b.Preds }
-	succs := func(b *Block) []Edge { return b.Succs }
-
-	//TODO: benchmark and try to find criteria for swapping between
-	// dominatorsSimple and dominatorsLT
-	return f.dominatorsLTOrig(f.Entry, preds, succs)
-}
-
-// dominatorsLTOrig runs Lengauer-Tarjan to compute a dominator tree starting at
-// entry and using predFn/succFn to find predecessors/successors to allow
-// computing both dominator and post-dominator trees.
-func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block {
-	// Adapted directly from the original TOPLAS article's "simple" algorithm
-
-	maxBlockID := entry.Func.NumBlocks()
-	semi, vertex, label, parent, ancestor, bucketHead, bucketLink := f.Config.scratchBlocksForDom(maxBlockID)
-
-	// This version uses integers for most of the computation,
-	// to make the work arrays smaller and pointer-free.
-	// fromID translates from ID to *Block where that is needed.
-	fromID := make([]*Block, maxBlockID)
-	for _, v := range f.Blocks {
-		fromID[v.ID] = v
-	}
-	idom := make([]*Block, maxBlockID)
-
-	// Step 1. Carry out a depth first search of the problem graph. Number
-	// the vertices from 1 to n as they are reached during the search.
-	n := f.dfsOrig(entry, succFn, semi, vertex, label, parent)
-
-	for i := n; i >= 2; i-- {
-		w := vertex[i]
-
-		// step2 in TOPLAS paper
-		for _, e := range predFn(fromID[w]) {
-			v := e.b
-			if semi[v.ID] == 0 {
-				// skip unreachable predecessor
-				// not in original, but we're using existing pred instead of building one.
-				continue
-			}
-			u := evalOrig(v.ID, ancestor, semi, label)
-			if semi[u] < semi[w] {
-				semi[w] = semi[u]
-			}
-		}
-
-		// add w to bucket[vertex[semi[w]]]
-		// implement bucket as a linked list implemented
-		// in a pair of arrays.
-		vsw := vertex[semi[w]]
-		bucketLink[w] = bucketHead[vsw]
-		bucketHead[vsw] = w
-
-		linkOrig(parent[w], w, ancestor)
-
-		// step3 in TOPLAS paper
-		for v := bucketHead[parent[w]]; v != 0; v = bucketLink[v] {
-			u := evalOrig(v, ancestor, semi, label)
-			if semi[u] < semi[v] {
-				idom[v] = fromID[u]
-			} else {
-				idom[v] = fromID[parent[w]]
-			}
-		}
-	}
-	// step 4 in toplas paper
-	for i := ID(2); i <= n; i++ {
-		w := vertex[i]
-		if idom[w].ID != vertex[semi[w]] {
-			idom[w] = idom[idom[w].ID]
-		}
-	}
-
-	return idom
-}
-
-// dfsOrig performs a depth first search over the blocks starting at the entry
-// block (in arbitrary order).  This is a de-recursed version of dfs from the
-// original Lengauer-Tarjan TOPLAS article.  It's important to return the
-// same values for parent as the original algorithm.
-func (f *Func) dfsOrig(entry *Block, succFn linkedBlocks, semi, vertex, label, parent []ID) ID {
-	n := ID(0)
-	s := make([]*Block, 0, 256)
-	s = append(s, entry)
-
-	for len(s) > 0 {
-		v := s[len(s)-1]
-		s = s[:len(s)-1]
-		// recursing on v
-
-		if semi[v.ID] != 0 {
-			continue // already visited
-		}
-		n++
-		semi[v.ID] = n
-		vertex[n] = v.ID
-		label[v.ID] = v.ID
-		// ancestor[v] already zero
-		for _, e := range succFn(v) {
-			w := e.b
-			// if it has a dfnum, we've already visited it
-			if semi[w.ID] == 0 {
-				// yes, w can be pushed multiple times.
-				s = append(s, w)
-				parent[w.ID] = v.ID // keep overwriting this till it is visited.
-			}
-		}
-	}
-	return n
-}
-
-// compressOrig is the "simple" compress function from LT paper
-func compressOrig(v ID, ancestor, semi, label []ID) {
-	if ancestor[ancestor[v]] != 0 {
-		compressOrig(ancestor[v], ancestor, semi, label)
-		if semi[label[ancestor[v]]] < semi[label[v]] {
-			label[v] = label[ancestor[v]]
-		}
-		ancestor[v] = ancestor[ancestor[v]]
-	}
-}
-
-// evalOrig is the "simple" eval function from LT paper
-func evalOrig(v ID, ancestor, semi, label []ID) ID {
-	if ancestor[v] == 0 {
-		return v
-	}
-	compressOrig(v, ancestor, semi, label)
-	return label[v]
-}
-
-func linkOrig(v, w ID, ancestor []ID) {
-	ancestor[w] = v
-}
-
-// dominatorsSimple computes the dominator tree for f. It returns a slice
-// which maps block ID to the immediate dominator of that block.
-// Unreachable blocks map to nil. The entry block maps to nil.
-func dominatorsSimple(f *Func) []*Block {
-	// A simple algorithm for now
-	// Cooper, Harvey, Kennedy
-	idom := make([]*Block, f.NumBlocks())
-
-	// Compute postorder walk
-	post := f.postorder()
-
-	// Make map from block id to order index (for intersect call)
-	postnum := make([]int, f.NumBlocks())
-	for i, b := range post {
-		postnum[b.ID] = i
-	}
-
-	// Make the entry block a self-loop
-	idom[f.Entry.ID] = f.Entry
-	if postnum[f.Entry.ID] != len(post)-1 {
-		f.Fatalf("entry block %v not last in postorder", f.Entry)
-	}
-
-	// Compute relaxation of idom entries
-	for {
-		changed := false
-
-		for i := len(post) - 2; i >= 0; i-- {
-			b := post[i]
-			var d *Block
-			for _, e := range b.Preds {
-				p := e.b
-				if idom[p.ID] == nil {
-					continue
-				}
-				if d == nil {
-					d = p
-					continue
-				}
-				d = intersect(d, p, postnum, idom)
-			}
-			if d != idom[b.ID] {
-				idom[b.ID] = d
-				changed = true
-			}
-		}
-		if !changed {
-			break
-		}
-	}
-	// Set idom of entry block to nil instead of itself.
-	idom[f.Entry.ID] = nil
-	return idom
-}
-
-// intersect finds the closest dominator of both b and c.
-// It requires a postorder numbering of all the blocks.
-func intersect(b, c *Block, postnum []int, idom []*Block) *Block {
-	// TODO: This loop is O(n^2). See BenchmarkNilCheckDeep*.
-	for b != c {
-		if postnum[b.ID] < postnum[c.ID] {
-			b = idom[b.ID]
-		} else {
-			c = idom[c.ID]
-		}
-	}
-	return b
-}
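
dominatorsSimple above is the Cooper-Harvey-Kennedy fixed-point iteration, and intersect is its two-finger walk up the partially built dominator tree. Here is a self-contained sketch of the same iteration on a hypothetical four-block CFG expressed as plain adjacency lists; the block numbers double as the reverse-postorder ordering key, where the code above uses postorder numbers with the comparison flipped.

package main

import "fmt"

func main() {
	// A made-up four-block CFG: entry(0) -> 1, entry -> 2, 1 -> 2, 2 -> 3.
	// preds[b] lists the predecessors of block b.
	preds := [][]int{
		0: {},
		1: {0},
		2: {0, 1},
		3: {2},
	}

	idom := make([]int, len(preds))
	for b := range idom {
		idom[b] = -1 // unknown
	}
	idom[0] = 0 // entry is a self-loop during the iteration

	// intersect walks two fingers up the partial dominator tree until they
	// meet; blocks are numbered in reverse postorder, so the larger number
	// is the one that gets moved up.
	intersect := func(b, c int) int {
		for b != c {
			for b > c {
				b = idom[b]
			}
			for c > b {
				c = idom[c]
			}
		}
		return b
	}

	// Iterate to a fixed point, visiting blocks in reverse postorder.
	for changed := true; changed; {
		changed = false
		for b := 1; b < len(preds); b++ {
			d := -1
			for _, p := range preds[b] {
				if idom[p] == -1 {
					continue // predecessor not processed yet
				}
				if d == -1 {
					d = p
				} else {
					d = intersect(d, p)
				}
			}
			if d != idom[b] {
				idom[b] = d
				changed = true
			}
		}
	}
	fmt.Println(idom) // [0 0 0 2]: blocks 1 and 2 are dominated by entry, 3 by 2
}
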
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/dom_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/dom_test.go
deleted file mode 100644
index 320ec0e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/dom_test.go
+++ /dev/null
@@ -1,575 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/dom_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/dom_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-func BenchmarkDominatorsLinear(b *testing.B)     { benchmarkDominators(b, 10000, genLinear) }
-func BenchmarkDominatorsFwdBack(b *testing.B)    { benchmarkDominators(b, 10000, genFwdBack) }
-func BenchmarkDominatorsManyPred(b *testing.B)   { benchmarkDominators(b, 10000, genManyPred) }
-func BenchmarkDominatorsMaxPred(b *testing.B)    { benchmarkDominators(b, 10000, genMaxPred) }
-func BenchmarkDominatorsMaxPredVal(b *testing.B) { benchmarkDominators(b, 10000, genMaxPredValue) }
-
-type blockGen func(size int) []bloc
-
-// genLinear creates an array of blocks that succeed one another
-// b_n -> [b_n+1].
-func genLinear(size int) []bloc {
-	var blocs []bloc
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Goto(blockn(0)),
-		),
-	)
-	for i := 0; i < size; i++ {
-		blocs = append(blocs, Bloc(blockn(i),
-			Goto(blockn(i+1))))
-	}
-
-	blocs = append(blocs,
-		Bloc(blockn(size), Goto("exit")),
-		Bloc("exit", Exit("mem")),
-	)
-
-	return blocs
-}
-
-// genFwdBack creates an array of blocks that alternate between
-// b_n -> [b_n+1], b_n -> [b_n+1, b_n-1] , b_n -> [b_n+1, b_n+2]
-func genFwdBack(size int) []bloc {
-	var blocs []bloc
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			Goto(blockn(0)),
-		),
-	)
-	for i := 0; i < size; i++ {
-		switch i % 2 {
-		case 0:
-			blocs = append(blocs, Bloc(blockn(i),
-				If("p", blockn(i+1), blockn(i+2))))
-		case 1:
-			blocs = append(blocs, Bloc(blockn(i),
-				If("p", blockn(i+1), blockn(i-1))))
-		}
-	}
-
-	blocs = append(blocs,
-		Bloc(blockn(size), Goto("exit")),
-		Bloc("exit", Exit("mem")),
-	)
-
-	return blocs
-}
-
-// genManyPred creates an array of blocks where 1/3rd have a successor of the
-// first block, 1/3rd the last block, and the remaining third are plain.
-func genManyPred(size int) []bloc {
-	var blocs []bloc
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			Goto(blockn(0)),
-		),
-	)
-
-	// We want predecessor lists to be long, so 2/3rds of the blocks have a
-	// successor of the first or last block.
-	for i := 0; i < size; i++ {
-		switch i % 3 {
-		case 0:
-			blocs = append(blocs, Bloc(blockn(i),
-				Valu("a", OpConstBool, TypeBool, 1, nil),
-				Goto(blockn(i+1))))
-		case 1:
-			blocs = append(blocs, Bloc(blockn(i),
-				Valu("a", OpConstBool, TypeBool, 1, nil),
-				If("p", blockn(i+1), blockn(0))))
-		case 2:
-			blocs = append(blocs, Bloc(blockn(i),
-				Valu("a", OpConstBool, TypeBool, 1, nil),
-				If("p", blockn(i+1), blockn(size))))
-		}
-	}
-
-	blocs = append(blocs,
-		Bloc(blockn(size), Goto("exit")),
-		Bloc("exit", Exit("mem")),
-	)
-
-	return blocs
-}
-
-// genMaxPred maximizes the size of the 'exit' predecessor list.
-func genMaxPred(size int) []bloc {
-	var blocs []bloc
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			Goto(blockn(0)),
-		),
-	)
-
-	for i := 0; i < size; i++ {
-		blocs = append(blocs, Bloc(blockn(i),
-			If("p", blockn(i+1), "exit")))
-	}
-
-	blocs = append(blocs,
-		Bloc(blockn(size), Goto("exit")),
-		Bloc("exit", Exit("mem")),
-	)
-
-	return blocs
-}
-
-// genMaxPredValue is identical to genMaxPred but contains an
-// additional value.
-func genMaxPredValue(size int) []bloc {
-	var blocs []bloc
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			Goto(blockn(0)),
-		),
-	)
-
-	for i := 0; i < size; i++ {
-		blocs = append(blocs, Bloc(blockn(i),
-			Valu("a", OpConstBool, TypeBool, 1, nil),
-			If("p", blockn(i+1), "exit")))
-	}
-
-	blocs = append(blocs,
-		Bloc(blockn(size), Goto("exit")),
-		Bloc("exit", Exit("mem")),
-	)
-
-	return blocs
-}
-
-// sink for benchmark
-var domBenchRes []*Block
-
-func benchmarkDominators(b *testing.B, size int, bg blockGen) {
-	c := NewConfig("amd64", DummyFrontend{b}, nil, true)
-	fun := Fun(c, "entry", bg(size)...)
-
-	CheckFunc(fun.f)
-	b.SetBytes(int64(size))
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		domBenchRes = dominators(fun.f)
-	}
-}
-
-type domFunc func(f *Func) []*Block
-
-// verifyDominators verifies that the dominators of fut (function under test)
-// as determined by domFn, match the map node->dominator
-func verifyDominators(t *testing.T, fut fun, domFn domFunc, doms map[string]string) {
-	blockNames := map[*Block]string{}
-	for n, b := range fut.blocks {
-		blockNames[b] = n
-	}
-
-	calcDom := domFn(fut.f)
-
-	for n, d := range doms {
-		nblk, ok := fut.blocks[n]
-		if !ok {
-			t.Errorf("invalid block name %s", n)
-		}
-		dblk, ok := fut.blocks[d]
-		if !ok {
-			t.Errorf("invalid block name %s", d)
-		}
-
-		domNode := calcDom[nblk.ID]
-		switch {
-		case calcDom[nblk.ID] == dblk:
-			calcDom[nblk.ID] = nil
-			continue
-		case calcDom[nblk.ID] != dblk:
-			t.Errorf("expected %s as dominator of %s, found %s", d, n, blockNames[domNode])
-		default:
-			t.Fatal("unexpected dominator condition")
-		}
-	}
-
-	for id, d := range calcDom {
-		// If nil, we've already verified it
-		if d == nil {
-			continue
-		}
-		for _, b := range fut.blocks {
-			if int(b.ID) == id {
-				t.Errorf("unexpected dominator of %s for %s", blockNames[d], blockNames[b])
-			}
-		}
-	}
-
-}
-
-func TestDominatorsSingleBlock(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Exit("mem")))
-
-	doms := map[string]string{}
-
-	CheckFunc(fun.f)
-	verifyDominators(t, fun, dominators, doms)
-	verifyDominators(t, fun, dominatorsSimple, doms)
-
-}
-
-func TestDominatorsSimple(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Goto("a")),
-		Bloc("a",
-			Goto("b")),
-		Bloc("b",
-			Goto("c")),
-		Bloc("c",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	doms := map[string]string{
-		"a":    "entry",
-		"b":    "a",
-		"c":    "b",
-		"exit": "c",
-	}
-
-	CheckFunc(fun.f)
-	verifyDominators(t, fun, dominators, doms)
-	verifyDominators(t, fun, dominatorsSimple, doms)
-
-}
-
-func TestDominatorsMultPredFwd(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			If("p", "a", "c")),
-		Bloc("a",
-			If("p", "b", "c")),
-		Bloc("b",
-			Goto("c")),
-		Bloc("c",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	doms := map[string]string{
-		"a":    "entry",
-		"b":    "a",
-		"c":    "entry",
-		"exit": "c",
-	}
-
-	CheckFunc(fun.f)
-	verifyDominators(t, fun, dominators, doms)
-	verifyDominators(t, fun, dominatorsSimple, doms)
-}
-
-func TestDominatorsDeadCode(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 0, nil),
-			If("p", "b3", "b5")),
-		Bloc("b2", Exit("mem")),
-		Bloc("b3", Goto("b2")),
-		Bloc("b4", Goto("b2")),
-		Bloc("b5", Goto("b2")))
-
-	doms := map[string]string{
-		"b2": "entry",
-		"b3": "entry",
-		"b5": "entry",
-	}
-
-	CheckFunc(fun.f)
-	verifyDominators(t, fun, dominators, doms)
-	verifyDominators(t, fun, dominatorsSimple, doms)
-}
-
-func TestDominatorsMultPredRev(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Goto("first")),
-		Bloc("first",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			Goto("a")),
-		Bloc("a",
-			If("p", "b", "first")),
-		Bloc("b",
-			Goto("c")),
-		Bloc("c",
-			If("p", "exit", "b")),
-		Bloc("exit",
-			Exit("mem")))
-
-	doms := map[string]string{
-		"first": "entry",
-		"a":     "first",
-		"b":     "a",
-		"c":     "b",
-		"exit":  "c",
-	}
-
-	CheckFunc(fun.f)
-	verifyDominators(t, fun, dominators, doms)
-	verifyDominators(t, fun, dominatorsSimple, doms)
-}
-
-func TestDominatorsMultPred(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			If("p", "a", "c")),
-		Bloc("a",
-			If("p", "b", "c")),
-		Bloc("b",
-			Goto("c")),
-		Bloc("c",
-			If("p", "b", "exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	doms := map[string]string{
-		"a":    "entry",
-		"b":    "entry",
-		"c":    "entry",
-		"exit": "c",
-	}
-
-	CheckFunc(fun.f)
-	verifyDominators(t, fun, dominators, doms)
-	verifyDominators(t, fun, dominatorsSimple, doms)
-}
-
-func TestInfiniteLoop(t *testing.T) {
-	c := testConfig(t)
-	// note lack of an exit block
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			Goto("a")),
-		Bloc("a",
-			Goto("b")),
-		Bloc("b",
-			Goto("a")))
-
-	CheckFunc(fun.f)
-	doms := map[string]string{"a": "entry",
-		"b": "a"}
-	verifyDominators(t, fun, dominators, doms)
-}
-
-func TestDomTricky(t *testing.T) {
-	doms := map[string]string{
-		"4":  "1",
-		"2":  "4",
-		"5":  "4",
-		"11": "4",
-		"15": "4", // the incorrect answer is "5"
-		"10": "15",
-		"19": "15",
-	}
-
-	if4 := [2]string{"2", "5"}
-	if5 := [2]string{"15", "11"}
-	if15 := [2]string{"19", "10"}
-
-	for i := 0; i < 8; i++ {
-		a := 1 & i
-		b := 1 & i >> 1
-		c := 1 & i >> 2
-
-		fun := Fun(testConfig(t), "1",
-			Bloc("1",
-				Valu("mem", OpInitMem, TypeMem, 0, nil),
-				Valu("p", OpConstBool, TypeBool, 1, nil),
-				Goto("4")),
-			Bloc("2",
-				Goto("11")),
-			Bloc("4",
-				If("p", if4[a], if4[1-a])), // 2, 5
-			Bloc("5",
-				If("p", if5[b], if5[1-b])), //15, 11
-			Bloc("10",
-				Exit("mem")),
-			Bloc("11",
-				Goto("15")),
-			Bloc("15",
-				If("p", if15[c], if15[1-c])), //19, 10
-			Bloc("19",
-				Goto("10")))
-		CheckFunc(fun.f)
-		verifyDominators(t, fun, dominators, doms)
-		verifyDominators(t, fun, dominatorsSimple, doms)
-	}
-}
-
-// generateDominatorMap uses dominatorsSimple to obtain a
-// reference dominator tree for testing faster algorithms.
-func generateDominatorMap(fut fun) map[string]string {
-	blockNames := map[*Block]string{}
-	for n, b := range fut.blocks {
-		blockNames[b] = n
-	}
-	referenceDom := dominatorsSimple(fut.f)
-	doms := make(map[string]string)
-	for _, b := range fut.f.Blocks {
-		if d := referenceDom[b.ID]; d != nil {
-			doms[blockNames[b]] = blockNames[d]
-		}
-	}
-	return doms
-}
-
-func TestDominatorsPostTricky(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "b1",
-		Bloc("b1",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("p", OpConstBool, TypeBool, 1, nil),
-			If("p", "b3", "b2")),
-		Bloc("b3",
-			If("p", "b5", "b6")),
-		Bloc("b5",
-			Goto("b7")),
-		Bloc("b7",
-			If("p", "b8", "b11")),
-		Bloc("b8",
-			Goto("b13")),
-		Bloc("b13",
-			If("p", "b14", "b15")),
-		Bloc("b14",
-			Goto("b10")),
-		Bloc("b15",
-			Goto("b16")),
-		Bloc("b16",
-			Goto("b9")),
-		Bloc("b9",
-			Goto("b7")),
-		Bloc("b11",
-			Goto("b12")),
-		Bloc("b12",
-			If("p", "b10", "b8")),
-		Bloc("b10",
-			Goto("b6")),
-		Bloc("b6",
-			Goto("b17")),
-		Bloc("b17",
-			Goto("b18")),
-		Bloc("b18",
-			If("p", "b22", "b19")),
-		Bloc("b22",
-			Goto("b23")),
-		Bloc("b23",
-			If("p", "b21", "b19")),
-		Bloc("b19",
-			If("p", "b24", "b25")),
-		Bloc("b24",
-			Goto("b26")),
-		Bloc("b26",
-			Goto("b25")),
-		Bloc("b25",
-			If("p", "b27", "b29")),
-		Bloc("b27",
-			Goto("b30")),
-		Bloc("b30",
-			Goto("b28")),
-		Bloc("b29",
-			Goto("b31")),
-		Bloc("b31",
-			Goto("b28")),
-		Bloc("b28",
-			If("p", "b32", "b33")),
-		Bloc("b32",
-			Goto("b21")),
-		Bloc("b21",
-			Goto("b47")),
-		Bloc("b47",
-			If("p", "b45", "b46")),
-		Bloc("b45",
-			Goto("b48")),
-		Bloc("b48",
-			Goto("b49")),
-		Bloc("b49",
-			If("p", "b50", "b51")),
-		Bloc("b50",
-			Goto("b52")),
-		Bloc("b52",
-			Goto("b53")),
-		Bloc("b53",
-			Goto("b51")),
-		Bloc("b51",
-			Goto("b54")),
-		Bloc("b54",
-			Goto("b46")),
-		Bloc("b46",
-			Exit("mem")),
-		Bloc("b33",
-			Goto("b34")),
-		Bloc("b34",
-			Goto("b37")),
-		Bloc("b37",
-			If("p", "b35", "b36")),
-		Bloc("b35",
-			Goto("b38")),
-		Bloc("b38",
-			Goto("b39")),
-		Bloc("b39",
-			If("p", "b40", "b41")),
-		Bloc("b40",
-			Goto("b42")),
-		Bloc("b42",
-			Goto("b43")),
-		Bloc("b43",
-			Goto("b41")),
-		Bloc("b41",
-			Goto("b44")),
-		Bloc("b44",
-			Goto("b36")),
-		Bloc("b36",
-			Goto("b20")),
-		Bloc("b20",
-			Goto("b18")),
-		Bloc("b2",
-			Goto("b4")),
-		Bloc("b4",
-			Exit("mem")))
-	CheckFunc(fun.f)
-	doms := generateDominatorMap(fun)
-	verifyDominators(t, fun, dominators, doms)
-}
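
generateDominatorMap and the *Tricky tests above cross-check the production dominator computation against the slower dominatorsSimple reference. The same differential-testing idea, shrunk to a runnable sketch: fast and reference below are hypothetical stand-ins for two implementations that must agree on every generated input.

package main

import (
	"fmt"
	"math/rand"
)

// fast and reference are two implementations of the same function; the test
// strategy is simply to compare them on many generated inputs.
func fast(xs []int) int {
	sum := 0
	for _, x := range xs {
		sum += x
	}
	return sum
}

func reference(xs []int) int {
	sum := 0
	for i := len(xs) - 1; i >= 0; i-- {
		sum += xs[i]
	}
	return sum
}

func main() {
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 100; i++ {
		xs := make([]int, rng.Intn(20))
		for j := range xs {
			xs[j] = rng.Intn(1000)
		}
		if got, want := fast(xs), reference(xs); got != want {
			fmt.Printf("mismatch on %v: fast=%d reference=%d\n", xs, got, want)
			return
		}
	}
	fmt.Println("fast agrees with reference on 100 random inputs")
}
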
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/export_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/export_test.go
deleted file mode 100644
index 9c1b1a6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/export_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/export_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/export_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-	"testing"
-)
-
-var CheckFunc = checkFunc
-var PrintFunc = printFunc
-var Opt = opt
-var Deadcode = deadcode
-var Copyelim = copyelim
-
-func testConfig(t testing.TB) *Config {
-	testCtxt := &obj.Link{Arch: &x86.Linkamd64}
-	return NewConfig("amd64", DummyFrontend{t}, testCtxt, true)
-}
-
-// DummyFrontend is a test-only frontend.
-// It assumes 64 bit integers and pointers.
-type DummyFrontend struct {
-	t testing.TB
-}
-
-func (DummyFrontend) StringData(s string) interface{} {
-	return nil
-}
-func (DummyFrontend) Auto(t Type) GCNode {
-	return nil
-}
-func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeInt(), s.Off + 8}
-}
-func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeBytePtr(), s.Off + 8}
-}
-func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, s.Type.ElemType().PtrTo(), s.Off},
-		LocalSlot{s.N, d.TypeInt(), s.Off + 8},
-		LocalSlot{s.N, d.TypeInt(), s.Off + 16}
-}
-func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
-	if s.Type.Size() == 16 {
-		return LocalSlot{s.N, d.TypeFloat64(), s.Off}, LocalSlot{s.N, d.TypeFloat64(), s.Off + 8}
-	}
-	return LocalSlot{s.N, d.TypeFloat32(), s.Off}, LocalSlot{s.N, d.TypeFloat32(), s.Off + 4}
-}
-func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
-	if s.Type.IsSigned() {
-		return LocalSlot{s.N, d.TypeInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
-	}
-	return LocalSlot{s.N, d.TypeUInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
-}
-func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
-	return LocalSlot{s.N, s.Type.FieldType(i), s.Off + s.Type.FieldOff(i)}
-}
-func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot {
-	return LocalSlot{s.N, s.Type.ElemType(), s.Off}
-}
-func (DummyFrontend) Line(line int32) string {
-	return "unknown.go:0"
-}
-func (DummyFrontend) AllocFrame(f *Func) {
-}
-func (DummyFrontend) Syslook(s string) interface{} {
-	return DummySym(s)
-}
-
-func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Log() bool                            { return true }
-
-func (d DummyFrontend) Fatalf(line int32, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
-func (d DummyFrontend) Warnl(line int32, msg string, args ...interface{})  { d.t.Logf(msg, args...) }
-func (d DummyFrontend) Debug_checknil() bool                               { return false }
-func (d DummyFrontend) Debug_wb() bool                                     { return false }
-
-func (d DummyFrontend) TypeBool() Type    { return TypeBool }
-func (d DummyFrontend) TypeInt8() Type    { return TypeInt8 }
-func (d DummyFrontend) TypeInt16() Type   { return TypeInt16 }
-func (d DummyFrontend) TypeInt32() Type   { return TypeInt32 }
-func (d DummyFrontend) TypeInt64() Type   { return TypeInt64 }
-func (d DummyFrontend) TypeUInt8() Type   { return TypeUInt8 }
-func (d DummyFrontend) TypeUInt16() Type  { return TypeUInt16 }
-func (d DummyFrontend) TypeUInt32() Type  { return TypeUInt32 }
-func (d DummyFrontend) TypeUInt64() Type  { return TypeUInt64 }
-func (d DummyFrontend) TypeFloat32() Type { return TypeFloat32 }
-func (d DummyFrontend) TypeFloat64() Type { return TypeFloat64 }
-func (d DummyFrontend) TypeInt() Type     { return TypeInt64 }
-func (d DummyFrontend) TypeUintptr() Type { return TypeUInt64 }
-func (d DummyFrontend) TypeString() Type  { panic("unimplemented") }
-func (d DummyFrontend) TypeBytePtr() Type { return TypeBytePtr }
-
-func (d DummyFrontend) CanSSA(t Type) bool {
-	// There are no un-SSAable types in dummy land.
-	return true
-}
-
-type DummySym string
-
-func (s DummySym) String() string { return string(s) }
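
DummyFrontend above is a hand-rolled fake: it satisfies the frontend interface the ssa package expects with fixed 64-bit answers so compiler passes can be unit-tested without a real gc frontend. A miniature version of the pattern, using a hypothetical Frontend interface and consumer rather than the real ones:

package main

import "fmt"

// Frontend is a hypothetical slice of the interface a backend might need.
type Frontend interface {
	PtrSize() int64 // size of a pointer, in bytes
	Warnl(line int32, msg string)
}

// dummyFrontend answers with fixed values and records warnings, so code that
// depends on a Frontend can be tested in isolation.
type dummyFrontend struct {
	warnings []string
}

func (d *dummyFrontend) PtrSize() int64 { return 8 } // assume 64-bit, like DummyFrontend
func (d *dummyFrontend) Warnl(line int32, msg string) {
	d.warnings = append(d.warnings, fmt.Sprintf("line %d: %s", line, msg))
}

// sliceHeaderSize is an example consumer: a slice header is a pointer plus two ints.
func sliceHeaderSize(fe Frontend) int64 { return 3 * fe.PtrSize() }

func main() {
	fe := &dummyFrontend{}
	fmt.Println(sliceHeaderSize(fe)) // 24
	fe.Warnl(10, "just checking")
	fmt.Println(fe.warnings)
}
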
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/flagalloc.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/flagalloc.go
deleted file mode 100644
index 6dc8f7f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/flagalloc.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/flagalloc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/flagalloc.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// flagalloc allocates the flag register among all the flag-generating
-// instructions. Flag values are recomputed if they need to be
-// spilled/restored.
-func flagalloc(f *Func) {
-	// Compute the in-register flag value we want at the end of
-	// each block. This is basically a best-effort live variable
-	// analysis, so it can be much simpler than a full analysis.
-	end := make([]*Value, f.NumBlocks())
-	po := f.postorder()
-	for n := 0; n < 2; n++ {
-		for _, b := range po {
-			// Walk values backwards to figure out what flag
-			// value we want in the flag register at the start
-			// of the block.
-			flag := end[b.ID]
-			if b.Control != nil && b.Control.Type.IsFlags() {
-				flag = b.Control
-			}
-			for j := len(b.Values) - 1; j >= 0; j-- {
-				v := b.Values[j]
-				if v == flag {
-					flag = nil
-				}
-				if v.clobbersFlags() {
-					flag = nil
-				}
-				for _, a := range v.Args {
-					if a.Type.IsFlags() {
-						flag = a
-					}
-				}
-			}
-			if flag != nil {
-				for _, e := range b.Preds {
-					p := e.b
-					end[p.ID] = flag
-				}
-			}
-		}
-	}
-
-	// For blocks which have a flags control value, that's the only value
-	// we can leave in the flags register at the end of the block. (There
-	// is no place to put a flag regeneration instruction.)
-	for _, b := range f.Blocks {
-		v := b.Control
-		if v != nil && v.Type.IsFlags() && end[b.ID] != v {
-			end[b.ID] = nil
-		}
-		if b.Kind == BlockDefer {
-			// Defer blocks internally use/clobber the flags value.
-			end[b.ID] = nil
-		}
-	}
-
-	// Add flag recomputations where they are needed.
-	// TODO: Remove original instructions if they are never used.
-	var oldSched []*Value
-	for _, b := range f.Blocks {
-		oldSched = append(oldSched[:0], b.Values...)
-		b.Values = b.Values[:0]
-		// The current live flag value (the pre-flagalloc copy).
-		var flag *Value
-		if len(b.Preds) > 0 {
-			flag = end[b.Preds[0].b.ID]
-			// Note: the following condition depends on the lack of critical edges.
-			for _, e := range b.Preds[1:] {
-				p := e.b
-				if end[p.ID] != flag {
-					f.Fatalf("live flag in %s's predecessors not consistent", b)
-				}
-			}
-		}
-		for _, v := range oldSched {
-			if v.Op == OpPhi && v.Type.IsFlags() {
-				f.Fatalf("phi of flags not supported: %s", v.LongString())
-			}
-			// Make sure any flag arg of v is in the flags register.
-			// If not, recompute it.
-			for i, a := range v.Args {
-				if !a.Type.IsFlags() {
-					continue
-				}
-				if a == flag {
-					continue
-				}
-				// Recalculate a
-				c := copyFlags(a, b)
-				// Update v.
-				v.SetArg(i, c)
-				// Remember the most-recently computed flag value.
-				flag = a
-			}
-			// Issue v.
-			b.Values = append(b.Values, v)
-			if v.clobbersFlags() {
-				flag = nil
-			}
-			if v.Type.IsFlags() {
-				flag = v
-			}
-		}
-		if v := b.Control; v != nil && v != flag && v.Type.IsFlags() {
-			// Recalculate control value.
-			c := v.copyInto(b)
-			b.SetControl(c)
-			flag = v
-		}
-		if v := end[b.ID]; v != nil && v != flag {
-			// Need to reissue flag generator for use by
-			// subsequent blocks.
-			copyFlags(v, b)
-			// Note: this flag generator is not properly linked up
-			// with the flag users. This breaks the SSA representation.
-			// We could fix up the users with another pass, but for now
-			// we'll just leave it.  (Regalloc has the same issue for
-			// standard regs, and it runs next.)
-		}
-	}
-
-	// Save live flag state for later.
-	for _, b := range f.Blocks {
-		b.FlagsLiveAtEnd = end[b.ID] != nil
-	}
-}
-
-func (v *Value) clobbersFlags() bool {
-	if opcodeTable[v.Op].clobberFlags {
-		return true
-	}
-	if v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags()) {
-		// This case handles the possibility where a flag value is generated but never used.
-		// In that case, there's no corresponding Select to overwrite the flags value,
-		// so we must consider flags clobbered by the tuple-generating instruction.
-		return true
-	}
-	return false
-}
-
-// copyFlags copies v (flag generator) into b, returns the copy.
-// If v's arg is also flags, copy recursively.
-func copyFlags(v *Value, b *Block) *Value {
-	flagsArgs := make(map[int]*Value)
-	for i, a := range v.Args {
-		if a.Type.IsFlags() || a.Type.IsTuple() {
-			flagsArgs[i] = copyFlags(a, b)
-		}
-	}
-	c := v.copyInto(b)
-	for i, a := range flagsArgs {
-		c.SetArg(i, a)
-	}
-	return c
-}
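
The first loop in flagalloc above is a backward scan: walking a block's values in reverse, the flag value wanted at the top of the block is whatever the earliest flag use needs, cleared whenever something defines or clobbers the flags register in between. A reduced sketch of that scan over a made-up instruction list (instr and its fields are illustrative, not the ssa types):

package main

import "fmt"

// instr is a hypothetical instruction: it may define the flags register,
// clobber it, and/or use a previously defined flag value (by name).
type instr struct {
	name          string
	definesFlags  bool
	clobbersFlags bool
	usesFlag      string // name of the flag-producing instruction it consumes, if any
}

// wantedAtTop mirrors the backward walk in flagalloc: starting from the flag
// wanted at the end of the block, figure out which flag value should be live
// in the flags register when the block is entered.
func wantedAtTop(values []instr, wantedAtEnd string) string {
	flag := wantedAtEnd
	for j := len(values) - 1; j >= 0; j-- {
		v := values[j]
		if v.definesFlags && v.name == flag {
			flag = "" // this value produces the flag itself; nothing needed above it
		}
		if v.clobbersFlags {
			flag = "" // anything wanted above here must be (re)computed below
		}
		if v.usesFlag != "" {
			flag = v.usesFlag // the use determines what must be live here
		}
	}
	return flag
}

func main() {
	block := []instr{
		{name: "cmp1", definesFlags: true},
		{name: "add", clobbersFlags: true},
		{name: "cmp2", definesFlags: true},
		{name: "setlt", usesFlag: "cmp2"},
	}
	fmt.Println(wantedAtTop(block, "") == "")               // true: cmp2 is computed inside the block
	fmt.Println(wantedAtTop([]instr{{name: "mov"}}, "cmp0")) // cmp0: still wanted from a predecessor
}
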
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/func.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/func.go
deleted file mode 100644
index d2b1513..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/func.go
+++ /dev/null
@@ -1,491 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/func.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/func.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"math"
-	"strings"
-)
-
-// A Func represents a Go func declaration (or function literal) and
-// its body. This package compiles each Func independently.
-type Func struct {
-	Config     *Config     // architecture information
-	pass       *pass       // current pass information (name, options, etc.)
-	Name       string      // e.g. bytes·Compare
-	Type       Type        // type signature of the function.
-	StaticData interface{} // associated static data, untouched by the ssa package
-	Blocks     []*Block    // unordered set of all basic blocks (note: not indexable by ID)
-	Entry      *Block      // the entry basic block
-	bid        idAlloc     // block ID allocator
-	vid        idAlloc     // value ID allocator
-
-	scheduled bool // Values in Blocks are in final order
-	NoSplit   bool // true if function is marked as nosplit.  Used by schedule check pass.
-
-	// when register allocation is done, maps value ids to locations
-	RegAlloc []Location
-
-	// map from LocalSlot to set of Values that we want to store in that slot.
-	NamedValues map[LocalSlot][]*Value
-	// Names is a copy of NamedValues.Keys. We keep a separate list
-	// of keys to make iteration order deterministic.
-	Names []LocalSlot
-
-	freeValues *Value // free Values linked by argstorage[0].  All other fields except ID are 0/nil.
-	freeBlocks *Block // free Blocks linked by succstorage[0].b.  All other fields except ID are 0/nil.
-
-	cachedPostorder []*Block   // cached postorder traversal
-	cachedIdom      []*Block   // cached immediate dominators
-	cachedSdom      SparseTree // cached dominator tree
-	cachedLoopnest  *loopnest  // cached loop nest information
-
-	constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type
-}
-
-// NumBlocks returns an integer larger than the id of any Block in the Func.
-func (f *Func) NumBlocks() int {
-	return f.bid.num()
-}
-
-// NumValues returns an integer larger than the id of any Value in the Func.
-func (f *Func) NumValues() int {
-	return f.vid.num()
-}
-
-// newSparseSet returns a sparse set that can store at least up to n integers.
-func (f *Func) newSparseSet(n int) *sparseSet {
-	for i, scr := range f.Config.scrSparse {
-		if scr != nil && scr.cap() >= n {
-			f.Config.scrSparse[i] = nil
-			scr.clear()
-			return scr
-		}
-	}
-	return newSparseSet(n)
-}
-
-// retSparseSet returns a sparse set to the config's cache of sparse sets to be reused by f.newSparseSet.
-func (f *Func) retSparseSet(ss *sparseSet) {
-	for i, scr := range f.Config.scrSparse {
-		if scr == nil {
-			f.Config.scrSparse[i] = ss
-			return
-		}
-	}
-	f.Config.scrSparse = append(f.Config.scrSparse, ss)
-}
-
-// newValue allocates a new Value with the given fields and places it at the end of b.Values.
-func (f *Func) newValue(op Op, t Type, b *Block, line int32) *Value {
-	var v *Value
-	if f.freeValues != nil {
-		v = f.freeValues
-		f.freeValues = v.argstorage[0]
-		v.argstorage[0] = nil
-	} else {
-		ID := f.vid.get()
-		if int(ID) < len(f.Config.values) {
-			v = &f.Config.values[ID]
-		} else {
-			v = &Value{ID: ID}
-		}
-	}
-	v.Op = op
-	v.Type = t
-	v.Block = b
-	v.Line = line
-	b.Values = append(b.Values, v)
-	return v
-}
-
-// LogStat writes a string key and int value as a warning in a
-// tab-separated format easily handled by spreadsheets or awk.
-// file names, lines, and function names are included to provide enough (?)
-// context to allow item-by-item comparisons across runs.
-// For example:
-// awk 'BEGIN {FS="\t"} $3~/TIME/{sum+=$4} END{print "t(ns)=",sum}' t.log
-func (f *Func) LogStat(key string, args ...interface{}) {
-	value := ""
-	for _, a := range args {
-		value += fmt.Sprintf("\t%v", a)
-	}
-	n := "missing_pass"
-	if f.pass != nil {
-		n = strings.Replace(f.pass.name, " ", "_", -1)
-	}
-	f.Config.Warnl(f.Entry.Line, "\t%s\t%s%s\t%s", n, key, value, f.Name)
-}
-
-// freeValue frees a value. It must no longer be referenced.
-func (f *Func) freeValue(v *Value) {
-	if v.Block == nil {
-		f.Fatalf("trying to free an already freed value")
-	}
-	if v.Uses != 0 {
-		f.Fatalf("value %s still has %d uses", v, v.Uses)
-	}
-	// Clear everything but ID (which we reuse).
-	id := v.ID
-
-	// Zero argument values might be cached, so remove them there.
-	nArgs := opcodeTable[v.Op].argLen
-	if nArgs == 0 {
-		vv := f.constants[v.AuxInt]
-		for i, cv := range vv {
-			if v == cv {
-				vv[i] = vv[len(vv)-1]
-				f.constants[v.AuxInt] = vv[0 : len(vv)-1]
-				break
-			}
-		}
-	}
-	*v = Value{}
-	v.ID = id
-	v.argstorage[0] = f.freeValues
-	f.freeValues = v
-}
-
-// NewBlock allocates a new Block of the given kind and places it at the end of f.Blocks.
-func (f *Func) NewBlock(kind BlockKind) *Block {
-	var b *Block
-	if f.freeBlocks != nil {
-		b = f.freeBlocks
-		f.freeBlocks = b.succstorage[0].b
-		b.succstorage[0].b = nil
-	} else {
-		ID := f.bid.get()
-		if int(ID) < len(f.Config.blocks) {
-			b = &f.Config.blocks[ID]
-		} else {
-			b = &Block{ID: ID}
-		}
-	}
-	b.Kind = kind
-	b.Func = f
-	b.Preds = b.predstorage[:0]
-	b.Succs = b.succstorage[:0]
-	b.Values = b.valstorage[:0]
-	f.Blocks = append(f.Blocks, b)
-	f.invalidateCFG()
-	return b
-}
-
-func (f *Func) freeBlock(b *Block) {
-	if b.Func == nil {
-		f.Fatalf("trying to free an already freed block")
-	}
-	// Clear everything but ID (which we reuse).
-	id := b.ID
-	*b = Block{}
-	b.ID = id
-	b.succstorage[0].b = f.freeBlocks
-	f.freeBlocks = b
-}
-
-// NewValue0 returns a new value in the block with no arguments and zero aux values.
-func (b *Block) NewValue0(line int32, op Op, t Type) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Args = v.argstorage[:0]
-	return v
-}
-
-// NewValue0I returns a new value in the block with no arguments and an auxint value.
-func (b *Block) NewValue0I(line int32, op Op, t Type, auxint int64) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = auxint
-	v.Args = v.argstorage[:0]
-	return v
-}
-
-// NewValue0A returns a new value in the block with no arguments and an aux value.
-func (b *Block) NewValue0A(line int32, op Op, t Type, aux interface{}) *Value {
-	if _, ok := aux.(int64); ok {
-		// Disallow int64 aux values. They should be in the auxint field instead.
-		// Maybe we want to allow this at some point, but for now we disallow it
-		// to prevent errors like using NewValue1A instead of NewValue1I.
-		b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)
-	}
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Aux = aux
-	v.Args = v.argstorage[:0]
-	return v
-}
-
-// NewValue0IA returns a new value in the block with no arguments and both an auxint and aux values.
-func (b *Block) NewValue0IA(line int32, op Op, t Type, auxint int64, aux interface{}) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = auxint
-	v.Aux = aux
-	v.Args = v.argstorage[:0]
-	return v
-}
-
-// NewValue1 returns a new value in the block with one argument and zero aux values.
-func (b *Block) NewValue1(line int32, op Op, t Type, arg *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Args = v.argstorage[:1]
-	v.argstorage[0] = arg
-	arg.Uses++
-	return v
-}
-
-// NewValue1I returns a new value in the block with one argument and an auxint value.
-func (b *Block) NewValue1I(line int32, op Op, t Type, auxint int64, arg *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = auxint
-	v.Args = v.argstorage[:1]
-	v.argstorage[0] = arg
-	arg.Uses++
-	return v
-}
-
-// NewValue1A returns a new value in the block with one argument and an aux value.
-func (b *Block) NewValue1A(line int32, op Op, t Type, aux interface{}, arg *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Aux = aux
-	v.Args = v.argstorage[:1]
-	v.argstorage[0] = arg
-	arg.Uses++
-	return v
-}
-
-// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
-func (b *Block) NewValue1IA(line int32, op Op, t Type, auxint int64, aux interface{}, arg *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = auxint
-	v.Aux = aux
-	v.Args = v.argstorage[:1]
-	v.argstorage[0] = arg
-	arg.Uses++
-	return v
-}
-
-// NewValue2 returns a new value in the block with two arguments and zero aux values.
-func (b *Block) NewValue2(line int32, op Op, t Type, arg0, arg1 *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Args = v.argstorage[:2]
-	v.argstorage[0] = arg0
-	v.argstorage[1] = arg1
-	arg0.Uses++
-	arg1.Uses++
-	return v
-}
-
-// NewValue2I returns a new value in the block with two arguments and an auxint value.
-func (b *Block) NewValue2I(line int32, op Op, t Type, auxint int64, arg0, arg1 *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = auxint
-	v.Args = v.argstorage[:2]
-	v.argstorage[0] = arg0
-	v.argstorage[1] = arg1
-	arg0.Uses++
-	arg1.Uses++
-	return v
-}
-
-// NewValue3 returns a new value in the block with three arguments and zero aux values.
-func (b *Block) NewValue3(line int32, op Op, t Type, arg0, arg1, arg2 *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Args = v.argstorage[:3]
-	v.argstorage[0] = arg0
-	v.argstorage[1] = arg1
-	v.argstorage[2] = arg2
-	arg0.Uses++
-	arg1.Uses++
-	arg2.Uses++
-	return v
-}
-
-// NewValue3I returns a new value in the block with three arguments and an auxint value.
-func (b *Block) NewValue3I(line int32, op Op, t Type, auxint int64, arg0, arg1, arg2 *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = auxint
-	v.Args = v.argstorage[:3]
-	v.argstorage[0] = arg0
-	v.argstorage[1] = arg1
-	v.argstorage[2] = arg2
-	arg0.Uses++
-	arg1.Uses++
-	arg2.Uses++
-	return v
-}
-
-// NewValue4 returns a new value in the block with four arguments and zero aux values.
-func (b *Block) NewValue4(line int32, op Op, t Type, arg0, arg1, arg2, arg3 *Value) *Value {
-	v := b.Func.newValue(op, t, b, line)
-	v.AuxInt = 0
-	v.Args = []*Value{arg0, arg1, arg2, arg3}
-	arg0.Uses++
-	arg1.Uses++
-	arg2.Uses++
-	arg3.Uses++
-	return v
-}
-
-// constVal returns a constant value for c.
-func (f *Func) constVal(line int32, op Op, t Type, c int64, setAux bool) *Value {
-	if f.constants == nil {
-		f.constants = make(map[int64][]*Value)
-	}
-	vv := f.constants[c]
-	for _, v := range vv {
-		if v.Op == op && v.Type.Compare(t) == CMPeq {
-			if setAux && v.AuxInt != c {
-				panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c))
-			}
-			return v
-		}
-	}
-	var v *Value
-	if setAux {
-		v = f.Entry.NewValue0I(line, op, t, c)
-	} else {
-		v = f.Entry.NewValue0(line, op, t)
-	}
-	f.constants[c] = append(vv, v)
-	return v
-}
-
-// These magic auxint values let us easily cache non-numeric constants
-// using the same constants map while making collisions unlikely.
-// These values are unlikely to occur in regular code and
-// are easy to grep for in case of bugs.
-const (
-	constSliceMagic       = 1122334455
-	constInterfaceMagic   = 2233445566
-	constNilMagic         = 3344556677
-	constEmptyStringMagic = 4455667788
-)
-
-// ConstBool returns a boolean constant representing its argument.
-func (f *Func) ConstBool(line int32, t Type, c bool) *Value {
-	i := int64(0)
-	if c {
-		i = 1
-	}
-	return f.constVal(line, OpConstBool, t, i, true)
-}
-func (f *Func) ConstInt8(line int32, t Type, c int8) *Value {
-	return f.constVal(line, OpConst8, t, int64(c), true)
-}
-func (f *Func) ConstInt16(line int32, t Type, c int16) *Value {
-	return f.constVal(line, OpConst16, t, int64(c), true)
-}
-func (f *Func) ConstInt32(line int32, t Type, c int32) *Value {
-	return f.constVal(line, OpConst32, t, int64(c), true)
-}
-func (f *Func) ConstInt64(line int32, t Type, c int64) *Value {
-	return f.constVal(line, OpConst64, t, c, true)
-}
-func (f *Func) ConstFloat32(line int32, t Type, c float64) *Value {
-	return f.constVal(line, OpConst32F, t, int64(math.Float64bits(float64(float32(c)))), true)
-}
-func (f *Func) ConstFloat64(line int32, t Type, c float64) *Value {
-	return f.constVal(line, OpConst64F, t, int64(math.Float64bits(c)), true)
-}
-
-func (f *Func) ConstSlice(line int32, t Type) *Value {
-	return f.constVal(line, OpConstSlice, t, constSliceMagic, false)
-}
-func (f *Func) ConstInterface(line int32, t Type) *Value {
-	return f.constVal(line, OpConstInterface, t, constInterfaceMagic, false)
-}
-func (f *Func) ConstNil(line int32, t Type) *Value {
-	return f.constVal(line, OpConstNil, t, constNilMagic, false)
-}
-func (f *Func) ConstEmptyString(line int32, t Type) *Value {
-	v := f.constVal(line, OpConstString, t, constEmptyStringMagic, false)
-	v.Aux = ""
-	return v
-}
-
-func (f *Func) Logf(msg string, args ...interface{})   { f.Config.Logf(msg, args...) }
-func (f *Func) Log() bool                              { return f.Config.Log() }
-func (f *Func) Fatalf(msg string, args ...interface{}) { f.Config.Fatalf(f.Entry.Line, msg, args...) }
-
-func (f *Func) Free() {
-	// Clear cached CFG info.
-	f.invalidateCFG()
-
-	// Clear values.
-	n := f.vid.num()
-	if n > len(f.Config.values) {
-		n = len(f.Config.values)
-	}
-	for i := 1; i < n; i++ {
-		f.Config.values[i] = Value{}
-		f.Config.values[i].ID = ID(i)
-	}
-
-	// Clear blocks.
-	n = f.bid.num()
-	if n > len(f.Config.blocks) {
-		n = len(f.Config.blocks)
-	}
-	for i := 1; i < n; i++ {
-		f.Config.blocks[i] = Block{}
-		f.Config.blocks[i].ID = ID(i)
-	}
-
-	// Unregister from config.
-	if f.Config.curFunc != f {
-		f.Fatalf("free of function which isn't the last one allocated")
-	}
-	f.Config.curFunc = nil
-	*f = Func{} // just in case
-}
-
-// postorder returns the reachable blocks in f in a postorder traversal.
-func (f *Func) postorder() []*Block {
-	if f.cachedPostorder == nil {
-		f.cachedPostorder = postorder(f)
-	}
-	return f.cachedPostorder
-}
-
-// Idom returns a map from block ID to the immediate dominator of that block.
-// f.Entry.ID maps to nil. Unreachable blocks map to nil as well.
-func (f *Func) Idom() []*Block {
-	if f.cachedIdom == nil {
-		f.cachedIdom = dominators(f)
-	}
-	return f.cachedIdom
-}
-
-// sdom returns a sparse tree representing the dominator relationships
-// among the blocks of f.
-func (f *Func) sdom() SparseTree {
-	if f.cachedSdom == nil {
-		f.cachedSdom = newSparseTree(f, f.Idom())
-	}
-	return f.cachedSdom
-}
-
-// loopnest returns the loop nest information for f.
-func (f *Func) loopnest() *loopnest {
-	if f.cachedLoopnest == nil {
-		f.cachedLoopnest = loopnestfor(f)
-	}
-	return f.cachedLoopnest
-}
-
-// invalidateCFG tells f that its CFG has changed.
-func (f *Func) invalidateCFG() {
-	f.cachedPostorder = nil
-	f.cachedIdom = nil
-	f.cachedSdom = nil
-	f.cachedLoopnest = nil
-}
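
Func above recycles Values and Blocks via freeValues and freeBlocks, intrusive free lists threaded through argstorage[0] and succstorage[0].b so that freeing and reallocating cost no extra heap allocation and IDs are preserved across reuse. The same idea as a standalone sketch with a hypothetical node type:

package main

import "fmt"

// node is a hypothetical object we want to recycle. Like ssa.Value, it keeps
// its ID across reuse and threads the free list through one of its own fields.
type node struct {
	ID   int
	data string
	next *node // free-list link, only meaningful while the node is freed
}

type pool struct {
	free   *node
	nextID int
}

// get reuses a freed node if one is available, otherwise allocates a new one.
func (p *pool) get() *node {
	if p.free != nil {
		n := p.free
		p.free = n.next
		n.next = nil
		return n
	}
	p.nextID++
	return &node{ID: p.nextID}
}

// put clears everything but the ID and pushes the node onto the free list.
func (p *pool) put(n *node) {
	id := n.ID
	*n = node{ID: id, next: p.free}
	p.free = n
}

func main() {
	var p pool
	a := p.get()
	a.data = "first use"
	p.put(a)
	b := p.get() // reuses a's storage and ID; data was cleared by put
	fmt.Println(b.ID, b == a, b.data == "") // 1 true true
}
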
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/func_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/func_test.go
deleted file mode 100644
index 60327b9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/func_test.go
+++ /dev/null
@@ -1,470 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/func_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/func_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains some utility functions to help define Funcs for testing.
-// As an example, the following func
-//
-//   b1:
-//     v1 = InitMem <mem>
-//     Plain -> b2
-//   b2:
-//     Exit v1
-//   b3:
-//     v2 = Const <bool> [true]
-//     If v2 -> b3 b2
-//
-// can be defined as
-//
-//   fun := Fun("entry",
-//       Bloc("entry",
-//           Valu("mem", OpInitMem, TypeMem, 0, nil),
-//           Goto("exit")),
-//       Bloc("exit",
-//           Exit("mem")),
-//       Bloc("deadblock",
-//          Valu("deadval", OpConstBool, TypeBool, 0, true),
-//          If("deadval", "deadblock", "exit")))
-//
-// and the Blocks or Values used in the Func can be accessed
-// like this:
-//   fun.blocks["entry"] or fun.values["deadval"]
-
-package ssa
-
-// TODO(matloob): Choose better names for Fun, Bloc, Goto, etc.
-// TODO(matloob): Write a parser for the Func disassembly. Maybe
-//                the parser can be used instead of Fun.
-
-import (
-	"fmt"
-	"reflect"
-	"testing"
-)
-
-// Compare two Funcs for equivalence. Their CFGs must be isomorphic,
-// and their values must correspond.
-// Requires that values and predecessors are in the same order, even
-// though Funcs could be equivalent when they are not.
-// TODO(matloob): Allow values and predecessors to be in different
-// orders if the CFG are otherwise equivalent.
-func Equiv(f, g *Func) bool {
-	valcor := make(map[*Value]*Value)
-	var checkVal func(fv, gv *Value) bool
-	checkVal = func(fv, gv *Value) bool {
-		if fv == nil && gv == nil {
-			return true
-		}
-		if valcor[fv] == nil && valcor[gv] == nil {
-			valcor[fv] = gv
-			valcor[gv] = fv
-			// Ignore ids. Ops and Types are compared for equality.
-			// TODO(matloob): Make sure types are canonical and can
-			// be compared for equality.
-			if fv.Op != gv.Op || fv.Type != gv.Type || fv.AuxInt != gv.AuxInt {
-				return false
-			}
-			if !reflect.DeepEqual(fv.Aux, gv.Aux) {
-				// This makes the assumption that aux values can be compared
-				// using DeepEqual.
-				// TODO(matloob): Aux values may be *gc.Sym pointers in the near
-				// future. Make sure they are canonical.
-				return false
-			}
-			if len(fv.Args) != len(gv.Args) {
-				return false
-			}
-			for i := range fv.Args {
-				if !checkVal(fv.Args[i], gv.Args[i]) {
-					return false
-				}
-			}
-		}
-		return valcor[fv] == gv && valcor[gv] == fv
-	}
-	blkcor := make(map[*Block]*Block)
-	var checkBlk func(fb, gb *Block) bool
-	checkBlk = func(fb, gb *Block) bool {
-		if blkcor[fb] == nil && blkcor[gb] == nil {
-			blkcor[fb] = gb
-			blkcor[gb] = fb
-			// ignore ids
-			if fb.Kind != gb.Kind {
-				return false
-			}
-			if len(fb.Values) != len(gb.Values) {
-				return false
-			}
-			for i := range fb.Values {
-				if !checkVal(fb.Values[i], gb.Values[i]) {
-					return false
-				}
-			}
-			if len(fb.Succs) != len(gb.Succs) {
-				return false
-			}
-			for i := range fb.Succs {
-				if !checkBlk(fb.Succs[i].b, gb.Succs[i].b) {
-					return false
-				}
-			}
-			if len(fb.Preds) != len(gb.Preds) {
-				return false
-			}
-			for i := range fb.Preds {
-				if !checkBlk(fb.Preds[i].b, gb.Preds[i].b) {
-					return false
-				}
-			}
-			return true
-
-		}
-		return blkcor[fb] == gb && blkcor[gb] == fb
-	}
-
-	return checkBlk(f.Entry, g.Entry)
-}
-
-// fun is the return type of Fun. It contains the created func
-// itself as well as indexes from block and value names into the
-// corresponding Blocks and Values.
-type fun struct {
-	f      *Func
-	blocks map[string]*Block
-	values map[string]*Value
-}
-
-var emptyPass pass = pass{
-	name: "empty pass",
-}
-
-// Fun takes the name of an entry bloc and a series of Bloc calls, and
-// returns a fun containing the composed Func. entry must be a name
-// supplied to one of the Bloc functions. Each of the bloc names and
-// valu names should be unique across the Fun.
-func Fun(c *Config, entry string, blocs ...bloc) fun {
-	f := c.NewFunc()
-	f.pass = &emptyPass
-
-	blocks := make(map[string]*Block)
-	values := make(map[string]*Value)
-	// Create all the blocks and values.
-	for _, bloc := range blocs {
-		b := f.NewBlock(bloc.control.kind)
-		blocks[bloc.name] = b
-		for _, valu := range bloc.valus {
-			// args are filled in the second pass.
-			values[valu.name] = b.NewValue0IA(0, valu.op, valu.t, valu.auxint, valu.aux)
-		}
-	}
-	// Connect the blocks together and specify control values.
-	f.Entry = blocks[entry]
-	for _, bloc := range blocs {
-		b := blocks[bloc.name]
-		c := bloc.control
-		// Specify control values.
-		if c.control != "" {
-			cval, ok := values[c.control]
-			if !ok {
-				f.Fatalf("control value for block %s missing", bloc.name)
-			}
-			b.SetControl(cval)
-		}
-		// Fill in args.
-		for _, valu := range bloc.valus {
-			v := values[valu.name]
-			for _, arg := range valu.args {
-				a, ok := values[arg]
-				if !ok {
-					b.Fatalf("arg %s missing for value %s in block %s",
-						arg, valu.name, bloc.name)
-				}
-				v.AddArg(a)
-			}
-		}
-		// Connect to successors.
-		for _, succ := range c.succs {
-			b.AddEdgeTo(blocks[succ])
-		}
-	}
-	return fun{f, blocks, values}
-}
-
-// Bloc defines a block for Fun. The bloc name should be unique
-// across the containing Fun. entries should consist of calls to Valu,
-// as well as one call to Goto, If, or Exit to specify the block kind.
-func Bloc(name string, entries ...interface{}) bloc {
-	b := bloc{}
-	b.name = name
-	seenCtrl := false
-	for _, e := range entries {
-		switch v := e.(type) {
-		case ctrl:
-			// there should be exactly one Ctrl entry.
-			if seenCtrl {
-				panic(fmt.Sprintf("already seen control for block %s", name))
-			}
-			b.control = v
-			seenCtrl = true
-		case valu:
-			b.valus = append(b.valus, v)
-		}
-	}
-	if !seenCtrl {
-		panic(fmt.Sprintf("block %s doesn't have control", b.name))
-	}
-	return b
-}
-
-// Valu defines a value in a block.
-func Valu(name string, op Op, t Type, auxint int64, aux interface{}, args ...string) valu {
-	return valu{name, op, t, auxint, aux, args}
-}
-
-// Goto specifies that this is a BlockPlain and names the single successor.
-// TODO(matloob): choose a better name.
-func Goto(succ string) ctrl {
-	return ctrl{BlockPlain, "", []string{succ}}
-}
-
-// If specifies a BlockIf.
-func If(cond, sub, alt string) ctrl {
-	return ctrl{BlockIf, cond, []string{sub, alt}}
-}
-
-// Exit specifies a BlockExit.
-func Exit(arg string) ctrl {
-	return ctrl{BlockExit, arg, []string{}}
-}
-
-// Eq specifies a BlockAMD64EQ.
-func Eq(cond, sub, alt string) ctrl {
-	return ctrl{BlockAMD64EQ, cond, []string{sub, alt}}
-}
-
-// bloc, ctrl, and valu are internal structures used by Bloc, Valu, Goto,
-// If, and Exit to help define blocks.
-
-type bloc struct {
-	name    string
-	control ctrl
-	valus   []valu
-}
-
-type ctrl struct {
-	kind    BlockKind
-	control string
-	succs   []string
-}
-
-type valu struct {
-	name   string
-	op     Op
-	t      Type
-	auxint int64
-	aux    interface{}
-	args   []string
-}
-
-func TestArgs(t *testing.T) {
-	c := testConfig(t)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("a", OpConst64, TypeInt64, 14, nil),
-			Valu("b", OpConst64, TypeInt64, 26, nil),
-			Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-	sum := fun.values["sum"]
-	for i, name := range []string{"a", "b"} {
-		if sum.Args[i] != fun.values[name] {
-			t.Errorf("arg %d for sum is incorrect: want %s, got %s",
-				i, sum.Args[i], fun.values[name])
-		}
-	}
-}
-
-func TestEquiv(t *testing.T) {
-	equivalentCases := []struct{ f, g fun }{
-		// simple case
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Goto("exit")),
-				Bloc("exit",
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Goto("exit")),
-				Bloc("exit",
-					Exit("mem"))),
-		},
-		// block order changed
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Goto("exit")),
-				Bloc("exit",
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("exit",
-					Exit("mem")),
-				Bloc("entry",
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Goto("exit"))),
-		},
-	}
-	for _, c := range equivalentCases {
-		if !Equiv(c.f.f, c.g.f) {
-			t.Error("expected equivalence. Func definitions:")
-			t.Error(c.f.f)
-			t.Error(c.g.f)
-		}
-	}
-
-	differentCases := []struct{ f, g fun }{
-		// different shape
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Goto("exit")),
-				Bloc("exit",
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Exit("mem"))),
-		},
-		// value order changed
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Exit("mem"))),
-		},
-		// value auxint different
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 26, nil),
-					Exit("mem"))),
-		},
-		// value aux different
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 0, 14),
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 0, 26),
-					Exit("mem"))),
-		},
-		// value args different
-		{
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 14, nil),
-					Valu("b", OpConst64, TypeInt64, 26, nil),
-					Valu("sum", OpAdd64, TypeInt64, 0, nil, "a", "b"),
-					Exit("mem"))),
-			Fun(testConfig(t), "entry",
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("a", OpConst64, TypeInt64, 0, nil),
-					Valu("b", OpConst64, TypeInt64, 14, nil),
-					Valu("sum", OpAdd64, TypeInt64, 0, nil, "b", "a"),
-					Exit("mem"))),
-		},
-	}
-	for _, c := range differentCases {
-		if Equiv(c.f.f, c.g.f) {
-			t.Error("expected difference. Func definitions:")
-			t.Error(c.f.f)
-			t.Error(c.g.f)
-		}
-	}
-}
-
-// TestConstCache ensures that the cache will not return
-// reused freed values with a non-matching AuxInt.
-func TestConstCache(t *testing.T) {
-	f := Fun(testConfig(t), "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Exit("mem")))
-	v1 := f.f.ConstBool(0, TypeBool, false)
-	v2 := f.f.ConstBool(0, TypeBool, true)
-	f.f.freeValue(v1)
-	f.f.freeValue(v2)
-	v3 := f.f.ConstBool(0, TypeBool, false)
-	v4 := f.f.ConstBool(0, TypeBool, true)
-	if v3.AuxInt != 0 {
-		t.Errorf("expected %s to have auxint of 0\n", v3.LongString())
-	}
-	if v4.AuxInt != 1 {
-		t.Errorf("expected %s to have auxint of 1\n", v4.LongString())
-	}
-
-}
-
-// opcodeMap returns a map from opcode to the number of times that opcode
-// appears in the function.
-func opcodeMap(f *Func) map[Op]int {
-	m := map[Op]int{}
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			m[v.Op]++
-		}
-	}
-	return m
-}
-
-// checkOpcodeCounts checks that the number of opcodes listed in m agrees with
-// the number of opcodes that appear in the function.
-func checkOpcodeCounts(t *testing.T, f *Func, m map[Op]int) {
-	n := opcodeMap(f)
-	for op, cnt := range m {
-		if n[op] != cnt {
-			t.Errorf("%s appears %d times, want %d times", op, n[op], cnt)
-		}
-	}
-}
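
For orientation, the hand-built CFG in TestArgs above is roughly the SSA form of the ordinary Go function below; this is an illustrative reading of the test, not code from the deleted file.

package main

import "fmt"

// sum is roughly the function whose SSA form TestArgs builds by hand:
// two int64 constants, one Add64, and a return through the exit block
// that carries the memory state.
func sum() int64 {
	a := int64(14)
	b := int64(26)
	return a + b
}

func main() {
	fmt.Println(sum()) // 42
}
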
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/fuse.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/fuse.go
deleted file mode 100644
index efa6dce..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/fuse.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/fuse.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/fuse.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// fuse simplifies control flow by joining basic blocks.
-func fuse(f *Func) {
-	for changed := true; changed; {
-		changed = false
-		for _, b := range f.Blocks {
-			changed = fuseBlockIf(b) || changed
-			changed = fuseBlockPlain(b) || changed
-		}
-	}
-}
-
-// fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
-//
-//   b        b        b      b
-//  / \      | \      / |    | |
-// s0  s1    |  s1   s0 |    | |
-//  \ /      | /      \ |    | |
-//   ss      ss        ss     ss
-//
-// If all Phi ops in ss have identical variables for slots corresponding to
-// s0, s1 and b then the branch can be dropped.
-// This optimization often comes up in switch statements with multiple
-// expressions in a case clause:
-//   switch n {
-//     case 1,2,3: return 4
-//   }
-// TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway?
-func fuseBlockIf(b *Block) bool {
-	if b.Kind != BlockIf {
-		return false
-	}
-
-	var ss0, ss1 *Block
-	s0 := b.Succs[0].b
-	i0 := b.Succs[0].i
-	if s0.Kind != BlockPlain || len(s0.Preds) != 1 || len(s0.Values) != 0 {
-		s0, ss0 = b, s0
-	} else {
-		ss0 = s0.Succs[0].b
-		i0 = s0.Succs[0].i
-	}
-	s1 := b.Succs[1].b
-	i1 := b.Succs[1].i
-	if s1.Kind != BlockPlain || len(s1.Preds) != 1 || len(s1.Values) != 0 {
-		s1, ss1 = b, s1
-	} else {
-		ss1 = s1.Succs[0].b
-		i1 = s1.Succs[0].i
-	}
-
-	if ss0 != ss1 {
-		return false
-	}
-	ss := ss0
-
-	// s0 and s1 are equal to b if the corresponding block is missing
-	// (2nd, 3rd and 4th case in the figure).
-
-	for _, v := range ss.Values {
-		if v.Op == OpPhi && v.Uses > 0 && v.Args[i0] != v.Args[i1] {
-			return false
-		}
-	}
-
-	// Now we have two of the following: b->ss, b->s0->ss and b->s1->ss,
-	// with s0 and s1 empty if they exist.
-	// We can replace them with b->ss, since all OpPhis in ss have
-	// identical arguments for the corresponding predecessors (verified above).
-	// No critical edge is introduced because b will have one successor.
-	if s0 != b && s1 != b {
-		// Replace edge b->s0->ss with b->ss.
-		// We need to keep a slot for Phis corresponding to b.
-		b.Succs[0] = Edge{ss, i0}
-		ss.Preds[i0] = Edge{b, 0}
-		b.removeEdge(1)
-		s1.removeEdge(0)
-	} else if s0 != b {
-		b.removeEdge(0)
-		s0.removeEdge(0)
-	} else if s1 != b {
-		b.removeEdge(1)
-		s1.removeEdge(0)
-	} else {
-		b.removeEdge(1)
-	}
-	b.Kind = BlockPlain
-	b.SetControl(nil)
-
-	// Trash the empty blocks s0 & s1.
-	if s0 != b {
-		s0.Kind = BlockInvalid
-		s0.Values = nil
-		s0.Succs = nil
-		s0.Preds = nil
-	}
-	if s1 != b {
-		s1.Kind = BlockInvalid
-		s1.Values = nil
-		s1.Succs = nil
-		s1.Preds = nil
-	}
-	return true
-}
-
-func fuseBlockPlain(b *Block) bool {
-	if b.Kind != BlockPlain {
-		return false
-	}
-
-	c := b.Succs[0].b
-	if len(c.Preds) != 1 {
-		return false
-	}
-
-	// move all of b's values to c.
-	for _, v := range b.Values {
-		v.Block = c
-		c.Values = append(c.Values, v)
-	}
-
-	// replace b->c edge with preds(b) -> c
-	c.predstorage[0] = Edge{}
-	if len(b.Preds) > len(b.predstorage) {
-		c.Preds = b.Preds
-	} else {
-		c.Preds = append(c.predstorage[:0], b.Preds...)
-	}
-	for i, e := range c.Preds {
-		p := e.b
-		p.Succs[e.i] = Edge{c, i}
-	}
-	f := b.Func
-	if f.Entry == b {
-		f.Entry = c
-	}
-	f.invalidateCFG()
-
-	// trash b, just in case
-	b.Kind = BlockInvalid
-	b.Values = nil
-	b.Preds = nil
-	b.Succs = nil
-	return true
-}
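
The following standalone sketch shows the core move of fuseBlockPlain on a hypothetical toyBlock type (not the real ssa.Block API, and without the predstorage reuse or invalidateCFG bookkeeping): when a plain block b has a unique successor c and c has no other predecessors, b's values migrate into c and b's predecessors are rewired to point at c.

package main

import "fmt"

// toyBlock is a hypothetical stand-in for ssa.Block, carrying only what
// the fusion idea needs: values, predecessors and successors.
type toyBlock struct {
	name   string
	values []string
	preds  []*toyBlock
	succs  []*toyBlock
}

// fusePlain merges b into its unique successor c when c has no other
// predecessors, mirroring the shape of fuseBlockPlain above.
func fusePlain(b *toyBlock) bool {
	if len(b.succs) != 1 {
		return false
	}
	c := b.succs[0]
	if len(c.preds) != 1 || c.preds[0] != b {
		return false
	}
	// Move all of b's values to c.
	c.values = append(c.values, b.values...)
	// Replace the b->c edge with preds(b)->c.
	c.preds = b.preds
	for _, p := range c.preds {
		for i, s := range p.succs {
			if s == b {
				p.succs[i] = c
			}
		}
	}
	// Trash b, just in case.
	b.values, b.preds, b.succs = nil, nil, nil
	return true
}

func main() {
	entry := &toyBlock{name: "entry", values: []string{"v1"}}
	mid := &toyBlock{name: "mid", values: []string{"v2"}}
	exit := &toyBlock{name: "exit", values: []string{"v3"}}
	entry.succs = []*toyBlock{mid}
	mid.preds = []*toyBlock{entry}
	mid.succs = []*toyBlock{exit}
	exit.preds = []*toyBlock{mid}

	fusePlain(mid)
	fmt.Println(len(exit.values), exit.preds[0].name) // 2 entry
}
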
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/fuse_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/fuse_test.go
deleted file mode 100644
index 6641fe5..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/fuse_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/fuse_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/fuse_test.go:1
-package ssa
-
-import (
-	"fmt"
-	"strconv"
-	"testing"
-)
-
-func TestFuseEliminatesOneBranch(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("nilptr", OpConstNil, ptrType, 0, nil),
-			Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
-			If("bool1", "then", "exit")),
-		Bloc("then",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	fuse(fun.f)
-
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
-			t.Errorf("then was not eliminated, but should have been")
-		}
-	}
-}
-
-func TestFuseEliminatesBothBranches(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("nilptr", OpConstNil, ptrType, 0, nil),
-			Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
-			If("bool1", "then", "else")),
-		Bloc("then",
-			Goto("exit")),
-		Bloc("else",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	fuse(fun.f)
-
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
-			t.Errorf("then was not eliminated, but should have been")
-		}
-		if b == fun.blocks["else"] && b.Kind != BlockInvalid {
-			t.Errorf("else was not eliminated, but should have been")
-		}
-	}
-}
-
-func TestFuseHandlesPhis(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("nilptr", OpConstNil, ptrType, 0, nil),
-			Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
-			If("bool1", "then", "else")),
-		Bloc("then",
-			Goto("exit")),
-		Bloc("else",
-			Goto("exit")),
-		Bloc("exit",
-			Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr1"),
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	fuse(fun.f)
-
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["then"] && b.Kind != BlockInvalid {
-			t.Errorf("then was not eliminated, but should have been")
-		}
-		if b == fun.blocks["else"] && b.Kind != BlockInvalid {
-			t.Errorf("else was not eliminated, but should have been")
-		}
-	}
-}
-
-func TestFuseEliminatesEmptyBlocks(t *testing.T) {
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("z0")),
-		Bloc("z1",
-			Goto("z2")),
-		Bloc("z3",
-			Goto("exit")),
-		Bloc("z2",
-			Goto("z3")),
-		Bloc("z0",
-			Goto("z1")),
-		Bloc("exit",
-			Exit("mem"),
-		))
-
-	CheckFunc(fun.f)
-	fuse(fun.f)
-
-	for k, b := range fun.blocks {
-		if k[:1] == "z" && b.Kind != BlockInvalid {
-			t.Errorf("%s was not eliminated, but should have been", k)
-		}
-	}
-}
-
-func BenchmarkFuse(b *testing.B) {
-	for _, n := range [...]int{1, 10, 100, 1000, 10000} {
-		b.Run(strconv.Itoa(n), func(b *testing.B) {
-			c := testConfig(b)
-
-			blocks := make([]bloc, 0, 2*n+3)
-			blocks = append(blocks,
-				Bloc("entry",
-					Valu("mem", OpInitMem, TypeMem, 0, nil),
-					Valu("cond", OpArg, TypeBool, 0, nil),
-					Valu("x", OpArg, TypeInt64, 0, nil),
-					Goto("exit")))
-
-			phiArgs := make([]string, 0, 2*n)
-			for i := 0; i < n; i++ {
-				cname := fmt.Sprintf("c%d", i)
-				blocks = append(blocks,
-					Bloc(fmt.Sprintf("b%d", i), If("cond", cname, "merge")),
-					Bloc(cname, Goto("merge")))
-				phiArgs = append(phiArgs, "x", "x")
-			}
-			blocks = append(blocks,
-				Bloc("merge",
-					Valu("phi", OpPhi, TypeMem, 0, nil, phiArgs...),
-					Goto("exit")),
-				Bloc("exit",
-					Exit("mem")))
-
-			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
-				fun := Fun(c, "entry", blocks...)
-				fuse(fun.f)
-				fun.f.Free()
-			}
-		})
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/html.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/html.go
deleted file mode 100644
index addb4bf..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/html.go
+++ /dev/null
@@ -1,477 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/html.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/html.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"bytes"
-	"fmt"
-	"html"
-	"io"
-	"os"
-)
-
-type HTMLWriter struct {
-	Logger
-	*os.File
-}
-
-func NewHTMLWriter(path string, logger Logger, funcname string) *HTMLWriter {
-	out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
-	if err != nil {
-		logger.Fatalf(0, "%v", err)
-	}
-	html := HTMLWriter{File: out, Logger: logger}
-	html.start(funcname)
-	return &html
-}
-
-func (w *HTMLWriter) start(name string) {
-	if w == nil {
-		return
-	}
-	w.WriteString("<html>")
-	w.WriteString(`<head>
-<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
-<style>
-
-#helplink {
-    margin-bottom: 15px;
-    display: block;
-    margin-top: -15px;
-}
-
-#help {
-    display: none;
-}
-
-.stats {
-	font-size: 60%;
-}
-
-table {
-    border: 1px solid black;
-    table-layout: fixed;
-    width: 300px;
-}
-
-th, td {
-    border: 1px solid black;
-    overflow: hidden;
-    width: 400px;
-    vertical-align: top;
-    padding: 5px;
-}
-
-li {
-    list-style-type: none;
-}
-
-li.ssa-long-value {
-    text-indent: -2em;  /* indent wrapped lines */
-}
-
-li.ssa-value-list {
-    display: inline;
-}
-
-li.ssa-start-block {
-    padding: 0;
-    margin: 0;
-}
-
-li.ssa-end-block {
-    padding: 0;
-    margin: 0;
-}
-
-ul.ssa-print-func {
-    padding-left: 0;
-}
-
-dl.ssa-gen {
-    padding-left: 0;
-}
-
-dt.ssa-prog-src {
-    padding: 0;
-    margin: 0;
-    float: left;
-    width: 4em;
-}
-
-dd.ssa-prog {
-    padding: 0;
-    margin-right: 0;
-    margin-left: 4em;
-}
-
-.dead-value {
-    color: gray;
-}
-
-.dead-block {
-    opacity: 0.5;
-}
-
-.depcycle {
-    font-style: italic;
-}
-
-.highlight-yellow         { background-color: yellow; }
-.highlight-aquamarine     { background-color: aquamarine; }
-.highlight-coral          { background-color: coral; }
-.highlight-lightpink      { background-color: lightpink; }
-.highlight-lightsteelblue { background-color: lightsteelblue; }
-.highlight-palegreen      { background-color: palegreen; }
-.highlight-powderblue     { background-color: powderblue; }
-.highlight-lightgray      { background-color: lightgray; }
-
-.outline-blue           { outline: blue solid 2px; }
-.outline-red            { outline: red solid 2px; }
-.outline-blueviolet     { outline: blueviolet solid 2px; }
-.outline-darkolivegreen { outline: darkolivegreen solid 2px; }
-.outline-fuchsia        { outline: fuchsia solid 2px; }
-.outline-sienna         { outline: sienna solid 2px; }
-.outline-gold           { outline: gold solid 2px; }
-
-</style>
-
-<script type="text/javascript">
-// ordered list of all available highlight colors
-var highlights = [
-    "highlight-yellow",
-    "highlight-aquamarine",
-    "highlight-coral",
-    "highlight-lightpink",
-    "highlight-lightsteelblue",
-    "highlight-palegreen",
-    "highlight-lightgray"
-];
-
-// state: which value is highlighted this color?
-var highlighted = {};
-for (var i = 0; i < highlights.length; i++) {
-    highlighted[highlights[i]] = "";
-}
-
-// ordered list of all available outline colors
-var outlines = [
-    "outline-blue",
-    "outline-red",
-    "outline-blueviolet",
-    "outline-darkolivegreen",
-    "outline-fuchsia",
-    "outline-sienna",
-    "outline-gold"
-];
-
-// state: which value is outlined this color?
-var outlined = {};
-for (var i = 0; i < outlines.length; i++) {
-    outlined[outlines[i]] = "";
-}
-
-window.onload = function() {
-    var ssaElemClicked = function(elem, event, selections, selected) {
-        event.stopPropagation()
-
-        // TODO: pushState with updated state and read it on page load,
-        // so that state can survive across reloads
-
-        // find all values with the same name
-        var c = elem.classList.item(0);
-        var x = document.getElementsByClassName(c);
-
-        // if selected, remove selections from all of them
-        // otherwise, attempt to add
-
-        var remove = "";
-        for (var i = 0; i < selections.length; i++) {
-            var color = selections[i];
-            if (selected[color] == c) {
-                remove = color;
-                break;
-            }
-        }
-
-        if (remove != "") {
-            for (var i = 0; i < x.length; i++) {
-                x[i].classList.remove(remove);
-            }
-            selected[remove] = "";
-            return;
-        }
-
-        // we're adding a selection
-        // find first available color
-        var avail = "";
-        for (var i = 0; i < selections.length; i++) {
-            var color = selections[i];
-            if (selected[color] == "") {
-                avail = color;
-                break;
-            }
-        }
-        if (avail == "") {
-            alert("out of selection colors; go add more");
-            return;
-        }
-
-        // set that as the selection
-        for (var i = 0; i < x.length; i++) {
-            x[i].classList.add(avail);
-        }
-        selected[avail] = c;
-    };
-
-    var ssaValueClicked = function(event) {
-        ssaElemClicked(this, event, highlights, highlighted);
-    }
-
-    var ssaBlockClicked = function(event) {
-        ssaElemClicked(this, event, outlines, outlined);
-    }
-
-    var ssavalues = document.getElementsByClassName("ssa-value");
-    for (var i = 0; i < ssavalues.length; i++) {
-        ssavalues[i].addEventListener('click', ssaValueClicked);
-    }
-
-    var ssalongvalues = document.getElementsByClassName("ssa-long-value");
-    for (var i = 0; i < ssalongvalues.length; i++) {
-        // don't attach listeners to li nodes, just the spans they contain
-        if (ssalongvalues[i].nodeName == "SPAN") {
-            ssalongvalues[i].addEventListener('click', ssaValueClicked);
-        }
-    }
-
-    var ssablocks = document.getElementsByClassName("ssa-block");
-    for (var i = 0; i < ssablocks.length; i++) {
-        ssablocks[i].addEventListener('click', ssaBlockClicked);
-    }
-};
-
-function toggle_visibility(id) {
-   var e = document.getElementById(id);
-   if(e.style.display == 'block')
-      e.style.display = 'none';
-   else
-      e.style.display = 'block';
-}
-</script>
-
-</head>`)
-	// TODO: Add javascript click handlers for blocks
-	// to outline that block across all phases
-	w.WriteString("<body>")
-	w.WriteString("<h1>")
-	w.WriteString(html.EscapeString(name))
-	w.WriteString("</h1>")
-	w.WriteString(`
-<a href="#" onclick="toggle_visibility('help');" id="helplink">help</a>
-<div id="help">
-
-<p>
-Click on a value or block to toggle highlighting of that value/block
-and its uses.  (Values and blocks are highlighted by ID, and IDs of
-dead items may be reused, so not all highlights necessarily correspond
-to the clicked item.)
-</p>
-
-<p>
-Faded out values and blocks are dead code that has not been eliminated.
-</p>
-
-<p>
-Values printed in italics have a dependency cycle.
-</p>
-
-</div>
-`)
-	w.WriteString("<table>")
-	w.WriteString("<tr>")
-}
-
-func (w *HTMLWriter) Close() {
-	if w == nil {
-		return
-	}
-	w.WriteString("</tr>")
-	w.WriteString("</table>")
-	w.WriteString("</body>")
-	w.WriteString("</html>")
-	w.File.Close()
-}
-
-// WriteFunc writes f in a column headed by title.
-func (w *HTMLWriter) WriteFunc(title string, f *Func) {
-	if w == nil {
-		return // avoid generating HTML just to discard it
-	}
-	w.WriteColumn(title, f.HTML())
-	// TODO: Add visual representation of f's CFG.
-}
-
-// WriteColumn writes raw HTML in a column headed by title.
-// It is intended for pre- and post-compilation log output.
-func (w *HTMLWriter) WriteColumn(title string, html string) {
-	if w == nil {
-		return
-	}
-	w.WriteString("<td>")
-	w.WriteString("<h2>" + title + "</h2>")
-	w.WriteString(html)
-	w.WriteString("</td>")
-}
-
-func (w *HTMLWriter) Printf(msg string, v ...interface{}) {
-	if _, err := fmt.Fprintf(w.File, msg, v...); err != nil {
-		w.Fatalf(0, "%v", err)
-	}
-}
-
-func (w *HTMLWriter) WriteString(s string) {
-	if _, err := w.File.WriteString(s); err != nil {
-		w.Fatalf(0, "%v", err)
-	}
-}
-
-func (v *Value) HTML() string {
-	// TODO: Using the value ID as the class ignores the fact
-	// that value IDs get recycled and that some values
-	// are transmuted into other values.
-	s := v.String()
-	return fmt.Sprintf("<span class=\"%s ssa-value\">%s</span>", s, s)
-}
-
-func (v *Value) LongHTML() string {
-	// TODO: Any intra-value formatting?
-	// I'm wary of adding too much visual noise,
-	// but a little bit might be valuable.
-	// We already have visual noise in the form of punctuation;
-	// maybe we could replace some of that with formatting.
-	s := fmt.Sprintf("<span class=\"%s ssa-long-value\">", v.String())
-	s += fmt.Sprintf("%s = %s", v.HTML(), v.Op.String())
-	s += " &lt;" + html.EscapeString(v.Type.String()) + "&gt;"
-	s += html.EscapeString(v.auxString())
-	for _, a := range v.Args {
-		s += fmt.Sprintf(" %s", a.HTML())
-	}
-	r := v.Block.Func.RegAlloc
-	if int(v.ID) < len(r) && r[v.ID] != nil {
-		s += " : " + html.EscapeString(r[v.ID].Name())
-	}
-	s += "</span>"
-	return s
-}
-
-func (b *Block) HTML() string {
-	// TODO: Using the value ID as the class ignores the fact
-	// that value IDs get recycled and that some values
-	// are transmuted into other values.
-	s := html.EscapeString(b.String())
-	return fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", s, s)
-}
-
-func (b *Block) LongHTML() string {
-	// TODO: improve this for HTML?
-	s := fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", html.EscapeString(b.String()), html.EscapeString(b.Kind.String()))
-	if b.Aux != nil {
-		s += html.EscapeString(fmt.Sprintf(" {%v}", b.Aux))
-	}
-	if b.Control != nil {
-		s += fmt.Sprintf(" %s", b.Control.HTML())
-	}
-	if len(b.Succs) > 0 {
-		s += " &#8594;" // right arrow
-		for _, e := range b.Succs {
-			c := e.b
-			s += " " + c.HTML()
-		}
-	}
-	switch b.Likely {
-	case BranchUnlikely:
-		s += " (unlikely)"
-	case BranchLikely:
-		s += " (likely)"
-	}
-	return s
-}
-
-func (f *Func) HTML() string {
-	var buf bytes.Buffer
-	fmt.Fprint(&buf, "<code>")
-	p := htmlFuncPrinter{w: &buf}
-	fprintFunc(p, f)
-
-	// fprintFunc(&buf, f) // TODO: HTML, not text, <br /> for line breaks, etc.
-	fmt.Fprint(&buf, "</code>")
-	return buf.String()
-}
-
-type htmlFuncPrinter struct {
-	w io.Writer
-}
-
-func (p htmlFuncPrinter) header(f *Func) {}
-
-func (p htmlFuncPrinter) startBlock(b *Block, reachable bool) {
-	// TODO: Make blocks collapsible?
-	var dead string
-	if !reachable {
-		dead = "dead-block"
-	}
-	fmt.Fprintf(p.w, "<ul class=\"%s ssa-print-func %s\">", b, dead)
-	fmt.Fprintf(p.w, "<li class=\"ssa-start-block\">%s:", b.HTML())
-	if len(b.Preds) > 0 {
-		io.WriteString(p.w, " &#8592;") // left arrow
-		for _, e := range b.Preds {
-			pred := e.b
-			fmt.Fprintf(p.w, " %s", pred.HTML())
-		}
-	}
-	io.WriteString(p.w, "</li>")
-	if len(b.Values) > 0 { // start list of values
-		io.WriteString(p.w, "<li class=\"ssa-value-list\">")
-		io.WriteString(p.w, "<ul>")
-	}
-}
-
-func (p htmlFuncPrinter) endBlock(b *Block) {
-	if len(b.Values) > 0 { // end list of values
-		io.WriteString(p.w, "</ul>")
-		io.WriteString(p.w, "</li>")
-	}
-	io.WriteString(p.w, "<li class=\"ssa-end-block\">")
-	fmt.Fprint(p.w, b.LongHTML())
-	io.WriteString(p.w, "</li>")
-	io.WriteString(p.w, "</ul>")
-	// io.WriteString(p.w, "</span>")
-}
-
-func (p htmlFuncPrinter) value(v *Value, live bool) {
-	var dead string
-	if !live {
-		dead = "dead-value"
-	}
-	fmt.Fprintf(p.w, "<li class=\"ssa-long-value %s\">", dead)
-	fmt.Fprint(p.w, v.LongHTML())
-	io.WriteString(p.w, "</li>")
-}
-
-func (p htmlFuncPrinter) startDepCycle() {
-	fmt.Fprintln(p.w, "<span class=\"depcycle\">")
-}
-
-func (p htmlFuncPrinter) endDepCycle() {
-	fmt.Fprintln(p.w, "</span>")
-}
-
-func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) {
-	// TODO
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/id.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/id.go
deleted file mode 100644
index 4a66b8c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/id.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/id.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/id.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-type ID int32
-
-// idAlloc provides an allocator for unique integers.
-type idAlloc struct {
-	last ID
-}
-
-// get allocates an ID and returns it. IDs are always > 0.
-func (a *idAlloc) get() ID {
-	x := a.last
-	x++
-	if x == 1<<31-1 {
-		panic("too many ids for this function")
-	}
-	a.last = x
-	return x
-}
-
-// num returns the maximum ID ever returned + 1.
-func (a *idAlloc) num() int {
-	return int(a.last + 1)
-}
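
num returning the maximum ID ever handed out plus one is what lets callers size dense, ID-indexed slices. A minimal sketch of that usage pattern follows (a hypothetical re-implementation for illustration, not the bootstrap package itself).

package main

import "fmt"

// counter mimics the idAlloc pattern above: IDs start at 1, so slot 0
// of any ID-indexed slice simply goes unused.
type counter struct{ last int32 }

func (c *counter) get() int32 { c.last++; return c.last }
func (c *counter) num() int   { return int(c.last + 1) }

func main() {
	var ids counter
	a, b := ids.get(), ids.get()

	// num() is exactly the length needed for a dense lookup table.
	names := make([]string, ids.num())
	names[a], names[b] = "entry", "exit"
	fmt.Printf("%q\n", names) // ["" "entry" "exit"]
}
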
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/layout.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/layout.go
deleted file mode 100644
index 9352db3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/layout.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/layout.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/layout.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// layout orders basic blocks in f with the goal of minimizing control flow instructions.
-// After this phase returns, the order of f.Blocks matters and is the order
-// in which those blocks will appear in the assembly output.
-func layout(f *Func) {
-	order := make([]*Block, 0, f.NumBlocks())
-	scheduled := make([]bool, f.NumBlocks())
-	idToBlock := make([]*Block, f.NumBlocks())
-	indegree := make([]int, f.NumBlocks())
-	posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree
-	defer f.retSparseSet(posdegree)
-	zerodegree := f.newSparseSet(f.NumBlocks()) // blocks with zero remaining degree
-	defer f.retSparseSet(zerodegree)
-
-	// Initialize indegree of each block
-	for _, b := range f.Blocks {
-		idToBlock[b.ID] = b
-		indegree[b.ID] = len(b.Preds)
-		if len(b.Preds) == 0 {
-			zerodegree.add(b.ID)
-		} else {
-			posdegree.add(b.ID)
-		}
-	}
-
-	bid := f.Entry.ID
-blockloop:
-	for {
-		// add block to schedule
-		b := idToBlock[bid]
-		order = append(order, b)
-		scheduled[bid] = true
-		if len(order) == len(f.Blocks) {
-			break
-		}
-
-		for _, e := range b.Succs {
-			c := e.b
-			indegree[c.ID]--
-			if indegree[c.ID] == 0 {
-				posdegree.remove(c.ID)
-				zerodegree.add(c.ID)
-			}
-		}
-
-		// Pick the next block to schedule
-		// Pick among the successor blocks that have not been scheduled yet.
-
-		// Use likely direction if we have it.
-		var likely *Block
-		switch b.Likely {
-		case BranchLikely:
-			likely = b.Succs[0].b
-		case BranchUnlikely:
-			likely = b.Succs[1].b
-		}
-		if likely != nil && !scheduled[likely.ID] {
-			bid = likely.ID
-			continue
-		}
-
-		// Use degree for now.
-		bid = 0
-		mindegree := f.NumBlocks()
-		for _, e := range order[len(order)-1].Succs {
-			c := e.b
-			if scheduled[c.ID] {
-				continue
-			}
-			if indegree[c.ID] < mindegree {
-				mindegree = indegree[c.ID]
-				bid = c.ID
-			}
-		}
-		if bid != 0 {
-			continue
-		}
-		// TODO: improve this part
-		// No successor of the previously scheduled block works.
-		// Pick a zero-degree block if we can.
-		for zerodegree.size() > 0 {
-			cid := zerodegree.pop()
-			if !scheduled[cid] {
-				bid = cid
-				continue blockloop
-			}
-		}
-		// Still nothing, pick any block.
-		for {
-			cid := posdegree.pop()
-			if !scheduled[cid] {
-				bid = cid
-				continue blockloop
-			}
-		}
-	}
-	f.Blocks = order
-}
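
A much-reduced sketch of the scheduling idea above, using a hypothetical toyNode type and omitting the indegree/zero-degree fallback bookkeeping of the real pass: keep following the predicted-likely successor so that the hot path is laid out contiguously and falls through without extra jumps.

package main

import "fmt"

// toyNode is a hypothetical block with an optional "likely" successor.
type toyNode struct {
	name   string
	succs  []*toyNode
	likely *toyNode // nil means no prediction
}

// order greedily lays out blocks, preferring the likely successor of
// the block just placed; remaining blocks are appended in input order.
func order(entry *toyNode, all []*toyNode) []string {
	placed := map[*toyNode]bool{}
	var out []string
	for b := entry; b != nil && !placed[b]; {
		placed[b] = true
		out = append(out, b.name)
		var next *toyNode
		if b.likely != nil && !placed[b.likely] {
			next = b.likely
		} else {
			for _, s := range b.succs {
				if !placed[s] {
					next = s
					break
				}
			}
		}
		b = next
	}
	for _, b := range all {
		if !placed[b] {
			out = append(out, b.name)
		}
	}
	return out
}

func main() {
	exit := &toyNode{name: "exit"}
	slow := &toyNode{name: "slowpath", succs: []*toyNode{exit}}
	fast := &toyNode{name: "fastpath", succs: []*toyNode{exit}}
	entry := &toyNode{name: "entry", succs: []*toyNode{slow, fast}}
	entry.likely = fast // the branch predictor favours fastpath

	fmt.Println(order(entry, []*toyNode{entry, slow, fast, exit}))
	// prints [entry fastpath exit slowpath]
}
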
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lca.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lca.go
deleted file mode 100644
index ba264ea..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lca.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/lca.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/lca.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// Code to compute lowest common ancestors in the dominator tree.
-// https://en.wikipedia.org/wiki/Lowest_common_ancestor
-// https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space
-
-// lcaRange is a data structure that can compute lowest common ancestor queries
-// in O(n lg n) precomputed space and O(1) time per query.
-type lcaRange struct {
-	// Additional information about each block (indexed by block ID).
-	blocks []lcaRangeBlock
-
-	// Data structure for range minimum queries.
-	// rangeMin[k][i] contains the ID of the minimum depth block
-	// in the Euler tour from positions i to i+2^k-1, inclusive.
-	rangeMin [][]ID
-}
-
-type lcaRangeBlock struct {
-	b          *Block
-	parent     ID    // parent in dominator tree.  0 = no parent (entry or unreachable)
-	firstChild ID    // first child in dominator tree
-	sibling    ID    // next child of parent
-	pos        int32 // an index in the Euler tour where this block appears (any one of its occurrences)
-	depth      int32 // depth in dominator tree (root=0, its children=1, etc.)
-}
-
-func makeLCArange(f *Func) *lcaRange {
-	dom := f.Idom()
-
-	// Build tree
-	blocks := make([]lcaRangeBlock, f.NumBlocks())
-	for _, b := range f.Blocks {
-		blocks[b.ID].b = b
-		if dom[b.ID] == nil {
-			continue // entry or unreachable
-		}
-		parent := dom[b.ID].ID
-		blocks[b.ID].parent = parent
-		blocks[b.ID].sibling = blocks[parent].firstChild
-		blocks[parent].firstChild = b.ID
-	}
-
-	// Compute the Euler tour ordering.
-	// Each reachable block will appear #children+1 times in the tour.
-	tour := make([]ID, 0, f.NumBlocks()*2-1)
-	type queueEntry struct {
-		bid ID // block to work on
-		cid ID // child we're already working on (0 = haven't started yet)
-	}
-	q := []queueEntry{{f.Entry.ID, 0}}
-	for len(q) > 0 {
-		n := len(q) - 1
-		bid := q[n].bid
-		cid := q[n].cid
-		q = q[:n]
-
-		// Add block to tour.
-		blocks[bid].pos = int32(len(tour))
-		tour = append(tour, bid)
-
-		// Proceed down next child edge (if any).
-		if cid == 0 {
-			// This is our first visit to b. Set its depth.
-			blocks[bid].depth = blocks[blocks[bid].parent].depth + 1
-			// Then explore its first child.
-			cid = blocks[bid].firstChild
-		} else {
-			// We've seen b before. Explore the next child.
-			cid = blocks[cid].sibling
-		}
-		if cid != 0 {
-			q = append(q, queueEntry{bid, cid}, queueEntry{cid, 0})
-		}
-	}
-
-	// Compute fast range-minimum query data structure
-	var rangeMin [][]ID
-	rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself.
-	for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 {
-		r := make([]ID, len(tour)-s+1)
-		for i := 0; i < len(tour)-s+1; i++ {
-			bid := rangeMin[logS-1][i]
-			bid2 := rangeMin[logS-1][i+s/2]
-			if blocks[bid2].depth < blocks[bid].depth {
-				bid = bid2
-			}
-			r[i] = bid
-		}
-		rangeMin = append(rangeMin, r)
-	}
-
-	return &lcaRange{blocks: blocks, rangeMin: rangeMin}
-}
-
-// find returns the lowest common ancestor of a and b.
-func (lca *lcaRange) find(a, b *Block) *Block {
-	if a == b {
-		return a
-	}
-	// Find the positions of a and b in the Euler tour.
-	p1 := lca.blocks[a.ID].pos
-	p2 := lca.blocks[b.ID].pos
-	if p1 > p2 {
-		p1, p2 = p2, p1
-	}
-
-	// The lowest common ancestor is the minimum depth block
-	// on the tour from p1 to p2.  We've precomputed minimum
-	// depth blocks for powers-of-two subsequences of the tour.
-	// Combine the right two precomputed values to get the answer.
-	logS := uint(log2(int64(p2 - p1)))
-	bid1 := lca.rangeMin[logS][p1]
-	bid2 := lca.rangeMin[logS][p2-1<<logS+1]
-	if lca.blocks[bid1].depth < lca.blocks[bid2].depth {
-		return lca.blocks[bid1].b
-	}
-	return lca.blocks[bid2].b
-}
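
The reduction used above is LCA-to-range-minimum: record an Euler tour of the tree together with depths, and the LCA of a and b is the shallowest node between an occurrence of a and an occurrence of b on that tour. A standalone sketch of the reduction follows (a hypothetical tree of ints, with a linear scan standing in for the precomputed rangeMin tables of the real code).

package main

import "fmt"

type node struct {
	id       int
	children []*node
}

// euler performs the tour, recording the visited node and its depth at
// every step, plus the first tour position of each node.
func euler(n *node, depth int, tour *[]*node, depths *[]int, pos map[int]int) {
	if _, ok := pos[n.id]; !ok {
		pos[n.id] = len(*tour)
	}
	*tour = append(*tour, n)
	*depths = append(*depths, depth)
	for _, c := range n.children {
		euler(c, depth+1, tour, depths, pos)
		*tour = append(*tour, n) // re-enter n after each child
		*depths = append(*depths, depth)
	}
}

// lca answers a query with a linear range-minimum scan over the tour;
// the real pass answers the same query in O(1) via rangeMin.
func lca(tour []*node, depths []int, pos map[int]int, a, b int) int {
	lo, hi := pos[a], pos[b]
	if lo > hi {
		lo, hi = hi, lo
	}
	best := lo
	for i := lo + 1; i <= hi; i++ {
		if depths[i] < depths[best] {
			best = i
		}
	}
	return tour[best].id
}

func main() {
	//        1
	//       / \
	//      2   3
	//     / \
	//    4   5
	n4, n5 := &node{id: 4}, &node{id: 5}
	n3 := &node{id: 3}
	n2 := &node{id: 2, children: []*node{n4, n5}}
	n1 := &node{id: 1, children: []*node{n2, n3}}

	var tour []*node
	var depths []int
	pos := map[int]int{}
	euler(n1, 0, &tour, &depths, pos)

	fmt.Println(lca(tour, depths, pos, 4, 5)) // 2
	fmt.Println(lca(tour, depths, pos, 4, 3)) // 1
}
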
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lca_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lca_test.go
deleted file mode 100644
index db17b7f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lca_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/lca_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/lca_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-type lca interface {
-	find(a, b *Block) *Block
-}
-
-func lcaEqual(f *Func, lca1, lca2 lca) bool {
-	for _, b := range f.Blocks {
-		for _, c := range f.Blocks {
-			if lca1.find(b, c) != lca2.find(b, c) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func testLCAgen(t *testing.T, bg blockGen, size int) {
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry", bg(size)...)
-	CheckFunc(fun.f)
-	if size == 4 {
-		t.Logf(fun.f.String())
-	}
-	lca1 := makeLCArange(fun.f)
-	lca2 := makeLCAeasy(fun.f)
-	for _, b := range fun.f.Blocks {
-		for _, c := range fun.f.Blocks {
-			l1 := lca1.find(b, c)
-			l2 := lca2.find(b, c)
-			if l1 != l2 {
-				t.Errorf("lca(%s,%s)=%s, want %s", b, c, l1, l2)
-			}
-		}
-	}
-}
-
-func TestLCALinear(t *testing.T) {
-	testLCAgen(t, genLinear, 10)
-	testLCAgen(t, genLinear, 100)
-}
-
-func TestLCAFwdBack(t *testing.T) {
-	testLCAgen(t, genFwdBack, 10)
-	testLCAgen(t, genFwdBack, 100)
-}
-
-func TestLCAManyPred(t *testing.T) {
-	testLCAgen(t, genManyPred, 10)
-	testLCAgen(t, genManyPred, 100)
-}
-
-func TestLCAMaxPred(t *testing.T) {
-	testLCAgen(t, genMaxPred, 10)
-	testLCAgen(t, genMaxPred, 100)
-}
-
-func TestLCAMaxPredValue(t *testing.T) {
-	testLCAgen(t, genMaxPredValue, 10)
-	testLCAgen(t, genMaxPredValue, 100)
-}
-
-// Simple implementation of LCA to compare against.
-type lcaEasy struct {
-	parent []*Block
-}
-
-func makeLCAeasy(f *Func) *lcaEasy {
-	return &lcaEasy{parent: dominators(f)}
-}
-
-func (lca *lcaEasy) find(a, b *Block) *Block {
-	da := lca.depth(a)
-	db := lca.depth(b)
-	for da > db {
-		da--
-		a = lca.parent[a.ID]
-	}
-	for da < db {
-		db--
-		b = lca.parent[b.ID]
-	}
-	for a != b {
-		a = lca.parent[a.ID]
-		b = lca.parent[b.ID]
-	}
-	return a
-}
-
-func (lca *lcaEasy) depth(b *Block) int {
-	n := 0
-	for b != nil {
-		b = lca.parent[b.ID]
-		n++
-	}
-	return n
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/likelyadjust.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/likelyadjust.go
deleted file mode 100644
index cfe5275..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/likelyadjust.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/likelyadjust.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/likelyadjust.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-)
-
-type loop struct {
-	header *Block // The header node of this (reducible) loop
-	outer  *loop  // loop containing this loop
-
-	// By default, children, exits, and depth are not initialized.
-	children []*loop  // loops nested directly within this loop. Initialized by assembleChildren().
-	exits    []*Block // exits records blocks reached by exits from this loop. Initialized by findExits().
-
-	// Loops aren't that common, so rather than force regalloc to keep
-	// a map or slice for its data, just put it here.
-	spills  []*Value
-	scratch int32
-
-	// Next three fields used by regalloc and/or
-	// aid in computation of inner-ness and list of blocks.
-	nBlocks int32 // Number of blocks in this loop but not within inner loops
-	depth   int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths().
-	isInner bool  // True if never discovered to contain a loop
-
-	// register allocation uses this.
-	containsCall bool // if any block in this loop or any loop it contains has a call
-}
-
-// outerinner records that outer contains inner
-func (sdom SparseTree) outerinner(outer, inner *loop) {
-	oldouter := inner.outer
-	if oldouter == nil || sdom.isAncestorEq(oldouter.header, outer.header) {
-		inner.outer = outer
-		outer.isInner = false
-		if inner.containsCall {
-			outer.setContainsCall()
-		}
-	}
-}
-
-func (l *loop) setContainsCall() {
-	for ; l != nil && !l.containsCall; l = l.outer {
-		l.containsCall = true
-	}
-}
-
-func (l *loop) checkContainsCall(bb *Block) {
-	if bb.Kind == BlockDefer {
-		l.setContainsCall()
-		return
-	}
-	for _, v := range bb.Values {
-		if opcodeTable[v.Op].call {
-			l.setContainsCall()
-			return
-		}
-	}
-}
-
-type loopnest struct {
-	f     *Func
-	b2l   []*loop
-	po    []*Block
-	sdom  SparseTree
-	loops []*loop
-
-	// Record which of the lazily initialized fields have actually been initialized.
-	initializedChildren, initializedDepth, initializedExits bool
-}
-
-func min8(a, b int8) int8 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func max8(a, b int8) int8 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-const (
-	blDEFAULT = 0
-	blMin     = blDEFAULT
-	blCALL    = 1
-	blRET     = 2
-	blEXIT    = 3
-)
-
-var bllikelies [4]string = [4]string{"default", "call", "ret", "exit"}
-
-func describePredictionAgrees(b *Block, prediction BranchPrediction) string {
-	s := ""
-	if prediction == b.Likely {
-		s = " (agrees with previous)"
-	} else if b.Likely != BranchUnknown {
-		s = " (disagrees with previous, ignored)"
-	}
-	return s
-}
-
-func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction BranchPrediction) {
-	f.Config.Warnl(b.Line, "Branch prediction rule %s < %s%s",
-		bllikelies[likely-blMin], bllikelies[not-blMin], describePredictionAgrees(b, prediction))
-}
-
-func likelyadjust(f *Func) {
-	// The values assigned to certain and local only matter
-	// in their rank order.  0 is default, more positive
-	// is less likely. It's possible to assign a negative
-	// unlikeliness (though not currently the case).
-	certain := make([]int8, f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit
-	local := make([]int8, f.NumBlocks())   // for our immediate predecessors.
-
-	po := f.postorder()
-	nest := f.loopnest()
-	b2l := nest.b2l
-
-	for _, b := range po {
-		switch b.Kind {
-		case BlockExit:
-			// Very unlikely.
-			local[b.ID] = blEXIT
-			certain[b.ID] = blEXIT
-
-			// Ret, it depends.
-		case BlockRet, BlockRetJmp:
-			local[b.ID] = blRET
-			certain[b.ID] = blRET
-
-			// Calls. TODO not all calls are equal, names give useful clues.
-			// Any name-based heuristics are only relative to other calls,
-			// and less influential than inferences from loop structure.
-		case BlockDefer:
-			local[b.ID] = blCALL
-			certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
-
-		default:
-			if len(b.Succs) == 1 {
-				certain[b.ID] = certain[b.Succs[0].b.ID]
-			} else if len(b.Succs) == 2 {
-				// If successor is an unvisited backedge, it's in loop and we don't care.
-				// Its default unlikely is also zero which is consistent with favoring loop edges.
-				// Notice that this can act like a "reset" on unlikeliness at loops; the
-				// default "everything returns" unlikeliness is erased by min with the
-				// backedge likeliness; however a loop with calls on every path will be
-				// tagged with call cost. Net effect is that loop entry is favored.
-				b0 := b.Succs[0].b.ID
-				b1 := b.Succs[1].b.ID
-				certain[b.ID] = min8(certain[b0], certain[b1])
-
-				l := b2l[b.ID]
-				l0 := b2l[b0]
-				l1 := b2l[b1]
-
-				prediction := b.Likely
-				// Weak loop heuristic -- both source and at least one dest are in loops,
-				// and there is a difference in the destinations.
-				// TODO what is best arrangement for nested loops?
-				if l != nil && l0 != l1 {
-					noprediction := false
-					switch {
-					// prefer not to exit loops
-					case l1 == nil:
-						prediction = BranchLikely
-					case l0 == nil:
-						prediction = BranchUnlikely
-
-						// prefer to stay in loop, not exit to outer.
-					case l == l0:
-						prediction = BranchLikely
-					case l == l1:
-						prediction = BranchUnlikely
-					default:
-						noprediction = true
-					}
-					if f.pass.debug > 0 && !noprediction {
-						f.Config.Warnl(b.Line, "Branch prediction rule stay in loop%s",
-							describePredictionAgrees(b, prediction))
-					}
-
-				} else {
-					// Lacking loop structure, fall back on heuristics.
-					if certain[b1] > certain[b0] {
-						prediction = BranchLikely
-						if f.pass.debug > 0 {
-							describeBranchPrediction(f, b, certain[b0], certain[b1], prediction)
-						}
-					} else if certain[b0] > certain[b1] {
-						prediction = BranchUnlikely
-						if f.pass.debug > 0 {
-							describeBranchPrediction(f, b, certain[b1], certain[b0], prediction)
-						}
-					} else if local[b1] > local[b0] {
-						prediction = BranchLikely
-						if f.pass.debug > 0 {
-							describeBranchPrediction(f, b, local[b0], local[b1], prediction)
-						}
-					} else if local[b0] > local[b1] {
-						prediction = BranchUnlikely
-						if f.pass.debug > 0 {
-							describeBranchPrediction(f, b, local[b1], local[b0], prediction)
-						}
-					}
-				}
-				if b.Likely != prediction {
-					if b.Likely == BranchUnknown {
-						b.Likely = prediction
-					}
-				}
-			}
-			// Look for calls in the block.  If there is one, make this block unlikely.
-			for _, v := range b.Values {
-				if opcodeTable[v.Op].call {
-					local[b.ID] = blCALL
-					certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
-				}
-			}
-		}
-		if f.pass.debug > 2 {
-			f.Config.Warnl(b.Line, "BP: Block %s, local=%s, certain=%s", b, bllikelies[local[b.ID]-blMin], bllikelies[certain[b.ID]-blMin])
-		}
-
-	}
-}
-
-func (l *loop) String() string {
-	return fmt.Sprintf("hdr:%s", l.header)
-}
-
-func (l *loop) LongString() string {
-	i := ""
-	o := ""
-	if l.isInner {
-		i = ", INNER"
-	}
-	if l.outer != nil {
-		o = ", o=" + l.outer.header.String()
-	}
-	return fmt.Sprintf("hdr:%s%s%s", l.header, i, o)
-}
-
-// nearestOuterLoop returns the outer loop of l that most nearly
-// contains block b; that loop's header must dominate b. l itself
-// is assumed not to be that loop. For acceptable performance,
-// we're relying on loop nests not being terribly deep.
-func (l *loop) nearestOuterLoop(sdom SparseTree, b *Block) *loop {
-	var o *loop
-	for o = l.outer; o != nil && !sdom.isAncestorEq(o.header, b); o = o.outer {
-	}
-	return o
-}
-
-func loopnestfor(f *Func) *loopnest {
-	po := f.postorder()
-	sdom := f.sdom()
-	b2l := make([]*loop, f.NumBlocks())
-	loops := make([]*loop, 0)
-
-	// Reducible-loop-nest-finding.
-	for _, b := range po {
-		if f.pass.debug > 3 {
-			fmt.Printf("loop finding (0) at %s\n", b)
-		}
-
-		var innermost *loop // innermost header reachable from this block
-
-		// IF any successor s of b is in a loop headed by h
-		// AND h dominates b
-		// THEN b is in the loop headed by h.
-		//
-		// Choose the first/innermost such h.
-		//
-		// IF s itself dominates b, then s is a loop header;
-		// and there may be more than one such s.
-		// Since there are at most 2 successors, the inner/outer ordering
-		// between them can be established with simple comparisons.
-		for _, e := range b.Succs {
-			bb := e.b
-			l := b2l[bb.ID]
-
-			if sdom.isAncestorEq(bb, b) { // Found a loop header
-				if l == nil {
-					l = &loop{header: bb, isInner: true}
-					loops = append(loops, l)
-					b2l[bb.ID] = l
-					l.checkContainsCall(bb)
-				}
-			} else { // Perhaps a loop header is inherited.
-				// is there any loop containing our successor whose
-				// header dominates b?
-				if l != nil && !sdom.isAncestorEq(l.header, b) {
-					l = l.nearestOuterLoop(sdom, b)
-				}
-			}
-
-			if l == nil || innermost == l {
-				continue
-			}
-
-			if innermost == nil {
-				innermost = l
-				continue
-			}
-
-			if sdom.isAncestor(innermost.header, l.header) {
-				sdom.outerinner(innermost, l)
-				innermost = l
-			} else if sdom.isAncestor(l.header, innermost.header) {
-				sdom.outerinner(l, innermost)
-			}
-		}
-
-		if innermost != nil {
-			b2l[b.ID] = innermost
-			innermost.checkContainsCall(b)
-			innermost.nBlocks++
-		}
-	}
-
-	ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops}
-
-	// Curious about the loopiness? "-d=ssa/likelyadjust/stats"
-	if f.pass.stats > 0 && len(loops) > 0 {
-		ln.assembleChildren()
-		ln.calculateDepths()
-		ln.findExits()
-
-		// Note stats for non-innermost loops are slightly flawed because
-		// they don't account for inner loop exits that span multiple levels.
-
-		for _, l := range loops {
-			x := len(l.exits)
-			cf := 0
-			if !l.containsCall {
-				cf = 1
-			}
-			inner := 0
-			if l.isInner {
-				inner++
-			}
-
-			f.LogStat("loopstats:",
-				l.depth, "depth", x, "exits",
-				inner, "is_inner", cf, "is_callfree", l.nBlocks, "n_blocks")
-		}
-	}
-
-	if f.pass.debug > 1 && len(loops) > 0 {
-		fmt.Printf("Loops in %s:\n", f.Name)
-		for _, l := range loops {
-			fmt.Printf("%s, b=", l.LongString())
-			for _, b := range f.Blocks {
-				if b2l[b.ID] == l {
-					fmt.Printf(" %s", b)
-				}
-			}
-			fmt.Print("\n")
-		}
-		fmt.Printf("Nonloop blocks in %s:", f.Name)
-		for _, b := range f.Blocks {
-			if b2l[b.ID] == nil {
-				fmt.Printf(" %s", b)
-			}
-		}
-		fmt.Print("\n")
-	}
-	return ln
-}
-
-// assembleChildren initializes the children field of each
-// loop in the nest.  Loop A is a child of loop B if A is
-// directly nested within B (based on the reducible-loops
-// detection above)
-func (ln *loopnest) assembleChildren() {
-	if ln.initializedChildren {
-		return
-	}
-	for _, l := range ln.loops {
-		if l.outer != nil {
-			l.outer.children = append(l.outer.children, l)
-		}
-	}
-	ln.initializedChildren = true
-}
-
-// calculateDepths uses the children field of loops
-// to determine the nesting depth (outer=1) of each
-// loop.  This is helpful for finding exit edges.
-func (ln *loopnest) calculateDepths() {
-	if ln.initializedDepth {
-		return
-	}
-	ln.assembleChildren()
-	for _, l := range ln.loops {
-		if l.outer == nil {
-			l.setDepth(1)
-		}
-	}
-	ln.initializedDepth = true
-}
-
-// findExits uses loop depth information to find the
-// exits from a loop.
-func (ln *loopnest) findExits() {
-	if ln.initializedExits {
-		return
-	}
-	ln.calculateDepths()
-	b2l := ln.b2l
-	for _, b := range ln.po {
-		l := b2l[b.ID]
-		if l != nil && len(b.Succs) == 2 {
-			sl := b2l[b.Succs[0].b.ID]
-			if recordIfExit(l, sl, b.Succs[0].b) {
-				continue
-			}
-			sl = b2l[b.Succs[1].b.ID]
-			if recordIfExit(l, sl, b.Succs[1].b) {
-				continue
-			}
-		}
-	}
-	ln.initializedExits = true
-}
-
-// recordIfExit checks sl (the loop containing b) to see if it
-// is outside of loop l, and if so, records b as an exit block
-// from l and returns true.
-func recordIfExit(l, sl *loop, b *Block) bool {
-	if sl != l {
-		if sl == nil || sl.depth <= l.depth {
-			l.exits = append(l.exits, b)
-			return true
-		}
-		// sl is not nil, and is deeper than l
-		// it's possible for this to be a goto into an irreducible loop made from gotos.
-		for sl.depth > l.depth {
-			sl = sl.outer
-		}
-		if sl != l {
-			l.exits = append(l.exits, b)
-			return true
-		}
-	}
-	return false
-}
-
-func (l *loop) setDepth(d int16) {
-	l.depth = d
-	for _, c := range l.children {
-		c.setDepth(d + 1)
-	}
-}
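
The branch-prediction fallback in likelyadjust boils down to a rank order on worst reachable outcomes: exit is worse than return, return worse than call, call worse than plain fallthrough, and a two-way branch is predicted toward the cheaper side. A toy rendering of just that comparison follows (hypothetical names; the real pass also consults loop structure and the per-predecessor local costs).

package main

import "fmt"

// Unlikeliness ranks, mirroring blDEFAULT < blCALL < blRET < blEXIT above.
const (
	blDefault int8 = iota // plain fallthrough
	blCall                // path contains a call
	blRet                 // path ends in a return
	blExit                // path ends in an exit (panic and friends)
)

// predict says which successor a two-way branch should favour, given the
// worst outcome ("certain" cost) reachable through each successor.
func predict(certain0, certain1 int8) string {
	switch {
	case certain1 > certain0:
		return "likely: successor 0"
	case certain0 > certain1:
		return "likely: successor 1"
	default:
		return "no prediction"
	}
}

func main() {
	// One arm falls through, the other reaches an exit (e.g. a panic):
	fmt.Println(predict(blDefault, blExit)) // likely: successor 0
	// Both arms eventually return: nothing to choose between.
	fmt.Println(predict(blRet, blRet)) // no prediction
}
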
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/location.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/location.go
deleted file mode 100644
index 0cfa71a..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/location.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/location.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/location.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "fmt"
-
-// A place that an ssa variable can reside.
-type Location interface {
-	Name() string // name to use in assembly templates: %rax, 16(%rsp), ...
-}
-
-// A Register is a machine register, like %rax.
-// They are numbered densely from 0 (for each architecture).
-type Register struct {
-	num    int32
-	objNum int16 // register number from cmd/internal/obj/$ARCH
-	name   string
-}
-
-func (r *Register) Name() string {
-	return r.name
-}
-
-// A LocalSlot is a location in the stack frame.
-// It is (possibly a subpiece of) a PPARAM, PPARAMOUT, or PAUTO ONAME node.
-type LocalSlot struct {
-	N    GCNode // an ONAME *gc.Node representing a variable on the stack
-	Type Type   // type of slot
-	Off  int64  // offset of slot in N
-}
-
-func (s LocalSlot) Name() string {
-	if s.Off == 0 {
-		return fmt.Sprintf("%v[%v]", s.N, s.Type)
-	}
-	return fmt.Sprintf("%v+%d[%v]", s.N, s.Off, s.Type)
-}
-
-type LocPair [2]Location
-
-func (t LocPair) Name() string {
-	n0, n1 := "nil", "nil"
-	if t[0] != nil {
-		n0 = t[0].Name()
-	}
-	if t[1] != nil {
-		n1 = t[1].Name()
-	}
-	return fmt.Sprintf("<%s,%s>", n0, n1)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/loopbce.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/loopbce.go
deleted file mode 100644
index 7244bd3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/loopbce.go
+++ /dev/null
@@ -1,304 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/loopbce.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/loopbce.go:1
-package ssa
-
-type indVar struct {
-	ind   *Value // induction variable
-	inc   *Value // increment, a constant
-	nxt   *Value // ind+inc variable
-	min   *Value // minimum value, inclusive.
-	max   *Value // maximum value. exclusive.
-	entry *Block // entry block in the loop.
-	// Invariants: for all blocks dominated by entry:
-	//	min <= ind < max
-	//	min <= nxt <= max
-}
-
-// findIndVar finds induction variables in a function.
-//
-// Look for variables and blocks that satisfy the following
-//
-// loop:
-//   ind = (Phi min nxt),
-//   if ind < max
-//     then goto enter_loop
-//     else goto exit_loop
-//
-//   enter_loop:
-//	do something
-//      nxt = inc + ind
-//	goto loop
-//
-// exit_loop:
-//
-//
-// TODO: handle 32 bit operations
-func findIndVar(f *Func) []indVar {
-	var iv []indVar
-	sdom := f.sdom()
-
-nextb:
-	for _, b := range f.Blocks {
-		if b.Kind != BlockIf || len(b.Preds) != 2 {
-			continue
-		}
-
-		var ind, max *Value // induction, and maximum
-		entry := -1         // which successor of b enters the loop
-
-		// Check that the control is either ind < max or max > ind.
-		// TODO: Handle Leq64, Geq64.
-		switch b.Control.Op {
-		case OpLess64:
-			entry = 0
-			ind, max = b.Control.Args[0], b.Control.Args[1]
-		case OpGreater64:
-			entry = 0
-			ind, max = b.Control.Args[1], b.Control.Args[0]
-		default:
-			continue nextb
-		}
-
-		// Check that the induction variable is a phi that depends on itself.
-		if ind.Op != OpPhi {
-			continue
-		}
-
-		// Extract min and nxt knowing that nxt is an addition (e.g. Add64).
-		var min, nxt *Value // minimum, and next value
-		if n := ind.Args[0]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
-			min, nxt = ind.Args[1], n
-		} else if n := ind.Args[1]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
-			min, nxt = ind.Args[0], n
-		} else {
-			// Not a recognized induction variable.
-			continue
-		}
-
-		var inc *Value
-		if nxt.Args[0] == ind { // nxt = ind + inc
-			inc = nxt.Args[1]
-		} else if nxt.Args[1] == ind { // nxt = inc + ind
-			inc = nxt.Args[0]
-		} else {
-			panic("unreachable") // one of the cases must be true from the above.
-		}
-
-		// Expect the increment to be a positive constant.
-		// TODO: handle negative increment.
-		if inc.Op != OpConst64 || inc.AuxInt <= 0 {
-			continue
-		}
-
-		// Up to now we extracted the induction variable (ind),
-		// the increment delta (inc), the temporary sum (nxt),
-		// the minimum value (min) and the maximum value (max).
-		//
-		// We also know that ind has the form (Phi min nxt) where
-		// nxt is (Add inc nxt) which means: 1) inc dominates nxt
-		// and 2) there is a loop starting at inc and containing nxt.
-		//
-		// We need to prove that the induction variable is incremented
-		// only when it's smaller than the maximum value.
-		// Two conditions must happen listed below to accept ind
-		// as an induction variable.
-
-		// First condition: loop entry has a single predecessor, which
-		// is the header block.  This implies that b.Succs[entry] is
-		// reached iff ind < max.
-		if len(b.Succs[entry].b.Preds) != 1 {
-			// b.Succs[1-entry] must exit the loop.
-			continue
-		}
-
-		// Second condition: b.Succs[entry] dominates nxt so that
-		// nxt is computed only when ind < max, meaning nxt <= max.
-		if !sdom.isAncestorEq(b.Succs[entry].b, nxt.Block) {
-			// inc+ind can only be reached through the branch that enters the loop.
-			continue
-		}
-
-		// If max is c + SliceLen with c <= 0 then we drop c.
-		// Makes sure c + SliceLen doesn't overflow when SliceLen == 0.
-		// TODO: save c as an offset from max.
-		if w, c := dropAdd64(max); (w.Op == OpStringLen || w.Op == OpSliceLen) && 0 >= c && -c >= 0 {
-			max = w
-		}
-
-		// We can only guarantee that the loop runs within the limits of the induction variable
-		// if the increment is 1 or when the limits are constants.
-		if inc.AuxInt != 1 {
-			ok := false
-			if min.Op == OpConst64 && max.Op == OpConst64 {
-				if max.AuxInt > min.AuxInt && max.AuxInt%inc.AuxInt == min.AuxInt%inc.AuxInt { // handle overflow
-					ok = true
-				}
-			}
-			if !ok {
-				continue
-			}
-		}
-
-		if f.pass.debug > 1 {
-			if min.Op == OpConst64 {
-				b.Func.Config.Warnl(b.Line, "Induction variable with minimum %d and increment %d", min.AuxInt, inc.AuxInt)
-			} else {
-				b.Func.Config.Warnl(b.Line, "Induction variable with non-const minimum and increment %d", inc.AuxInt)
-			}
-		}
-
-		iv = append(iv, indVar{
-			ind:   ind,
-			inc:   inc,
-			nxt:   nxt,
-			min:   min,
-			max:   max,
-			entry: b.Succs[entry].b,
-		})
-		b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
-	}
-
-	return iv
-}
-
-// loopbce performs loop based bounds check elimination.
-func loopbce(f *Func) {
-	ivList := findIndVar(f)
-
-	m := make(map[*Value]indVar)
-	for _, iv := range ivList {
-		m[iv.ind] = iv
-	}
-
-	removeBoundsChecks(f, m)
-}
-
-// removeBoundsChecks removes IsInBounds and IsSliceInBounds checks based on the induction variables.
-func removeBoundsChecks(f *Func, m map[*Value]indVar) {
-	sdom := f.sdom()
-	for _, b := range f.Blocks {
-		if b.Kind != BlockIf {
-			continue
-		}
-
-		v := b.Control
-
-		// Simplify:
-		// (IsInBounds ind max) where 0 <= const == min <= ind < max.
-		// (IsSliceInBounds ind max) where 0 <= const == min <= ind < max.
-		// Found in:
-		//	for i := range a {
-		//		use a[i]
-		//		use a[i:]
-		//		use a[:i]
-		//	}
-		if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
-			ind, add := dropAdd64(v.Args[0])
-			if ind.Op != OpPhi {
-				goto skip1
-			}
-			if v.Op == OpIsInBounds && add != 0 {
-				goto skip1
-			}
-			if v.Op == OpIsSliceInBounds && (0 > add || add > 1) {
-				goto skip1
-			}
-
-			if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) {
-				if v.Args[1] == iv.max {
-					if f.pass.debug > 0 {
-						f.Config.Warnl(b.Line, "Found redundant %s", v.Op)
-					}
-					goto simplify
-				}
-			}
-		}
-	skip1:
-
-		// Simplify:
-		// (IsSliceInBounds ind (SliceCap a)) where 0 <= min <= ind < max == (SliceLen a)
-		// Found in:
-		//	for i := range a {
-		//		use a[:i]
-		//		use a[:i+1]
-		//	}
-		if v.Op == OpIsSliceInBounds {
-			ind, add := dropAdd64(v.Args[0])
-			if ind.Op != OpPhi {
-				goto skip2
-			}
-			if 0 > add || add > 1 {
-				goto skip2
-			}
-
-			if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) {
-				if v.Args[1].Op == OpSliceCap && iv.max.Op == OpSliceLen && v.Args[1].Args[0] == iv.max.Args[0] {
-					if f.pass.debug > 0 {
-						f.Config.Warnl(b.Line, "Found redundant %s (len promoted to cap)", v.Op)
-					}
-					goto simplify
-				}
-			}
-		}
-	skip2:
-
-		// Simplify
-		// (IsInBounds (Add64 ind) (Const64 [c])) where 0 <= min <= ind < max <= (Const64 [c])
-		// (IsSliceInBounds ind (Const64 [c])) where 0 <= min <= ind < max <= (Const64 [c])
-		if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
-			ind, add := dropAdd64(v.Args[0])
-			if ind.Op != OpPhi {
-				goto skip3
-			}
-
-			// ind + add >= 0 <-> min + add >= 0 <-> min >= -add
-			if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isGreaterOrEqualThan(iv.min, -add) {
-				if !v.Args[1].isGenericIntConst() || !iv.max.isGenericIntConst() {
-					goto skip3
-				}
-
-				limit := v.Args[1].AuxInt
-				if v.Op == OpIsSliceInBounds {
-				// If limit++ overflows a signed integer then 0 <= max && max <= limit will be false.
-					limit++
-				}
-
-				if max := iv.max.AuxInt + add; 0 <= max && max <= limit { // handle overflow
-					if f.pass.debug > 0 {
-						f.Config.Warnl(b.Line, "Found redundant (%s ind %d), ind < %d", v.Op, v.Args[1].AuxInt, iv.max.AuxInt+add)
-					}
-					goto simplify
-				}
-			}
-		}
-	skip3:
-
-		continue
-
-	simplify:
-		f.Logf("removing bounds check %v at %v in %s\n", b.Control, b, f.Name)
-		b.Kind = BlockFirst
-		b.SetControl(nil)
-	}
-}
-
-func dropAdd64(v *Value) (*Value, int64) {
-	if v.Op == OpAdd64 && v.Args[0].Op == OpConst64 {
-		return v.Args[1], v.Args[0].AuxInt
-	}
-	if v.Op == OpAdd64 && v.Args[1].Op == OpConst64 {
-		return v.Args[0], v.Args[1].AuxInt
-	}
-	return v, 0
-}
-
-func isGreaterOrEqualThan(v *Value, c int64) bool {
-	if c == 0 {
-		return isNonNegative(v)
-	}
-	if v.isGenericIntConst() && v.AuxInt >= c {
-		return true
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/loopreschedchecks.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/loopreschedchecks.go
deleted file mode 100644
index 019a405..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/loopreschedchecks.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/loopreschedchecks.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/loopreschedchecks.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "fmt"
-
-// an edgeMemCtr records a backedge, together with the memory and
-// counter phi functions at the target of the backedge that must
-// be updated when a rescheduling check replaces the backedge.
-type edgeMemCtr struct {
-	e Edge
-	m *Value // phi for memory at dest of e
-	c *Value // phi for counter at dest of e
-}
-
-// a rewriteTarget is a value-argindex pair indicating
-// where a rewrite is applied.  Note that this is for values,
-// not for block controls, because block controls are not targets
-// for the rewrites performed in inserting rescheduling checks.
-type rewriteTarget struct {
-	v *Value
-	i int
-}
-
-type rewrite struct {
-	before, after *Value          // before is the expected value before rewrite, after is the new value installed.
-	rewrites      []rewriteTarget // all the targets for this rewrite.
-}
-
-func (r *rewrite) String() string {
-	s := "\n\tbefore=" + r.before.String() + ", after=" + r.after.String()
-	for _, rw := range r.rewrites {
-		s += ", (i=" + fmt.Sprint(rw.i) + ", v=" + rw.v.LongString() + ")"
-	}
-	s += "\n"
-	return s
-}
-
-const initialRescheduleCounterValue = 1021 // Largest 10-bit prime. 97 nSec loop bodies will check every 100 uSec.
-
-// insertLoopReschedChecks inserts rescheduling checks on loop backedges.
-func insertLoopReschedChecks(f *Func) {
-	// TODO: when split information is recorded in export data, insert checks only on backedges that can be reached on a split-call-free path.
-
-	// Loop reschedule checks decrement a per-function counter
-	// shared by all loops, and when the counter becomes non-positive
-	// a call is made to a rescheduling check in the runtime.
-	//
-	// Steps:
-	// 1. locate backedges.
-	// 2. Record memory definitions at block end so that
-	//    the SSA graph for mem can be properly modified.
-	// 3. Define a counter and record its future uses (at backedges)
-	//    (Same process as 2, applied to a single definition of the counter.
-	//     The difference from mem is that there are zero-to-many existing mem
-	//     definitions, versus exactly one for the new counter.)
-	// 4. Ensure that phi functions that will-be-needed for mem and counter
-	//    are present in the graph, initially with trivial inputs.
-	// 5. Record all to-be-modified uses of mem and counter;
-	//    apply modifications (split into two steps to simplify and
-	//    avoid nagging order-dependences).
-	// 6. Rewrite backedges to include counter check, reschedule check,
-	//    and modify destination phi function appropriately with new
-	//    definitions for mem and counter.
-
-	if f.NoSplit { // nosplit functions don't reschedule.
-		return
-	}
-
-	backedges := backedges(f)
-	if len(backedges) == 0 { // no backedges means no rescheduling checks.
-		return
-	}
-
-	lastMems := findLastMems(f)
-
-	idom := f.Idom()
-	sdom := f.sdom()
-
-	if f.pass.debug > 2 {
-		fmt.Printf("before %s = %s\n", f.Name, sdom.treestructure(f.Entry))
-	}
-
-	tofixBackedges := []edgeMemCtr{}
-
-	for _, e := range backedges { // TODO: could filter here by calls in loops, if declared and inferred nosplit are recorded in export data.
-		tofixBackedges = append(tofixBackedges, edgeMemCtr{e, nil, nil})
-	}
-
-	// It's possible that there is no memory state (no global/pointer loads/stores or calls)
-	if lastMems[f.Entry.ID] == nil {
-		lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Line, OpInitMem, TypeMem)
-	}
-
-	memDefsAtBlockEnds := make([]*Value, f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block.
-
-	// Propagate last mem definitions forward through successor blocks.
-	po := f.postorder()
-	for i := len(po) - 1; i >= 0; i-- {
-		b := po[i]
-		mem := lastMems[b.ID]
-		for j := 0; mem == nil; j++ { // if there's no def, then there's no phi, so the visible mem is identical in all predecessors.
-			// loop because there might be backedges that haven't been visited yet.
-			mem = memDefsAtBlockEnds[b.Preds[j].b.ID]
-		}
-		memDefsAtBlockEnds[b.ID] = mem
-	}
-
-	// Set up counter.  There are no phis etc pre-existing for it.
-	counter0 := f.Entry.NewValue0I(f.Entry.Line, OpConst32, f.Config.fe.TypeInt32(), initialRescheduleCounterValue)
-	ctrDefsAtBlockEnds := make([]*Value, f.NumBlocks()) // For each block, def visible at its end, if that def will be used.
-
-	// There's a minor difference between memDefsAtBlockEnds and ctrDefsAtBlockEnds;
-	// because the counter only matters for loops and code that reaches them, it is nil for blocks where the ctr is no
-	// longer live.  This will avoid creation of dead phi functions.  This optimization is ignored for the mem variable
-	// because it is harder and also less likely to be helpful, though dead code elimination ought to clean this out anyhow.
-
-	for _, emc := range tofixBackedges {
-		e := emc.e
-		// set initial uses of counter zero (note available-at-bottom and use are the same thing initially.)
-		// each back-edge will be rewritten to include a reschedule check, and that will use the counter.
-		src := e.b.Preds[e.i].b
-		ctrDefsAtBlockEnds[src.ID] = counter0
-	}
-
-	// Push uses towards root
-	for _, b := range f.postorder() {
-		bd := ctrDefsAtBlockEnds[b.ID]
-		if bd == nil {
-			continue
-		}
-		for _, e := range b.Preds {
-			p := e.b
-			if ctrDefsAtBlockEnds[p.ID] == nil {
-				ctrDefsAtBlockEnds[p.ID] = bd
-			}
-		}
-	}
-
-	// Maps from block to newly-inserted phi function in block.
-	newmemphis := make(map[*Block]rewrite)
-	newctrphis := make(map[*Block]rewrite)
-
-	// Insert phi functions as necessary for future changes to flow graph.
-	for i, emc := range tofixBackedges {
-		e := emc.e
-		h := e.b
-
-		// find the phi function for the memory input at "h", if there is one.
-		var headerMemPhi *Value // look for header mem phi
-
-		for _, v := range h.Values {
-			if v.Op == OpPhi && v.Type.IsMemory() {
-				headerMemPhi = v
-			}
-		}
-
-		if headerMemPhi == nil {
-			// if there is no header mem phi, make a trivial phi from the dominator
-			mem0 := memDefsAtBlockEnds[idom[h.ID].ID]
-			headerMemPhi = newPhiFor(h, mem0)
-			newmemphis[h] = rewrite{before: mem0, after: headerMemPhi}
-			addDFphis(mem0, h, h, f, memDefsAtBlockEnds, newmemphis)
-
-		}
-		tofixBackedges[i].m = headerMemPhi
-
-		var headerCtrPhi *Value
-		rw, ok := newctrphis[h]
-		if !ok {
-			headerCtrPhi = newPhiFor(h, counter0)
-			newctrphis[h] = rewrite{before: counter0, after: headerCtrPhi}
-			addDFphis(counter0, h, h, f, ctrDefsAtBlockEnds, newctrphis)
-		} else {
-			headerCtrPhi = rw.after
-		}
-		tofixBackedges[i].c = headerCtrPhi
-	}
-
-	rewriteNewPhis(f.Entry, f.Entry, f, memDefsAtBlockEnds, newmemphis)
-	rewriteNewPhis(f.Entry, f.Entry, f, ctrDefsAtBlockEnds, newctrphis)
-
-	if f.pass.debug > 0 {
-		for b, r := range newmemphis {
-			fmt.Printf("b=%s, rewrite=%s\n", b, r.String())
-		}
-
-		for b, r := range newctrphis {
-			fmt.Printf("b=%s, rewrite=%s\n", b, r.String())
-		}
-	}
-
-	// Apply collected rewrites.
-	for _, r := range newmemphis {
-		for _, rw := range r.rewrites {
-			rw.v.SetArg(rw.i, r.after)
-		}
-	}
-
-	for _, r := range newctrphis {
-		for _, rw := range r.rewrites {
-			rw.v.SetArg(rw.i, r.after)
-		}
-	}
-
-	zero := f.Entry.NewValue0I(f.Entry.Line, OpConst32, f.Config.fe.TypeInt32(), 0)
-	one := f.Entry.NewValue0I(f.Entry.Line, OpConst32, f.Config.fe.TypeInt32(), 1)
-
-	// Rewrite backedges to include reschedule checks.
-	for _, emc := range tofixBackedges {
-		e := emc.e
-		headerMemPhi := emc.m
-		headerCtrPhi := emc.c
-		h := e.b
-		i := e.i
-		p := h.Preds[i]
-		bb := p.b
-		mem0 := headerMemPhi.Args[i]
-		ctr0 := headerCtrPhi.Args[i]
-		// bb e->p h,
-		// Because we're going to insert a rare-call, make sure the
-		// looping edge still looks likely.
-		likely := BranchLikely
-		if p.i != 0 {
-			likely = BranchUnlikely
-		}
-		bb.Likely = likely
-
-		// rewrite edge to include reschedule check
-		// existing edges:
-		//
-		// bb.Succs[p.i] == Edge{h, i}
-		// h.Preds[i] == p == Edge{bb,p.i}
-		//
-		// new block(s):
-		// test:
-		//    ctr1 := ctr0 - 1
-		//    if ctr1 <= 0 { goto sched }
-		//    goto join
-		// sched:
-		//    mem1 := call resched (mem0)
-		//    goto join
-		// join:
-		//    ctr2 := phi(ctr1, counter0) // counter0 is the constant
-		//    mem2 := phi(mem0, mem1)
-		//    goto h
-		//
-		// and correct arg i of headerMemPhi and headerCtrPhi
-		//
-		// EXCEPT: block containing only phi functions is bad
-		// for the register allocator.  Therefore, there is no
-		// join, and instead branches targeting join instead target
-		// the header, and the other phi functions within header are
-		// adjusted for the additional input.
-
-		test := f.NewBlock(BlockIf)
-		sched := f.NewBlock(BlockPlain)
-
-		test.Line = bb.Line
-		sched.Line = bb.Line
-
-		//    ctr1 := ctr0 - 1
-		//    if ctr1 <= 0 { goto sched }
-		//    goto header
-		ctr1 := test.NewValue2(bb.Line, OpSub32, f.Config.fe.TypeInt32(), ctr0, one)
-		cmp := test.NewValue2(bb.Line, OpLeq32, f.Config.fe.TypeBool(), ctr1, zero)
-		test.SetControl(cmp)
-		test.AddEdgeTo(sched) // if true
-		// if false -- rewrite edge to header.
-		// do NOT remove+add, because that will perturb all the other phi functions
-		// as well as messing up other edges to the header.
-		test.Succs = append(test.Succs, Edge{h, i})
-		h.Preds[i] = Edge{test, 1}
-		headerMemPhi.SetArg(i, mem0)
-		headerCtrPhi.SetArg(i, ctr1)
-
-		test.Likely = BranchUnlikely
-
-		// sched:
-		//    mem1 := call resched (mem0)
-		//    goto header
-		resched := f.Config.fe.Syslook("goschedguarded")
-		mem1 := sched.NewValue1A(bb.Line, OpStaticCall, TypeMem, resched, mem0)
-		sched.AddEdgeTo(h)
-		headerMemPhi.AddArg(mem1)
-		headerCtrPhi.AddArg(counter0)
-
-		bb.Succs[p.i] = Edge{test, 0}
-		test.Preds = append(test.Preds, Edge{bb, p.i})
-
-		// Must correct all the other phi functions in the header for new incoming edge.
-		// Except for mem and counter phis, it will be the same value seen on the original
-		// backedge at index i.
-		for _, v := range h.Values {
-			if v.Op == OpPhi && v != headerMemPhi && v != headerCtrPhi {
-				v.AddArg(v.Args[i])
-			}
-		}
-	}
-
-	f.invalidateCFG()
-
-	if f.pass.debug > 2 {
-		sdom = newSparseTree(f, f.Idom())
-		fmt.Printf("after %s = %s\n", f.Name, sdom.treestructure(f.Entry))
-	}
-
-	return
-}
-
-// newPhiFor inserts a new Phi function into b,
-// with all inputs set to v.
-func newPhiFor(b *Block, v *Value) *Value {
-	phiV := b.NewValue0(b.Line, OpPhi, v.Type)
-
-	for range b.Preds {
-		phiV.AddArg(v)
-	}
-	return phiV
-}
-
-// rewriteNewPhis updates newphis[h] to record all places where the new phi function inserted
-// in block h will replace a previous definition.  Block b is the block currently being processed;
-// if b has its own phi definition then it takes the place of h.
-// defsForUses provides information about other definitions of the variable that are present
-// (and if nil, indicates that the variable is no longer live)
-func rewriteNewPhis(h, b *Block, f *Func, defsForUses []*Value, newphis map[*Block]rewrite) {
-	// If b is a block with a new phi, then a new rewrite applies below it in the dominator tree.
-	if _, ok := newphis[b]; ok {
-		h = b
-	}
-	change := newphis[h]
-	x := change.before
-	y := change.after
-
-	// Apply rewrites to this block
-	if x != nil { // don't waste time on the common case of no definition.
-		p := &change.rewrites
-		for _, v := range b.Values {
-			if v == y { // don't rewrite self -- phi inputs are handled below.
-				continue
-			}
-			for i, w := range v.Args {
-				if w != x {
-					continue
-				}
-				*p = append(*p, rewriteTarget{v, i})
-			}
-		}
-
-		// Rewrite appropriate inputs of phis reached in successors
-		// in dominance frontier, self, and dominated.
-		// If the variable def reaching uses in b is itself defined in b, then the new phi function
-		// does not reach the successors of b.  (This assumes a bit about the structure of the
-		// phi use-def graph, but it's true for memory and the inserted counter.)
-		if dfu := defsForUses[b.ID]; dfu != nil && dfu.Block != b {
-			for _, e := range b.Succs {
-				s := e.b
-				if sphi, ok := newphis[s]; ok { // saves time to find the phi this way.
-					*p = append(*p, rewriteTarget{sphi.after, e.i})
-					continue
-				}
-				for _, v := range s.Values {
-					if v.Op == OpPhi && v.Args[e.i] == x {
-						*p = append(*p, rewriteTarget{v, e.i})
-						break
-					}
-				}
-			}
-		}
-		newphis[h] = change
-	}
-
-	sdom := f.sdom()
-
-	for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
-		rewriteNewPhis(h, c, f, defsForUses, newphis) // TODO: convert to explicit stack from recursion.
-	}
-}
-
-// addDFphis creates new trivial phis that are necessary to correctly reflect (within SSA)
-// a new definition for variable "x" inserted at h (usually but not necessarily a phi).
-// These new phis can only occur at the dominance frontier of h; block s is in the dominance
-// frontier of h if h does not strictly dominate s and if s is a successor of a block b where
-// either b = h or h strictly dominates b.
-// These newly created phis are themselves new definitions that may require addition of their
-// own trivial phi functions in their own dominance frontier, and this is handled recursively.
-func addDFphis(x *Value, h, b *Block, f *Func, defForUses []*Value, newphis map[*Block]rewrite) {
-	oldv := defForUses[b.ID]
-	if oldv != x { // either a new definition replacing x, or nil if it is proven that there are no uses reachable from b
-		return
-	}
-	sdom := f.sdom()
-	idom := f.Idom()
-outer:
-	for _, e := range b.Succs {
-		s := e.b
-		// check phi functions in the dominance frontier
-		if sdom.isAncestor(h, s) {
-			continue // h dominates s, successor of b, therefore s is not in the frontier.
-		}
-		if _, ok := newphis[s]; ok {
-			continue // successor s of b already has a new phi function, so there is no need to add another.
-		}
-		if x != nil {
-			for _, v := range s.Values {
-				if v.Op == OpPhi && v.Args[e.i] == x {
-					continue outer // successor s of b has an old phi function, so there is no need to add another.
-				}
-			}
-		}
-
-		old := defForUses[idom[s.ID].ID] // new phi function is correct-but-redundant, combining value "old" on all inputs.
-		headerPhi := newPhiFor(s, old)
-		// the new phi will replace "old" in block s and all blocks dominated by s.
-		newphis[s] = rewrite{before: old, after: headerPhi} // record new phi, to have inputs labeled "old" rewritten to "headerPhi"
-		addDFphis(old, s, s, f, defForUses, newphis)        // the new definition may also create new phi functions.
-	}
-	for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
-		addDFphis(x, h, c, f, defForUses, newphis) // TODO: convert to explicit stack from recursion.
-	}
-}
-
-// findLastMems maps block ids to last memory-output op in a block, if any
-func findLastMems(f *Func) []*Value {
-
-	var stores []*Value
-	lastMems := make([]*Value, f.NumBlocks())
-	storeUse := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(storeUse)
-	for _, b := range f.Blocks {
-		// Find all the stores in this block. Categorize their uses:
-		//  storeUse contains stores which are used by a subsequent store.
-		storeUse.clear()
-		stores = stores[:0]
-		var memPhi *Value
-		for _, v := range b.Values {
-			if v.Op == OpPhi {
-				if v.Type.IsMemory() {
-					memPhi = v
-				}
-				continue
-			}
-			if v.Type.IsMemory() {
-				stores = append(stores, v)
-				if v.Op == OpSelect1 {
-					// Use the arg of the tuple-generating op.
-					v = v.Args[0]
-				}
-				for _, a := range v.Args {
-					if a.Block == b && a.Type.IsMemory() {
-						storeUse.add(a.ID)
-					}
-				}
-			}
-		}
-		if len(stores) == 0 {
-			lastMems[b.ID] = memPhi
-			continue
-		}
-
-		// find last store in the block
-		var last *Value
-		for _, v := range stores {
-			if storeUse.contains(v.ID) {
-				continue
-			}
-			if last != nil {
-				b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
-			}
-			last = v
-		}
-		if last == nil {
-			b.Fatalf("no last store found - cycle?")
-		}
-		lastMems[b.ID] = last
-	}
-	return lastMems
-}
-
-type backedgesState struct {
-	b *Block
-	i int
-}
-
-// backedges returns a slice of successor edges that are back
-// edges.  For reducible loops, edge.b is the header.
-func backedges(f *Func) []Edge {
-	edges := []Edge{}
-	mark := make([]markKind, f.NumBlocks())
-	stack := []backedgesState{}
-
-	mark[f.Entry.ID] = notExplored
-	stack = append(stack, backedgesState{f.Entry, 0})
-
-	for len(stack) > 0 {
-		l := len(stack)
-		x := stack[l-1]
-		if x.i < len(x.b.Succs) {
-			e := x.b.Succs[x.i]
-			stack[l-1].i++
-			s := e.b
-			if mark[s.ID] == notFound {
-				mark[s.ID] = notExplored
-				stack = append(stack, backedgesState{s, 0})
-			} else if mark[s.ID] == notExplored {
-				edges = append(edges, e)
-			}
-		} else {
-			mark[x.b.ID] = done
-			stack = stack[0 : l-1]
-		}
-	}
-	return edges
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lower.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lower.go
deleted file mode 100644
index baa6c45..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/lower.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/lower.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/lower.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// convert to machine-dependent ops
-func lower(f *Func) {
-	// repeat rewrites until we find no more rewrites
-	applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue)
-}
-
-// checkLower checks for unlowered opcodes and fails if we find one.
-func checkLower(f *Func) {
-	// Needs to be a separate phase because it must run after both
-	// lowering and a subsequent dead code elimination (because lowering
-	// rules may leave dead generic ops behind).
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if !opcodeTable[v.Op].generic {
-				continue // lowered
-			}
-			switch v.Op {
-			case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1:
-				continue // ok not to lower
-			case OpGetG:
-				if f.Config.hasGReg {
-					// has hardware g register, regalloc takes care of it
-					continue // ok not to lower
-				}
-			}
-			s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()
-			for _, a := range v.Args {
-				s += " " + a.Type.SimpleString()
-			}
-			f.Fatalf("%s", s)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/magic.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/magic.go
deleted file mode 100644
index 07109c3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/magic.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/magic.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/magic.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// A copy of the code in ../gc/subr.go.
-// We can't use it directly because it would generate
-// an import cycle. TODO: move to a common support package.
-
-// argument passing to/from
-// smagic and umagic
-type magic struct {
-	W   int // input for both - width
-	S   int // output for both - shift
-	Bad int // output for both - unexpected failure
-
-	// magic multiplier for signed literal divisors
-	Sd int64 // input - literal divisor
-	Sm int64 // output - multiplier
-
-	// magic multiplier for unsigned literal divisors
-	Ud uint64 // input - literal divisor
-	Um uint64 // output - multiplier
-	Ua int    // output - adder
-}
-
-// magic number for signed division
-// see hacker's delight chapter 10
-func smagic(m *magic) {
-	var mask uint64
-
-	m.Bad = 0
-	switch m.W {
-	default:
-		m.Bad = 1
-		return
-
-	case 8:
-		mask = 0xff
-
-	case 16:
-		mask = 0xffff
-
-	case 32:
-		mask = 0xffffffff
-
-	case 64:
-		mask = 0xffffffffffffffff
-	}
-
-	two31 := mask ^ (mask >> 1)
-
-	p := m.W - 1
-	ad := uint64(m.Sd)
-	if m.Sd < 0 {
-		ad = -uint64(m.Sd)
-	}
-
-	// bad denominators
-	if ad == 0 || ad == 1 || ad == two31 {
-		m.Bad = 1
-		return
-	}
-
-	t := two31
-	ad &= mask
-
-	anc := t - 1 - t%ad
-	anc &= mask
-
-	q1 := two31 / anc
-	r1 := two31 - q1*anc
-	q1 &= mask
-	r1 &= mask
-
-	q2 := two31 / ad
-	r2 := two31 - q2*ad
-	q2 &= mask
-	r2 &= mask
-
-	var delta uint64
-	for {
-		p++
-		q1 <<= 1
-		r1 <<= 1
-		q1 &= mask
-		r1 &= mask
-		if r1 >= anc {
-			q1++
-			r1 -= anc
-			q1 &= mask
-			r1 &= mask
-		}
-
-		q2 <<= 1
-		r2 <<= 1
-		q2 &= mask
-		r2 &= mask
-		if r2 >= ad {
-			q2++
-			r2 -= ad
-			q2 &= mask
-			r2 &= mask
-		}
-
-		delta = ad - r2
-		delta &= mask
-		if q1 < delta || (q1 == delta && r1 == 0) {
-			continue
-		}
-
-		break
-	}
-
-	m.Sm = int64(q2 + 1)
-	if uint64(m.Sm)&two31 != 0 {
-		m.Sm |= ^int64(mask)
-	}
-	m.S = p - m.W
-}
-
-// magic number for unsigned division
-// see hacker's delight chapter 10
-func umagic(m *magic) {
-	var mask uint64
-
-	m.Bad = 0
-	m.Ua = 0
-
-	switch m.W {
-	default:
-		m.Bad = 1
-		return
-
-	case 8:
-		mask = 0xff
-
-	case 16:
-		mask = 0xffff
-
-	case 32:
-		mask = 0xffffffff
-
-	case 64:
-		mask = 0xffffffffffffffff
-	}
-
-	two31 := mask ^ (mask >> 1)
-
-	m.Ud &= mask
-	if m.Ud == 0 || m.Ud == two31 {
-		m.Bad = 1
-		return
-	}
-
-	nc := mask - (-m.Ud&mask)%m.Ud
-	p := m.W - 1
-
-	q1 := two31 / nc
-	r1 := two31 - q1*nc
-	q1 &= mask
-	r1 &= mask
-
-	q2 := (two31 - 1) / m.Ud
-	r2 := (two31 - 1) - q2*m.Ud
-	q2 &= mask
-	r2 &= mask
-
-	var delta uint64
-	for {
-		p++
-		if r1 >= nc-r1 {
-			q1 <<= 1
-			q1++
-			r1 <<= 1
-			r1 -= nc
-		} else {
-			q1 <<= 1
-			r1 <<= 1
-		}
-
-		q1 &= mask
-		r1 &= mask
-		if r2+1 >= m.Ud-r2 {
-			if q2 >= two31-1 {
-				m.Ua = 1
-			}
-
-			q2 <<= 1
-			q2++
-			r2 <<= 1
-			r2++
-			r2 -= m.Ud
-		} else {
-			if q2 >= two31 {
-				m.Ua = 1
-			}
-
-			q2 <<= 1
-			r2 <<= 1
-			r2++
-		}
-
-		q2 &= mask
-		r2 &= mask
-
-		delta = m.Ud - 1 - r2
-		delta &= mask
-
-		if p < m.W+m.W {
-			if q1 < delta || (q1 == delta && r1 == 0) {
-				continue
-			}
-		}
-
-		break
-	}
-
-	m.Um = q2 + 1
-	m.S = p - m.W
-}
-
-// adaptors for use by rewrite rules
-func smagic64ok(d int64) bool {
-	m := magic{W: 64, Sd: d}
-	smagic(&m)
-	return m.Bad == 0
-}
-func smagic64m(d int64) int64 {
-	m := magic{W: 64, Sd: d}
-	smagic(&m)
-	return m.Sm
-}
-func smagic64s(d int64) int64 {
-	m := magic{W: 64, Sd: d}
-	smagic(&m)
-	return int64(m.S)
-}
-
-func umagic64ok(d int64) bool {
-	m := magic{W: 64, Ud: uint64(d)}
-	umagic(&m)
-	return m.Bad == 0
-}
-func umagic64m(d int64) int64 {
-	m := magic{W: 64, Ud: uint64(d)}
-	umagic(&m)
-	return int64(m.Um)
-}
-func umagic64s(d int64) int64 {
-	m := magic{W: 64, Ud: uint64(d)}
-	umagic(&m)
-	return int64(m.S)
-}
-func umagic64a(d int64) bool {
-	m := magic{W: 64, Ud: uint64(d)}
-	umagic(&m)
-	return m.Ua != 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/nilcheck.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/nilcheck.go
deleted file mode 100644
index 5adae76..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/nilcheck.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/nilcheck.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/nilcheck.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// nilcheckelim eliminates unnecessary nil checks.
-// runs on machine-independent code.
-func nilcheckelim(f *Func) {
-	// A nil check is redundant if the same nil check was successful in a
-	// dominating block. The efficacy of this pass depends heavily on the
-	// efficacy of the cse pass.
-	sdom := f.sdom()
-
-	// TODO: Eliminate more nil checks.
-	// We can recursively remove any chain of fixed offset calculations,
-	// i.e. struct fields and array elements, even with non-constant
-	// indices: x is non-nil iff x.a.b[i].c is.
-
-	type walkState int
-	const (
-		Work     walkState = iota // process nil checks and traverse to dominees
-		ClearPtr                  // forget the fact that ptr is non-nil
-	)
-
-	type bp struct {
-		block *Block // block, or nil in ClearPtr state
-		ptr   *Value // if non-nil, ptr that is to be cleared in ClearPtr state
-		op    walkState
-	}
-
-	work := make([]bp, 0, 256)
-	work = append(work, bp{block: f.Entry})
-
-	// map from value ID to bool indicating if value is known to be non-nil
-	// in the current dominator path being walked. This slice is updated by
-	// walkStates to maintain the known non-nil values.
-	nonNilValues := make([]bool, f.NumValues())
-
-	// make an initial pass identifying any non-nil values
-	for _, b := range f.Blocks {
-		// a value resulting from taking the address of a
-		// value, or a value constructed from an offset of a
-		// non-nil ptr (OpAddPtr) implies it is non-nil
-		for _, v := range b.Values {
-			if v.Op == OpAddr || v.Op == OpAddPtr {
-				nonNilValues[v.ID] = true
-			} else if v.Op == OpPhi {
-				// phis whose arguments are all non-nil
-				// are non-nil
-				argsNonNil := true
-				for _, a := range v.Args {
-					if !nonNilValues[a.ID] {
-						argsNonNil = false
-					}
-				}
-				if argsNonNil {
-					nonNilValues[v.ID] = true
-				}
-			}
-		}
-	}
-
-	// perform a depth first walk of the dominee tree
-	for len(work) > 0 {
-		node := work[len(work)-1]
-		work = work[:len(work)-1]
-
-		switch node.op {
-		case Work:
-			b := node.block
-
-			// First, see if we're dominated by an explicit nil check.
-			if len(b.Preds) == 1 {
-				p := b.Preds[0].b
-				if p.Kind == BlockIf && p.Control.Op == OpIsNonNil && p.Succs[0].b == b {
-					ptr := p.Control.Args[0]
-					if !nonNilValues[ptr.ID] {
-						nonNilValues[ptr.ID] = true
-						work = append(work, bp{op: ClearPtr, ptr: ptr})
-					}
-				}
-			}
-
-			// Next, process values in the block.
-			i := 0
-			for _, v := range b.Values {
-				b.Values[i] = v
-				i++
-				switch v.Op {
-				case OpIsNonNil:
-					ptr := v.Args[0]
-					if nonNilValues[ptr.ID] {
-						// This is a redundant explicit nil check.
-						v.reset(OpConstBool)
-						v.AuxInt = 1 // true
-					}
-				case OpNilCheck:
-					ptr := v.Args[0]
-					if nonNilValues[ptr.ID] {
-						// This is a redundant implicit nil check.
-						// Logging in the style of the former compiler -- and omit line 1,
-						// which is usually in generated code.
-						if f.Config.Debug_checknil() && v.Line > 1 {
-							f.Config.Warnl(v.Line, "removed nil check")
-						}
-						v.reset(OpUnknown)
-						i--
-						continue
-					}
-					// Record the fact that we know ptr is non nil, and remember to
-					// undo that information when this dominator subtree is done.
-					nonNilValues[ptr.ID] = true
-					work = append(work, bp{op: ClearPtr, ptr: ptr})
-				}
-			}
-			for j := i; j < len(b.Values); j++ {
-				b.Values[j] = nil
-			}
-			b.Values = b.Values[:i]
-
-			// Add all dominated blocks to the work list.
-			for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
-				work = append(work, bp{op: Work, block: w})
-			}
-
-		case ClearPtr:
-			nonNilValues[node.ptr.ID] = false
-			continue
-		}
-	}
-}
-
-// All platforms are guaranteed to fault if we load/store to anything smaller than this address.
-//
-// This should agree with minLegalPointer in the runtime.
-const minZeroPage = 4096
-
-// nilcheckelim2 eliminates unnecessary nil checks.
-// Runs after lowering and scheduling.
-func nilcheckelim2(f *Func) {
-	unnecessary := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(unnecessary)
-	for _, b := range f.Blocks {
-		// Walk the block backwards. Find instructions that will fault if their
-		// input pointer is nil. Remove nil checks on those pointers, as the
-		// faulting instruction effectively does the nil check for free.
-		unnecessary.clear()
-		for i := len(b.Values) - 1; i >= 0; i-- {
-			v := b.Values[i]
-			if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
-				if f.Config.Debug_checknil() && int(v.Line) > 1 {
-					f.Config.Warnl(v.Line, "removed nil check")
-				}
-				v.reset(OpUnknown)
-				continue
-			}
-			if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
-				if v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive {
-					// These ops don't really change memory.
-					continue
-				}
-				// This op changes memory.  Any faulting instruction after v that
-				// we've recorded in the unnecessary map is now obsolete.
-				unnecessary.clear()
-			}
-
-			// Find any pointers that this op is guaranteed to fault on if nil.
-			var ptrstore [2]*Value
-			ptrs := ptrstore[:0]
-			if opcodeTable[v.Op].faultOnNilArg0 {
-				ptrs = append(ptrs, v.Args[0])
-			}
-			if opcodeTable[v.Op].faultOnNilArg1 {
-				ptrs = append(ptrs, v.Args[1])
-			}
-			for _, ptr := range ptrs {
-				// Check to make sure the offset is small.
-				switch opcodeTable[v.Op].auxType {
-				case auxSymOff:
-					if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
-						continue
-					}
-				case auxSymValAndOff:
-					off := ValAndOff(v.AuxInt).Off()
-					if v.Aux != nil || off < 0 || off >= minZeroPage {
-						continue
-					}
-				case auxInt32:
-					// Mips uses this auxType for atomic add constant. It does not affect the effective address.
-				case auxInt64:
-					// ARM uses this auxType for duffcopy/duffzero/alignment info.
-					// It does not affect the effective address.
-				case auxNone:
-					// offset is zero.
-				default:
-					v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
-				}
-				// This instruction is guaranteed to fault if ptr is nil.
-				// Any previous nil check op is unnecessary.
-				unnecessary.add(ptr.ID)
-			}
-		}
-		// Remove values we've clobbered with OpUnknown.
-		i := 0
-		for _, v := range b.Values {
-			if v.Op != OpUnknown {
-				b.Values[i] = v
-				i++
-			}
-		}
-		for j := i; j < len(b.Values); j++ {
-			b.Values[j] = nil
-		}
-		b.Values = b.Values[:i]
-
-		// TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
-		// more unnecessary nil checks.  Would fix test/nilptr3_ssa.go:157.
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/nilcheck_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/nilcheck_test.go
deleted file mode 100644
index 70885e3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/nilcheck_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/nilcheck_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/nilcheck_test.go:1
-package ssa
-
-import (
-	"strconv"
-	"testing"
-)
-
-func BenchmarkNilCheckDeep1(b *testing.B)     { benchmarkNilCheckDeep(b, 1) }
-func BenchmarkNilCheckDeep10(b *testing.B)    { benchmarkNilCheckDeep(b, 10) }
-func BenchmarkNilCheckDeep100(b *testing.B)   { benchmarkNilCheckDeep(b, 100) }
-func BenchmarkNilCheckDeep1000(b *testing.B)  { benchmarkNilCheckDeep(b, 1000) }
-func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000) }
-
-// benchmarkNilCheckDeep is a stress test of nilcheckelim.
-// It uses the worst possible input: A linear string of
-// nil checks, none of which can be eliminated.
-// Run with multiple depths to observe big-O behavior.
-func benchmarkNilCheckDeep(b *testing.B, depth int) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-
-	var blocs []bloc
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto(blockn(0)),
-		),
-	)
-	for i := 0; i < depth; i++ {
-		blocs = append(blocs,
-			Bloc(blockn(i),
-				Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
-				Valu(booln(i), OpIsNonNil, TypeBool, 0, nil, ptrn(i)),
-				If(booln(i), blockn(i+1), "exit"),
-			),
-		)
-	}
-	blocs = append(blocs,
-		Bloc(blockn(depth), Goto("exit")),
-		Bloc("exit", Exit("mem")),
-	)
-
-	c := NewConfig("amd64", DummyFrontend{b}, nil, true)
-	fun := Fun(c, "entry", blocs...)
-
-	CheckFunc(fun.f)
-	b.SetBytes(int64(depth)) // helps for eyeballing linearity
-	b.ResetTimer()
-	b.ReportAllocs()
-
-	for i := 0; i < b.N; i++ {
-		nilcheckelim(fun.f)
-	}
-}
-
-func blockn(n int) string { return "b" + strconv.Itoa(n) }
-func ptrn(n int) string   { return "p" + strconv.Itoa(n) }
-func booln(n int) string  { return "c" + strconv.Itoa(n) }
-
-func isNilCheck(b *Block) bool {
-	return b.Kind == BlockIf && b.Control.Op == OpIsNonNil
-}
-
-// TestNilcheckSimple verifies that a second repeated nilcheck is removed.
-func TestNilcheckSimple(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool1", "secondCheck", "exit")),
-		Bloc("secondCheck",
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool2", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["secondCheck"] && isNilCheck(b) {
-			t.Errorf("secondCheck was not eliminated")
-		}
-	}
-}
-
-// TestNilcheckDomOrder ensures that the nil check elimination isn't dependent
-// on the order of the dominees.
-func TestNilcheckDomOrder(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool1", "secondCheck", "exit")),
-		Bloc("exit",
-			Exit("mem")),
-		Bloc("secondCheck",
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool2", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["secondCheck"] && isNilCheck(b) {
-			t.Errorf("secondCheck was not eliminated")
-		}
-	}
-}
-
-// TestNilcheckAddr verifies that nilchecks of OpAddr constructed values are removed.
-func TestNilcheckAddr(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
-			Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool1", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["checkPtr"] && isNilCheck(b) {
-			t.Errorf("checkPtr was not eliminated")
-		}
-	}
-}
-
-// TestNilcheckAddPtr verifies that nilchecks of OpAddPtr constructed values are removed.
-func TestNilcheckAddPtr(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("off", OpConst64, TypeInt64, 20, nil),
-			Valu("ptr1", OpAddPtr, ptrType, 0, nil, "sb", "off"),
-			Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool1", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["checkPtr"] && isNilCheck(b) {
-			t.Errorf("checkPtr was not eliminated")
-		}
-	}
-}
-
-// TestNilcheckPhi tests that nil checks of phis, for which all values are known to be
-// non-nil, are removed.
-func TestNilcheckPhi(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Valu("sp", OpSP, TypeInvalid, 0, nil),
-			Valu("baddr", OpAddr, TypeBool, 0, "b", "sp"),
-			Valu("bool1", OpLoad, TypeBool, 0, nil, "baddr", "mem"),
-			If("bool1", "b1", "b2")),
-		Bloc("b1",
-			Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
-			Goto("checkPtr")),
-		Bloc("b2",
-			Valu("ptr2", OpAddr, ptrType, 0, nil, "sb"),
-			Goto("checkPtr")),
-		// both ptr1 and ptr2 are guaranteed non-nil here
-		Bloc("checkPtr",
-			Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr2"),
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "phi"),
-			If("bool2", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["checkPtr"] && isNilCheck(b) {
-			t.Errorf("checkPtr was not eliminated")
-		}
-	}
-}
-
-// TestNilcheckKeepRemove verifies that duplicate checks of the same pointer
-// are removed, but checks of different pointers are not.
-func TestNilcheckKeepRemove(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool1", "differentCheck", "exit")),
-		Bloc("differentCheck",
-			Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr2"),
-			If("bool2", "secondCheck", "exit")),
-		Bloc("secondCheck",
-			Valu("bool3", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool3", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	foundDifferentCheck := false
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["secondCheck"] && isNilCheck(b) {
-			t.Errorf("secondCheck was not eliminated")
-		}
-		if b == fun.blocks["differentCheck"] && isNilCheck(b) {
-			foundDifferentCheck = true
-		}
-	}
-	if !foundDifferentCheck {
-		t.Errorf("removed differentCheck, but shouldn't have")
-	}
-}
-
-// TestNilcheckInFalseBranch tests that nil checks in the false branch of a nilcheck
-// block are *not* removed.
-func TestNilcheckInFalseBranch(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("bool1", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool1", "extra", "secondCheck")),
-		Bloc("secondCheck",
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool2", "extra", "thirdCheck")),
-		Bloc("thirdCheck",
-			Valu("bool3", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool3", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	foundSecondCheck := false
-	foundThirdCheck := false
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["secondCheck"] && isNilCheck(b) {
-			foundSecondCheck = true
-		}
-		if b == fun.blocks["thirdCheck"] && isNilCheck(b) {
-			foundThirdCheck = true
-		}
-	}
-	if !foundSecondCheck {
-		t.Errorf("removed secondCheck, but shouldn't have [false branch]")
-	}
-	if !foundThirdCheck {
-		t.Errorf("removed thirdCheck, but shouldn't have [false branch]")
-	}
-}
-
-// TestNilcheckUser verifies that a user nil check that dominates a generated nil check
-// will remove the generated nil check.
-func TestNilcheckUser(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("nilptr", OpConstNil, ptrType, 0, nil),
-			Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
-			If("bool1", "secondCheck", "exit")),
-		Bloc("secondCheck",
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool2", "extra", "exit")),
-		Bloc("extra",
-			Goto("exit")),
-		Bloc("exit",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	// we need the opt here to rewrite the user nilcheck
-	opt(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["secondCheck"] && isNilCheck(b) {
-			t.Errorf("secondCheck was not eliminated")
-		}
-	}
-}
-
-// TestNilcheckBug reproduces a bug in nilcheckelim found by compiling math/big
-func TestNilcheckBug(t *testing.T) {
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	c := NewConfig("amd64", DummyFrontend{t}, nil, true)
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto("checkPtr")),
-		Bloc("checkPtr",
-			Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
-			Valu("nilptr", OpConstNil, ptrType, 0, nil),
-			Valu("bool1", OpNeqPtr, TypeBool, 0, nil, "ptr1", "nilptr"),
-			If("bool1", "secondCheck", "couldBeNil")),
-		Bloc("couldBeNil",
-			Goto("secondCheck")),
-		Bloc("secondCheck",
-			Valu("bool2", OpIsNonNil, TypeBool, 0, nil, "ptr1"),
-			If("bool2", "extra", "exit")),
-		Bloc("extra",
-			// prevent fuse from eliminating this block
-			Valu("store", OpStore, TypeMem, 8, nil, "ptr1", "nilptr", "mem"),
-			Goto("exit")),
-		Bloc("exit",
-			Valu("phi", OpPhi, TypeMem, 0, nil, "mem", "store"),
-			Exit("phi")))
-
-	CheckFunc(fun.f)
-	// we need the opt here to rewrite the user nilcheck
-	opt(fun.f)
-	nilcheckelim(fun.f)
-
-	// clean up the removed nil check
-	fuse(fun.f)
-	deadcode(fun.f)
-
-	CheckFunc(fun.f)
-	foundSecondCheck := false
-	for _, b := range fun.f.Blocks {
-		if b == fun.blocks["secondCheck"] && isNilCheck(b) {
-			foundSecondCheck = true
-		}
-	}
-	if !foundSecondCheck {
-		t.Errorf("secondCheck was eliminated, but shouldn't have")
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/op.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/op.go
deleted file mode 100644
index 8118521..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/op.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/op.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/op.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-// An Op encodes the specific operation that a Value performs.
-// Opcodes' semantics can be modified by the type and aux fields of the Value.
-// For instance, OpAdd can be 32 or 64 bit, signed or unsigned, float or complex, depending on Value.Type.
-// Semantics of each op are described in the opcode files in gen/*Ops.go.
-// There is one file for generic (architecture-independent) ops and one file
-// for each architecture.
-type Op int32
-
-type opInfo struct {
-	name              string
-	reg               regInfo
-	auxType           auxType
-	argLen            int32 // the number of arguments, -1 if variable length
-	asm               obj.As
-	generic           bool // this is a generic (arch-independent) opcode
-	rematerializeable bool // this op is rematerializeable
-	commutative       bool // this operation is commutative (e.g. addition)
-	resultInArg0      bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
-	resultNotInArgs   bool // outputs must not be allocated to the same registers as inputs
-	clobberFlags      bool // this op clobbers flags register
-	call              bool // is a function call
-	nilCheck          bool // this op is a nil check on arg0
-	faultOnNilArg0    bool // this op will fault if arg0 is nil (and aux encodes a small offset)
-	faultOnNilArg1    bool // this op will fault if arg1 is nil (and aux encodes a small offset)
-	usesScratch       bool // this op requires scratch memory space
-}
-
-type inputInfo struct {
-	idx  int     // index in Args array
-	regs regMask // allowed input registers
-}
-
-type outputInfo struct {
-	idx  int     // index in output tuple
-	regs regMask // allowed output registers
-}
-
-type regInfo struct {
-	inputs   []inputInfo // ordered in register allocation order
-	clobbers regMask
-	outputs  []outputInfo // ordered in register allocation order
-}
-
-type auxType int8
-
-const (
-	auxNone            auxType = iota
-	auxBool                    // auxInt is 0/1 for false/true
-	auxInt8                    // auxInt is an 8-bit integer
-	auxInt16                   // auxInt is a 16-bit integer
-	auxInt32                   // auxInt is a 32-bit integer
-	auxInt64                   // auxInt is a 64-bit integer
-	auxInt128                  // auxInt represents a 128-bit integer.  Always 0.
-	auxFloat32                 // auxInt is a float32 (encoded with math.Float64bits)
-	auxFloat64                 // auxInt is a float64 (encoded with math.Float64bits)
-	auxSizeAndAlign            // auxInt is a SizeAndAlign
-	auxString                  // aux is a string
-	auxSym                     // aux is a symbol
-	auxSymOff                  // aux is a symbol, auxInt is an offset
-	auxSymValAndOff            // aux is a symbol, auxInt is a ValAndOff
-	auxSymSizeAndAlign         // aux is a symbol, auxInt is a SizeAndAlign
-
-	auxSymInt32 // aux is a symbol, auxInt is a 32-bit integer
-)
-
-// A ValAndOff is used by several opcodes. It holds
-// both a value and a pointer offset.
-// A ValAndOff is intended to be encoded into an AuxInt field.
-// The zero ValAndOff encodes a value of 0 and an offset of 0.
-// The high 32 bits hold a value.
-// The low 32 bits hold a pointer offset.
-type ValAndOff int64
-
-func (x ValAndOff) Val() int64 {
-	return int64(x) >> 32
-}
-func (x ValAndOff) Off() int64 {
-	return int64(int32(x))
-}
-func (x ValAndOff) Int64() int64 {
-	return int64(x)
-}
-func (x ValAndOff) String() string {
-	return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off())
-}
-
-// validVal reports whether the value can be used
-// as an argument to makeValAndOff.
-func validVal(val int64) bool {
-	return val == int64(int32(val))
-}
-
-// validOff reports whether the offset can be used
-// as an argument to makeValAndOff.
-func validOff(off int64) bool {
-	return off == int64(int32(off))
-}
-
-// validValAndOff reports whether we can fit the value and offset into
-// a ValAndOff value.
-func validValAndOff(val, off int64) bool {
-	if !validVal(val) {
-		return false
-	}
-	if !validOff(off) {
-		return false
-	}
-	return true
-}
-
-// makeValAndOff encodes a ValAndOff into an int64 suitable for storing in an AuxInt field.
-func makeValAndOff(val, off int64) int64 {
-	if !validValAndOff(val, off) {
-		panic("invalid makeValAndOff")
-	}
-	return ValAndOff(val<<32 + int64(uint32(off))).Int64()
-}
-
-func (x ValAndOff) canAdd(off int64) bool {
-	newoff := x.Off() + off
-	return newoff == int64(int32(newoff))
-}
-
-func (x ValAndOff) add(off int64) int64 {
-	if !x.canAdd(off) {
-		panic("invalid ValAndOff.add")
-	}
-	return makeValAndOff(x.Val(), x.Off()+off)
-}
-
-// SizeAndAlign holds both the size and the alignment of a type,
-// used in Zero and Move ops.
-// The high 8 bits hold the alignment.
-// The low 56 bits hold the size.
-type SizeAndAlign int64
-
-func (x SizeAndAlign) Size() int64 {
-	return int64(x) & (1<<56 - 1)
-}
-func (x SizeAndAlign) Align() int64 {
-	return int64(uint64(x) >> 56)
-}
-func (x SizeAndAlign) Int64() int64 {
-	return int64(x)
-}
-func (x SizeAndAlign) String() string {
-	return fmt.Sprintf("size=%d,align=%d", x.Size(), x.Align())
-}
-func MakeSizeAndAlign(size, align int64) SizeAndAlign {
-	if size&^(1<<56-1) != 0 {
-		panic("size too big in SizeAndAlign")
-	}
-	if align >= 1<<8 {
-		panic("alignment too big in SizeAndAlign")
-	}
-	return SizeAndAlign(size | align<<56)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/opGen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/opGen.go
deleted file mode 100644
index 8d79753..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/opGen.go
+++ /dev/null
@@ -1,21863 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/opGen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/opGen.go:1
-// autogenerated: do not edit!
-// generated from gen/*Ops.go
-
-package ssa
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/arm"
-	"bootstrap/cmd/internal/obj/arm64"
-	"bootstrap/cmd/internal/obj/mips"
-	"bootstrap/cmd/internal/obj/ppc64"
-	"bootstrap/cmd/internal/obj/s390x"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-const (
-	BlockInvalid BlockKind = iota
-
-	Block386EQ
-	Block386NE
-	Block386LT
-	Block386LE
-	Block386GT
-	Block386GE
-	Block386ULT
-	Block386ULE
-	Block386UGT
-	Block386UGE
-	Block386EQF
-	Block386NEF
-	Block386ORD
-	Block386NAN
-
-	BlockAMD64EQ
-	BlockAMD64NE
-	BlockAMD64LT
-	BlockAMD64LE
-	BlockAMD64GT
-	BlockAMD64GE
-	BlockAMD64ULT
-	BlockAMD64ULE
-	BlockAMD64UGT
-	BlockAMD64UGE
-	BlockAMD64EQF
-	BlockAMD64NEF
-	BlockAMD64ORD
-	BlockAMD64NAN
-
-	BlockARMEQ
-	BlockARMNE
-	BlockARMLT
-	BlockARMLE
-	BlockARMGT
-	BlockARMGE
-	BlockARMULT
-	BlockARMULE
-	BlockARMUGT
-	BlockARMUGE
-
-	BlockARM64EQ
-	BlockARM64NE
-	BlockARM64LT
-	BlockARM64LE
-	BlockARM64GT
-	BlockARM64GE
-	BlockARM64ULT
-	BlockARM64ULE
-	BlockARM64UGT
-	BlockARM64UGE
-	BlockARM64Z
-	BlockARM64NZ
-	BlockARM64ZW
-	BlockARM64NZW
-
-	BlockMIPSEQ
-	BlockMIPSNE
-	BlockMIPSLTZ
-	BlockMIPSLEZ
-	BlockMIPSGTZ
-	BlockMIPSGEZ
-	BlockMIPSFPT
-	BlockMIPSFPF
-
-	BlockMIPS64EQ
-	BlockMIPS64NE
-	BlockMIPS64LTZ
-	BlockMIPS64LEZ
-	BlockMIPS64GTZ
-	BlockMIPS64GEZ
-	BlockMIPS64FPT
-	BlockMIPS64FPF
-
-	BlockPPC64EQ
-	BlockPPC64NE
-	BlockPPC64LT
-	BlockPPC64LE
-	BlockPPC64GT
-	BlockPPC64GE
-	BlockPPC64FLT
-	BlockPPC64FLE
-	BlockPPC64FGT
-	BlockPPC64FGE
-
-	BlockS390XEQ
-	BlockS390XNE
-	BlockS390XLT
-	BlockS390XLE
-	BlockS390XGT
-	BlockS390XGE
-	BlockS390XGTF
-	BlockS390XGEF
-
-	BlockPlain
-	BlockIf
-	BlockDefer
-	BlockRet
-	BlockRetJmp
-	BlockExit
-	BlockFirst
-)
-
-var blockString = [...]string{
-	BlockInvalid: "BlockInvalid",
-
-	Block386EQ:  "EQ",
-	Block386NE:  "NE",
-	Block386LT:  "LT",
-	Block386LE:  "LE",
-	Block386GT:  "GT",
-	Block386GE:  "GE",
-	Block386ULT: "ULT",
-	Block386ULE: "ULE",
-	Block386UGT: "UGT",
-	Block386UGE: "UGE",
-	Block386EQF: "EQF",
-	Block386NEF: "NEF",
-	Block386ORD: "ORD",
-	Block386NAN: "NAN",
-
-	BlockAMD64EQ:  "EQ",
-	BlockAMD64NE:  "NE",
-	BlockAMD64LT:  "LT",
-	BlockAMD64LE:  "LE",
-	BlockAMD64GT:  "GT",
-	BlockAMD64GE:  "GE",
-	BlockAMD64ULT: "ULT",
-	BlockAMD64ULE: "ULE",
-	BlockAMD64UGT: "UGT",
-	BlockAMD64UGE: "UGE",
-	BlockAMD64EQF: "EQF",
-	BlockAMD64NEF: "NEF",
-	BlockAMD64ORD: "ORD",
-	BlockAMD64NAN: "NAN",
-
-	BlockARMEQ:  "EQ",
-	BlockARMNE:  "NE",
-	BlockARMLT:  "LT",
-	BlockARMLE:  "LE",
-	BlockARMGT:  "GT",
-	BlockARMGE:  "GE",
-	BlockARMULT: "ULT",
-	BlockARMULE: "ULE",
-	BlockARMUGT: "UGT",
-	BlockARMUGE: "UGE",
-
-	BlockARM64EQ:  "EQ",
-	BlockARM64NE:  "NE",
-	BlockARM64LT:  "LT",
-	BlockARM64LE:  "LE",
-	BlockARM64GT:  "GT",
-	BlockARM64GE:  "GE",
-	BlockARM64ULT: "ULT",
-	BlockARM64ULE: "ULE",
-	BlockARM64UGT: "UGT",
-	BlockARM64UGE: "UGE",
-	BlockARM64Z:   "Z",
-	BlockARM64NZ:  "NZ",
-	BlockARM64ZW:  "ZW",
-	BlockARM64NZW: "NZW",
-
-	BlockMIPSEQ:  "EQ",
-	BlockMIPSNE:  "NE",
-	BlockMIPSLTZ: "LTZ",
-	BlockMIPSLEZ: "LEZ",
-	BlockMIPSGTZ: "GTZ",
-	BlockMIPSGEZ: "GEZ",
-	BlockMIPSFPT: "FPT",
-	BlockMIPSFPF: "FPF",
-
-	BlockMIPS64EQ:  "EQ",
-	BlockMIPS64NE:  "NE",
-	BlockMIPS64LTZ: "LTZ",
-	BlockMIPS64LEZ: "LEZ",
-	BlockMIPS64GTZ: "GTZ",
-	BlockMIPS64GEZ: "GEZ",
-	BlockMIPS64FPT: "FPT",
-	BlockMIPS64FPF: "FPF",
-
-	BlockPPC64EQ:  "EQ",
-	BlockPPC64NE:  "NE",
-	BlockPPC64LT:  "LT",
-	BlockPPC64LE:  "LE",
-	BlockPPC64GT:  "GT",
-	BlockPPC64GE:  "GE",
-	BlockPPC64FLT: "FLT",
-	BlockPPC64FLE: "FLE",
-	BlockPPC64FGT: "FGT",
-	BlockPPC64FGE: "FGE",
-
-	BlockS390XEQ:  "EQ",
-	BlockS390XNE:  "NE",
-	BlockS390XLT:  "LT",
-	BlockS390XLE:  "LE",
-	BlockS390XGT:  "GT",
-	BlockS390XGE:  "GE",
-	BlockS390XGTF: "GTF",
-	BlockS390XGEF: "GEF",
-
-	BlockPlain:  "Plain",
-	BlockIf:     "If",
-	BlockDefer:  "Defer",
-	BlockRet:    "Ret",
-	BlockRetJmp: "RetJmp",
-	BlockExit:   "Exit",
-	BlockFirst:  "First",
-}
-
-func (k BlockKind) String() string { return blockString[k] }
-
-const (
-	OpInvalid Op = iota
-
-	Op386ADDSS
-	Op386ADDSD
-	Op386SUBSS
-	Op386SUBSD
-	Op386MULSS
-	Op386MULSD
-	Op386DIVSS
-	Op386DIVSD
-	Op386MOVSSload
-	Op386MOVSDload
-	Op386MOVSSconst
-	Op386MOVSDconst
-	Op386MOVSSloadidx1
-	Op386MOVSSloadidx4
-	Op386MOVSDloadidx1
-	Op386MOVSDloadidx8
-	Op386MOVSSstore
-	Op386MOVSDstore
-	Op386MOVSSstoreidx1
-	Op386MOVSSstoreidx4
-	Op386MOVSDstoreidx1
-	Op386MOVSDstoreidx8
-	Op386ADDL
-	Op386ADDLconst
-	Op386ADDLcarry
-	Op386ADDLconstcarry
-	Op386ADCL
-	Op386ADCLconst
-	Op386SUBL
-	Op386SUBLconst
-	Op386SUBLcarry
-	Op386SUBLconstcarry
-	Op386SBBL
-	Op386SBBLconst
-	Op386MULL
-	Op386MULLconst
-	Op386HMULL
-	Op386HMULLU
-	Op386HMULW
-	Op386HMULB
-	Op386HMULWU
-	Op386HMULBU
-	Op386MULLQU
-	Op386DIVL
-	Op386DIVW
-	Op386DIVLU
-	Op386DIVWU
-	Op386MODL
-	Op386MODW
-	Op386MODLU
-	Op386MODWU
-	Op386ANDL
-	Op386ANDLconst
-	Op386ORL
-	Op386ORLconst
-	Op386XORL
-	Op386XORLconst
-	Op386CMPL
-	Op386CMPW
-	Op386CMPB
-	Op386CMPLconst
-	Op386CMPWconst
-	Op386CMPBconst
-	Op386UCOMISS
-	Op386UCOMISD
-	Op386TESTL
-	Op386TESTW
-	Op386TESTB
-	Op386TESTLconst
-	Op386TESTWconst
-	Op386TESTBconst
-	Op386SHLL
-	Op386SHLLconst
-	Op386SHRL
-	Op386SHRW
-	Op386SHRB
-	Op386SHRLconst
-	Op386SHRWconst
-	Op386SHRBconst
-	Op386SARL
-	Op386SARW
-	Op386SARB
-	Op386SARLconst
-	Op386SARWconst
-	Op386SARBconst
-	Op386ROLLconst
-	Op386ROLWconst
-	Op386ROLBconst
-	Op386NEGL
-	Op386NOTL
-	Op386BSFL
-	Op386BSFW
-	Op386BSRL
-	Op386BSRW
-	Op386BSWAPL
-	Op386SQRTSD
-	Op386SBBLcarrymask
-	Op386SETEQ
-	Op386SETNE
-	Op386SETL
-	Op386SETLE
-	Op386SETG
-	Op386SETGE
-	Op386SETB
-	Op386SETBE
-	Op386SETA
-	Op386SETAE
-	Op386SETEQF
-	Op386SETNEF
-	Op386SETORD
-	Op386SETNAN
-	Op386SETGF
-	Op386SETGEF
-	Op386MOVBLSX
-	Op386MOVBLZX
-	Op386MOVWLSX
-	Op386MOVWLZX
-	Op386MOVLconst
-	Op386CVTTSD2SL
-	Op386CVTTSS2SL
-	Op386CVTSL2SS
-	Op386CVTSL2SD
-	Op386CVTSD2SS
-	Op386CVTSS2SD
-	Op386PXOR
-	Op386LEAL
-	Op386LEAL1
-	Op386LEAL2
-	Op386LEAL4
-	Op386LEAL8
-	Op386MOVBload
-	Op386MOVBLSXload
-	Op386MOVWload
-	Op386MOVWLSXload
-	Op386MOVLload
-	Op386MOVBstore
-	Op386MOVWstore
-	Op386MOVLstore
-	Op386MOVBloadidx1
-	Op386MOVWloadidx1
-	Op386MOVWloadidx2
-	Op386MOVLloadidx1
-	Op386MOVLloadidx4
-	Op386MOVBstoreidx1
-	Op386MOVWstoreidx1
-	Op386MOVWstoreidx2
-	Op386MOVLstoreidx1
-	Op386MOVLstoreidx4
-	Op386MOVBstoreconst
-	Op386MOVWstoreconst
-	Op386MOVLstoreconst
-	Op386MOVBstoreconstidx1
-	Op386MOVWstoreconstidx1
-	Op386MOVWstoreconstidx2
-	Op386MOVLstoreconstidx1
-	Op386MOVLstoreconstidx4
-	Op386DUFFZERO
-	Op386REPSTOSL
-	Op386CALLstatic
-	Op386CALLclosure
-	Op386CALLdefer
-	Op386CALLgo
-	Op386CALLinter
-	Op386DUFFCOPY
-	Op386REPMOVSL
-	Op386InvertFlags
-	Op386LoweredGetG
-	Op386LoweredGetClosurePtr
-	Op386LoweredNilCheck
-	Op386MOVLconvert
-	Op386FlagEQ
-	Op386FlagLT_ULT
-	Op386FlagLT_UGT
-	Op386FlagGT_UGT
-	Op386FlagGT_ULT
-	Op386FCHS
-	Op386MOVSSconst1
-	Op386MOVSDconst1
-	Op386MOVSSconst2
-	Op386MOVSDconst2
-
-	OpAMD64ADDSS
-	OpAMD64ADDSD
-	OpAMD64SUBSS
-	OpAMD64SUBSD
-	OpAMD64MULSS
-	OpAMD64MULSD
-	OpAMD64DIVSS
-	OpAMD64DIVSD
-	OpAMD64MOVSSload
-	OpAMD64MOVSDload
-	OpAMD64MOVSSconst
-	OpAMD64MOVSDconst
-	OpAMD64MOVSSloadidx1
-	OpAMD64MOVSSloadidx4
-	OpAMD64MOVSDloadidx1
-	OpAMD64MOVSDloadidx8
-	OpAMD64MOVSSstore
-	OpAMD64MOVSDstore
-	OpAMD64MOVSSstoreidx1
-	OpAMD64MOVSSstoreidx4
-	OpAMD64MOVSDstoreidx1
-	OpAMD64MOVSDstoreidx8
-	OpAMD64ADDQ
-	OpAMD64ADDL
-	OpAMD64ADDQconst
-	OpAMD64ADDLconst
-	OpAMD64SUBQ
-	OpAMD64SUBL
-	OpAMD64SUBQconst
-	OpAMD64SUBLconst
-	OpAMD64MULQ
-	OpAMD64MULL
-	OpAMD64MULQconst
-	OpAMD64MULLconst
-	OpAMD64HMULQ
-	OpAMD64HMULL
-	OpAMD64HMULW
-	OpAMD64HMULB
-	OpAMD64HMULQU
-	OpAMD64HMULLU
-	OpAMD64HMULWU
-	OpAMD64HMULBU
-	OpAMD64AVGQU
-	OpAMD64DIVQ
-	OpAMD64DIVL
-	OpAMD64DIVW
-	OpAMD64DIVQU
-	OpAMD64DIVLU
-	OpAMD64DIVWU
-	OpAMD64MULQU2
-	OpAMD64DIVQU2
-	OpAMD64ANDQ
-	OpAMD64ANDL
-	OpAMD64ANDQconst
-	OpAMD64ANDLconst
-	OpAMD64ORQ
-	OpAMD64ORL
-	OpAMD64ORQconst
-	OpAMD64ORLconst
-	OpAMD64XORQ
-	OpAMD64XORL
-	OpAMD64XORQconst
-	OpAMD64XORLconst
-	OpAMD64CMPQ
-	OpAMD64CMPL
-	OpAMD64CMPW
-	OpAMD64CMPB
-	OpAMD64CMPQconst
-	OpAMD64CMPLconst
-	OpAMD64CMPWconst
-	OpAMD64CMPBconst
-	OpAMD64UCOMISS
-	OpAMD64UCOMISD
-	OpAMD64TESTQ
-	OpAMD64TESTL
-	OpAMD64TESTW
-	OpAMD64TESTB
-	OpAMD64TESTQconst
-	OpAMD64TESTLconst
-	OpAMD64TESTWconst
-	OpAMD64TESTBconst
-	OpAMD64SHLQ
-	OpAMD64SHLL
-	OpAMD64SHLQconst
-	OpAMD64SHLLconst
-	OpAMD64SHRQ
-	OpAMD64SHRL
-	OpAMD64SHRW
-	OpAMD64SHRB
-	OpAMD64SHRQconst
-	OpAMD64SHRLconst
-	OpAMD64SHRWconst
-	OpAMD64SHRBconst
-	OpAMD64SARQ
-	OpAMD64SARL
-	OpAMD64SARW
-	OpAMD64SARB
-	OpAMD64SARQconst
-	OpAMD64SARLconst
-	OpAMD64SARWconst
-	OpAMD64SARBconst
-	OpAMD64ROLQconst
-	OpAMD64ROLLconst
-	OpAMD64ROLWconst
-	OpAMD64ROLBconst
-	OpAMD64NEGQ
-	OpAMD64NEGL
-	OpAMD64NOTQ
-	OpAMD64NOTL
-	OpAMD64BSFQ
-	OpAMD64BSFL
-	OpAMD64CMOVQEQ
-	OpAMD64CMOVLEQ
-	OpAMD64BSWAPQ
-	OpAMD64BSWAPL
-	OpAMD64SQRTSD
-	OpAMD64SBBQcarrymask
-	OpAMD64SBBLcarrymask
-	OpAMD64SETEQ
-	OpAMD64SETNE
-	OpAMD64SETL
-	OpAMD64SETLE
-	OpAMD64SETG
-	OpAMD64SETGE
-	OpAMD64SETB
-	OpAMD64SETBE
-	OpAMD64SETA
-	OpAMD64SETAE
-	OpAMD64SETEQF
-	OpAMD64SETNEF
-	OpAMD64SETORD
-	OpAMD64SETNAN
-	OpAMD64SETGF
-	OpAMD64SETGEF
-	OpAMD64MOVBQSX
-	OpAMD64MOVBQZX
-	OpAMD64MOVWQSX
-	OpAMD64MOVWQZX
-	OpAMD64MOVLQSX
-	OpAMD64MOVLQZX
-	OpAMD64MOVLconst
-	OpAMD64MOVQconst
-	OpAMD64CVTTSD2SL
-	OpAMD64CVTTSD2SQ
-	OpAMD64CVTTSS2SL
-	OpAMD64CVTTSS2SQ
-	OpAMD64CVTSL2SS
-	OpAMD64CVTSL2SD
-	OpAMD64CVTSQ2SS
-	OpAMD64CVTSQ2SD
-	OpAMD64CVTSD2SS
-	OpAMD64CVTSS2SD
-	OpAMD64PXOR
-	OpAMD64LEAQ
-	OpAMD64LEAQ1
-	OpAMD64LEAQ2
-	OpAMD64LEAQ4
-	OpAMD64LEAQ8
-	OpAMD64LEAL
-	OpAMD64MOVBload
-	OpAMD64MOVBQSXload
-	OpAMD64MOVWload
-	OpAMD64MOVWQSXload
-	OpAMD64MOVLload
-	OpAMD64MOVLQSXload
-	OpAMD64MOVQload
-	OpAMD64MOVBstore
-	OpAMD64MOVWstore
-	OpAMD64MOVLstore
-	OpAMD64MOVQstore
-	OpAMD64MOVOload
-	OpAMD64MOVOstore
-	OpAMD64MOVBloadidx1
-	OpAMD64MOVWloadidx1
-	OpAMD64MOVWloadidx2
-	OpAMD64MOVLloadidx1
-	OpAMD64MOVLloadidx4
-	OpAMD64MOVQloadidx1
-	OpAMD64MOVQloadidx8
-	OpAMD64MOVBstoreidx1
-	OpAMD64MOVWstoreidx1
-	OpAMD64MOVWstoreidx2
-	OpAMD64MOVLstoreidx1
-	OpAMD64MOVLstoreidx4
-	OpAMD64MOVQstoreidx1
-	OpAMD64MOVQstoreidx8
-	OpAMD64MOVBstoreconst
-	OpAMD64MOVWstoreconst
-	OpAMD64MOVLstoreconst
-	OpAMD64MOVQstoreconst
-	OpAMD64MOVBstoreconstidx1
-	OpAMD64MOVWstoreconstidx1
-	OpAMD64MOVWstoreconstidx2
-	OpAMD64MOVLstoreconstidx1
-	OpAMD64MOVLstoreconstidx4
-	OpAMD64MOVQstoreconstidx1
-	OpAMD64MOVQstoreconstidx8
-	OpAMD64DUFFZERO
-	OpAMD64MOVOconst
-	OpAMD64REPSTOSQ
-	OpAMD64CALLstatic
-	OpAMD64CALLclosure
-	OpAMD64CALLdefer
-	OpAMD64CALLgo
-	OpAMD64CALLinter
-	OpAMD64DUFFCOPY
-	OpAMD64REPMOVSQ
-	OpAMD64InvertFlags
-	OpAMD64LoweredGetG
-	OpAMD64LoweredGetClosurePtr
-	OpAMD64LoweredNilCheck
-	OpAMD64MOVQconvert
-	OpAMD64MOVLconvert
-	OpAMD64FlagEQ
-	OpAMD64FlagLT_ULT
-	OpAMD64FlagLT_UGT
-	OpAMD64FlagGT_UGT
-	OpAMD64FlagGT_ULT
-	OpAMD64MOVLatomicload
-	OpAMD64MOVQatomicload
-	OpAMD64XCHGL
-	OpAMD64XCHGQ
-	OpAMD64XADDLlock
-	OpAMD64XADDQlock
-	OpAMD64AddTupleFirst32
-	OpAMD64AddTupleFirst64
-	OpAMD64CMPXCHGLlock
-	OpAMD64CMPXCHGQlock
-	OpAMD64ANDBlock
-	OpAMD64ORBlock
-
-	OpARMADD
-	OpARMADDconst
-	OpARMSUB
-	OpARMSUBconst
-	OpARMRSB
-	OpARMRSBconst
-	OpARMMUL
-	OpARMHMUL
-	OpARMHMULU
-	OpARMUDIVrtcall
-	OpARMADDS
-	OpARMADDSconst
-	OpARMADC
-	OpARMADCconst
-	OpARMSUBS
-	OpARMSUBSconst
-	OpARMRSBSconst
-	OpARMSBC
-	OpARMSBCconst
-	OpARMRSCconst
-	OpARMMULLU
-	OpARMMULA
-	OpARMADDF
-	OpARMADDD
-	OpARMSUBF
-	OpARMSUBD
-	OpARMMULF
-	OpARMMULD
-	OpARMDIVF
-	OpARMDIVD
-	OpARMAND
-	OpARMANDconst
-	OpARMOR
-	OpARMORconst
-	OpARMXOR
-	OpARMXORconst
-	OpARMBIC
-	OpARMBICconst
-	OpARMMVN
-	OpARMNEGF
-	OpARMNEGD
-	OpARMSQRTD
-	OpARMCLZ
-	OpARMSLL
-	OpARMSLLconst
-	OpARMSRL
-	OpARMSRLconst
-	OpARMSRA
-	OpARMSRAconst
-	OpARMSRRconst
-	OpARMADDshiftLL
-	OpARMADDshiftRL
-	OpARMADDshiftRA
-	OpARMSUBshiftLL
-	OpARMSUBshiftRL
-	OpARMSUBshiftRA
-	OpARMRSBshiftLL
-	OpARMRSBshiftRL
-	OpARMRSBshiftRA
-	OpARMANDshiftLL
-	OpARMANDshiftRL
-	OpARMANDshiftRA
-	OpARMORshiftLL
-	OpARMORshiftRL
-	OpARMORshiftRA
-	OpARMXORshiftLL
-	OpARMXORshiftRL
-	OpARMXORshiftRA
-	OpARMXORshiftRR
-	OpARMBICshiftLL
-	OpARMBICshiftRL
-	OpARMBICshiftRA
-	OpARMMVNshiftLL
-	OpARMMVNshiftRL
-	OpARMMVNshiftRA
-	OpARMADCshiftLL
-	OpARMADCshiftRL
-	OpARMADCshiftRA
-	OpARMSBCshiftLL
-	OpARMSBCshiftRL
-	OpARMSBCshiftRA
-	OpARMRSCshiftLL
-	OpARMRSCshiftRL
-	OpARMRSCshiftRA
-	OpARMADDSshiftLL
-	OpARMADDSshiftRL
-	OpARMADDSshiftRA
-	OpARMSUBSshiftLL
-	OpARMSUBSshiftRL
-	OpARMSUBSshiftRA
-	OpARMRSBSshiftLL
-	OpARMRSBSshiftRL
-	OpARMRSBSshiftRA
-	OpARMADDshiftLLreg
-	OpARMADDshiftRLreg
-	OpARMADDshiftRAreg
-	OpARMSUBshiftLLreg
-	OpARMSUBshiftRLreg
-	OpARMSUBshiftRAreg
-	OpARMRSBshiftLLreg
-	OpARMRSBshiftRLreg
-	OpARMRSBshiftRAreg
-	OpARMANDshiftLLreg
-	OpARMANDshiftRLreg
-	OpARMANDshiftRAreg
-	OpARMORshiftLLreg
-	OpARMORshiftRLreg
-	OpARMORshiftRAreg
-	OpARMXORshiftLLreg
-	OpARMXORshiftRLreg
-	OpARMXORshiftRAreg
-	OpARMBICshiftLLreg
-	OpARMBICshiftRLreg
-	OpARMBICshiftRAreg
-	OpARMMVNshiftLLreg
-	OpARMMVNshiftRLreg
-	OpARMMVNshiftRAreg
-	OpARMADCshiftLLreg
-	OpARMADCshiftRLreg
-	OpARMADCshiftRAreg
-	OpARMSBCshiftLLreg
-	OpARMSBCshiftRLreg
-	OpARMSBCshiftRAreg
-	OpARMRSCshiftLLreg
-	OpARMRSCshiftRLreg
-	OpARMRSCshiftRAreg
-	OpARMADDSshiftLLreg
-	OpARMADDSshiftRLreg
-	OpARMADDSshiftRAreg
-	OpARMSUBSshiftLLreg
-	OpARMSUBSshiftRLreg
-	OpARMSUBSshiftRAreg
-	OpARMRSBSshiftLLreg
-	OpARMRSBSshiftRLreg
-	OpARMRSBSshiftRAreg
-	OpARMCMP
-	OpARMCMPconst
-	OpARMCMN
-	OpARMCMNconst
-	OpARMTST
-	OpARMTSTconst
-	OpARMTEQ
-	OpARMTEQconst
-	OpARMCMPF
-	OpARMCMPD
-	OpARMCMPshiftLL
-	OpARMCMPshiftRL
-	OpARMCMPshiftRA
-	OpARMCMPshiftLLreg
-	OpARMCMPshiftRLreg
-	OpARMCMPshiftRAreg
-	OpARMCMPF0
-	OpARMCMPD0
-	OpARMMOVWconst
-	OpARMMOVFconst
-	OpARMMOVDconst
-	OpARMMOVWaddr
-	OpARMMOVBload
-	OpARMMOVBUload
-	OpARMMOVHload
-	OpARMMOVHUload
-	OpARMMOVWload
-	OpARMMOVFload
-	OpARMMOVDload
-	OpARMMOVBstore
-	OpARMMOVHstore
-	OpARMMOVWstore
-	OpARMMOVFstore
-	OpARMMOVDstore
-	OpARMMOVWloadidx
-	OpARMMOVWloadshiftLL
-	OpARMMOVWloadshiftRL
-	OpARMMOVWloadshiftRA
-	OpARMMOVWstoreidx
-	OpARMMOVWstoreshiftLL
-	OpARMMOVWstoreshiftRL
-	OpARMMOVWstoreshiftRA
-	OpARMMOVBreg
-	OpARMMOVBUreg
-	OpARMMOVHreg
-	OpARMMOVHUreg
-	OpARMMOVWreg
-	OpARMMOVWnop
-	OpARMMOVWF
-	OpARMMOVWD
-	OpARMMOVWUF
-	OpARMMOVWUD
-	OpARMMOVFW
-	OpARMMOVDW
-	OpARMMOVFWU
-	OpARMMOVDWU
-	OpARMMOVFD
-	OpARMMOVDF
-	OpARMCMOVWHSconst
-	OpARMCMOVWLSconst
-	OpARMSRAcond
-	OpARMCALLstatic
-	OpARMCALLclosure
-	OpARMCALLdefer
-	OpARMCALLgo
-	OpARMCALLinter
-	OpARMLoweredNilCheck
-	OpARMEqual
-	OpARMNotEqual
-	OpARMLessThan
-	OpARMLessEqual
-	OpARMGreaterThan
-	OpARMGreaterEqual
-	OpARMLessThanU
-	OpARMLessEqualU
-	OpARMGreaterThanU
-	OpARMGreaterEqualU
-	OpARMDUFFZERO
-	OpARMDUFFCOPY
-	OpARMLoweredZero
-	OpARMLoweredMove
-	OpARMLoweredGetClosurePtr
-	OpARMMOVWconvert
-	OpARMFlagEQ
-	OpARMFlagLT_ULT
-	OpARMFlagLT_UGT
-	OpARMFlagGT_UGT
-	OpARMFlagGT_ULT
-	OpARMInvertFlags
-
-	OpARM64ADD
-	OpARM64ADDconst
-	OpARM64SUB
-	OpARM64SUBconst
-	OpARM64MUL
-	OpARM64MULW
-	OpARM64MULH
-	OpARM64UMULH
-	OpARM64MULL
-	OpARM64UMULL
-	OpARM64DIV
-	OpARM64UDIV
-	OpARM64DIVW
-	OpARM64UDIVW
-	OpARM64MOD
-	OpARM64UMOD
-	OpARM64MODW
-	OpARM64UMODW
-	OpARM64FADDS
-	OpARM64FADDD
-	OpARM64FSUBS
-	OpARM64FSUBD
-	OpARM64FMULS
-	OpARM64FMULD
-	OpARM64FDIVS
-	OpARM64FDIVD
-	OpARM64AND
-	OpARM64ANDconst
-	OpARM64OR
-	OpARM64ORconst
-	OpARM64XOR
-	OpARM64XORconst
-	OpARM64BIC
-	OpARM64BICconst
-	OpARM64MVN
-	OpARM64NEG
-	OpARM64FNEGS
-	OpARM64FNEGD
-	OpARM64FSQRTD
-	OpARM64REV
-	OpARM64REVW
-	OpARM64REV16W
-	OpARM64RBIT
-	OpARM64RBITW
-	OpARM64CLZ
-	OpARM64CLZW
-	OpARM64SLL
-	OpARM64SLLconst
-	OpARM64SRL
-	OpARM64SRLconst
-	OpARM64SRA
-	OpARM64SRAconst
-	OpARM64RORconst
-	OpARM64RORWconst
-	OpARM64CMP
-	OpARM64CMPconst
-	OpARM64CMPW
-	OpARM64CMPWconst
-	OpARM64CMN
-	OpARM64CMNconst
-	OpARM64CMNW
-	OpARM64CMNWconst
-	OpARM64FCMPS
-	OpARM64FCMPD
-	OpARM64ADDshiftLL
-	OpARM64ADDshiftRL
-	OpARM64ADDshiftRA
-	OpARM64SUBshiftLL
-	OpARM64SUBshiftRL
-	OpARM64SUBshiftRA
-	OpARM64ANDshiftLL
-	OpARM64ANDshiftRL
-	OpARM64ANDshiftRA
-	OpARM64ORshiftLL
-	OpARM64ORshiftRL
-	OpARM64ORshiftRA
-	OpARM64XORshiftLL
-	OpARM64XORshiftRL
-	OpARM64XORshiftRA
-	OpARM64BICshiftLL
-	OpARM64BICshiftRL
-	OpARM64BICshiftRA
-	OpARM64CMPshiftLL
-	OpARM64CMPshiftRL
-	OpARM64CMPshiftRA
-	OpARM64MOVDconst
-	OpARM64FMOVSconst
-	OpARM64FMOVDconst
-	OpARM64MOVDaddr
-	OpARM64MOVBload
-	OpARM64MOVBUload
-	OpARM64MOVHload
-	OpARM64MOVHUload
-	OpARM64MOVWload
-	OpARM64MOVWUload
-	OpARM64MOVDload
-	OpARM64FMOVSload
-	OpARM64FMOVDload
-	OpARM64MOVBstore
-	OpARM64MOVHstore
-	OpARM64MOVWstore
-	OpARM64MOVDstore
-	OpARM64FMOVSstore
-	OpARM64FMOVDstore
-	OpARM64MOVBstorezero
-	OpARM64MOVHstorezero
-	OpARM64MOVWstorezero
-	OpARM64MOVDstorezero
-	OpARM64MOVBreg
-	OpARM64MOVBUreg
-	OpARM64MOVHreg
-	OpARM64MOVHUreg
-	OpARM64MOVWreg
-	OpARM64MOVWUreg
-	OpARM64MOVDreg
-	OpARM64MOVDnop
-	OpARM64SCVTFWS
-	OpARM64SCVTFWD
-	OpARM64UCVTFWS
-	OpARM64UCVTFWD
-	OpARM64SCVTFS
-	OpARM64SCVTFD
-	OpARM64UCVTFS
-	OpARM64UCVTFD
-	OpARM64FCVTZSSW
-	OpARM64FCVTZSDW
-	OpARM64FCVTZUSW
-	OpARM64FCVTZUDW
-	OpARM64FCVTZSS
-	OpARM64FCVTZSD
-	OpARM64FCVTZUS
-	OpARM64FCVTZUD
-	OpARM64FCVTSD
-	OpARM64FCVTDS
-	OpARM64CSELULT
-	OpARM64CSELULT0
-	OpARM64CALLstatic
-	OpARM64CALLclosure
-	OpARM64CALLdefer
-	OpARM64CALLgo
-	OpARM64CALLinter
-	OpARM64LoweredNilCheck
-	OpARM64Equal
-	OpARM64NotEqual
-	OpARM64LessThan
-	OpARM64LessEqual
-	OpARM64GreaterThan
-	OpARM64GreaterEqual
-	OpARM64LessThanU
-	OpARM64LessEqualU
-	OpARM64GreaterThanU
-	OpARM64GreaterEqualU
-	OpARM64DUFFZERO
-	OpARM64LoweredZero
-	OpARM64DUFFCOPY
-	OpARM64LoweredMove
-	OpARM64LoweredGetClosurePtr
-	OpARM64MOVDconvert
-	OpARM64FlagEQ
-	OpARM64FlagLT_ULT
-	OpARM64FlagLT_UGT
-	OpARM64FlagGT_UGT
-	OpARM64FlagGT_ULT
-	OpARM64InvertFlags
-	OpARM64LDAR
-	OpARM64LDARW
-	OpARM64STLR
-	OpARM64STLRW
-	OpARM64LoweredAtomicExchange64
-	OpARM64LoweredAtomicExchange32
-	OpARM64LoweredAtomicAdd64
-	OpARM64LoweredAtomicAdd32
-	OpARM64LoweredAtomicCas64
-	OpARM64LoweredAtomicCas32
-	OpARM64LoweredAtomicAnd8
-	OpARM64LoweredAtomicOr8
-
-	OpMIPSADD
-	OpMIPSADDconst
-	OpMIPSSUB
-	OpMIPSSUBconst
-	OpMIPSMUL
-	OpMIPSMULT
-	OpMIPSMULTU
-	OpMIPSDIV
-	OpMIPSDIVU
-	OpMIPSADDF
-	OpMIPSADDD
-	OpMIPSSUBF
-	OpMIPSSUBD
-	OpMIPSMULF
-	OpMIPSMULD
-	OpMIPSDIVF
-	OpMIPSDIVD
-	OpMIPSAND
-	OpMIPSANDconst
-	OpMIPSOR
-	OpMIPSORconst
-	OpMIPSXOR
-	OpMIPSXORconst
-	OpMIPSNOR
-	OpMIPSNORconst
-	OpMIPSNEG
-	OpMIPSNEGF
-	OpMIPSNEGD
-	OpMIPSSQRTD
-	OpMIPSSLL
-	OpMIPSSLLconst
-	OpMIPSSRL
-	OpMIPSSRLconst
-	OpMIPSSRA
-	OpMIPSSRAconst
-	OpMIPSCLZ
-	OpMIPSSGT
-	OpMIPSSGTconst
-	OpMIPSSGTzero
-	OpMIPSSGTU
-	OpMIPSSGTUconst
-	OpMIPSSGTUzero
-	OpMIPSCMPEQF
-	OpMIPSCMPEQD
-	OpMIPSCMPGEF
-	OpMIPSCMPGED
-	OpMIPSCMPGTF
-	OpMIPSCMPGTD
-	OpMIPSMOVWconst
-	OpMIPSMOVFconst
-	OpMIPSMOVDconst
-	OpMIPSMOVWaddr
-	OpMIPSMOVBload
-	OpMIPSMOVBUload
-	OpMIPSMOVHload
-	OpMIPSMOVHUload
-	OpMIPSMOVWload
-	OpMIPSMOVFload
-	OpMIPSMOVDload
-	OpMIPSMOVBstore
-	OpMIPSMOVHstore
-	OpMIPSMOVWstore
-	OpMIPSMOVFstore
-	OpMIPSMOVDstore
-	OpMIPSMOVBstorezero
-	OpMIPSMOVHstorezero
-	OpMIPSMOVWstorezero
-	OpMIPSMOVBreg
-	OpMIPSMOVBUreg
-	OpMIPSMOVHreg
-	OpMIPSMOVHUreg
-	OpMIPSMOVWreg
-	OpMIPSMOVWnop
-	OpMIPSCMOVZ
-	OpMIPSCMOVZzero
-	OpMIPSMOVWF
-	OpMIPSMOVWD
-	OpMIPSTRUNCFW
-	OpMIPSTRUNCDW
-	OpMIPSMOVFD
-	OpMIPSMOVDF
-	OpMIPSCALLstatic
-	OpMIPSCALLclosure
-	OpMIPSCALLdefer
-	OpMIPSCALLgo
-	OpMIPSCALLinter
-	OpMIPSLoweredAtomicLoad
-	OpMIPSLoweredAtomicStore
-	OpMIPSLoweredAtomicStorezero
-	OpMIPSLoweredAtomicExchange
-	OpMIPSLoweredAtomicAdd
-	OpMIPSLoweredAtomicAddconst
-	OpMIPSLoweredAtomicCas
-	OpMIPSLoweredAtomicAnd
-	OpMIPSLoweredAtomicOr
-	OpMIPSLoweredZero
-	OpMIPSLoweredMove
-	OpMIPSLoweredNilCheck
-	OpMIPSFPFlagTrue
-	OpMIPSFPFlagFalse
-	OpMIPSLoweredGetClosurePtr
-	OpMIPSMOVWconvert
-
-	OpMIPS64ADDV
-	OpMIPS64ADDVconst
-	OpMIPS64SUBV
-	OpMIPS64SUBVconst
-	OpMIPS64MULV
-	OpMIPS64MULVU
-	OpMIPS64DIVV
-	OpMIPS64DIVVU
-	OpMIPS64ADDF
-	OpMIPS64ADDD
-	OpMIPS64SUBF
-	OpMIPS64SUBD
-	OpMIPS64MULF
-	OpMIPS64MULD
-	OpMIPS64DIVF
-	OpMIPS64DIVD
-	OpMIPS64AND
-	OpMIPS64ANDconst
-	OpMIPS64OR
-	OpMIPS64ORconst
-	OpMIPS64XOR
-	OpMIPS64XORconst
-	OpMIPS64NOR
-	OpMIPS64NORconst
-	OpMIPS64NEGV
-	OpMIPS64NEGF
-	OpMIPS64NEGD
-	OpMIPS64SLLV
-	OpMIPS64SLLVconst
-	OpMIPS64SRLV
-	OpMIPS64SRLVconst
-	OpMIPS64SRAV
-	OpMIPS64SRAVconst
-	OpMIPS64SGT
-	OpMIPS64SGTconst
-	OpMIPS64SGTU
-	OpMIPS64SGTUconst
-	OpMIPS64CMPEQF
-	OpMIPS64CMPEQD
-	OpMIPS64CMPGEF
-	OpMIPS64CMPGED
-	OpMIPS64CMPGTF
-	OpMIPS64CMPGTD
-	OpMIPS64MOVVconst
-	OpMIPS64MOVFconst
-	OpMIPS64MOVDconst
-	OpMIPS64MOVVaddr
-	OpMIPS64MOVBload
-	OpMIPS64MOVBUload
-	OpMIPS64MOVHload
-	OpMIPS64MOVHUload
-	OpMIPS64MOVWload
-	OpMIPS64MOVWUload
-	OpMIPS64MOVVload
-	OpMIPS64MOVFload
-	OpMIPS64MOVDload
-	OpMIPS64MOVBstore
-	OpMIPS64MOVHstore
-	OpMIPS64MOVWstore
-	OpMIPS64MOVVstore
-	OpMIPS64MOVFstore
-	OpMIPS64MOVDstore
-	OpMIPS64MOVBstorezero
-	OpMIPS64MOVHstorezero
-	OpMIPS64MOVWstorezero
-	OpMIPS64MOVVstorezero
-	OpMIPS64MOVBreg
-	OpMIPS64MOVBUreg
-	OpMIPS64MOVHreg
-	OpMIPS64MOVHUreg
-	OpMIPS64MOVWreg
-	OpMIPS64MOVWUreg
-	OpMIPS64MOVVreg
-	OpMIPS64MOVVnop
-	OpMIPS64MOVWF
-	OpMIPS64MOVWD
-	OpMIPS64MOVVF
-	OpMIPS64MOVVD
-	OpMIPS64TRUNCFW
-	OpMIPS64TRUNCDW
-	OpMIPS64TRUNCFV
-	OpMIPS64TRUNCDV
-	OpMIPS64MOVFD
-	OpMIPS64MOVDF
-	OpMIPS64CALLstatic
-	OpMIPS64CALLclosure
-	OpMIPS64CALLdefer
-	OpMIPS64CALLgo
-	OpMIPS64CALLinter
-	OpMIPS64DUFFZERO
-	OpMIPS64LoweredZero
-	OpMIPS64LoweredMove
-	OpMIPS64LoweredNilCheck
-	OpMIPS64FPFlagTrue
-	OpMIPS64FPFlagFalse
-	OpMIPS64LoweredGetClosurePtr
-	OpMIPS64MOVVconvert
-
-	OpPPC64ADD
-	OpPPC64ADDconst
-	OpPPC64FADD
-	OpPPC64FADDS
-	OpPPC64SUB
-	OpPPC64FSUB
-	OpPPC64FSUBS
-	OpPPC64MULLD
-	OpPPC64MULLW
-	OpPPC64MULHD
-	OpPPC64MULHW
-	OpPPC64MULHDU
-	OpPPC64MULHWU
-	OpPPC64FMUL
-	OpPPC64FMULS
-	OpPPC64SRAD
-	OpPPC64SRAW
-	OpPPC64SRD
-	OpPPC64SRW
-	OpPPC64SLD
-	OpPPC64SLW
-	OpPPC64ADDconstForCarry
-	OpPPC64MaskIfNotCarry
-	OpPPC64SRADconst
-	OpPPC64SRAWconst
-	OpPPC64SRDconst
-	OpPPC64SRWconst
-	OpPPC64SLDconst
-	OpPPC64SLWconst
-	OpPPC64FDIV
-	OpPPC64FDIVS
-	OpPPC64DIVD
-	OpPPC64DIVW
-	OpPPC64DIVDU
-	OpPPC64DIVWU
-	OpPPC64FCTIDZ
-	OpPPC64FCTIWZ
-	OpPPC64FCFID
-	OpPPC64FRSP
-	OpPPC64Xf2i64
-	OpPPC64Xi2f64
-	OpPPC64AND
-	OpPPC64ANDN
-	OpPPC64OR
-	OpPPC64ORN
-	OpPPC64XOR
-	OpPPC64EQV
-	OpPPC64NEG
-	OpPPC64FNEG
-	OpPPC64FSQRT
-	OpPPC64FSQRTS
-	OpPPC64ORconst
-	OpPPC64XORconst
-	OpPPC64ANDconst
-	OpPPC64ANDCCconst
-	OpPPC64MOVBreg
-	OpPPC64MOVBZreg
-	OpPPC64MOVHreg
-	OpPPC64MOVHZreg
-	OpPPC64MOVWreg
-	OpPPC64MOVWZreg
-	OpPPC64MOVBZload
-	OpPPC64MOVHload
-	OpPPC64MOVHZload
-	OpPPC64MOVWload
-	OpPPC64MOVWZload
-	OpPPC64MOVDload
-	OpPPC64FMOVDload
-	OpPPC64FMOVSload
-	OpPPC64MOVBstore
-	OpPPC64MOVHstore
-	OpPPC64MOVWstore
-	OpPPC64MOVDstore
-	OpPPC64FMOVDstore
-	OpPPC64FMOVSstore
-	OpPPC64MOVBstorezero
-	OpPPC64MOVHstorezero
-	OpPPC64MOVWstorezero
-	OpPPC64MOVDstorezero
-	OpPPC64MOVDaddr
-	OpPPC64MOVDconst
-	OpPPC64FMOVDconst
-	OpPPC64FMOVSconst
-	OpPPC64FCMPU
-	OpPPC64CMP
-	OpPPC64CMPU
-	OpPPC64CMPW
-	OpPPC64CMPWU
-	OpPPC64CMPconst
-	OpPPC64CMPUconst
-	OpPPC64CMPWconst
-	OpPPC64CMPWUconst
-	OpPPC64Equal
-	OpPPC64NotEqual
-	OpPPC64LessThan
-	OpPPC64FLessThan
-	OpPPC64LessEqual
-	OpPPC64FLessEqual
-	OpPPC64GreaterThan
-	OpPPC64FGreaterThan
-	OpPPC64GreaterEqual
-	OpPPC64FGreaterEqual
-	OpPPC64LoweredGetClosurePtr
-	OpPPC64LoweredNilCheck
-	OpPPC64MOVDconvert
-	OpPPC64CALLstatic
-	OpPPC64CALLclosure
-	OpPPC64CALLdefer
-	OpPPC64CALLgo
-	OpPPC64CALLinter
-	OpPPC64LoweredZero
-	OpPPC64LoweredMove
-	OpPPC64InvertFlags
-	OpPPC64FlagEQ
-	OpPPC64FlagLT
-	OpPPC64FlagGT
-
-	OpS390XFADDS
-	OpS390XFADD
-	OpS390XFSUBS
-	OpS390XFSUB
-	OpS390XFMULS
-	OpS390XFMUL
-	OpS390XFDIVS
-	OpS390XFDIV
-	OpS390XFNEGS
-	OpS390XFNEG
-	OpS390XFMOVSload
-	OpS390XFMOVDload
-	OpS390XFMOVSconst
-	OpS390XFMOVDconst
-	OpS390XFMOVSloadidx
-	OpS390XFMOVDloadidx
-	OpS390XFMOVSstore
-	OpS390XFMOVDstore
-	OpS390XFMOVSstoreidx
-	OpS390XFMOVDstoreidx
-	OpS390XADD
-	OpS390XADDW
-	OpS390XADDconst
-	OpS390XADDWconst
-	OpS390XADDload
-	OpS390XADDWload
-	OpS390XSUB
-	OpS390XSUBW
-	OpS390XSUBconst
-	OpS390XSUBWconst
-	OpS390XSUBload
-	OpS390XSUBWload
-	OpS390XMULLD
-	OpS390XMULLW
-	OpS390XMULLDconst
-	OpS390XMULLWconst
-	OpS390XMULLDload
-	OpS390XMULLWload
-	OpS390XMULHD
-	OpS390XMULHDU
-	OpS390XDIVD
-	OpS390XDIVW
-	OpS390XDIVDU
-	OpS390XDIVWU
-	OpS390XMODD
-	OpS390XMODW
-	OpS390XMODDU
-	OpS390XMODWU
-	OpS390XAND
-	OpS390XANDW
-	OpS390XANDconst
-	OpS390XANDWconst
-	OpS390XANDload
-	OpS390XANDWload
-	OpS390XOR
-	OpS390XORW
-	OpS390XORconst
-	OpS390XORWconst
-	OpS390XORload
-	OpS390XORWload
-	OpS390XXOR
-	OpS390XXORW
-	OpS390XXORconst
-	OpS390XXORWconst
-	OpS390XXORload
-	OpS390XXORWload
-	OpS390XCMP
-	OpS390XCMPW
-	OpS390XCMPU
-	OpS390XCMPWU
-	OpS390XCMPconst
-	OpS390XCMPWconst
-	OpS390XCMPUconst
-	OpS390XCMPWUconst
-	OpS390XFCMPS
-	OpS390XFCMP
-	OpS390XSLD
-	OpS390XSLW
-	OpS390XSLDconst
-	OpS390XSLWconst
-	OpS390XSRD
-	OpS390XSRW
-	OpS390XSRDconst
-	OpS390XSRWconst
-	OpS390XSRAD
-	OpS390XSRAW
-	OpS390XSRADconst
-	OpS390XSRAWconst
-	OpS390XRLLGconst
-	OpS390XRLLconst
-	OpS390XNEG
-	OpS390XNEGW
-	OpS390XNOT
-	OpS390XNOTW
-	OpS390XFSQRT
-	OpS390XSUBEcarrymask
-	OpS390XSUBEWcarrymask
-	OpS390XMOVDEQ
-	OpS390XMOVDNE
-	OpS390XMOVDLT
-	OpS390XMOVDLE
-	OpS390XMOVDGT
-	OpS390XMOVDGE
-	OpS390XMOVDGTnoinv
-	OpS390XMOVDGEnoinv
-	OpS390XMOVBreg
-	OpS390XMOVBZreg
-	OpS390XMOVHreg
-	OpS390XMOVHZreg
-	OpS390XMOVWreg
-	OpS390XMOVWZreg
-	OpS390XMOVDconst
-	OpS390XCFDBRA
-	OpS390XCGDBRA
-	OpS390XCFEBRA
-	OpS390XCGEBRA
-	OpS390XCEFBRA
-	OpS390XCDFBRA
-	OpS390XCEGBRA
-	OpS390XCDGBRA
-	OpS390XLEDBR
-	OpS390XLDEBR
-	OpS390XMOVDaddr
-	OpS390XMOVDaddridx
-	OpS390XMOVBZload
-	OpS390XMOVBload
-	OpS390XMOVHZload
-	OpS390XMOVHload
-	OpS390XMOVWZload
-	OpS390XMOVWload
-	OpS390XMOVDload
-	OpS390XMOVWBR
-	OpS390XMOVDBR
-	OpS390XMOVHBRload
-	OpS390XMOVWBRload
-	OpS390XMOVDBRload
-	OpS390XMOVBstore
-	OpS390XMOVHstore
-	OpS390XMOVWstore
-	OpS390XMOVDstore
-	OpS390XMOVHBRstore
-	OpS390XMOVWBRstore
-	OpS390XMOVDBRstore
-	OpS390XMVC
-	OpS390XMOVBZloadidx
-	OpS390XMOVHZloadidx
-	OpS390XMOVWZloadidx
-	OpS390XMOVDloadidx
-	OpS390XMOVHBRloadidx
-	OpS390XMOVWBRloadidx
-	OpS390XMOVDBRloadidx
-	OpS390XMOVBstoreidx
-	OpS390XMOVHstoreidx
-	OpS390XMOVWstoreidx
-	OpS390XMOVDstoreidx
-	OpS390XMOVHBRstoreidx
-	OpS390XMOVWBRstoreidx
-	OpS390XMOVDBRstoreidx
-	OpS390XMOVBstoreconst
-	OpS390XMOVHstoreconst
-	OpS390XMOVWstoreconst
-	OpS390XMOVDstoreconst
-	OpS390XCLEAR
-	OpS390XCALLstatic
-	OpS390XCALLclosure
-	OpS390XCALLdefer
-	OpS390XCALLgo
-	OpS390XCALLinter
-	OpS390XInvertFlags
-	OpS390XLoweredGetG
-	OpS390XLoweredGetClosurePtr
-	OpS390XLoweredNilCheck
-	OpS390XMOVDconvert
-	OpS390XFlagEQ
-	OpS390XFlagLT
-	OpS390XFlagGT
-	OpS390XMOVWZatomicload
-	OpS390XMOVDatomicload
-	OpS390XMOVWatomicstore
-	OpS390XMOVDatomicstore
-	OpS390XLAA
-	OpS390XLAAG
-	OpS390XAddTupleFirst32
-	OpS390XAddTupleFirst64
-	OpS390XLoweredAtomicCas32
-	OpS390XLoweredAtomicCas64
-	OpS390XLoweredAtomicExchange32
-	OpS390XLoweredAtomicExchange64
-	OpS390XFLOGR
-	OpS390XSTMG2
-	OpS390XSTMG3
-	OpS390XSTMG4
-	OpS390XSTM2
-	OpS390XSTM3
-	OpS390XSTM4
-	OpS390XLoweredMove
-	OpS390XLoweredZero
-
-	OpAdd8
-	OpAdd16
-	OpAdd32
-	OpAdd64
-	OpAddPtr
-	OpAdd32F
-	OpAdd64F
-	OpSub8
-	OpSub16
-	OpSub32
-	OpSub64
-	OpSubPtr
-	OpSub32F
-	OpSub64F
-	OpMul8
-	OpMul16
-	OpMul32
-	OpMul64
-	OpMul32F
-	OpMul64F
-	OpDiv32F
-	OpDiv64F
-	OpHmul8
-	OpHmul8u
-	OpHmul16
-	OpHmul16u
-	OpHmul32
-	OpHmul32u
-	OpHmul64
-	OpHmul64u
-	OpMul32uhilo
-	OpMul64uhilo
-	OpAvg64u
-	OpDiv8
-	OpDiv8u
-	OpDiv16
-	OpDiv16u
-	OpDiv32
-	OpDiv32u
-	OpDiv64
-	OpDiv64u
-	OpDiv128u
-	OpMod8
-	OpMod8u
-	OpMod16
-	OpMod16u
-	OpMod32
-	OpMod32u
-	OpMod64
-	OpMod64u
-	OpAnd8
-	OpAnd16
-	OpAnd32
-	OpAnd64
-	OpOr8
-	OpOr16
-	OpOr32
-	OpOr64
-	OpXor8
-	OpXor16
-	OpXor32
-	OpXor64
-	OpLsh8x8
-	OpLsh8x16
-	OpLsh8x32
-	OpLsh8x64
-	OpLsh16x8
-	OpLsh16x16
-	OpLsh16x32
-	OpLsh16x64
-	OpLsh32x8
-	OpLsh32x16
-	OpLsh32x32
-	OpLsh32x64
-	OpLsh64x8
-	OpLsh64x16
-	OpLsh64x32
-	OpLsh64x64
-	OpRsh8x8
-	OpRsh8x16
-	OpRsh8x32
-	OpRsh8x64
-	OpRsh16x8
-	OpRsh16x16
-	OpRsh16x32
-	OpRsh16x64
-	OpRsh32x8
-	OpRsh32x16
-	OpRsh32x32
-	OpRsh32x64
-	OpRsh64x8
-	OpRsh64x16
-	OpRsh64x32
-	OpRsh64x64
-	OpRsh8Ux8
-	OpRsh8Ux16
-	OpRsh8Ux32
-	OpRsh8Ux64
-	OpRsh16Ux8
-	OpRsh16Ux16
-	OpRsh16Ux32
-	OpRsh16Ux64
-	OpRsh32Ux8
-	OpRsh32Ux16
-	OpRsh32Ux32
-	OpRsh32Ux64
-	OpRsh64Ux8
-	OpRsh64Ux16
-	OpRsh64Ux32
-	OpRsh64Ux64
-	OpLrot8
-	OpLrot16
-	OpLrot32
-	OpLrot64
-	OpEq8
-	OpEq16
-	OpEq32
-	OpEq64
-	OpEqPtr
-	OpEqInter
-	OpEqSlice
-	OpEq32F
-	OpEq64F
-	OpNeq8
-	OpNeq16
-	OpNeq32
-	OpNeq64
-	OpNeqPtr
-	OpNeqInter
-	OpNeqSlice
-	OpNeq32F
-	OpNeq64F
-	OpLess8
-	OpLess8U
-	OpLess16
-	OpLess16U
-	OpLess32
-	OpLess32U
-	OpLess64
-	OpLess64U
-	OpLess32F
-	OpLess64F
-	OpLeq8
-	OpLeq8U
-	OpLeq16
-	OpLeq16U
-	OpLeq32
-	OpLeq32U
-	OpLeq64
-	OpLeq64U
-	OpLeq32F
-	OpLeq64F
-	OpGreater8
-	OpGreater8U
-	OpGreater16
-	OpGreater16U
-	OpGreater32
-	OpGreater32U
-	OpGreater64
-	OpGreater64U
-	OpGreater32F
-	OpGreater64F
-	OpGeq8
-	OpGeq8U
-	OpGeq16
-	OpGeq16U
-	OpGeq32
-	OpGeq32U
-	OpGeq64
-	OpGeq64U
-	OpGeq32F
-	OpGeq64F
-	OpAndB
-	OpOrB
-	OpEqB
-	OpNeqB
-	OpNot
-	OpNeg8
-	OpNeg16
-	OpNeg32
-	OpNeg64
-	OpNeg32F
-	OpNeg64F
-	OpCom8
-	OpCom16
-	OpCom32
-	OpCom64
-	OpCtz32
-	OpCtz64
-	OpBswap32
-	OpBswap64
-	OpSqrt
-	OpPhi
-	OpCopy
-	OpConvert
-	OpConstBool
-	OpConstString
-	OpConstNil
-	OpConst8
-	OpConst16
-	OpConst32
-	OpConst64
-	OpConst32F
-	OpConst64F
-	OpConstInterface
-	OpConstSlice
-	OpInitMem
-	OpArg
-	OpAddr
-	OpSP
-	OpSB
-	OpFunc
-	OpLoad
-	OpStore
-	OpMove
-	OpZero
-	OpStoreWB
-	OpMoveWB
-	OpMoveWBVolatile
-	OpZeroWB
-	OpClosureCall
-	OpStaticCall
-	OpDeferCall
-	OpGoCall
-	OpInterCall
-	OpSignExt8to16
-	OpSignExt8to32
-	OpSignExt8to64
-	OpSignExt16to32
-	OpSignExt16to64
-	OpSignExt32to64
-	OpZeroExt8to16
-	OpZeroExt8to32
-	OpZeroExt8to64
-	OpZeroExt16to32
-	OpZeroExt16to64
-	OpZeroExt32to64
-	OpTrunc16to8
-	OpTrunc32to8
-	OpTrunc32to16
-	OpTrunc64to8
-	OpTrunc64to16
-	OpTrunc64to32
-	OpCvt32to32F
-	OpCvt32to64F
-	OpCvt64to32F
-	OpCvt64to64F
-	OpCvt32Fto32
-	OpCvt32Fto64
-	OpCvt64Fto32
-	OpCvt64Fto64
-	OpCvt32Fto64F
-	OpCvt64Fto32F
-	OpIsNonNil
-	OpIsInBounds
-	OpIsSliceInBounds
-	OpNilCheck
-	OpGetG
-	OpGetClosurePtr
-	OpPtrIndex
-	OpOffPtr
-	OpSliceMake
-	OpSlicePtr
-	OpSliceLen
-	OpSliceCap
-	OpComplexMake
-	OpComplexReal
-	OpComplexImag
-	OpStringMake
-	OpStringPtr
-	OpStringLen
-	OpIMake
-	OpITab
-	OpIData
-	OpStructMake0
-	OpStructMake1
-	OpStructMake2
-	OpStructMake3
-	OpStructMake4
-	OpStructSelect
-	OpArrayMake0
-	OpArrayMake1
-	OpArraySelect
-	OpStoreReg
-	OpLoadReg
-	OpFwdRef
-	OpUnknown
-	OpVarDef
-	OpVarKill
-	OpVarLive
-	OpKeepAlive
-	OpInt64Make
-	OpInt64Hi
-	OpInt64Lo
-	OpAdd32carry
-	OpAdd32withcarry
-	OpSub32carry
-	OpSub32withcarry
-	OpSignmask
-	OpZeromask
-	OpSlicemask
-	OpCvt32Uto32F
-	OpCvt32Uto64F
-	OpCvt32Fto32U
-	OpCvt64Fto32U
-	OpCvt64Uto32F
-	OpCvt64Uto64F
-	OpCvt32Fto64U
-	OpCvt64Fto64U
-	OpSelect0
-	OpSelect1
-	OpAtomicLoad32
-	OpAtomicLoad64
-	OpAtomicLoadPtr
-	OpAtomicStore32
-	OpAtomicStore64
-	OpAtomicStorePtrNoWB
-	OpAtomicExchange32
-	OpAtomicExchange64
-	OpAtomicAdd32
-	OpAtomicAdd64
-	OpAtomicCompareAndSwap32
-	OpAtomicCompareAndSwap64
-	OpAtomicAnd8
-	OpAtomicOr8
-)
-
-var opcodeTable = [...]opInfo{
-	{name: "OpInvalid"},
-
-	{
-		name:         "ADDSS",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		usesScratch:  true,
-		asm:          x86.AADDSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "ADDSD",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AADDSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "SUBSS",
-		argLen:       2,
-		resultInArg0: true,
-		usesScratch:  true,
-		asm:          x86.ASUBSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "SUBSD",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ASUBSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "MULSS",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		usesScratch:  true,
-		asm:          x86.AMULSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "MULSD",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AMULSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "DIVSS",
-		argLen:       2,
-		resultInArg0: true,
-		usesScratch:  true,
-		asm:          x86.ADIVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "DIVSD",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ADIVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:           "MOVSSload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:           "MOVSDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:              "MOVSSconst",
-		auxType:           auxFloat32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVSS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:              "MOVSDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVSD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:    "MOVSSloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:    "MOVSSloadidx4",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:    "MOVSDloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:    "MOVSDloadidx8",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:           "MOVSSstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:           "MOVSDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVSSstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVSSstoreidx4",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVSDstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVSDstoreidx8",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:         "ADDL",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          x86.AADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 239}, // AX CX DX BX BP SI DI
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ADDLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.AADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ADDLcarry",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ADDLconstcarry",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		asm:          x86.AADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ADCL",
-		argLen:       3,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AADCL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ADCLconst",
-		auxType:      auxInt32,
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AADCL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SUBL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASUBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SUBLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASUBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SUBLcarry",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ASUBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SUBLconstcarry",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		asm:          x86.ASUBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SBBL",
-		argLen:       3,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASBBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SBBLconst",
-		auxType:      auxInt32,
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASBBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "MULL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AIMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "MULLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AIMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "HMULL",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULLU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULB",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULWU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULBU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "MULLQU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 4}, // DX
-				{1, 1}, // AX
-			},
-		},
-	},
-	{
-		name:         "DIVL",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 4, // DX
-			outputs: []outputInfo{
-				{0, 1}, // AX
-			},
-		},
-	},
-	{
-		name:         "DIVW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 4, // DX
-			outputs: []outputInfo{
-				{0, 1}, // AX
-			},
-		},
-	},
-	{
-		name:         "DIVLU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 4, // DX
-			outputs: []outputInfo{
-				{0, 1}, // AX
-			},
-		},
-	},
-	{
-		name:         "DIVWU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 4, // DX
-			outputs: []outputInfo{
-				{0, 1}, // AX
-			},
-		},
-	},
-	{
-		name:         "MODL",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "MODW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "MODLU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "MODWU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},   // AX
-				{1, 251}, // AX CX BX SP BP SI DI
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "ANDL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AANDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ANDLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AANDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ORL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ORLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "XORL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AXORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-				{1, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "XORLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AXORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "CMPL",
-		argLen: 2,
-		asm:    x86.ACMPL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:   "CMPW",
-		argLen: 2,
-		asm:    x86.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:   "CMPB",
-		argLen: 2,
-		asm:    x86.ACMPB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:    "CMPLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     x86.ACMPL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:    "CMPWconst",
-		auxType: auxInt16,
-		argLen:  1,
-		asm:     x86.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:    "CMPBconst",
-		auxType: auxInt8,
-		argLen:  1,
-		asm:     x86.ACMPB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:        "UCOMISS",
-		argLen:      2,
-		usesScratch: true,
-		asm:         x86.AUCOMISS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:        "UCOMISD",
-		argLen:      2,
-		usesScratch: true,
-		asm:         x86.AUCOMISD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:   "TESTL",
-		argLen: 2,
-		asm:    x86.ATESTL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:   "TESTW",
-		argLen: 2,
-		asm:    x86.ATESTW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:   "TESTB",
-		argLen: 2,
-		asm:    x86.ATESTB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-				{1, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:    "TESTLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     x86.ATESTL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:    "TESTWconst",
-		auxType: auxInt16,
-		argLen:  1,
-		asm:     x86.ATESTW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:    "TESTBconst",
-		auxType: auxInt8,
-		argLen:  1,
-		asm:     x86.ATESTB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHLL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHLLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHRL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHRW",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHRB",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHRLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHRWconst",
-		auxType:      auxInt16,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SHRBconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SARL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SARW",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SARB",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},   // CX
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SARLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SARWconst",
-		auxType:      auxInt16,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SARBconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ROLLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ROLWconst",
-		auxType:      auxInt16,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "ROLBconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "NEGL",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ANEGL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "NOTL",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ANOTL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "BSFL",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ABSFL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "BSFW",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ABSFW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "BSRL",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ABSRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "BSRW",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ABSRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "BSWAPL",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ABSWAPL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SQRTSD",
-		argLen: 1,
-		asm:    x86.ASQRTSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:   "SBBLcarrymask",
-		argLen: 1,
-		asm:    x86.ASBBL,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETEQ",
-		argLen: 1,
-		asm:    x86.ASETEQ,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETNE",
-		argLen: 1,
-		asm:    x86.ASETNE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETL",
-		argLen: 1,
-		asm:    x86.ASETLT,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETLE",
-		argLen: 1,
-		asm:    x86.ASETLE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETG",
-		argLen: 1,
-		asm:    x86.ASETGT,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETGE",
-		argLen: 1,
-		asm:    x86.ASETGE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETB",
-		argLen: 1,
-		asm:    x86.ASETCS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETBE",
-		argLen: 1,
-		asm:    x86.ASETLS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETA",
-		argLen: 1,
-		asm:    x86.ASETHI,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETAE",
-		argLen: 1,
-		asm:    x86.ASETCC,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SETEQF",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ASETEQ,
-		reg: regInfo{
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 238}, // CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:         "SETNEF",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ASETNE,
-		reg: regInfo{
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 238}, // CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETORD",
-		argLen: 1,
-		asm:    x86.ASETPC,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETNAN",
-		argLen: 1,
-		asm:    x86.ASETPS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETGF",
-		argLen: 1,
-		asm:    x86.ASETHI,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "SETGEF",
-		argLen: 1,
-		asm:    x86.ASETCC,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "MOVBLSX",
-		argLen: 1,
-		asm:    x86.AMOVBLSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "MOVBLZX",
-		argLen: 1,
-		asm:    x86.AMOVBLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "MOVWLSX",
-		argLen: 1,
-		asm:    x86.AMOVWLSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "MOVWLZX",
-		argLen: 1,
-		asm:    x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:              "MOVLconst",
-		auxType:           auxInt32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVL,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:        "CVTTSD2SL",
-		argLen:      1,
-		usesScratch: true,
-		asm:         x86.ACVTTSD2SL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:        "CVTTSS2SL",
-		argLen:      1,
-		usesScratch: true,
-		asm:         x86.ACVTTSS2SL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:        "CVTSL2SS",
-		argLen:      1,
-		usesScratch: true,
-		asm:         x86.ACVTSL2SS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:        "CVTSL2SD",
-		argLen:      1,
-		usesScratch: true,
-		asm:         x86.ACVTSL2SD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:        "CVTSD2SS",
-		argLen:      1,
-		usesScratch: true,
-		asm:         x86.ACVTSD2SS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:   "CVTSS2SD",
-		argLen: 1,
-		asm:    x86.ACVTSS2SD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:         "PXOR",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.APXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-				{1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:              "LEAL",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "LEAL1",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "LEAL2",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "LEAL4",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "LEAL8",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVBLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:           "MOVBLSXload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVBLSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:           "MOVWLSXload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVWLSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:           "MOVLload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:           "MOVLstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVBloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVBLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "MOVWloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "MOVWloadidx2",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "MOVLloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "MOVLloadidx4",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "MOVBstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreidx2",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreidx4",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{2, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:           "MOVBstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:           "MOVLstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVBstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreconstidx2",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreconstidx4",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 255},   // AX CX DX BX SP BP SI DI
-				{0, 65791}, // AX CX DX BX SP BP SI DI SB
-			},
-		},
-	},
-	{
-		name:    "DUFFZERO",
-		auxType: auxInt64,
-		argLen:  3,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 1},   // AX
-			},
-			clobbers: 130, // CX DI
-		},
-	},
-	{
-		name:   "REPSTOSL",
-		argLen: 4,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 2},   // CX
-				{2, 1},   // AX
-			},
-			clobbers: 130, // CX DI
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4},   // DX
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-			clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
-		},
-	},
-	{
-		name:         "DUFFCOPY",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 64},  // SI
-			},
-			clobbers: 194, // CX SI DI
-		},
-	},
-	{
-		name:   "REPMOVSL",
-		argLen: 4,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 64},  // SI
-				{2, 2},   // CX
-			},
-			clobbers: 194, // CX SI DI
-		},
-	},
-	{
-		name:   "InvertFlags",
-		argLen: 1,
-		reg:    regInfo{},
-	},
-	{
-		name:   "LoweredGetG",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		clobberFlags:   true,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 255}, // AX CX DX BX SP BP SI DI
-			},
-		},
-	},
-	{
-		name:   "MOVLconvert",
-		argLen: 2,
-		asm:    x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "FlagEQ",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FCHS",
-		argLen: 1,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:    "MOVSSconst1",
-		auxType: auxFloat32,
-		argLen:  0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:    "MOVSDconst1",
-		auxType: auxFloat64,
-		argLen:  0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-		},
-	},
-	{
-		name:   "MOVSSconst2",
-		argLen: 1,
-		asm:    x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-	{
-		name:   "MOVSDconst2",
-		argLen: 1,
-		asm:    x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 239}, // AX CX DX BX BP SI DI
-			},
-			outputs: []outputInfo{
-				{0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
-			},
-		},
-	},
-
-	{
-		name:         "ADDSS",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AADDSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "ADDSD",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AADDSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "SUBSS",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ASUBSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "SUBSD",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ASUBSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "MULSS",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AMULSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "MULSD",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.AMULSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "DIVSS",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ADIVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "DIVSD",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          x86.ADIVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:           "MOVSSload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:           "MOVSDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:              "MOVSSconst",
-		auxType:           auxFloat32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVSS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:              "MOVSDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVSD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:    "MOVSSloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:    "MOVSSloadidx4",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:    "MOVSDloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:    "MOVSDloadidx8",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:           "MOVSSstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVSDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVSSstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVSSstoreidx4",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVSDstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVSDstoreidx8",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:         "ADDQ",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          x86.AADDQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ADDL",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          x86.AADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ADDQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.AADDQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ADDLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.AADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SUBQ",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASUBQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SUBL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASUBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SUBQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASUBQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SUBLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASUBL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "MULQ",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AIMULQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "MULL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AIMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "MULQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AIMULQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "MULLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AIMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "HMULQ",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULL",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULB",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIMULB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULQU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULLU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULWU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "HMULBU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "AVGQU",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "DIVQ",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "DIVL",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "DIVW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AIDIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "DIVQU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "DIVLU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "DIVWU",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.ADIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65531}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "MULQU2",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          x86.AMULQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1},     // AX
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 4}, // DX
-				{1, 1}, // AX
-			},
-		},
-	},
-	{
-		name:         "DIVQU2",
-		argLen:       3,
-		clobberFlags: true,
-		asm:          x86.ADIVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4},     // DX
-				{1, 1},     // AX
-				{2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 1}, // AX
-				{1, 4}, // DX
-			},
-		},
-	},
-	{
-		name:         "ANDQ",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AANDQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ANDL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AANDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ANDQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AANDQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ANDLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AANDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ORQ",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AORQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ORL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ORQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AORQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ORLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "XORQ",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AXORQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "XORL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AXORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "XORQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AXORQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "XORLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AXORL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CMPQ",
-		argLen: 2,
-		asm:    x86.ACMPQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CMPL",
-		argLen: 2,
-		asm:    x86.ACMPL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CMPW",
-		argLen: 2,
-		asm:    x86.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CMPB",
-		argLen: 2,
-		asm:    x86.ACMPB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "CMPQconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     x86.ACMPQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "CMPLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     x86.ACMPL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "CMPWconst",
-		auxType: auxInt16,
-		argLen:  1,
-		asm:     x86.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "CMPBconst",
-		auxType: auxInt8,
-		argLen:  1,
-		asm:     x86.ACMPB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "UCOMISS",
-		argLen: 2,
-		asm:    x86.AUCOMISS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "UCOMISD",
-		argLen: 2,
-		asm:    x86.AUCOMISD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "TESTQ",
-		argLen: 2,
-		asm:    x86.ATESTQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "TESTL",
-		argLen: 2,
-		asm:    x86.ATESTL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "TESTW",
-		argLen: 2,
-		asm:    x86.ATESTW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "TESTB",
-		argLen: 2,
-		asm:    x86.ATESTB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "TESTQconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     x86.ATESTQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "TESTLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     x86.ATESTL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "TESTWconst",
-		auxType: auxInt16,
-		argLen:  1,
-		asm:     x86.ATESTW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "TESTBconst",
-		auxType: auxInt8,
-		argLen:  1,
-		asm:     x86.ATESTB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHLQ",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHLQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHLL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHLQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHLQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHLLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRQ",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRW",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRB",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRWconst",
-		auxType:      auxInt16,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SHRBconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASHRB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARQ",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARL",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARW",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARB",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // CX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARWconst",
-		auxType:      auxInt16,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SARBconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ASARB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ROLQconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ROLLconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ROLWconst",
-		auxType:      auxInt16,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "ROLBconst",
-		auxType:      auxInt8,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.AROLB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "NEGQ",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ANEGQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "NEGL",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ANEGL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "NOTQ",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ANOTQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "NOTL",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ANOTL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "BSFQ",
-		argLen: 1,
-		asm:    x86.ABSFQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "BSFL",
-		argLen: 1,
-		asm:    x86.ABSFL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "CMOVQEQ",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          x86.ACMOVQEQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "CMOVLEQ",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          x86.ACMOVLEQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "BSWAPQ",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ABSWAPQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "BSWAPL",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          x86.ABSWAPL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SQRTSD",
-		argLen: 1,
-		asm:    x86.ASQRTSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "SBBQcarrymask",
-		argLen: 1,
-		asm:    x86.ASBBQ,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SBBLcarrymask",
-		argLen: 1,
-		asm:    x86.ASBBL,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETEQ",
-		argLen: 1,
-		asm:    x86.ASETEQ,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETNE",
-		argLen: 1,
-		asm:    x86.ASETNE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETL",
-		argLen: 1,
-		asm:    x86.ASETLT,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETLE",
-		argLen: 1,
-		asm:    x86.ASETLE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETG",
-		argLen: 1,
-		asm:    x86.ASETGT,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETGE",
-		argLen: 1,
-		asm:    x86.ASETGE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETB",
-		argLen: 1,
-		asm:    x86.ASETCS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETBE",
-		argLen: 1,
-		asm:    x86.ASETLS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETA",
-		argLen: 1,
-		asm:    x86.ASETHI,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETAE",
-		argLen: 1,
-		asm:    x86.ASETCC,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SETEQF",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ASETEQ,
-		reg: regInfo{
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:         "SETNEF",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          x86.ASETNE,
-		reg: regInfo{
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETORD",
-		argLen: 1,
-		asm:    x86.ASETPC,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETNAN",
-		argLen: 1,
-		asm:    x86.ASETPS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETGF",
-		argLen: 1,
-		asm:    x86.ASETHI,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "SETGEF",
-		argLen: 1,
-		asm:    x86.ASETCC,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVBQSX",
-		argLen: 1,
-		asm:    x86.AMOVBQSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVBQZX",
-		argLen: 1,
-		asm:    x86.AMOVBLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVWQSX",
-		argLen: 1,
-		asm:    x86.AMOVWQSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVWQZX",
-		argLen: 1,
-		asm:    x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVLQSX",
-		argLen: 1,
-		asm:    x86.AMOVLQSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVLQZX",
-		argLen: 1,
-		asm:    x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:              "MOVLconst",
-		auxType:           auxInt32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVL,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:              "MOVQconst",
-		auxType:           auxInt64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               x86.AMOVQ,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CVTTSD2SL",
-		argLen: 1,
-		asm:    x86.ACVTTSD2SL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CVTTSD2SQ",
-		argLen: 1,
-		asm:    x86.ACVTTSD2SQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CVTTSS2SL",
-		argLen: 1,
-		asm:    x86.ACVTTSS2SL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CVTTSS2SQ",
-		argLen: 1,
-		asm:    x86.ACVTTSS2SQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "CVTSL2SS",
-		argLen: 1,
-		asm:    x86.ACVTSL2SS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "CVTSL2SD",
-		argLen: 1,
-		asm:    x86.ACVTSL2SD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "CVTSQ2SS",
-		argLen: 1,
-		asm:    x86.ACVTSQ2SS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "CVTSQ2SD",
-		argLen: 1,
-		asm:    x86.ACVTSQ2SD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "CVTSD2SS",
-		argLen: 1,
-		asm:    x86.ACVTSD2SS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "CVTSS2SD",
-		argLen: 1,
-		asm:    x86.ACVTSS2SD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:         "PXOR",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          x86.APXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:              "LEAQ",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               x86.ALEAQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "LEAQ1",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "LEAQ2",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "LEAQ4",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "LEAQ8",
-		auxType: auxSymOff,
-		argLen:  2,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:              "LEAL",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               x86.ALEAL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVBLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVBQSXload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVBQSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVWQSXload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVWQSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVLload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVLQSXload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVLQSX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVQload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVLstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVQstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVOload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVUPS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:           "MOVOstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVUPS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVBloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVBLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVWloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVWloadidx2",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVWLZX,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVLloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVLloadidx4",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVQloadidx1",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVQloadidx8",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:    "MOVBstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreidx2",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreidx4",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVQstoreidx1",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVQstoreidx8",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVBstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVLstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "MOVQstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVBstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreconstidx2",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVLstoreconstidx4",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVQstoreconstidx1",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:    "MOVQstoreconstidx8",
-		auxType: auxSymValAndOff,
-		argLen:  3,
-		asm:     x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:         "DUFFZERO",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128},   // DI
-				{1, 65536}, // X0
-			},
-			clobbers: 128, // DI
-		},
-	},
-	{
-		name:              "MOVOconst",
-		auxType:           auxInt128,
-		argLen:            0,
-		rematerializeable: true,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-			},
-		},
-	},
-	{
-		name:   "REPSTOSQ",
-		argLen: 4,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 2},   // CX
-				{2, 1},   // AX
-			},
-			clobbers: 130, // CX DI
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4},     // DX
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15
-		},
-	},
-	{
-		name:         "DUFFCOPY",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 64},  // SI
-			},
-			clobbers: 65728, // SI DI X0
-		},
-	},
-	{
-		name:   "REPMOVSQ",
-		argLen: 4,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 128}, // DI
-				{1, 64},  // SI
-				{2, 2},   // CX
-			},
-			clobbers: 194, // CX SI DI
-		},
-	},
-	{
-		name:   "InvertFlags",
-		argLen: 1,
-		reg:    regInfo{},
-	},
-	{
-		name:   "LoweredGetG",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4}, // DX
-			},
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		clobberFlags:   true,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVQconvert",
-		argLen: 2,
-		asm:    x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "MOVLconvert",
-		argLen: 2,
-		asm:    x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "FlagEQ",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:           "MOVLatomicload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "MOVQatomicload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            x86.AMOVQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "XCHGL",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		faultOnNilArg1: true,
-		asm:            x86.AXCHGL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "XCHGQ",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		faultOnNilArg1: true,
-		asm:            x86.AXCHGQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "XADDLlock",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            x86.AXADDL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "XADDQlock",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            x86.AXADDQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			outputs: []outputInfo{
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:   "AddTupleFirst32",
-		argLen: 2,
-		reg:    regInfo{},
-	},
-	{
-		name:   "AddTupleFirst64",
-		argLen: 2,
-		reg:    regInfo{},
-	},
-	{
-		name:           "CMPXCHGLlock",
-		auxType:        auxSymOff,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            x86.ACMPXCHGL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 1},     // AX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "CMPXCHGQlock",
-		auxType:        auxSymOff,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            x86.ACMPXCHGQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 1},     // AX
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{2, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-			clobbers: 1, // AX
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-			},
-		},
-	},
-	{
-		name:           "ANDBlock",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            x86.AANDB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-	{
-		name:           "ORBlock",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            x86.AORB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 65535},      // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
-				{0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
-			},
-		},
-	},
-
-	{
-		name:        "ADD",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUB",
-		argLen: 2,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSB",
-		argLen: 2,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "MUL",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AMUL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "HMUL",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "HMULU",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AMULLU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "UDIVrtcall",
-		argLen:       2,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2}, // R1
-				{1, 1}, // R0
-			},
-			clobbers: 16396, // R2 R3 R14
-			outputs: []outputInfo{
-				{0, 1}, // R0
-				{1, 2}, // R1
-			},
-		},
-	},
-	{
-		name:        "ADDS",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDSconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "ADC",
-		argLen:      3,
-		commutative: true,
-		asm:         arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADCconst",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBS",
-		argLen: 2,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBSconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBSconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SBC",
-		argLen: 3,
-		asm:    arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SBCconst",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSCconst",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "MULLU",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AMULLU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MULA",
-		argLen: 3,
-		asm:    arm.AMULA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "ADDF",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AADDF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:        "ADDD",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AADDD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "SUBF",
-		argLen: 2,
-		asm:    arm.ASUBF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "SUBD",
-		argLen: 2,
-		asm:    arm.ASUBD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:        "MULF",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AMULF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:        "MULD",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AMULD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "DIVF",
-		argLen: 2,
-		asm:    arm.ADIVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "DIVD",
-		argLen: 2,
-		asm:    arm.ADIVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:        "AND",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ANDconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "OR",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ORconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:        "XOR",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "XORconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "BIC",
-		argLen: 2,
-		asm:    arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "BICconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MVN",
-		argLen: 1,
-		asm:    arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "NEGF",
-		argLen: 1,
-		asm:    arm.ANEGF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "NEGD",
-		argLen: 1,
-		asm:    arm.ANEGD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "SQRTD",
-		argLen: 1,
-		asm:    arm.ASQRTD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "CLZ",
-		argLen: 1,
-		asm:    arm.ACLZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SLL",
-		argLen: 2,
-		asm:    arm.ASLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SLLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ASLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SRL",
-		argLen: 2,
-		asm:    arm.ASRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SRLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ASRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SRA",
-		argLen: 2,
-		asm:    arm.ASRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SRAconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ASRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SRRconst",
-		auxType: auxInt32,
-		argLen:  1,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ANDshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ANDshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ANDshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ORshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ORshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ORshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "XORshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "XORshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "XORshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "XORshiftRR",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "BICshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "BICshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "BICshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "MVNshiftLL",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "MVNshiftRL",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "MVNshiftRA",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADCshiftLL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADCshiftRL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADCshiftRA",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SBCshiftLL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SBCshiftRL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SBCshiftRA",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSCshiftLL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSCshiftRL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSCshiftRA",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDSshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDSshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "ADDSshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBSshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBSshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SUBSshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBSshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBSshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RSBSshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADDshiftLLreg",
-		argLen: 3,
-		asm:    arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADDshiftRLreg",
-		argLen: 3,
-		asm:    arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADDshiftRAreg",
-		argLen: 3,
-		asm:    arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBshiftLLreg",
-		argLen: 3,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBshiftRLreg",
-		argLen: 3,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBshiftRAreg",
-		argLen: 3,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSBshiftLLreg",
-		argLen: 3,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSBshiftRLreg",
-		argLen: 3,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSBshiftRAreg",
-		argLen: 3,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ANDshiftLLreg",
-		argLen: 3,
-		asm:    arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ANDshiftRLreg",
-		argLen: 3,
-		asm:    arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ANDshiftRAreg",
-		argLen: 3,
-		asm:    arm.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ORshiftLLreg",
-		argLen: 3,
-		asm:    arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ORshiftRLreg",
-		argLen: 3,
-		asm:    arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ORshiftRAreg",
-		argLen: 3,
-		asm:    arm.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "XORshiftLLreg",
-		argLen: 3,
-		asm:    arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "XORshiftRLreg",
-		argLen: 3,
-		asm:    arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "XORshiftRAreg",
-		argLen: 3,
-		asm:    arm.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "BICshiftLLreg",
-		argLen: 3,
-		asm:    arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "BICshiftRLreg",
-		argLen: 3,
-		asm:    arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "BICshiftRAreg",
-		argLen: 3,
-		asm:    arm.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MVNshiftLLreg",
-		argLen: 2,
-		asm:    arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MVNshiftRLreg",
-		argLen: 2,
-		asm:    arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MVNshiftRAreg",
-		argLen: 2,
-		asm:    arm.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADCshiftLLreg",
-		argLen: 4,
-		asm:    arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADCshiftRLreg",
-		argLen: 4,
-		asm:    arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADCshiftRAreg",
-		argLen: 4,
-		asm:    arm.AADC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SBCshiftLLreg",
-		argLen: 4,
-		asm:    arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SBCshiftRLreg",
-		argLen: 4,
-		asm:    arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SBCshiftRAreg",
-		argLen: 4,
-		asm:    arm.ASBC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSCshiftLLreg",
-		argLen: 4,
-		asm:    arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSCshiftRLreg",
-		argLen: 4,
-		asm:    arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSCshiftRAreg",
-		argLen: 4,
-		asm:    arm.ARSC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADDSshiftLLreg",
-		argLen: 3,
-		asm:    arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADDSshiftRLreg",
-		argLen: 3,
-		asm:    arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "ADDSshiftRAreg",
-		argLen: 3,
-		asm:    arm.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBSshiftLLreg",
-		argLen: 3,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBSshiftRLreg",
-		argLen: 3,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBSshiftRAreg",
-		argLen: 3,
-		asm:    arm.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSBSshiftLLreg",
-		argLen: 3,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSBSshiftRLreg",
-		argLen: 3,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "RSBSshiftRAreg",
-		argLen: 3,
-		asm:    arm.ARSB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMP",
-		argLen: 2,
-		asm:    arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:    "CMPconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMN",
-		argLen: 2,
-		asm:    arm.ACMN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:    "CMNconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ACMN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:        "TST",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.ATST,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:    "TSTconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ATST,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:        "TEQ",
-		argLen:      2,
-		commutative: true,
-		asm:         arm.ATEQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:    "TEQconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm.ATEQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMPF",
-		argLen: 2,
-		asm:    arm.ACMPF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "CMPD",
-		argLen: 2,
-		asm:    arm.ACMPD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:    "CMPshiftLL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:    "CMPshiftRL",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:    "CMPshiftRA",
-		auxType: auxInt32,
-		argLen:  2,
-		asm:     arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMPshiftLLreg",
-		argLen: 3,
-		asm:    arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMPshiftRLreg",
-		argLen: 3,
-		asm:    arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMPshiftRAreg",
-		argLen: 3,
-		asm:    arm.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMPF0",
-		argLen: 1,
-		asm:    arm.ACMPF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "CMPD0",
-		argLen: 1,
-		asm:    arm.ACMPD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:              "MOVWconst",
-		auxType:           auxInt32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               arm.AMOVW,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:              "MOVFconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               arm.AMOVF,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:              "MOVDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               arm.AMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:              "MOVWaddr",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294975488}, // SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVBUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVHload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVHUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVFload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "MOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:           "MOVFstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "MOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "MOVWloadidx",
-		argLen: 3,
-		asm:    arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "MOVWloadshiftLL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "MOVWloadshiftRL",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "MOVWloadshiftRA",
-		auxType: auxInt32,
-		argLen:  3,
-		asm:     arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVWstoreidx",
-		argLen: 4,
-		asm:    arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{2, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreshiftLL",
-		auxType: auxInt32,
-		argLen:  4,
-		asm:     arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{2, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreshiftRL",
-		auxType: auxInt32,
-		argLen:  4,
-		asm:     arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{2, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:    "MOVWstoreshiftRA",
-		auxType: auxInt32,
-		argLen:  4,
-		asm:     arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{2, 22527},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-				{0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
-			},
-		},
-	},
-	{
-		name:   "MOVBreg",
-		argLen: 1,
-		asm:    arm.AMOVBS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVBUreg",
-		argLen: 1,
-		asm:    arm.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVHreg",
-		argLen: 1,
-		asm:    arm.AMOVHS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVHUreg",
-		argLen: 1,
-		asm:    arm.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVWreg",
-		argLen: 1,
-		asm:    arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVWnop",
-		argLen:       1,
-		resultInArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVWF",
-		argLen: 1,
-		asm:    arm.AMOVWF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "MOVWD",
-		argLen: 1,
-		asm:    arm.AMOVWD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "MOVWUF",
-		argLen: 1,
-		asm:    arm.AMOVWF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "MOVWUD",
-		argLen: 1,
-		asm:    arm.AMOVWD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "MOVFW",
-		argLen: 1,
-		asm:    arm.AMOVFW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVDW",
-		argLen: 1,
-		asm:    arm.AMOVDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVFWU",
-		argLen: 1,
-		asm:    arm.AMOVFW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVDWU",
-		argLen: 1,
-		asm:    arm.AMOVDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVFD",
-		argLen: 1,
-		asm:    arm.AMOVFD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "MOVDF",
-		argLen: 1,
-		asm:    arm.AMOVDF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "CMOVWHSconst",
-		auxType:      auxInt32,
-		argLen:       2,
-		resultInArg0: true,
-		asm:          arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "CMOVWLSconst",
-		auxType:      auxInt32,
-		argLen:       2,
-		resultInArg0: true,
-		asm:          arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SRAcond",
-		argLen: 3,
-		asm:    arm.ASRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 128},   // R7
-				{0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14
-			},
-			clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-		},
-	},
-	{
-		name:   "Equal",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "NotEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "LessThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "LessEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "GreaterThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "GreaterEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "LessThanU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "LessEqualU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "GreaterThanU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "GreaterEqualU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "DUFFZERO",
-		auxType:        auxInt64,
-		argLen:         3,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2}, // R1
-				{1, 1}, // R0
-			},
-			clobbers: 16386, // R1 R14
-		},
-	},
-	{
-		name:           "DUFFCOPY",
-		auxType:        auxInt64,
-		argLen:         3,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4}, // R2
-				{1, 2}, // R1
-			},
-			clobbers: 16391, // R0 R1 R2 R14
-		},
-	},
-	{
-		name:           "LoweredZero",
-		auxType:        auxInt64,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2},     // R1
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			clobbers: 2, // R1
-		},
-	},
-	{
-		name:           "LoweredMove",
-		auxType:        auxInt64,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4},     // R2
-				{1, 2},     // R1
-				{2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			clobbers: 6, // R1 R2
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 128}, // R7
-			},
-		},
-	},
-	{
-		name:   "MOVWconvert",
-		argLen: 2,
-		asm:    arm.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "FlagEQ",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "InvertFlags",
-		argLen: 1,
-		reg:    regInfo{},
-	},
-
-	{
-		name:        "ADD",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ADDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1878786047}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "SUB",
-		argLen: 2,
-		asm:    arm64.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SUBconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "MUL",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AMUL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "MULW",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AMULW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "MULH",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.ASMULH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "UMULH",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AUMULH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "MULL",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.ASMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "UMULL",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AUMULL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "DIV",
-		argLen: 2,
-		asm:    arm64.ASDIV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "UDIV",
-		argLen: 2,
-		asm:    arm64.AUDIV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "DIVW",
-		argLen: 2,
-		asm:    arm64.ASDIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "UDIVW",
-		argLen: 2,
-		asm:    arm64.AUDIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOD",
-		argLen: 2,
-		asm:    arm64.AREM,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "UMOD",
-		argLen: 2,
-		asm:    arm64.AUREM,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MODW",
-		argLen: 2,
-		asm:    arm64.AREMW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "UMODW",
-		argLen: 2,
-		asm:    arm64.AUREMW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "FADDS",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AFADDS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "FADDD",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AFADDD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FSUBS",
-		argLen: 2,
-		asm:    arm64.AFSUBS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FSUBD",
-		argLen: 2,
-		asm:    arm64.AFSUBD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "FMULS",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AFMULS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "FMULD",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AFMULD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FDIVS",
-		argLen: 2,
-		asm:    arm64.AFDIVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FDIVD",
-		argLen: 2,
-		asm:    arm64.AFDIVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "AND",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ANDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "OR",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:        "XOR",
-		argLen:      2,
-		commutative: true,
-		asm:         arm64.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "XORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "BIC",
-		argLen: 2,
-		asm:    arm64.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "BICconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MVN",
-		argLen: 1,
-		asm:    arm64.AMVN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "NEG",
-		argLen: 1,
-		asm:    arm64.ANEG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FNEGS",
-		argLen: 1,
-		asm:    arm64.AFNEGS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FNEGD",
-		argLen: 1,
-		asm:    arm64.AFNEGD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FSQRTD",
-		argLen: 1,
-		asm:    arm64.AFSQRTD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "REV",
-		argLen: 1,
-		asm:    arm64.AREV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "REVW",
-		argLen: 1,
-		asm:    arm64.AREVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "REV16W",
-		argLen: 1,
-		asm:    arm64.AREV16W,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "RBIT",
-		argLen: 1,
-		asm:    arm64.ARBIT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "RBITW",
-		argLen: 1,
-		asm:    arm64.ARBITW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "CLZ",
-		argLen: 1,
-		asm:    arm64.ACLZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "CLZW",
-		argLen: 1,
-		asm:    arm64.ACLZW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "SLL",
-		argLen: 2,
-		asm:    arm64.ALSL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SLLconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ALSL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "SRL",
-		argLen: 2,
-		asm:    arm64.ALSR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SRLconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ALSR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "SRA",
-		argLen: 2,
-		asm:    arm64.AASR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SRAconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.AASR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "RORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.AROR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "RORWconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ARORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "CMP",
-		argLen: 2,
-		asm:    arm64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:    "CMPconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:   "CMPW",
-		argLen: 2,
-		asm:    arm64.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:    "CMPWconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm64.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:   "CMN",
-		argLen: 2,
-		asm:    arm64.ACMN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:    "CMNconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     arm64.ACMN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:   "CMNW",
-		argLen: 2,
-		asm:    arm64.ACMNW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:    "CMNWconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     arm64.ACMNW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:   "FCMPS",
-		argLen: 2,
-		asm:    arm64.AFCMPS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FCMPD",
-		argLen: 2,
-		asm:    arm64.AFCMPD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:    "ADDshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ADDshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ADDshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SUBshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SUBshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "SUBshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ANDshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ANDshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ANDshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ORshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ORshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "ORshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "XORshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "XORshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "XORshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.AEOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "BICshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "BICshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "BICshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ABIC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:    "CMPshiftLL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:    "CMPshiftRL",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:    "CMPshiftRA",
-		auxType: auxInt64,
-		argLen:  2,
-		asm:     arm64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:              "MOVDconst",
-		auxType:           auxInt64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               arm64.AMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:              "FMOVSconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               arm64.AFMOVS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:              "FMOVDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               arm64.AFMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:              "MOVDaddr",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               arm64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372037928517632}, // SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVBUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVHload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVHUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVWUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "MOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "FMOVSload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "FMOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "FMOVSstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "FMOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-				{1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "MOVBstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVDstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:   "MOVBreg",
-		argLen: 1,
-		asm:    arm64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOVBUreg",
-		argLen: 1,
-		asm:    arm64.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOVHreg",
-		argLen: 1,
-		asm:    arm64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOVHUreg",
-		argLen: 1,
-		asm:    arm64.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOVWreg",
-		argLen: 1,
-		asm:    arm64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOVWUreg",
-		argLen: 1,
-		asm:    arm64.AMOVWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "MOVDreg",
-		argLen: 1,
-		asm:    arm64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:         "MOVDnop",
-		argLen:       1,
-		resultInArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "SCVTFWS",
-		argLen: 1,
-		asm:    arm64.ASCVTFWS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "SCVTFWD",
-		argLen: 1,
-		asm:    arm64.ASCVTFWD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "UCVTFWS",
-		argLen: 1,
-		asm:    arm64.AUCVTFWS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "UCVTFWD",
-		argLen: 1,
-		asm:    arm64.AUCVTFWD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "SCVTFS",
-		argLen: 1,
-		asm:    arm64.ASCVTFS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "SCVTFD",
-		argLen: 1,
-		asm:    arm64.ASCVTFD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "UCVTFS",
-		argLen: 1,
-		asm:    arm64.AUCVTFS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "UCVTFD",
-		argLen: 1,
-		asm:    arm64.AUCVTFD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FCVTZSSW",
-		argLen: 1,
-		asm:    arm64.AFCVTZSSW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZSDW",
-		argLen: 1,
-		asm:    arm64.AFCVTZSDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZUSW",
-		argLen: 1,
-		asm:    arm64.AFCVTZUSW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZUDW",
-		argLen: 1,
-		asm:    arm64.AFCVTZUDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZSS",
-		argLen: 1,
-		asm:    arm64.AFCVTZSS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZSD",
-		argLen: 1,
-		asm:    arm64.AFCVTZSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZUS",
-		argLen: 1,
-		asm:    arm64.AFCVTZUS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTZUD",
-		argLen: 1,
-		asm:    arm64.AFCVTZUD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FCVTSD",
-		argLen: 1,
-		asm:    arm64.AFCVTSD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "FCVTDS",
-		argLen: 1,
-		asm:    arm64.AFCVTDS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "CSELULT",
-		argLen: 3,
-		asm:    arm64.ACSEL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-				{1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "CSELULT0",
-		argLen: 2,
-		asm:    arm64.ACSEL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 67108864},   // R26
-				{0, 1744568319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP
-			},
-			clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-		},
-	},
-	{
-		name:   "Equal",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "NotEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "LessThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "LessEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "GreaterThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "GreaterEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "LessThanU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "LessEqualU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "GreaterThanU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "GreaterEqualU",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "DUFFZERO",
-		auxType:        auxInt64,
-		argLen:         2,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			clobbers: 536936448, // R16 R30
-		},
-	},
-	{
-		name:           "LoweredZero",
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 65536},     // R16
-				{1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			clobbers: 65536, // R16
-		},
-	},
-	{
-		name:           "DUFFCOPY",
-		auxType:        auxInt64,
-		argLen:         3,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 131072}, // R17
-				{1, 65536},  // R16
-			},
-			clobbers: 537067520, // R16 R17 R30
-		},
-	},
-	{
-		name:           "LoweredMove",
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 131072},    // R17
-				{1, 65536},     // R16
-				{2, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-			clobbers: 196608, // R16 R17
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 67108864}, // R26
-			},
-		},
-	},
-	{
-		name:   "MOVDconvert",
-		argLen: 2,
-		asm:    arm64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:   "FlagEQ",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_UGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT_ULT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "InvertFlags",
-		argLen: 1,
-		reg:    regInfo{},
-	},
-	{
-		name:           "LDAR",
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.ALDAR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "LDARW",
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            arm64.ALDARW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "STLR",
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.ASTLR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "STLRW",
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.ASTLRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicExchange64",
-		argLen:          3,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicExchange32",
-		argLen:          3,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicAdd64",
-		argLen:          3,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicAdd32",
-		argLen:          3,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicCas64",
-		argLen:          4,
-		resultNotInArgs: true,
-		clobberFlags:    true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{2, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicCas32",
-		argLen:          4,
-		resultNotInArgs: true,
-		clobberFlags:    true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{2, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicAnd8",
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicOr8",
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            arm64.AORR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 805044223},           // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
-				{0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
-			},
-		},
-	},
-
-	{
-		name:        "ADD",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AADDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "ADDconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.AADDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SUB",
-		argLen: 2,
-		asm:    mips.ASUBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "SUBconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ASUBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:        "MUL",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMUL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			clobbers: 105553116266496, // HI LO
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:        "MULT",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMUL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 35184372088832}, // HI
-				{1, 70368744177664}, // LO
-			},
-		},
-	},
-	{
-		name:        "MULTU",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 35184372088832}, // HI
-				{1, 70368744177664}, // LO
-			},
-		},
-	},
-	{
-		name:   "DIV",
-		argLen: 2,
-		asm:    mips.ADIV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 35184372088832}, // HI
-				{1, 70368744177664}, // LO
-			},
-		},
-	},
-	{
-		name:   "DIVU",
-		argLen: 2,
-		asm:    mips.ADIVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 35184372088832}, // HI
-				{1, 70368744177664}, // LO
-			},
-		},
-	},
-	{
-		name:        "ADDF",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AADDF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:        "ADDD",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AADDD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "SUBF",
-		argLen: 2,
-		asm:    mips.ASUBF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "SUBD",
-		argLen: 2,
-		asm:    mips.ASUBD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:        "MULF",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:        "MULD",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "DIVF",
-		argLen: 2,
-		asm:    mips.ADIVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "DIVD",
-		argLen: 2,
-		asm:    mips.ADIVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:        "AND",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "ANDconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:        "OR",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "ORconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:        "XOR",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "XORconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:        "NOR",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.ANOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "NORconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ANOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "NEG",
-		argLen: 1,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "NEGF",
-		argLen: 1,
-		asm:    mips.ANEGF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "NEGD",
-		argLen: 1,
-		asm:    mips.ANEGD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "SQRTD",
-		argLen: 1,
-		asm:    mips.ASQRTD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "SLL",
-		argLen: 2,
-		asm:    mips.ASLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "SLLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ASLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SRL",
-		argLen: 2,
-		asm:    mips.ASRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "SRLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ASRL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SRA",
-		argLen: 2,
-		asm:    mips.ASRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "SRAconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ASRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "CLZ",
-		argLen: 1,
-		asm:    mips.ACLZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SGT",
-		argLen: 2,
-		asm:    mips.ASGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "SGTconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ASGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SGTzero",
-		argLen: 1,
-		asm:    mips.ASGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SGTU",
-		argLen: 2,
-		asm:    mips.ASGTU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:    "SGTUconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     mips.ASGTU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "SGTUzero",
-		argLen: 1,
-		asm:    mips.ASGTU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "CMPEQF",
-		argLen: 2,
-		asm:    mips.ACMPEQF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "CMPEQD",
-		argLen: 2,
-		asm:    mips.ACMPEQD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "CMPGEF",
-		argLen: 2,
-		asm:    mips.ACMPGEF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "CMPGED",
-		argLen: 2,
-		asm:    mips.ACMPGED,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "CMPGTF",
-		argLen: 2,
-		asm:    mips.ACMPGTF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "CMPGTD",
-		argLen: 2,
-		asm:    mips.ACMPGTD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:              "MOVWconst",
-		auxType:           auxInt32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               mips.AMOVW,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:              "MOVFconst",
-		auxType:           auxFloat32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               mips.AMOVF,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:              "MOVDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               mips.AMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:              "MOVWaddr",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140737555464192}, // SP SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "MOVBUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "MOVHload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "MOVHUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "MOVFload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:           "MOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVFstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 35183835217920},  // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 35183835217920},  // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVBstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:   "MOVBreg",
-		argLen: 1,
-		asm:    mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "MOVBUreg",
-		argLen: 1,
-		asm:    mips.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "MOVHreg",
-		argLen: 1,
-		asm:    mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "MOVHUreg",
-		argLen: 1,
-		asm:    mips.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "MOVWreg",
-		argLen: 1,
-		asm:    mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:         "MOVWnop",
-		argLen:       1,
-		resultInArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:         "CMOVZ",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          mips.ACMOVZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-				{1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-				{2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:         "CMOVZzero",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          mips.ACMOVZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-				{1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "MOVWF",
-		argLen: 1,
-		asm:    mips.AMOVWF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "MOVWD",
-		argLen: 1,
-		asm:    mips.AMOVWD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "TRUNCFW",
-		argLen: 1,
-		asm:    mips.ATRUNCFW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "TRUNCDW",
-		argLen: 1,
-		asm:    mips.ATRUNCDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "MOVFD",
-		argLen: 1,
-		asm:    mips.AMOVFD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:   "MOVDF",
-		argLen: 1,
-		asm:    mips.AMOVDF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-			outputs: []outputInfo{
-				{0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
-			},
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt32,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4194304},   // R22
-				{0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31
-			},
-			clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt32,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt32,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt32,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-			clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
-		},
-	},
-	{
-		name:           "LoweredAtomicLoad",
-		argLen:         2,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicStore",
-		argLen:         3,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicStorezero",
-		argLen:         2,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicExchange",
-		argLen:          3,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicAdd",
-		argLen:          3,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicAddconst",
-		auxType:         auxInt32,
-		argLen:          2,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:            "LoweredAtomicCas",
-		argLen:          4,
-		resultNotInArgs: true,
-		faultOnNilArg0:  true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{2, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicAnd",
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicOr",
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 469762046},       // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-				{0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "LoweredZero",
-		auxType:        auxInt32,
-		argLen:         3,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2},         // R1
-				{1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-			clobbers: 2, // R1
-		},
-	},
-	{
-		name:           "LoweredMove",
-		auxType:        auxInt32,
-		argLen:         4,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4},         // R2
-				{1, 2},         // R1
-				{2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-			clobbers: 6, // R1 R2
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-		},
-	},
-	{
-		name:   "FPFlagTrue",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "FPFlagFalse",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4194304}, // R22
-			},
-		},
-	},
-	{
-		name:   "MOVWconvert",
-		argLen: 2,
-		asm:    mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
-			},
-			outputs: []outputInfo{
-				{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
-			},
-		},
-	},
-
-	{
-		name:        "ADDV",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AADDVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "ADDVconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.AADDVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "SUBV",
-		argLen: 2,
-		asm:    mips.ASUBVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "SUBVconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ASUBVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:        "MULV",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504606846976}, // HI
-				{1, 2305843009213693952}, // LO
-			},
-		},
-	},
-	{
-		name:        "MULVU",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504606846976}, // HI
-				{1, 2305843009213693952}, // LO
-			},
-		},
-	},
-	{
-		name:   "DIVV",
-		argLen: 2,
-		asm:    mips.ADIVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504606846976}, // HI
-				{1, 2305843009213693952}, // LO
-			},
-		},
-	},
-	{
-		name:   "DIVVU",
-		argLen: 2,
-		asm:    mips.ADIVVU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504606846976}, // HI
-				{1, 2305843009213693952}, // LO
-			},
-		},
-	},
-	{
-		name:        "ADDF",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AADDF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "ADDD",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AADDD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "SUBF",
-		argLen: 2,
-		asm:    mips.ASUBF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "SUBD",
-		argLen: 2,
-		asm:    mips.ASUBD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "MULF",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "MULD",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AMULD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "DIVF",
-		argLen: 2,
-		asm:    mips.ADIVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "DIVD",
-		argLen: 2,
-		asm:    mips.ADIVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:        "AND",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "ANDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:        "OR",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "ORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:        "XOR",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "XORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:        "NOR",
-		argLen:      2,
-		commutative: true,
-		asm:         mips.ANOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "NORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ANOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "NEGV",
-		argLen: 1,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "NEGF",
-		argLen: 1,
-		asm:    mips.ANEGF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "NEGD",
-		argLen: 1,
-		asm:    mips.ANEGD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "SLLV",
-		argLen: 2,
-		asm:    mips.ASLLV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "SLLVconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ASLLV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "SRLV",
-		argLen: 2,
-		asm:    mips.ASRLV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "SRLVconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ASRLV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "SRAV",
-		argLen: 2,
-		asm:    mips.ASRAV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "SRAVconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ASRAV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "SGT",
-		argLen: 2,
-		asm:    mips.ASGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "SGTconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ASGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "SGTU",
-		argLen: 2,
-		asm:    mips.ASGTU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:    "SGTUconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     mips.ASGTU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "CMPEQF",
-		argLen: 2,
-		asm:    mips.ACMPEQF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "CMPEQD",
-		argLen: 2,
-		asm:    mips.ACMPEQD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "CMPGEF",
-		argLen: 2,
-		asm:    mips.ACMPGEF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "CMPGED",
-		argLen: 2,
-		asm:    mips.ACMPGED,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "CMPGTF",
-		argLen: 2,
-		asm:    mips.ACMPGTF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "CMPGTD",
-		argLen: 2,
-		asm:    mips.ACMPGTD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:              "MOVVconst",
-		auxType:           auxInt64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               mips.AMOVV,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:              "MOVFconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               mips.AMOVF,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:              "MOVDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               mips.AMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:              "MOVVaddr",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               mips.AMOVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018460942336}, // SP SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVBUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVHload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVHUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVWUload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVVload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:           "MOVFload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "MOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 234881022},           // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 234881022},           // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 234881022},           // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVVstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 234881022},           // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVFstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "MOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-				{1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:           "MOVBstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:           "MOVVstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            mips.AMOVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
-			},
-		},
-	},
-	{
-		name:   "MOVBreg",
-		argLen: 1,
-		asm:    mips.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVBUreg",
-		argLen: 1,
-		asm:    mips.AMOVBU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVHreg",
-		argLen: 1,
-		asm:    mips.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVHUreg",
-		argLen: 1,
-		asm:    mips.AMOVHU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVWreg",
-		argLen: 1,
-		asm:    mips.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVWUreg",
-		argLen: 1,
-		asm:    mips.AMOVWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVVreg",
-		argLen: 1,
-		asm:    mips.AMOVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:         "MOVVnop",
-		argLen:       1,
-		resultInArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "MOVWF",
-		argLen: 1,
-		asm:    mips.AMOVWF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "MOVWD",
-		argLen: 1,
-		asm:    mips.AMOVWD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "MOVVF",
-		argLen: 1,
-		asm:    mips.AMOVVF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "MOVVD",
-		argLen: 1,
-		asm:    mips.AMOVVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "TRUNCFW",
-		argLen: 1,
-		asm:    mips.ATRUNCFW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "TRUNCDW",
-		argLen: 1,
-		asm:    mips.ATRUNCDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "TRUNCFV",
-		argLen: 1,
-		asm:    mips.ATRUNCFV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "TRUNCDV",
-		argLen: 1,
-		asm:    mips.ATRUNCDV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "MOVFD",
-		argLen: 1,
-		asm:    mips.AMOVFD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:   "MOVDF",
-		argLen: 1,
-		asm:    mips.AMOVDF,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-			outputs: []outputInfo{
-				{0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
-			},
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4194304},   // R22
-				{0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31
-			},
-			clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-			clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
-		},
-	},
-	{
-		name:           "DUFFZERO",
-		auxType:        auxInt64,
-		argLen:         2,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-			clobbers: 134217730, // R1 R31
-		},
-	},
-	{
-		name:           "LoweredZero",
-		auxType:        auxInt64,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2},         // R1
-				{1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-			clobbers: 2, // R1
-		},
-	},
-	{
-		name:           "LoweredMove",
-		auxType:        auxInt64,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4},         // R2
-				{1, 2},         // R1
-				{2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-			clobbers: 6, // R1 R2
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-		},
-	},
-	{
-		name:   "FPFlagTrue",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "FPFlagFalse",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4194304}, // R22
-			},
-		},
-	},
-	{
-		name:   "MOVVconvert",
-		argLen: 2,
-		asm:    mips.AMOVV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
-			},
-			outputs: []outputInfo{
-				{0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
-			},
-		},
-	},
-
-	{
-		name:        "ADD",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "ADDconst",
-		auxType: auxSymOff,
-		argLen:  1,
-		asm:     ppc64.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "FADD",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AFADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:        "FADDS",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AFADDS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "SUB",
-		argLen: 2,
-		asm:    ppc64.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FSUB",
-		argLen: 2,
-		asm:    ppc64.AFSUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FSUBS",
-		argLen: 2,
-		asm:    ppc64.AFSUBS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:        "MULLD",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AMULLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "MULLW",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AMULLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "MULHD",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AMULHD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "MULHW",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AMULHW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "MULHDU",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AMULHDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "MULHWU",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AMULHWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "FMUL",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AFMUL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:        "FMULS",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AFMULS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "SRAD",
-		argLen: 2,
-		asm:    ppc64.ASRAD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "SRAW",
-		argLen: 2,
-		asm:    ppc64.ASRAW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "SRD",
-		argLen: 2,
-		asm:    ppc64.ASRD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "SRW",
-		argLen: 2,
-		asm:    ppc64.ASRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "SLD",
-		argLen: 2,
-		asm:    ppc64.ASLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "SLW",
-		argLen: 2,
-		asm:    ppc64.ASLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "ADDconstForCarry",
-		auxType: auxInt16,
-		argLen:  1,
-		asm:     ppc64.AADDC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			clobbers: 2147483648, // R31
-		},
-	},
-	{
-		name:   "MaskIfNotCarry",
-		argLen: 1,
-		asm:    ppc64.AADDME,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "SRADconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ASRAD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "SRAWconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ASRAW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "SRDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ASRD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "SRWconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ASRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "SLDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ASLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "SLWconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ASLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FDIV",
-		argLen: 2,
-		asm:    ppc64.AFDIV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FDIVS",
-		argLen: 2,
-		asm:    ppc64.AFDIVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "DIVD",
-		argLen: 2,
-		asm:    ppc64.ADIVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "DIVW",
-		argLen: 2,
-		asm:    ppc64.ADIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "DIVDU",
-		argLen: 2,
-		asm:    ppc64.ADIVDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "DIVWU",
-		argLen: 2,
-		asm:    ppc64.ADIVWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FCTIDZ",
-		argLen: 1,
-		asm:    ppc64.AFCTIDZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FCTIWZ",
-		argLen: 1,
-		asm:    ppc64.AFCTIWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FCFID",
-		argLen: 1,
-		asm:    ppc64.AFCFID,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FRSP",
-		argLen: 1,
-		asm:    ppc64.AFRSP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:        "Xf2i64",
-		argLen:      1,
-		usesScratch: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "Xi2f64",
-		argLen:      1,
-		usesScratch: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:        "AND",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "ANDN",
-		argLen: 2,
-		asm:    ppc64.AANDN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "OR",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "ORN",
-		argLen: 2,
-		asm:    ppc64.AORN,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "XOR",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:        "EQV",
-		argLen:      2,
-		commutative: true,
-		asm:         ppc64.AEQV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "NEG",
-		argLen: 1,
-		asm:    ppc64.ANEG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FNEG",
-		argLen: 1,
-		asm:    ppc64.AFNEG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FSQRT",
-		argLen: 1,
-		asm:    ppc64.AFSQRT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FSQRTS",
-		argLen: 1,
-		asm:    ppc64.AFSQRTS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:    "ORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "XORconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:         "ANDconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          ppc64.AANDCC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "ANDCCconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.AANDCC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "MOVBreg",
-		argLen: 1,
-		asm:    ppc64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "MOVBZreg",
-		argLen: 1,
-		asm:    ppc64.AMOVBZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "MOVHreg",
-		argLen: 1,
-		asm:    ppc64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "MOVHZreg",
-		argLen: 1,
-		asm:    ppc64.AMOVHZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "MOVWreg",
-		argLen: 1,
-		asm:    ppc64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "MOVWZreg",
-		argLen: 1,
-		asm:    ppc64.AMOVWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVBZload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVBZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVHload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVHZload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVHZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVWZload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "FMOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:           "FMOVSload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVHstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "FMOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            ppc64.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{0, 1073733630},         // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "FMOVSstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            ppc64.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{0, 1073733630},         // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVBstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVHstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVWstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:           "MOVDstorezero",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            ppc64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:              "MOVDaddr",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		asm:               ppc64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 6}, // SP SB
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:              "MOVDconst",
-		auxType:           auxInt64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               ppc64.AMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:              "FMOVDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               ppc64.AFMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:              "FMOVSconst",
-		auxType:           auxFloat32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               ppc64.AFMOVS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "FCMPU",
-		argLen: 2,
-		asm:    ppc64.AFCMPU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-				{1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-			},
-		},
-	},
-	{
-		name:   "CMP",
-		argLen: 2,
-		asm:    ppc64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "CMPU",
-		argLen: 2,
-		asm:    ppc64.ACMPU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "CMPW",
-		argLen: 2,
-		asm:    ppc64.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "CMPWU",
-		argLen: 2,
-		asm:    ppc64.ACMPWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-				{1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "CMPconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "CMPUconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     ppc64.ACMPU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "CMPWconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     ppc64.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:    "CMPWUconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     ppc64.ACMPWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "Equal",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "NotEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "LessThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FLessThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "LessEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FLessEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "GreaterThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FGreaterThan",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "GreaterEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "FGreaterEqual",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 2048}, // R11
-			},
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		clobberFlags:   true,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			clobbers: 2147483648, // R31
-		},
-	},
-	{
-		name:   "MOVDconvert",
-		argLen: 2,
-		asm:    ppc64.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			outputs: []outputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2048},       // R11
-				{0, 1073733626}, // SP R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
-		},
-	},
-	{
-		name:           "LoweredZero",
-		auxType:        auxInt64,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 8},          // R3
-				{1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			clobbers: 8, // R3
-		},
-	},
-	{
-		name:           "LoweredMove",
-		auxType:        auxInt64,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 8},          // R3
-				{1, 16},         // R4
-				{2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
-			},
-			clobbers: 24, // R3 R4
-		},
-	},
-	{
-		name:   "InvertFlags",
-		argLen: 1,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagEQ",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-
-	{
-		name:         "FADDS",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AFADDS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FADD",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AFADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FSUBS",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AFSUBS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FSUB",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AFSUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FMULS",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          s390x.AFMULS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FMUL",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		asm:          s390x.AFMUL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FDIVS",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          s390x.AFDIVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FDIV",
-		argLen:       2,
-		resultInArg0: true,
-		asm:          s390x.AFDIV,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FNEGS",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.AFNEGS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "FNEG",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.AFNEG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "FMOVSload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            s390x.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "FMOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            s390x.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:              "FMOVSconst",
-		auxType:           auxFloat32,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               s390x.AFMOVS,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:              "FMOVDconst",
-		auxType:           auxFloat64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               s390x.AFMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:    "FMOVSloadidx",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     s390x.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:    "FMOVDloadidx",
-		auxType: auxSymOff,
-		argLen:  3,
-		asm:     s390x.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "FMOVSstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            s390x.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:           "FMOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            s390x.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:    "FMOVSstoreidx",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     s390x.AFMOVS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:    "FMOVDstoreidx",
-		auxType: auxSymOff,
-		argLen:  4,
-		asm:     s390x.AFMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:         "ADD",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ADDW",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AADDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ADDconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ADDWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.AADDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "ADDload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AADD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "ADDWload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AADDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SUB",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          s390x.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SUBW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          s390x.ASUBW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SUBconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SUBWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.ASUBW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "SUBload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.ASUB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "SUBWload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.ASUBW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MULLD",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMULLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MULLW",
-		argLen:       2,
-		commutative:  true,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMULLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MULLDconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMULLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MULLWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMULLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MULLDload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AMULLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MULLWload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AMULLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MULHD",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMULHD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MULHDU",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMULHDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "DIVD",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.ADIVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "DIVW",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.ADIVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "DIVDU",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.ADIVDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "DIVWU",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.ADIVWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MODD",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMODD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MODW",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMODW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MODDU",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMODDU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MODWU",
-		argLen:       2,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AMODWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "AND",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ANDW",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AANDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ANDconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ANDWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AANDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "ANDload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AAND,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "ANDWload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AANDW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "OR",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ORW",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ORconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "ORWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "ORload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "ORWload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "XOR",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "XORW",
-		argLen:       2,
-		commutative:  true,
-		clobberFlags: true,
-		asm:          s390x.AXORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "XORconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "XORWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		asm:          s390x.AXORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "XORload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AXOR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "XORWload",
-		auxType:        auxSymOff,
-		argLen:         3,
-		resultInArg0:   true,
-		clobberFlags:   true,
-		faultOnNilArg1: true,
-		asm:            s390x.AXORW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CMP",
-		argLen: 2,
-		asm:    s390x.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:   "CMPW",
-		argLen: 2,
-		asm:    s390x.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:   "CMPU",
-		argLen: 2,
-		asm:    s390x.ACMPU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:   "CMPWU",
-		argLen: 2,
-		asm:    s390x.ACMPWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:    "CMPconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     s390x.ACMP,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:    "CMPWconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     s390x.ACMPW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:    "CMPUconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     s390x.ACMPU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:    "CMPWUconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     s390x.ACMPWU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:   "FCMPS",
-		argLen: 2,
-		asm:    s390x.ACEBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "FCMP",
-		argLen: 2,
-		asm:    s390x.AFCMPU,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-				{1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "SLD",
-		argLen: 2,
-		asm:    s390x.ASLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SLW",
-		argLen: 2,
-		asm:    s390x.ASLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SLDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     s390x.ASLD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SLWconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     s390x.ASLW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SRD",
-		argLen: 2,
-		asm:    s390x.ASRD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SRW",
-		argLen: 2,
-		asm:    s390x.ASRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SRDconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     s390x.ASRD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "SRWconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     s390x.ASRW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SRAD",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          s390x.ASRAD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SRAW",
-		argLen:       2,
-		clobberFlags: true,
-		asm:          s390x.ASRAW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SRADconst",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.ASRAD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "SRAWconst",
-		auxType:      auxInt32,
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.ASRAW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RLLGconst",
-		auxType: auxInt64,
-		argLen:  1,
-		asm:     s390x.ARLLG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:    "RLLconst",
-		auxType: auxInt32,
-		argLen:  1,
-		asm:     s390x.ARLL,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "NEG",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.ANEG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "NEGW",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.ANEGW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "NOT",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "NOTW",
-		argLen:       1,
-		resultInArg0: true,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "FSQRT",
-		argLen: 1,
-		asm:    s390x.AFSQRT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "SUBEcarrymask",
-		argLen: 1,
-		asm:    s390x.ASUBE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "SUBEWcarrymask",
-		argLen: 1,
-		asm:    s390x.ASUBE,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDEQ",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDEQ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDNE",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDNE,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDLT",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDLT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDLE",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDLE,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDGT",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDGE",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDGE,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDGTnoinv",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDGT,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDGEnoinv",
-		argLen:       3,
-		resultInArg0: true,
-		asm:          s390x.AMOVDGE,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-				{1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVBreg",
-		argLen: 1,
-		asm:    s390x.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVBZreg",
-		argLen: 1,
-		asm:    s390x.AMOVBZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVHreg",
-		argLen: 1,
-		asm:    s390x.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVHZreg",
-		argLen: 1,
-		asm:    s390x.AMOVHZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVWreg",
-		argLen: 1,
-		asm:    s390x.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVWZreg",
-		argLen: 1,
-		asm:    s390x.AMOVWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:              "MOVDconst",
-		auxType:           auxInt64,
-		argLen:            0,
-		rematerializeable: true,
-		asm:               s390x.AMOVD,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CFDBRA",
-		argLen: 1,
-		asm:    s390x.ACFDBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CGDBRA",
-		argLen: 1,
-		asm:    s390x.ACGDBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CFEBRA",
-		argLen: 1,
-		asm:    s390x.ACFEBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CGEBRA",
-		argLen: 1,
-		asm:    s390x.ACGEBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "CEFBRA",
-		argLen: 1,
-		asm:    s390x.ACEFBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "CDFBRA",
-		argLen: 1,
-		asm:    s390x.ACDFBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "CEGBRA",
-		argLen: 1,
-		asm:    s390x.ACEGBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "CDGBRA",
-		argLen: 1,
-		asm:    s390x.ACDGBRA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "LEDBR",
-		argLen: 1,
-		asm:    s390x.ALEDBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:   "LDEBR",
-		argLen: 1,
-		asm:    s390x.ALDEBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-			outputs: []outputInfo{
-				{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-			},
-		},
-	},
-	{
-		name:              "MOVDaddr",
-		auxType:           auxSymOff,
-		argLen:            1,
-		rematerializeable: true,
-		clobberFlags:      true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295000064}, // SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDaddridx",
-		auxType:      auxSymOff,
-		argLen:       2,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295000064}, // SP SB
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVBZload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVBZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVBload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVHZload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVHZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVHload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVWZload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVWload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVDload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVWBR",
-		argLen: 1,
-		asm:    s390x.AMOVWBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "MOVDBR",
-		argLen: 1,
-		asm:    s390x.AMOVDBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVHBRload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVHBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVWBRload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVWBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVDBRload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVDBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVBstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVHstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVWstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVDstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVHBRstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVHBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVWBRstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVWBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVDBRstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVDBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MVC",
-		auxType:        auxSymValAndOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		faultOnNilArg1: true,
-		asm:            s390x.AMVC,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVBZloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVBZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVHZloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVHZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVWZloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVHBRloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVHBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVWBRloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVWBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVDBRloadidx",
-		auxType:      auxSymOff,
-		argLen:       3,
-		clobberFlags: true,
-		asm:          s390x.AMOVDBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 54270},      // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "MOVBstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVHstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVWstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVDstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVHBRstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVHBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVWBRstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVWBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "MOVDBRstoreidx",
-		auxType:      auxSymOff,
-		argLen:       4,
-		clobberFlags: true,
-		asm:          s390x.AMOVDBR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVBstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVB,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVHstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVH,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVWstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-		},
-	},
-	{
-		name:           "MOVDstoreconst",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-		},
-	},
-	{
-		name:           "CLEAR",
-		auxType:        auxSymValAndOff,
-		argLen:         2,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.ACLEAR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:         "CALLstatic",
-		auxType:      auxSymOff,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294923263, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLclosure",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 4096},  // R12
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			clobbers: 4294923263, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLdefer",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294923263, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLgo",
-		auxType:      auxInt64,
-		argLen:       1,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			clobbers: 4294923263, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:         "CALLinter",
-		auxType:      auxInt64,
-		argLen:       2,
-		clobberFlags: true,
-		call:         true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21502}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			clobbers: 4294923263, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
-		},
-	},
-	{
-		name:   "InvertFlags",
-		argLen: 1,
-		reg:    regInfo{},
-	},
-	{
-		name:   "LoweredGetG",
-		argLen: 1,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "LoweredGetClosurePtr",
-		argLen: 0,
-		reg: regInfo{
-			outputs: []outputInfo{
-				{0, 4096}, // R12
-			},
-		},
-	},
-	{
-		name:           "LoweredNilCheck",
-		argLen:         2,
-		clobberFlags:   true,
-		nilCheck:       true,
-		faultOnNilArg0: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:   "MOVDconvert",
-		argLen: 2,
-		asm:    s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "FlagEQ",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagLT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:   "FlagGT",
-		argLen: 0,
-		reg:    regInfo{},
-	},
-	{
-		name:           "MOVWZatomicload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVWZ,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVDatomicload",
-		auxType:        auxSymOff,
-		argLen:         2,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "MOVWatomicstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVW,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "MOVDatomicstore",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.AMOVD,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "LAA",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            s390x.ALAA,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "LAAG",
-		auxType:        auxSymOff,
-		argLen:         3,
-		faultOnNilArg0: true,
-		asm:            s390x.ALAAG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB
-				{1, 54271},      // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:   "AddTupleFirst32",
-		argLen: 2,
-		reg:    regInfo{},
-	},
-	{
-		name:   "AddTupleFirst64",
-		argLen: 2,
-		reg:    regInfo{},
-	},
-	{
-		name:           "LoweredAtomicCas32",
-		auxType:        auxSymOff,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.ACS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 1},     // R0
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			clobbers: 1, // R0
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicCas64",
-		auxType:        auxSymOff,
-		argLen:         4,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.ACSG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 1},     // R0
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			clobbers: 1, // R0
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicExchange32",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.ACS,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 1}, // R0
-			},
-		},
-	},
-	{
-		name:           "LoweredAtomicExchange64",
-		auxType:        auxSymOff,
-		argLen:         3,
-		clobberFlags:   true,
-		faultOnNilArg0: true,
-		asm:            s390x.ACSG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-				{1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			outputs: []outputInfo{
-				{1, 0},
-				{0, 1}, // R0
-			},
-		},
-	},
-	{
-		name:         "FLOGR",
-		argLen:       1,
-		clobberFlags: true,
-		asm:          s390x.AFLOGR,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
-			},
-			clobbers: 2, // R1
-			outputs: []outputInfo{
-				{0, 1}, // R0
-			},
-		},
-	},
-	{
-		name:           "STMG2",
-		auxType:        auxSymOff,
-		argLen:         4,
-		faultOnNilArg0: true,
-		asm:            s390x.ASTMG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // R1
-				{2, 4},     // R2
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "STMG3",
-		auxType:        auxSymOff,
-		argLen:         5,
-		faultOnNilArg0: true,
-		asm:            s390x.ASTMG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // R1
-				{2, 4},     // R2
-				{3, 8},     // R3
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "STMG4",
-		auxType:        auxSymOff,
-		argLen:         6,
-		faultOnNilArg0: true,
-		asm:            s390x.ASTMG,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // R1
-				{2, 4},     // R2
-				{3, 8},     // R3
-				{4, 16},    // R4
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "STM2",
-		auxType:        auxSymOff,
-		argLen:         4,
-		faultOnNilArg0: true,
-		asm:            s390x.ASTMY,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // R1
-				{2, 4},     // R2
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "STM3",
-		auxType:        auxSymOff,
-		argLen:         5,
-		faultOnNilArg0: true,
-		asm:            s390x.ASTMY,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // R1
-				{2, 4},     // R2
-				{3, 8},     // R3
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:           "STM4",
-		auxType:        auxSymOff,
-		argLen:         6,
-		faultOnNilArg0: true,
-		asm:            s390x.ASTMY,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{1, 2},     // R1
-				{2, 4},     // R2
-				{3, 8},     // R3
-				{4, 16},    // R4
-				{0, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-		},
-	},
-	{
-		name:         "LoweredMove",
-		auxType:      auxInt64,
-		argLen:       4,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2},     // R1
-				{1, 4},     // R2
-				{2, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			clobbers: 6, // R1 R2
-		},
-	},
-	{
-		name:         "LoweredZero",
-		auxType:      auxInt64,
-		argLen:       3,
-		clobberFlags: true,
-		reg: regInfo{
-			inputs: []inputInfo{
-				{0, 2},     // R1
-				{1, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
-			},
-			clobbers: 2, // R1
-		},
-	},
-
-	{
-		name:        "Add8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Add16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Add32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Add64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:    "AddPtr",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Add32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Add64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "SubPtr",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:        "Mul8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Mul16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Mul32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Mul64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:    "Mul32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mul64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul8u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul16u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul32u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Hmul64u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mul32uhilo",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mul64uhilo",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Avg64u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div8u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div16u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div32u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div64u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Div128u",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "Mod8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod8u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod16u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod32u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Mod64u",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:        "And8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "And16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "And32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "And64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Or8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Or16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Or32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Or64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Xor8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Xor16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Xor32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Xor64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:    "Lsh8x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh8x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh8x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh8x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh16x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh16x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh16x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh16x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh32x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh32x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh32x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh32x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh64x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh64x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh64x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lsh64x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64x8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64x16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64x32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64x64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8Ux8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8Ux16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8Ux32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh8Ux64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16Ux8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16Ux16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16Ux32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh16Ux64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32Ux8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32Ux16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32Ux32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh32Ux64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64Ux8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64Ux16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64Ux32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Rsh64Ux64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Lrot8",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Lrot16",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Lrot32",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Lrot64",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:        "Eq8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Eq16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Eq32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Eq64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "EqPtr",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:    "EqInter",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "EqSlice",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Eq32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Eq64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:        "Neq8",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Neq16",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Neq32",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Neq64",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "NeqPtr",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:    "NeqInter",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "NeqSlice",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Neq32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Neq64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less8U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less16U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less32U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less64U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Less64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq8U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq16U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq32U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq64U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Leq64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater8U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater16U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater32U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater64U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Greater64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq8",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq8U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq16",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq16U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq32U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq64U",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq32F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Geq64F",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "AndB",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "OrB",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "EqB",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "NeqB",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Not",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Neg8",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Neg16",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Neg32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Neg64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Neg32F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Neg64F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Com8",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Com16",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Com32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Com64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Ctz32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Ctz64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Bswap32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Bswap64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Sqrt",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Phi",
-		argLen:  -1,
-		generic: true,
-	},
-	{
-		name:    "Copy",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Convert",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "ConstBool",
-		auxType: auxBool,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "ConstString",
-		auxType: auxString,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "ConstNil",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Const8",
-		auxType: auxInt8,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Const16",
-		auxType: auxInt16,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Const32",
-		auxType: auxInt32,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Const64",
-		auxType: auxInt64,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Const32F",
-		auxType: auxFloat32,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Const64F",
-		auxType: auxFloat64,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "ConstInterface",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "ConstSlice",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "InitMem",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Arg",
-		auxType: auxSymOff,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Addr",
-		auxType: auxSym,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SP",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "SB",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Func",
-		auxType: auxSym,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Load",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Store",
-		auxType: auxInt64,
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "Move",
-		auxType: auxSizeAndAlign,
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "Zero",
-		auxType: auxSizeAndAlign,
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "StoreWB",
-		auxType: auxInt64,
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "MoveWB",
-		auxType: auxSymSizeAndAlign,
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "MoveWBVolatile",
-		auxType: auxSymSizeAndAlign,
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "ZeroWB",
-		auxType: auxSymSizeAndAlign,
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "ClosureCall",
-		auxType: auxInt64,
-		argLen:  3,
-		call:    true,
-		generic: true,
-	},
-	{
-		name:    "StaticCall",
-		auxType: auxSymOff,
-		argLen:  1,
-		call:    true,
-		generic: true,
-	},
-	{
-		name:    "DeferCall",
-		auxType: auxInt64,
-		argLen:  1,
-		call:    true,
-		generic: true,
-	},
-	{
-		name:    "GoCall",
-		auxType: auxInt64,
-		argLen:  1,
-		call:    true,
-		generic: true,
-	},
-	{
-		name:    "InterCall",
-		auxType: auxInt64,
-		argLen:  2,
-		call:    true,
-		generic: true,
-	},
-	{
-		name:    "SignExt8to16",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SignExt8to32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SignExt8to64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SignExt16to32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SignExt16to64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SignExt32to64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ZeroExt8to16",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ZeroExt8to32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ZeroExt8to64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ZeroExt16to32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ZeroExt16to64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ZeroExt32to64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Trunc16to8",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Trunc32to8",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Trunc32to16",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Trunc64to8",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Trunc64to16",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Trunc64to32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32to32F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32to64F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64to32F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64to64F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Fto32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Fto64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Fto32",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Fto64",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Fto64F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Fto32F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "IsNonNil",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "IsInBounds",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "IsSliceInBounds",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "NilCheck",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "GetG",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "GetClosurePtr",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "PtrIndex",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "OffPtr",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SliceMake",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "SlicePtr",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SliceLen",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "SliceCap",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ComplexMake",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "ComplexReal",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ComplexImag",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "StringMake",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "StringPtr",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "StringLen",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "IMake",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "ITab",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "IData",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "StructMake0",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "StructMake1",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "StructMake2",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "StructMake3",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "StructMake4",
-		argLen:  4,
-		generic: true,
-	},
-	{
-		name:    "StructSelect",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ArrayMake0",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "ArrayMake1",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "ArraySelect",
-		auxType: auxInt64,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "StoreReg",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "LoadReg",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "FwdRef",
-		auxType: auxSym,
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "Unknown",
-		argLen:  0,
-		generic: true,
-	},
-	{
-		name:    "VarDef",
-		auxType: auxSym,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "VarKill",
-		auxType: auxSym,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "VarLive",
-		auxType: auxSym,
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "KeepAlive",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Int64Make",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Int64Hi",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Int64Lo",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:        "Add32carry",
-		argLen:      2,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:        "Add32withcarry",
-		argLen:      3,
-		commutative: true,
-		generic:     true,
-	},
-	{
-		name:    "Sub32carry",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "Sub32withcarry",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "Signmask",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Zeromask",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Slicemask",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Uto32F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Uto64F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Fto32U",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Fto32U",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Uto32F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Uto64F",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt32Fto64U",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Cvt64Fto64U",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Select0",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "Select1",
-		argLen:  1,
-		generic: true,
-	},
-	{
-		name:    "AtomicLoad32",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "AtomicLoad64",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "AtomicLoadPtr",
-		argLen:  2,
-		generic: true,
-	},
-	{
-		name:    "AtomicStore32",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicStore64",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicStorePtrNoWB",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicExchange32",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicExchange64",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicAdd32",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicAdd64",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicCompareAndSwap32",
-		argLen:  4,
-		generic: true,
-	},
-	{
-		name:    "AtomicCompareAndSwap64",
-		argLen:  4,
-		generic: true,
-	},
-	{
-		name:    "AtomicAnd8",
-		argLen:  3,
-		generic: true,
-	},
-	{
-		name:    "AtomicOr8",
-		argLen:  3,
-		generic: true,
-	},
-}
-
-func (o Op) Asm() obj.As       { return opcodeTable[o].asm }
-func (o Op) String() string    { return opcodeTable[o].name }
-func (o Op) UsesScratch() bool { return opcodeTable[o].usesScratch }
-
-var registers386 = [...]Register{
-	{0, x86.REG_AX, "AX"},
-	{1, x86.REG_CX, "CX"},
-	{2, x86.REG_DX, "DX"},
-	{3, x86.REG_BX, "BX"},
-	{4, x86.REGSP, "SP"},
-	{5, x86.REG_BP, "BP"},
-	{6, x86.REG_SI, "SI"},
-	{7, x86.REG_DI, "DI"},
-	{8, x86.REG_X0, "X0"},
-	{9, x86.REG_X1, "X1"},
-	{10, x86.REG_X2, "X2"},
-	{11, x86.REG_X3, "X3"},
-	{12, x86.REG_X4, "X4"},
-	{13, x86.REG_X5, "X5"},
-	{14, x86.REG_X6, "X6"},
-	{15, x86.REG_X7, "X7"},
-	{16, 0, "SB"},
-}
-var gpRegMask386 = regMask(239)
-var fpRegMask386 = regMask(65280)
-var specialRegMask386 = regMask(0)
-var framepointerReg386 = int8(5)
-var linkReg386 = int8(-1)
-var registersAMD64 = [...]Register{
-	{0, x86.REG_AX, "AX"},
-	{1, x86.REG_CX, "CX"},
-	{2, x86.REG_DX, "DX"},
-	{3, x86.REG_BX, "BX"},
-	{4, x86.REGSP, "SP"},
-	{5, x86.REG_BP, "BP"},
-	{6, x86.REG_SI, "SI"},
-	{7, x86.REG_DI, "DI"},
-	{8, x86.REG_R8, "R8"},
-	{9, x86.REG_R9, "R9"},
-	{10, x86.REG_R10, "R10"},
-	{11, x86.REG_R11, "R11"},
-	{12, x86.REG_R12, "R12"},
-	{13, x86.REG_R13, "R13"},
-	{14, x86.REG_R14, "R14"},
-	{15, x86.REG_R15, "R15"},
-	{16, x86.REG_X0, "X0"},
-	{17, x86.REG_X1, "X1"},
-	{18, x86.REG_X2, "X2"},
-	{19, x86.REG_X3, "X3"},
-	{20, x86.REG_X4, "X4"},
-	{21, x86.REG_X5, "X5"},
-	{22, x86.REG_X6, "X6"},
-	{23, x86.REG_X7, "X7"},
-	{24, x86.REG_X8, "X8"},
-	{25, x86.REG_X9, "X9"},
-	{26, x86.REG_X10, "X10"},
-	{27, x86.REG_X11, "X11"},
-	{28, x86.REG_X12, "X12"},
-	{29, x86.REG_X13, "X13"},
-	{30, x86.REG_X14, "X14"},
-	{31, x86.REG_X15, "X15"},
-	{32, 0, "SB"},
-}
-var gpRegMaskAMD64 = regMask(65519)
-var fpRegMaskAMD64 = regMask(4294901760)
-var specialRegMaskAMD64 = regMask(0)
-var framepointerRegAMD64 = int8(5)
-var linkRegAMD64 = int8(-1)
-var registersARM = [...]Register{
-	{0, arm.REG_R0, "R0"},
-	{1, arm.REG_R1, "R1"},
-	{2, arm.REG_R2, "R2"},
-	{3, arm.REG_R3, "R3"},
-	{4, arm.REG_R4, "R4"},
-	{5, arm.REG_R5, "R5"},
-	{6, arm.REG_R6, "R6"},
-	{7, arm.REG_R7, "R7"},
-	{8, arm.REG_R8, "R8"},
-	{9, arm.REG_R9, "R9"},
-	{10, arm.REGG, "g"},
-	{11, arm.REG_R11, "R11"},
-	{12, arm.REG_R12, "R12"},
-	{13, arm.REGSP, "SP"},
-	{14, arm.REG_R14, "R14"},
-	{15, arm.REG_R15, "R15"},
-	{16, arm.REG_F0, "F0"},
-	{17, arm.REG_F1, "F1"},
-	{18, arm.REG_F2, "F2"},
-	{19, arm.REG_F3, "F3"},
-	{20, arm.REG_F4, "F4"},
-	{21, arm.REG_F5, "F5"},
-	{22, arm.REG_F6, "F6"},
-	{23, arm.REG_F7, "F7"},
-	{24, arm.REG_F8, "F8"},
-	{25, arm.REG_F9, "F9"},
-	{26, arm.REG_F10, "F10"},
-	{27, arm.REG_F11, "F11"},
-	{28, arm.REG_F12, "F12"},
-	{29, arm.REG_F13, "F13"},
-	{30, arm.REG_F14, "F14"},
-	{31, arm.REG_F15, "F15"},
-	{32, 0, "SB"},
-}
-var gpRegMaskARM = regMask(21503)
-var fpRegMaskARM = regMask(4294901760)
-var specialRegMaskARM = regMask(0)
-var framepointerRegARM = int8(-1)
-var linkRegARM = int8(14)
-var registersARM64 = [...]Register{
-	{0, arm64.REG_R0, "R0"},
-	{1, arm64.REG_R1, "R1"},
-	{2, arm64.REG_R2, "R2"},
-	{3, arm64.REG_R3, "R3"},
-	{4, arm64.REG_R4, "R4"},
-	{5, arm64.REG_R5, "R5"},
-	{6, arm64.REG_R6, "R6"},
-	{7, arm64.REG_R7, "R7"},
-	{8, arm64.REG_R8, "R8"},
-	{9, arm64.REG_R9, "R9"},
-	{10, arm64.REG_R10, "R10"},
-	{11, arm64.REG_R11, "R11"},
-	{12, arm64.REG_R12, "R12"},
-	{13, arm64.REG_R13, "R13"},
-	{14, arm64.REG_R14, "R14"},
-	{15, arm64.REG_R15, "R15"},
-	{16, arm64.REG_R16, "R16"},
-	{17, arm64.REG_R17, "R17"},
-	{18, arm64.REG_R18, "R18"},
-	{19, arm64.REG_R19, "R19"},
-	{20, arm64.REG_R20, "R20"},
-	{21, arm64.REG_R21, "R21"},
-	{22, arm64.REG_R22, "R22"},
-	{23, arm64.REG_R23, "R23"},
-	{24, arm64.REG_R24, "R24"},
-	{25, arm64.REG_R25, "R25"},
-	{26, arm64.REG_R26, "R26"},
-	{27, arm64.REGG, "g"},
-	{28, arm64.REG_R29, "R29"},
-	{29, arm64.REG_R30, "R30"},
-	{30, arm64.REGSP, "SP"},
-	{31, arm64.REG_F0, "F0"},
-	{32, arm64.REG_F1, "F1"},
-	{33, arm64.REG_F2, "F2"},
-	{34, arm64.REG_F3, "F3"},
-	{35, arm64.REG_F4, "F4"},
-	{36, arm64.REG_F5, "F5"},
-	{37, arm64.REG_F6, "F6"},
-	{38, arm64.REG_F7, "F7"},
-	{39, arm64.REG_F8, "F8"},
-	{40, arm64.REG_F9, "F9"},
-	{41, arm64.REG_F10, "F10"},
-	{42, arm64.REG_F11, "F11"},
-	{43, arm64.REG_F12, "F12"},
-	{44, arm64.REG_F13, "F13"},
-	{45, arm64.REG_F14, "F14"},
-	{46, arm64.REG_F15, "F15"},
-	{47, arm64.REG_F16, "F16"},
-	{48, arm64.REG_F17, "F17"},
-	{49, arm64.REG_F18, "F18"},
-	{50, arm64.REG_F19, "F19"},
-	{51, arm64.REG_F20, "F20"},
-	{52, arm64.REG_F21, "F21"},
-	{53, arm64.REG_F22, "F22"},
-	{54, arm64.REG_F23, "F23"},
-	{55, arm64.REG_F24, "F24"},
-	{56, arm64.REG_F25, "F25"},
-	{57, arm64.REG_F26, "F26"},
-	{58, arm64.REG_F27, "F27"},
-	{59, arm64.REG_F28, "F28"},
-	{60, arm64.REG_F29, "F29"},
-	{61, arm64.REG_F30, "F30"},
-	{62, arm64.REG_F31, "F31"},
-	{63, 0, "SB"},
-}
-var gpRegMaskARM64 = regMask(670826495)
-var fpRegMaskARM64 = regMask(9223372034707292160)
-var specialRegMaskARM64 = regMask(0)
-var framepointerRegARM64 = int8(-1)
-var linkRegARM64 = int8(29)
-var registersMIPS = [...]Register{
-	{0, mips.REG_R0, "R0"},
-	{1, mips.REG_R1, "R1"},
-	{2, mips.REG_R2, "R2"},
-	{3, mips.REG_R3, "R3"},
-	{4, mips.REG_R4, "R4"},
-	{5, mips.REG_R5, "R5"},
-	{6, mips.REG_R6, "R6"},
-	{7, mips.REG_R7, "R7"},
-	{8, mips.REG_R8, "R8"},
-	{9, mips.REG_R9, "R9"},
-	{10, mips.REG_R10, "R10"},
-	{11, mips.REG_R11, "R11"},
-	{12, mips.REG_R12, "R12"},
-	{13, mips.REG_R13, "R13"},
-	{14, mips.REG_R14, "R14"},
-	{15, mips.REG_R15, "R15"},
-	{16, mips.REG_R16, "R16"},
-	{17, mips.REG_R17, "R17"},
-	{18, mips.REG_R18, "R18"},
-	{19, mips.REG_R19, "R19"},
-	{20, mips.REG_R20, "R20"},
-	{21, mips.REG_R21, "R21"},
-	{22, mips.REG_R22, "R22"},
-	{23, mips.REG_R24, "R24"},
-	{24, mips.REG_R25, "R25"},
-	{25, mips.REG_R28, "R28"},
-	{26, mips.REGSP, "SP"},
-	{27, mips.REGG, "g"},
-	{28, mips.REG_R31, "R31"},
-	{29, mips.REG_F0, "F0"},
-	{30, mips.REG_F2, "F2"},
-	{31, mips.REG_F4, "F4"},
-	{32, mips.REG_F6, "F6"},
-	{33, mips.REG_F8, "F8"},
-	{34, mips.REG_F10, "F10"},
-	{35, mips.REG_F12, "F12"},
-	{36, mips.REG_F14, "F14"},
-	{37, mips.REG_F16, "F16"},
-	{38, mips.REG_F18, "F18"},
-	{39, mips.REG_F20, "F20"},
-	{40, mips.REG_F22, "F22"},
-	{41, mips.REG_F24, "F24"},
-	{42, mips.REG_F26, "F26"},
-	{43, mips.REG_F28, "F28"},
-	{44, mips.REG_F30, "F30"},
-	{45, mips.REG_HI, "HI"},
-	{46, mips.REG_LO, "LO"},
-	{47, 0, "SB"},
-}
-var gpRegMaskMIPS = regMask(335544318)
-var fpRegMaskMIPS = regMask(35183835217920)
-var specialRegMaskMIPS = regMask(105553116266496)
-var framepointerRegMIPS = int8(-1)
-var linkRegMIPS = int8(28)
-var registersMIPS64 = [...]Register{
-	{0, mips.REG_R0, "R0"},
-	{1, mips.REG_R1, "R1"},
-	{2, mips.REG_R2, "R2"},
-	{3, mips.REG_R3, "R3"},
-	{4, mips.REG_R4, "R4"},
-	{5, mips.REG_R5, "R5"},
-	{6, mips.REG_R6, "R6"},
-	{7, mips.REG_R7, "R7"},
-	{8, mips.REG_R8, "R8"},
-	{9, mips.REG_R9, "R9"},
-	{10, mips.REG_R10, "R10"},
-	{11, mips.REG_R11, "R11"},
-	{12, mips.REG_R12, "R12"},
-	{13, mips.REG_R13, "R13"},
-	{14, mips.REG_R14, "R14"},
-	{15, mips.REG_R15, "R15"},
-	{16, mips.REG_R16, "R16"},
-	{17, mips.REG_R17, "R17"},
-	{18, mips.REG_R18, "R18"},
-	{19, mips.REG_R19, "R19"},
-	{20, mips.REG_R20, "R20"},
-	{21, mips.REG_R21, "R21"},
-	{22, mips.REG_R22, "R22"},
-	{23, mips.REG_R24, "R24"},
-	{24, mips.REG_R25, "R25"},
-	{25, mips.REGSP, "SP"},
-	{26, mips.REGG, "g"},
-	{27, mips.REG_R31, "R31"},
-	{28, mips.REG_F0, "F0"},
-	{29, mips.REG_F1, "F1"},
-	{30, mips.REG_F2, "F2"},
-	{31, mips.REG_F3, "F3"},
-	{32, mips.REG_F4, "F4"},
-	{33, mips.REG_F5, "F5"},
-	{34, mips.REG_F6, "F6"},
-	{35, mips.REG_F7, "F7"},
-	{36, mips.REG_F8, "F8"},
-	{37, mips.REG_F9, "F9"},
-	{38, mips.REG_F10, "F10"},
-	{39, mips.REG_F11, "F11"},
-	{40, mips.REG_F12, "F12"},
-	{41, mips.REG_F13, "F13"},
-	{42, mips.REG_F14, "F14"},
-	{43, mips.REG_F15, "F15"},
-	{44, mips.REG_F16, "F16"},
-	{45, mips.REG_F17, "F17"},
-	{46, mips.REG_F18, "F18"},
-	{47, mips.REG_F19, "F19"},
-	{48, mips.REG_F20, "F20"},
-	{49, mips.REG_F21, "F21"},
-	{50, mips.REG_F22, "F22"},
-	{51, mips.REG_F23, "F23"},
-	{52, mips.REG_F24, "F24"},
-	{53, mips.REG_F25, "F25"},
-	{54, mips.REG_F26, "F26"},
-	{55, mips.REG_F27, "F27"},
-	{56, mips.REG_F28, "F28"},
-	{57, mips.REG_F29, "F29"},
-	{58, mips.REG_F30, "F30"},
-	{59, mips.REG_F31, "F31"},
-	{60, mips.REG_HI, "HI"},
-	{61, mips.REG_LO, "LO"},
-	{62, 0, "SB"},
-}
-var gpRegMaskMIPS64 = regMask(167772158)
-var fpRegMaskMIPS64 = regMask(1152921504338411520)
-var specialRegMaskMIPS64 = regMask(3458764513820540928)
-var framepointerRegMIPS64 = int8(-1)
-var linkRegMIPS64 = int8(27)
-var registersPPC64 = [...]Register{
-	{0, ppc64.REG_R0, "R0"},
-	{1, ppc64.REGSP, "SP"},
-	{2, 0, "SB"},
-	{3, ppc64.REG_R3, "R3"},
-	{4, ppc64.REG_R4, "R4"},
-	{5, ppc64.REG_R5, "R5"},
-	{6, ppc64.REG_R6, "R6"},
-	{7, ppc64.REG_R7, "R7"},
-	{8, ppc64.REG_R8, "R8"},
-	{9, ppc64.REG_R9, "R9"},
-	{10, ppc64.REG_R10, "R10"},
-	{11, ppc64.REG_R11, "R11"},
-	{12, ppc64.REG_R12, "R12"},
-	{13, ppc64.REG_R13, "R13"},
-	{14, ppc64.REG_R14, "R14"},
-	{15, ppc64.REG_R15, "R15"},
-	{16, ppc64.REG_R16, "R16"},
-	{17, ppc64.REG_R17, "R17"},
-	{18, ppc64.REG_R18, "R18"},
-	{19, ppc64.REG_R19, "R19"},
-	{20, ppc64.REG_R20, "R20"},
-	{21, ppc64.REG_R21, "R21"},
-	{22, ppc64.REG_R22, "R22"},
-	{23, ppc64.REG_R23, "R23"},
-	{24, ppc64.REG_R24, "R24"},
-	{25, ppc64.REG_R25, "R25"},
-	{26, ppc64.REG_R26, "R26"},
-	{27, ppc64.REG_R27, "R27"},
-	{28, ppc64.REG_R28, "R28"},
-	{29, ppc64.REG_R29, "R29"},
-	{30, ppc64.REGG, "g"},
-	{31, ppc64.REG_R31, "R31"},
-	{32, ppc64.REG_F0, "F0"},
-	{33, ppc64.REG_F1, "F1"},
-	{34, ppc64.REG_F2, "F2"},
-	{35, ppc64.REG_F3, "F3"},
-	{36, ppc64.REG_F4, "F4"},
-	{37, ppc64.REG_F5, "F5"},
-	{38, ppc64.REG_F6, "F6"},
-	{39, ppc64.REG_F7, "F7"},
-	{40, ppc64.REG_F8, "F8"},
-	{41, ppc64.REG_F9, "F9"},
-	{42, ppc64.REG_F10, "F10"},
-	{43, ppc64.REG_F11, "F11"},
-	{44, ppc64.REG_F12, "F12"},
-	{45, ppc64.REG_F13, "F13"},
-	{46, ppc64.REG_F14, "F14"},
-	{47, ppc64.REG_F15, "F15"},
-	{48, ppc64.REG_F16, "F16"},
-	{49, ppc64.REG_F17, "F17"},
-	{50, ppc64.REG_F18, "F18"},
-	{51, ppc64.REG_F19, "F19"},
-	{52, ppc64.REG_F20, "F20"},
-	{53, ppc64.REG_F21, "F21"},
-	{54, ppc64.REG_F22, "F22"},
-	{55, ppc64.REG_F23, "F23"},
-	{56, ppc64.REG_F24, "F24"},
-	{57, ppc64.REG_F25, "F25"},
-	{58, ppc64.REG_F26, "F26"},
-	{59, ppc64.REG_F27, "F27"},
-	{60, ppc64.REG_F28, "F28"},
-	{61, ppc64.REG_F29, "F29"},
-	{62, ppc64.REG_F30, "F30"},
-	{63, ppc64.REG_F31, "F31"},
-}
-var gpRegMaskPPC64 = regMask(1073733624)
-var fpRegMaskPPC64 = regMask(576460743713488896)
-var specialRegMaskPPC64 = regMask(0)
-var framepointerRegPPC64 = int8(1)
-var linkRegPPC64 = int8(-1)
-var registersS390X = [...]Register{
-	{0, s390x.REG_R0, "R0"},
-	{1, s390x.REG_R1, "R1"},
-	{2, s390x.REG_R2, "R2"},
-	{3, s390x.REG_R3, "R3"},
-	{4, s390x.REG_R4, "R4"},
-	{5, s390x.REG_R5, "R5"},
-	{6, s390x.REG_R6, "R6"},
-	{7, s390x.REG_R7, "R7"},
-	{8, s390x.REG_R8, "R8"},
-	{9, s390x.REG_R9, "R9"},
-	{10, s390x.REG_R10, "R10"},
-	{11, s390x.REG_R11, "R11"},
-	{12, s390x.REG_R12, "R12"},
-	{13, s390x.REGG, "g"},
-	{14, s390x.REG_R14, "R14"},
-	{15, s390x.REGSP, "SP"},
-	{16, s390x.REG_F0, "F0"},
-	{17, s390x.REG_F1, "F1"},
-	{18, s390x.REG_F2, "F2"},
-	{19, s390x.REG_F3, "F3"},
-	{20, s390x.REG_F4, "F4"},
-	{21, s390x.REG_F5, "F5"},
-	{22, s390x.REG_F6, "F6"},
-	{23, s390x.REG_F7, "F7"},
-	{24, s390x.REG_F8, "F8"},
-	{25, s390x.REG_F9, "F9"},
-	{26, s390x.REG_F10, "F10"},
-	{27, s390x.REG_F11, "F11"},
-	{28, s390x.REG_F12, "F12"},
-	{29, s390x.REG_F13, "F13"},
-	{30, s390x.REG_F14, "F14"},
-	{31, s390x.REG_F15, "F15"},
-	{32, 0, "SB"},
-}
-var gpRegMaskS390X = regMask(21503)
-var fpRegMaskS390X = regMask(4294901760)
-var specialRegMaskS390X = regMask(0)
-var framepointerRegS390X = int8(-1)
-var linkRegS390X = int8(14)
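The regMask values in the deleted tables above are bitsets over the per-architecture register table: bit i selects entry i, so gpRegMaskS390X = 21503 covers R0-R9, R12 and R14, while the 54270 masks in the inputInfo entries add SP but drop R0. A minimal standalone sketch of that decoding; the s390xNames slice and decode helper are illustrative only and not part of the ssa package.

package main

import "fmt"

// Register names in the order used by the registersS390X table above
// (index 13 is the goroutine pointer g, index 15 is SP, index 16 is SB).
var s390xNames = []string{
	"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
	"R8", "R9", "R10", "R11", "R12", "g", "R14", "SP", "SB",
}

// decode lists the register names selected by a regMask: bit i set
// means entry i of the architecture's register table is allowed.
func decode(mask uint64, names []string) []string {
	var out []string
	for i, n := range names {
		if mask&(1<<uint(i)) != 0 {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	// 21503 is the gpRegMaskS390X value above: R0-R9, R12, R14.
	fmt.Println(decode(21503, s390xNames))
	// 54270 appears in the inputInfo entries above: R1-R9, R12, R14, SP.
	fmt.Println(decode(54270, s390xNames))
}

Running it reproduces the register lists that appear as comments next to those masks in the generated table.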
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/opt.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/opt.go
deleted file mode 100644
index 477d370..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/opt.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/opt.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/opt.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// machine-independent optimization
-func opt(f *Func) {
-	applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric)
-}
-
-func dec(f *Func) {
-	applyRewrite(f, rewriteBlockdec, rewriteValuedec)
-	if f.Config.IntSize == 4 && f.Config.arch != "amd64p32" {
-		applyRewrite(f, rewriteBlockdec64, rewriteValuedec64)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/passbm_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/passbm_test.go
deleted file mode 100644
index 402e1df..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/passbm_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/passbm_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/passbm_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package ssa
-
-import (
-	"fmt"
-	"testing"
-)
-
-const (
-	blockCount = 1000
-	passCount  = 15000
-)
-
-type passFunc func(*Func)
-
-func BenchmarkDSEPass(b *testing.B)           { benchFnPass(b, dse, blockCount, genFunction) }
-func BenchmarkDSEPassBlock(b *testing.B)      { benchFnBlock(b, dse, genFunction) }
-func BenchmarkCSEPass(b *testing.B)           { benchFnPass(b, cse, blockCount, genFunction) }
-func BenchmarkCSEPassBlock(b *testing.B)      { benchFnBlock(b, cse, genFunction) }
-func BenchmarkDeadcodePass(b *testing.B)      { benchFnPass(b, deadcode, blockCount, genFunction) }
-func BenchmarkDeadcodePassBlock(b *testing.B) { benchFnBlock(b, deadcode, genFunction) }
-
-func multi(f *Func) {
-	cse(f)
-	dse(f)
-	deadcode(f)
-}
-func BenchmarkMultiPass(b *testing.B)      { benchFnPass(b, multi, blockCount, genFunction) }
-func BenchmarkMultiPassBlock(b *testing.B) { benchFnBlock(b, multi, genFunction) }
-
-// benchFnPass runs passFunc b.N times across a single function.
-func benchFnPass(b *testing.B, fn passFunc, size int, bg blockGen) {
-	b.ReportAllocs()
-	c := NewConfig("amd64", DummyFrontend{b}, nil, true)
-	fun := Fun(c, "entry", bg(size)...)
-	CheckFunc(fun.f)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		fn(fun.f)
-		b.StopTimer()
-		CheckFunc(fun.f)
-		b.StartTimer()
-	}
-}
-
-// benchFnPass runs passFunc across a function with b.N blocks.
-func benchFnBlock(b *testing.B, fn passFunc, bg blockGen) {
-	b.ReportAllocs()
-	c := NewConfig("amd64", DummyFrontend{b}, nil, true)
-	fun := Fun(c, "entry", bg(b.N)...)
-	CheckFunc(fun.f)
-	b.ResetTimer()
-	for i := 0; i < passCount; i++ {
-		fn(fun.f)
-	}
-	b.StopTimer()
-}
-
-func genFunction(size int) []bloc {
-	var blocs []bloc
-	elemType := &TypeImpl{Size_: 8, Name: "testtype"}
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr", Elem_: elemType} // dummy for testing
-
-	valn := func(s string, m, n int) string { return fmt.Sprintf("%s%d-%d", s, m, n) }
-	blocs = append(blocs,
-		Bloc("entry",
-			Valu(valn("store", 0, 4), OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Goto(blockn(1)),
-		),
-	)
-	for i := 1; i < size+1; i++ {
-		blocs = append(blocs, Bloc(blockn(i),
-			Valu(valn("v", i, 0), OpConstBool, TypeBool, 1, nil),
-			Valu(valn("addr", i, 1), OpAddr, ptrType, 0, nil, "sb"),
-			Valu(valn("addr", i, 2), OpAddr, ptrType, 0, nil, "sb"),
-			Valu(valn("addr", i, 3), OpAddr, ptrType, 0, nil, "sb"),
-			Valu(valn("zero", i, 1), OpZero, TypeMem, 8, nil, valn("addr", i, 3),
-				valn("store", i-1, 4)),
-			Valu(valn("store", i, 1), OpStore, TypeMem, 0, nil, valn("addr", i, 1),
-				valn("v", i, 0), valn("zero", i, 1)),
-			Valu(valn("store", i, 2), OpStore, TypeMem, 0, nil, valn("addr", i, 2),
-				valn("v", i, 0), valn("store", i, 1)),
-			Valu(valn("store", i, 3), OpStore, TypeMem, 0, nil, valn("addr", i, 1),
-				valn("v", i, 0), valn("store", i, 2)),
-			Valu(valn("store", i, 4), OpStore, TypeMem, 0, nil, valn("addr", i, 3),
-				valn("v", i, 0), valn("store", i, 3)),
-			Goto(blockn(i+1))))
-	}
-
-	blocs = append(blocs,
-		Bloc(blockn(size+1), Goto("exit")),
-		Bloc("exit", Exit("store0-4")),
-	)
-
-	return blocs
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/phielim.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/phielim.go
deleted file mode 100644
index 647103e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/phielim.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/phielim.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/phielim.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// phielim eliminates redundant phi values from f.
-// A phi is redundant if its arguments are all equal. For
-// purposes of counting, ignore the phi itself. Both of
-// these phis are redundant:
-//   v = phi(x,x,x)
-//   v = phi(x,v,x,v)
-// We repeat this process to also catch situations like:
-//   v = phi(x, phi(x, x), phi(x, v))
-// TODO: Can we also simplify cases like:
-//   v = phi(v, w, x)
-//   w = phi(v, w, x)
-// and would that be useful?
-func phielim(f *Func) {
-	for {
-		change := false
-		for _, b := range f.Blocks {
-			for _, v := range b.Values {
-				copyelimValue(v)
-				change = phielimValue(v) || change
-			}
-		}
-		if !change {
-			break
-		}
-	}
-}
-
-// phielimValue tries to convert the phi v to a copy.
-func phielimValue(v *Value) bool {
-	if v.Op != OpPhi {
-		return false
-	}
-
-	// If there are two distinct args of v which
-	// are not v itself, then the phi must remain.
-	// Otherwise, we can replace it with a copy.
-	var w *Value
-	for _, x := range v.Args {
-		if x == v {
-			continue
-		}
-		if x == w {
-			continue
-		}
-		if w != nil {
-			return false
-		}
-		w = x
-	}
-
-	if w == nil {
-		// v references only itself. It must be in
-		// a dead code loop. Don't bother modifying it.
-		return false
-	}
-	v.Op = OpCopy
-	v.SetArgs1(w)
-	f := v.Block.Func
-	if f.pass.debug > 0 {
-		f.Config.Warnl(v.Line, "eliminated phi")
-	}
-	return true
-}
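The redundancy rule spelled out in the phielim comments above (a phi is redundant when its arguments are all one value once occurrences of the phi itself are ignored) can be exercised on a toy value graph. A minimal sketch, independent of the ssa package; toyValue and uniqueArg are illustrative names only.

package main

import "fmt"

// toyValue is a stripped-down stand-in for an SSA value: an op name
// plus its arguments.
type toyValue struct {
	op   string
	args []*toyValue
}

// uniqueArg mirrors the rule in phielimValue above: if every argument
// of a phi is either the phi itself or one single value w, the phi is
// redundant and can become a copy of w. It returns nil otherwise.
func uniqueArg(v *toyValue) *toyValue {
	var w *toyValue
	for _, x := range v.args {
		if x == v || x == w {
			continue
		}
		if w != nil {
			return nil // two distinct non-self arguments: keep the phi
		}
		w = x
	}
	return w
}

func main() {
	x := &toyValue{op: "Const64"}
	y := &toyValue{op: "Arg"}
	phi := &toyValue{op: "Phi"}
	phi.args = []*toyValue{x, phi, x, phi} // phi(x, v, x, v)
	fmt.Println(uniqueArg(phi) == x)       // true: redundant, reduces to x

	phi2 := &toyValue{op: "Phi", args: []*toyValue{x, y}}
	fmt.Println(uniqueArg(phi2) == nil) // true: genuinely merges two values
}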
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/phiopt.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/phiopt.go
deleted file mode 100644
index 4c5da78..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/phiopt.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/phiopt.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/phiopt.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// phiopt eliminates boolean Phis based on the previous if.
-//
-// Main use case is to transform:
-//   x := false
-//   if b {
-//     x = true
-//   }
-// into x = b.
-//
-// In SSA code this appears as
-//
-// b0
-//   If b -> b1 b2
-// b1
-//   Plain -> b2
-// b2
-//   x = (OpPhi (ConstBool [true]) (ConstBool [false]))
-//
-// In this case we can replace x with a copy of b.
-func phiopt(f *Func) {
-	sdom := f.sdom()
-	for _, b := range f.Blocks {
-		if len(b.Preds) != 2 || len(b.Values) == 0 {
-			// TODO: handle more than 2 predecessors, e.g. a || b || c.
-			continue
-		}
-
-		pb0, b0 := b, b.Preds[0].b
-		for len(b0.Succs) == 1 && len(b0.Preds) == 1 {
-			pb0, b0 = b0, b0.Preds[0].b
-		}
-		if b0.Kind != BlockIf {
-			continue
-		}
-		pb1, b1 := b, b.Preds[1].b
-		for len(b1.Succs) == 1 && len(b1.Preds) == 1 {
-			pb1, b1 = b1, b1.Preds[0].b
-		}
-		if b1 != b0 {
-			continue
-		}
-		// b0 is the if block giving the boolean value.
-
-		// reverse is the predecessor from which the truth value comes.
-		var reverse int
-		if b0.Succs[0].b == pb0 && b0.Succs[1].b == pb1 {
-			reverse = 0
-		} else if b0.Succs[0].b == pb1 && b0.Succs[1].b == pb0 {
-			reverse = 1
-		} else {
-			b.Fatalf("invalid predecessors\n")
-		}
-
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				continue
-			}
-
-			// Look for conversions from bool to 0/1.
-			if v.Type.IsInteger() {
-				phioptint(v, b0, reverse)
-			}
-
-			if !v.Type.IsBoolean() {
-				continue
-			}
-
-			// Replaces
-			//   if a { x = true } else { x = false } with x = a
-			// and
-			//   if a { x = false } else { x = true } with x = !a
-			if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool {
-				if v.Args[reverse].AuxInt != v.Args[1-reverse].AuxInt {
-					ops := [2]Op{OpNot, OpCopy}
-					v.reset(ops[v.Args[reverse].AuxInt])
-					v.AddArg(b0.Control)
-					if f.pass.debug > 0 {
-						f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
-					}
-					continue
-				}
-			}
-
-			// Replaces
-			//   if a { x = true } else { x = value } with x = a || value.
-			// Requires that value dominates x, meaning that regardless of a,
-			// value is always computed. This guarantees that the side effects
-			// of value are not seen if a is false.
-			if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 {
-				if tmp := v.Args[1-reverse]; sdom.isAncestorEq(tmp.Block, b) {
-					v.reset(OpOrB)
-					v.SetArgs2(b0.Control, tmp)
-					if f.pass.debug > 0 {
-						f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
-					}
-					continue
-				}
-			}
-
-			// Replaces
-			//   if a { x = value } else { x = false } with x = a && value.
-			// Requires that value dominates x, meaning that regardless of a,
-			// value is always computed. This guarantees that the side effects
-			// of value are not seen if a is false.
-			if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 {
-				if tmp := v.Args[reverse]; sdom.isAncestorEq(tmp.Block, b) {
-					v.reset(OpAndB)
-					v.SetArgs2(b0.Control, tmp)
-					if f.pass.debug > 0 {
-						f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op)
-					}
-					continue
-				}
-			}
-		}
-	}
-}
-
-func phioptint(v *Value, b0 *Block, reverse int) {
-	a0 := v.Args[0]
-	a1 := v.Args[1]
-	if a0.Op != a1.Op {
-		return
-	}
-
-	switch a0.Op {
-	case OpConst8, OpConst16, OpConst32, OpConst64:
-	default:
-		return
-	}
-
-	negate := false
-	switch {
-	case a0.AuxInt == 0 && a1.AuxInt == 1:
-		negate = true
-	case a0.AuxInt == 1 && a1.AuxInt == 0:
-	default:
-		return
-	}
-
-	if reverse == 1 {
-		negate = !negate
-	}
-
-	switch v.Type.Size() {
-	case 1:
-		v.reset(OpCopy)
-	case 2:
-		v.reset(OpZeroExt8to16)
-	case 4:
-		v.reset(OpZeroExt8to32)
-	case 8:
-		v.reset(OpZeroExt8to64)
-	default:
-		v.Fatalf("bad int size %d", v.Type.Size())
-	}
-
-	a := b0.Control
-	if negate {
-		a = v.Block.NewValue1(v.Line, OpNot, a.Type, a)
-	}
-	v.AddArg(a)
-
-	f := b0.Func
-	if f.pass.debug > 0 {
-		f.Config.Warnl(v.Block.Line, "converted OpPhi bool -> int%d", v.Type.Size()*8)
-	}
-}
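At the source level, the three rewrites described in the phiopt comments above amount to the equivalences below; the sketch is illustrative Go, not compiler code, and the names copyBefore, orBefore and so on are made up for the example. Note the caveat from the comments: in the || and && forms the second operand must be computed regardless of the condition, so it cannot carry side effects that the original code would have skipped.

package main

import "fmt"

// Each pair below is semantically equivalent; phiopt rewrites the
// phi produced by the left-hand form into the right-hand expression.

func copyBefore(a bool) bool { // if a { x = true } else { x = false }
	x := false
	if a {
		x = true
	}
	return x
}
func copyAfter(a bool) bool { return a }

func orBefore(a, v bool) bool { // if a { x = true } else { x = v }
	x := v
	if a {
		x = true
	}
	return x
}
func orAfter(a, v bool) bool { return a || v }

func andBefore(a, v bool) bool { // if a { x = v } else { x = false }
	x := false
	if a {
		x = v
	}
	return x
}
func andAfter(a, v bool) bool { return a && v }

func main() {
	// Check that each before/after pair agrees on every boolean input.
	for _, a := range []bool{false, true} {
		for _, v := range []bool{false, true} {
			fmt.Println(copyBefore(a) == copyAfter(a),
				orBefore(a, v) == orAfter(a, v),
				andBefore(a, v) == andAfter(a, v))
		}
	}
}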
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/print.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/print.go
deleted file mode 100644
index 74378b3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/print.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/print.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/print.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-)
-
-func printFunc(f *Func) {
-	f.Logf("%s", f)
-}
-
-func (f *Func) String() string {
-	var buf bytes.Buffer
-	p := stringFuncPrinter{w: &buf}
-	fprintFunc(p, f)
-	return buf.String()
-}
-
-type funcPrinter interface {
-	header(f *Func)
-	startBlock(b *Block, reachable bool)
-	endBlock(b *Block)
-	value(v *Value, live bool)
-	startDepCycle()
-	endDepCycle()
-	named(n LocalSlot, vals []*Value)
-}
-
-type stringFuncPrinter struct {
-	w io.Writer
-}
-
-func (p stringFuncPrinter) header(f *Func) {
-	fmt.Fprint(p.w, f.Name)
-	fmt.Fprint(p.w, " ")
-	fmt.Fprintln(p.w, f.Type)
-}
-
-func (p stringFuncPrinter) startBlock(b *Block, reachable bool) {
-	fmt.Fprintf(p.w, "  b%d:", b.ID)
-	if len(b.Preds) > 0 {
-		io.WriteString(p.w, " <-")
-		for _, e := range b.Preds {
-			pred := e.b
-			fmt.Fprintf(p.w, " b%d", pred.ID)
-		}
-	}
-	if !reachable {
-		fmt.Fprint(p.w, " DEAD")
-	}
-	io.WriteString(p.w, "\n")
-}
-
-func (p stringFuncPrinter) endBlock(b *Block) {
-	fmt.Fprintln(p.w, "    "+b.LongString())
-}
-
-func (p stringFuncPrinter) value(v *Value, live bool) {
-	fmt.Fprint(p.w, "    ")
-	//fmt.Fprint(p.w, v.Block.Func.Config.fe.Line(v.Line))
-	//fmt.Fprint(p.w, ": ")
-	fmt.Fprint(p.w, v.LongString())
-	if !live {
-		fmt.Fprint(p.w, " DEAD")
-	}
-	fmt.Fprintln(p.w)
-}
-
-func (p stringFuncPrinter) startDepCycle() {
-	fmt.Fprintln(p.w, "dependency cycle!")
-}
-
-func (p stringFuncPrinter) endDepCycle() {}
-
-func (p stringFuncPrinter) named(n LocalSlot, vals []*Value) {
-	fmt.Fprintf(p.w, "name %s: %v\n", n.Name(), vals)
-}
-
-func fprintFunc(p funcPrinter, f *Func) {
-	reachable, live := findlive(f)
-	p.header(f)
-	printed := make([]bool, f.NumValues())
-	for _, b := range f.Blocks {
-		p.startBlock(b, reachable[b.ID])
-
-		if f.scheduled {
-			// Order of Values has been decided - print in that order.
-			for _, v := range b.Values {
-				p.value(v, live[v.ID])
-				printed[v.ID] = true
-			}
-			p.endBlock(b)
-			continue
-		}
-
-		// print phis first since all value cycles contain a phi
-		n := 0
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				continue
-			}
-			p.value(v, live[v.ID])
-			printed[v.ID] = true
-			n++
-		}
-
-		// print rest of values in dependency order
-		for n < len(b.Values) {
-			m := n
-		outer:
-			for _, v := range b.Values {
-				if printed[v.ID] {
-					continue
-				}
-				for _, w := range v.Args {
-					// w == nil shouldn't happen, but if it does,
-					// don't panic; we'll get a better diagnosis later.
-					if w != nil && w.Block == b && !printed[w.ID] {
-						continue outer
-					}
-				}
-				p.value(v, live[v.ID])
-				printed[v.ID] = true
-				n++
-			}
-			if m == n {
-				p.startDepCycle()
-				for _, v := range b.Values {
-					if printed[v.ID] {
-						continue
-					}
-					p.value(v, live[v.ID])
-					printed[v.ID] = true
-					n++
-				}
-				p.endDepCycle()
-			}
-		}
-
-		p.endBlock(b)
-	}
-	for _, name := range f.Names {
-		p.named(name, f.NamedValues[name])
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/prove.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/prove.go
deleted file mode 100644
index b5e8bdc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/prove.go
+++ /dev/null
@@ -1,720 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/prove.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/prove.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"math"
-)
-
-type branch int
-
-const (
-	unknown = iota
-	positive
-	negative
-)
-
-// relation represents the set of possible relations between
-// pairs of variables (v, w). Without a priori knowledge the
-// mask is lt | eq | gt meaning v can be less than, equal to or
-// greater than w. When the execution path branches on the condition
-// `v op w` the set of relations is updated to exclude any
-// relation not possible due to `v op w` being true (or false).
-//
-// E.g.
-//
-// r := relation(...)
-//
-// if v < w {
-//   newR := r & lt
-// }
-// if v >= w {
-//   newR := r & (eq|gt)
-// }
-// if v != w {
-//   newR := r & (lt|gt)
-// }
-type relation uint
-
-const (
-	lt relation = 1 << iota
-	eq
-	gt
-)
-
-// domain represents the domain of a variable pair in which a set
-// of relations is known.  For example, relations learned for unsigned
-// pairs cannot be transferred to signed pairs because the same bit
-// representation can mean something else.
-type domain uint
-
-const (
-	signed domain = 1 << iota
-	unsigned
-	pointer
-	boolean
-)
-
-type pair struct {
-	v, w *Value // a pair of values, ordered by ID.
-	// v can be nil, to mean the zero value.
-	// for booleans the zero value (v == nil) is false.
-	d domain
-}
-
-// fact is a pair plus a relation for that pair.
-type fact struct {
-	p pair
-	r relation
-}
-
-// a limit records known upper and lower bounds for a value.
-type limit struct {
-	min, max   int64  // min <= value <= max, signed
-	umin, umax uint64 // umin <= value <= umax, unsigned
-}
-
-func (l limit) String() string {
-	return fmt.Sprintf("sm,SM,um,UM=%d,%d,%d,%d", l.min, l.max, l.umin, l.umax)
-}
-
-var noLimit = limit{math.MinInt64, math.MaxInt64, 0, math.MaxUint64}
-
-// a limitFact is a limit known for a particular value.
-type limitFact struct {
-	vid   ID
-	limit limit
-}
-
-// factsTable keeps track of relations between pairs of values.
-type factsTable struct {
-	facts map[pair]relation // current known set of relation
-	stack []fact            // previous sets of relations
-
-	// known lower and upper bounds on individual values.
-	limits     map[ID]limit
-	limitStack []limitFact // previous entries
-}
-
-// checkpointFact is an invalid value used for checkpointing
-// and restoring factsTable.
-var checkpointFact = fact{}
-var checkpointBound = limitFact{}
-
-func newFactsTable() *factsTable {
-	ft := &factsTable{}
-	ft.facts = make(map[pair]relation)
-	ft.stack = make([]fact, 4)
-	ft.limits = make(map[ID]limit)
-	ft.limitStack = make([]limitFact, 4)
-	return ft
-}
-
-// get returns the known possible relations between v and w.
-// If v and w are not in the map it returns lt|eq|gt, i.e. any order.
-func (ft *factsTable) get(v, w *Value, d domain) relation {
-	if v.isGenericIntConst() || w.isGenericIntConst() {
-		reversed := false
-		if v.isGenericIntConst() {
-			v, w = w, v
-			reversed = true
-		}
-		r := lt | eq | gt
-		lim, ok := ft.limits[v.ID]
-		if !ok {
-			return r
-		}
-		c := w.AuxInt
-		switch d {
-		case signed:
-			switch {
-			case c < lim.min:
-				r = gt
-			case c > lim.max:
-				r = lt
-			case c == lim.min && c == lim.max:
-				r = eq
-			case c == lim.min:
-				r = gt | eq
-			case c == lim.max:
-				r = lt | eq
-			}
-		case unsigned:
-			// TODO: also use signed data if lim.min >= 0?
-			var uc uint64
-			switch w.Op {
-			case OpConst64:
-				uc = uint64(c)
-			case OpConst32:
-				uc = uint64(uint32(c))
-			case OpConst16:
-				uc = uint64(uint16(c))
-			case OpConst8:
-				uc = uint64(uint8(c))
-			}
-			switch {
-			case uc < lim.umin:
-				r = gt
-			case uc > lim.umax:
-				r = lt
-			case uc == lim.umin && uc == lim.umax:
-				r = eq
-			case uc == lim.umin:
-				r = gt | eq
-			case uc == lim.umax:
-				r = lt | eq
-			}
-		}
-		if reversed {
-			return reverseBits[r]
-		}
-		return r
-	}
-
-	reversed := false
-	if lessByID(w, v) {
-		v, w = w, v
-		reversed = !reversed
-	}
-
-	p := pair{v, w, d}
-	r, ok := ft.facts[p]
-	if !ok {
-		if p.v == p.w {
-			r = eq
-		} else {
-			r = lt | eq | gt
-		}
-	}
-
-	if reversed {
-		return reverseBits[r]
-	}
-	return r
-}
-
-// update updates the set of relations between v and w in domain d
-// restricting it to r.
-func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) {
-	if lessByID(w, v) {
-		v, w = w, v
-		r = reverseBits[r]
-	}
-
-	p := pair{v, w, d}
-	oldR := ft.get(v, w, d)
-	ft.stack = append(ft.stack, fact{p, oldR})
-	ft.facts[p] = oldR & r
-
-	// Extract bounds when comparing against constants
-	if v.isGenericIntConst() {
-		v, w = w, v
-		r = reverseBits[r]
-	}
-	if v != nil && w.isGenericIntConst() {
-		c := w.AuxInt
-		// Note: all the +1/-1 below could overflow/underflow. Either will
-		// still generate correct results, it will just lead to imprecision.
-		// In fact if there is overflow/underflow, the corresponding
-		// code is unreachable because the known range is outside the range
-		// of the value's type.
-		old, ok := ft.limits[v.ID]
-		if !ok {
-			old = noLimit
-		}
-		lim := old
-		// Update lim with the new information we know.
-		switch d {
-		case signed:
-			switch r {
-			case lt:
-				if c-1 < lim.max {
-					lim.max = c - 1
-				}
-			case lt | eq:
-				if c < lim.max {
-					lim.max = c
-				}
-			case gt | eq:
-				if c > lim.min {
-					lim.min = c
-				}
-			case gt:
-				if c+1 > lim.min {
-					lim.min = c + 1
-				}
-			case lt | gt:
-				if c == lim.min {
-					lim.min++
-				}
-				if c == lim.max {
-					lim.max--
-				}
-			case eq:
-				lim.min = c
-				lim.max = c
-			}
-		case unsigned:
-			var uc uint64
-			switch w.Op {
-			case OpConst64:
-				uc = uint64(c)
-			case OpConst32:
-				uc = uint64(uint32(c))
-			case OpConst16:
-				uc = uint64(uint16(c))
-			case OpConst8:
-				uc = uint64(uint8(c))
-			}
-			switch r {
-			case lt:
-				if uc-1 < lim.umax {
-					lim.umax = uc - 1
-				}
-			case lt | eq:
-				if uc < lim.umax {
-					lim.umax = uc
-				}
-			case gt | eq:
-				if uc > lim.umin {
-					lim.umin = uc
-				}
-			case gt:
-				if uc+1 > lim.umin {
-					lim.umin = uc + 1
-				}
-			case lt | gt:
-				if uc == lim.umin {
-					lim.umin++
-				}
-				if uc == lim.umax {
-					lim.umax--
-				}
-			case eq:
-				lim.umin = uc
-				lim.umax = uc
-			}
-		}
-		ft.limitStack = append(ft.limitStack, limitFact{v.ID, old})
-		ft.limits[v.ID] = lim
-		if v.Block.Func.pass.debug > 2 {
-			v.Block.Func.Config.Warnl(parent.Line, "parent=%s, new limits %s %s %s", parent, v, w, lim.String())
-		}
-	}
-}
-
-// isNonNegative returns true if v is known to be non-negative.
-func (ft *factsTable) isNonNegative(v *Value) bool {
-	if isNonNegative(v) {
-		return true
-	}
-	l, has := ft.limits[v.ID]
-	return has && (l.min >= 0 || l.umax <= math.MaxInt64)
-}
-
-// checkpoint saves the current state of known relations.
-// Called when descending on a branch.
-func (ft *factsTable) checkpoint() {
-	ft.stack = append(ft.stack, checkpointFact)
-	ft.limitStack = append(ft.limitStack, checkpointBound)
-}
-
-// restore restores the known relations to the state just
-// before the previous checkpoint.
-// Called when backing up on a branch.
-func (ft *factsTable) restore() {
-	for {
-		old := ft.stack[len(ft.stack)-1]
-		ft.stack = ft.stack[:len(ft.stack)-1]
-		if old == checkpointFact {
-			break
-		}
-		if old.r == lt|eq|gt {
-			delete(ft.facts, old.p)
-		} else {
-			ft.facts[old.p] = old.r
-		}
-	}
-	for {
-		old := ft.limitStack[len(ft.limitStack)-1]
-		ft.limitStack = ft.limitStack[:len(ft.limitStack)-1]
-		if old.vid == 0 { // checkpointBound
-			break
-		}
-		if old.limit == noLimit {
-			delete(ft.limits, old.vid)
-		} else {
-			ft.limits[old.vid] = old.limit
-		}
-	}
-}
-
-func lessByID(v, w *Value) bool {
-	if v == nil && w == nil {
-		// Should not happen, but just in case.
-		return false
-	}
-	if v == nil {
-		return true
-	}
-	return w != nil && v.ID < w.ID
-}
-
-var (
-	reverseBits = [...]relation{0, 4, 2, 6, 1, 5, 3, 7}
-
-	// domainRelationTable maps what we learn when the positive branch is taken.
-	// For example:
-	//      OpLess8:   {signed, lt},
-	//      v1 = (OpLess8 v2 v3).
-	// If the v1 branch is taken, then we learn that the relation between
-	// v2 and v3 is restricted to lt in the signed domain.
-	domainRelationTable = map[Op]struct {
-		d domain
-		r relation
-	}{
-		OpEq8:   {signed | unsigned, eq},
-		OpEq16:  {signed | unsigned, eq},
-		OpEq32:  {signed | unsigned, eq},
-		OpEq64:  {signed | unsigned, eq},
-		OpEqPtr: {pointer, eq},
-
-		OpNeq8:   {signed | unsigned, lt | gt},
-		OpNeq16:  {signed | unsigned, lt | gt},
-		OpNeq32:  {signed | unsigned, lt | gt},
-		OpNeq64:  {signed | unsigned, lt | gt},
-		OpNeqPtr: {pointer, lt | gt},
-
-		OpLess8:   {signed, lt},
-		OpLess8U:  {unsigned, lt},
-		OpLess16:  {signed, lt},
-		OpLess16U: {unsigned, lt},
-		OpLess32:  {signed, lt},
-		OpLess32U: {unsigned, lt},
-		OpLess64:  {signed, lt},
-		OpLess64U: {unsigned, lt},
-
-		OpLeq8:   {signed, lt | eq},
-		OpLeq8U:  {unsigned, lt | eq},
-		OpLeq16:  {signed, lt | eq},
-		OpLeq16U: {unsigned, lt | eq},
-		OpLeq32:  {signed, lt | eq},
-		OpLeq32U: {unsigned, lt | eq},
-		OpLeq64:  {signed, lt | eq},
-		OpLeq64U: {unsigned, lt | eq},
-
-		OpGeq8:   {signed, eq | gt},
-		OpGeq8U:  {unsigned, eq | gt},
-		OpGeq16:  {signed, eq | gt},
-		OpGeq16U: {unsigned, eq | gt},
-		OpGeq32:  {signed, eq | gt},
-		OpGeq32U: {unsigned, eq | gt},
-		OpGeq64:  {signed, eq | gt},
-		OpGeq64U: {unsigned, eq | gt},
-
-		OpGreater8:   {signed, gt},
-		OpGreater8U:  {unsigned, gt},
-		OpGreater16:  {signed, gt},
-		OpGreater16U: {unsigned, gt},
-		OpGreater32:  {signed, gt},
-		OpGreater32U: {unsigned, gt},
-		OpGreater64:  {signed, gt},
-		OpGreater64U: {unsigned, gt},
-
-		// TODO: OpIsInBounds actually tests 0 <= a < b. This means
-		// that the positive branch learns signed/LT and unsigned/LT
-		// but the negative branch only learns unsigned/GE.
-		OpIsInBounds:      {unsigned, lt},
-		OpIsSliceInBounds: {unsigned, lt | eq},
-	}
-)
-
-// prove removes redundant BlockIf controls that can be inferred in a straight line.
-//
-// By far, the most common redundant pairs are generated by bounds checking.
-// For example for the code:
-//
-//    a[i] = 4
-//    foo(a[i])
-//
-// The compiler will generate the following code:
-//
-//    if i >= len(a) {
-//        panic("not in bounds")
-//    }
-//    a[i] = 4
-//    if i >= len(a) {
-//        panic("not in bounds")
-//    }
-//    foo(a[i])
-//
-// The second comparison i >= len(a) is clearly redundant because if the
-// else branch of the first comparison is executed, we already know that i < len(a).
-// The code for the second panic can be removed.
-func prove(f *Func) {
-	// current node state
-	type walkState int
-	const (
-		descend walkState = iota
-		simplify
-	)
-	// work maintains the DFS stack.
-	type bp struct {
-		block *Block    // current handled block
-		state walkState // what's to do
-	}
-	work := make([]bp, 0, 256)
-	work = append(work, bp{
-		block: f.Entry,
-		state: descend,
-	})
-
-	ft := newFactsTable()
-	idom := f.Idom()
-	sdom := f.sdom()
-
-	// DFS on the dominator tree.
-	for len(work) > 0 {
-		node := work[len(work)-1]
-		work = work[:len(work)-1]
-		parent := idom[node.block.ID]
-		branch := getBranch(sdom, parent, node.block)
-
-		switch node.state {
-		case descend:
-			if branch != unknown {
-				ft.checkpoint()
-				c := parent.Control
-				updateRestrictions(parent, ft, boolean, nil, c, lt|gt, branch)
-				if tr, has := domainRelationTable[parent.Control.Op]; has {
-					// When we branched from parent we learned a new set of
-					// restrictions. Update the factsTable accordingly.
-					updateRestrictions(parent, ft, tr.d, c.Args[0], c.Args[1], tr.r, branch)
-				}
-			}
-
-			work = append(work, bp{
-				block: node.block,
-				state: simplify,
-			})
-			for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) {
-				work = append(work, bp{
-					block: s,
-					state: descend,
-				})
-			}
-
-		case simplify:
-			succ := simplifyBlock(ft, node.block)
-			if succ != unknown {
-				b := node.block
-				b.Kind = BlockFirst
-				b.SetControl(nil)
-				if succ == negative {
-					b.swapSuccessors()
-				}
-			}
-
-			if branch != unknown {
-				ft.restore()
-			}
-		}
-	}
-}
-
-// getBranch reports which branch of p, the immediate dominator of b,
-// must be taken to reach b, or unknown if that cannot be determined.
-func getBranch(sdom SparseTree, p *Block, b *Block) branch {
-	if p == nil || p.Kind != BlockIf {
-		return unknown
-	}
-	// If p and p.Succs[0] are dominators it means that every path
-	// from entry to b passes through p and p.Succs[0]. We care that
-	// no path from entry to b passes through p.Succs[1]. If p.Succs[0]
-	// has one predecessor then (apart from the degenerate case),
-	// there is no path from entry that can reach b through p.Succs[1].
-	// TODO: how about p->yes->b->yes, i.e. a loop in yes.
-	if sdom.isAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 {
-		return positive
-	}
-	if sdom.isAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 {
-		return negative
-	}
-	return unknown
-}
-
-// updateRestrictions updates restrictions from the immediate
-// dominating block (p) using r. r is adjusted according to the branch taken.
-func updateRestrictions(parent *Block, ft *factsTable, t domain, v, w *Value, r relation, branch branch) {
-	if t == 0 || branch == unknown {
-		// Trivial case: nothing to do, or branch unknown.
-		// Should not happen, but just in case.
-		return
-	}
-	if branch == negative {
-		// Negative branch taken, complement the relations.
-		r = (lt | eq | gt) ^ r
-	}
-	for i := domain(1); i <= t; i <<= 1 {
-		if t&i != 0 {
-			ft.update(parent, v, w, i, r)
-		}
-	}
-}
-
-// simplifyBlock simplifies block b using the restrictions known in ft.
-// Returns which branch must always be taken.
-func simplifyBlock(ft *factsTable, b *Block) branch {
-	for _, v := range b.Values {
-		if v.Op != OpSlicemask {
-			continue
-		}
-		add := v.Args[0]
-		if add.Op != OpAdd64 && add.Op != OpAdd32 {
-			continue
-		}
-		// Note that the arg of slicemask was originally a sub, but
-		// was rewritten to an add by generic.rules (if the thing
-		// being subtracted was a constant).
-		x := add.Args[0]
-		y := add.Args[1]
-		if x.Op == OpConst64 || x.Op == OpConst32 {
-			x, y = y, x
-		}
-		if y.Op != OpConst64 && y.Op != OpConst32 {
-			continue
-		}
-		// slicemask(x + y)
-		// if x is larger than -y (y is negative), then slicemask is -1.
-		lim, ok := ft.limits[x.ID]
-		if !ok {
-			continue
-		}
-		if lim.umin > uint64(-y.AuxInt) {
-			if v.Args[0].Op == OpAdd64 {
-				v.reset(OpConst64)
-			} else {
-				v.reset(OpConst32)
-			}
-			if b.Func.pass.debug > 0 {
-				b.Func.Config.Warnl(v.Line, "Proved slicemask not needed")
-			}
-			v.AuxInt = -1
-		}
-	}
-
-	if b.Kind != BlockIf {
-		return unknown
-	}
-
-	// First, check whether the condition itself is redundant.
-	m := ft.get(nil, b.Control, boolean)
-	if m == lt|gt {
-		if b.Func.pass.debug > 0 {
-			if b.Func.pass.debug > 1 {
-				b.Func.Config.Warnl(b.Line, "Proved boolean %s (%s)", b.Control.Op, b.Control)
-			} else {
-				b.Func.Config.Warnl(b.Line, "Proved boolean %s", b.Control.Op)
-			}
-		}
-		return positive
-	}
-	if m == eq {
-		if b.Func.pass.debug > 0 {
-			if b.Func.pass.debug > 1 {
-				b.Func.Config.Warnl(b.Line, "Disproved boolean %s (%s)", b.Control.Op, b.Control)
-			} else {
-				b.Func.Config.Warnl(b.Line, "Disproved boolean %s", b.Control.Op)
-			}
-		}
-		return negative
-	}
-
-	// Next, check the relations implied by the control op.
-	c := b.Control
-	tr, has := domainRelationTable[c.Op]
-	if !has {
-		return unknown
-	}
-
-	a0, a1 := c.Args[0], c.Args[1]
-	for d := domain(1); d <= tr.d; d <<= 1 {
-		if d&tr.d == 0 {
-			continue
-		}
-
-		// tr.r represents the cases in which the positive branch is taken.
-		// m represents the cases still possible given the previous relations.
-		// If the set of possible relations m is included in the set of relations
-		// needed to take the positive (or negative) branch, then that branch will
-		// always be taken.
-		// As a shortcut, if m == 0 then this block is dead code.
-		m := ft.get(a0, a1, d)
-		if m != 0 && tr.r&m == m {
-			if b.Func.pass.debug > 0 {
-				if b.Func.pass.debug > 1 {
-					b.Func.Config.Warnl(b.Line, "Proved %s (%s)", c.Op, c)
-				} else {
-					b.Func.Config.Warnl(b.Line, "Proved %s", c.Op)
-				}
-			}
-			return positive
-		}
-		if m != 0 && ((lt|eq|gt)^tr.r)&m == m {
-			if b.Func.pass.debug > 0 {
-				if b.Func.pass.debug > 1 {
-					b.Func.Config.Warnl(b.Line, "Disproved %s (%s)", c.Op, c)
-				} else {
-					b.Func.Config.Warnl(b.Line, "Disproved %s", c.Op)
-				}
-			}
-			return negative
-		}
-	}
-
-	// HACK: If the first argument of IsInBounds or IsSliceInBounds
-	// is known to be non-negative and we already know that it is smaller than
-	// (or equal to) the upper bound, then this is proven. Most useful in cases such as:
-	// if len(a) <= 1 { return }
-	// do something with a[1]
-	if (c.Op == OpIsInBounds || c.Op == OpIsSliceInBounds) && ft.isNonNegative(c.Args[0]) {
-		m := ft.get(a0, a1, signed)
-		if m != 0 && tr.r&m == m {
-			if b.Func.pass.debug > 0 {
-				if b.Func.pass.debug > 1 {
-					b.Func.Config.Warnl(b.Line, "Proved non-negative bounds %s (%s)", c.Op, c)
-				} else {
-					b.Func.Config.Warnl(b.Line, "Proved non-negative bounds %s", c.Op)
-				}
-			}
-			return positive
-		}
-	}
-
-	return unknown
-}
-
-// isNonNegative returns true if v is known to be greater than or equal to zero.
-func isNonNegative(v *Value) bool {
-	switch v.Op {
-	case OpConst64:
-		return v.AuxInt >= 0
-
-	case OpConst32:
-		return int32(v.AuxInt) >= 0
-
-	case OpStringLen, OpSliceLen, OpSliceCap,
-		OpZeroExt8to64, OpZeroExt16to64, OpZeroExt32to64:
-		return true
-
-	case OpRsh64x64:
-		return isNonNegative(v.Args[0])
-	}
-	return false
-}
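For intuition, a minimal standalone sketch of the three-bit relation sets used by the prove pass above; it is illustrative only and assumes lt, eq and gt occupy bits 0, 1 and 2, which is what the reverseBits table {0, 4, 2, 6, 1, 5, 3, 7} implies:

package main

import "fmt"

type relation uint

const (
	lt relation = 1 << iota // 1: v <  w
	eq                      // 2: v == w
	gt                      // 4: v >  w
)

var reverseBits = [...]relation{0, 4, 2, 6, 1, 5, 3, 7}

func main() {
	// Taking the negative branch complements the relation set, as in
	// updateRestrictions: knowing !(v < w) leaves eq|gt, i.e. v >= w.
	fmt.Println((lt|eq|gt)^lt == eq|gt) // true

	// Swapping the operands maps lt<->gt, as in get and update:
	// v <= w (lt|eq) is the same fact as w >= v (eq|gt).
	fmt.Println(reverseBits[lt|eq] == eq|gt) // true
}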
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/redblack32.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/redblack32.go
deleted file mode 100644
index c9dbcbc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/redblack32.go
+++ /dev/null
@@ -1,432 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/redblack32.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/redblack32.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "fmt"
-
-const (
-	rankLeaf rbrank = 1
-	rankZero rbrank = 0
-)
-
-type rbrank int8
-
-// RBTint32 is a red-black tree with data stored at internal nodes,
-// following Tarjan, Data Structures and Network Algorithms,
-// pp 48-52, using explicit rank instead of red and black.
-// Deletion is not yet implemented because it is not yet needed.
-// Extra operations glb, lub, glbEq, lubEq are provided for
-// use in sparse lookup algorithms.
-type RBTint32 struct {
-	root *node32
-	// An extra-clever implementation will have special cases
-	// for small sets, but we are not extra-clever today.
-}
-
-func (t *RBTint32) String() string {
-	if t.root == nil {
-		return "[]"
-	}
-	return "[" + t.root.String() + "]"
-}
-
-func (t *node32) String() string {
-	s := ""
-	if t.left != nil {
-		s = t.left.String() + " "
-	}
-	s = s + fmt.Sprintf("k=%d,d=%v", t.key, t.data)
-	if t.right != nil {
-		s = s + " " + t.right.String()
-	}
-	return s
-}
-
-type node32 struct {
-	// Standard conventions hold for left = smaller, right = larger
-	left, right, parent *node32
-	data                interface{}
-	key                 int32
-	rank                rbrank // From Tarjan pp 48-49:
-	// If x is a node with a parent, then x.rank <= x.parent.rank <= x.rank+1.
-	// If x is a node with a grandparent, then x.rank < x.parent.parent.rank.
-	// If x is an "external [null] node", then x.rank = 0 && x.parent.rank = 1.
-	// Any node with one or more null children should have rank = 1.
-}
-
-// makeNode returns a new leaf node with the given key and nil data.
-func (t *RBTint32) makeNode(key int32) *node32 {
-	return &node32{key: key, rank: rankLeaf}
-}
-
-// IsEmpty reports whether t is empty.
-func (t *RBTint32) IsEmpty() bool {
-	return t.root == nil
-}
-
-// IsSingle reports whether t is a singleton (leaf).
-func (t *RBTint32) IsSingle() bool {
-	return t.root != nil && t.root.isLeaf()
-}
-
-// VisitInOrder applies f to the key and data pairs in t,
-// with keys ordered from smallest to largest.
-func (t *RBTint32) VisitInOrder(f func(int32, interface{})) {
-	if t.root == nil {
-		return
-	}
-	t.root.visitInOrder(f)
-}
-
-func (n *node32) Data() interface{} {
-	if n == nil {
-		return nil
-	}
-	return n.data
-}
-
-func (n *node32) keyAndData() (k int32, d interface{}) {
-	if n == nil {
-		k = 0
-		d = nil
-	} else {
-		k = n.key
-		d = n.data
-	}
-	return
-}
-
-func (n *node32) Rank() rbrank {
-	if n == nil {
-		return 0
-	}
-	return n.rank
-}
-
-// Find returns the data associated with key in the tree, or
-// nil if key is not in the tree.
-func (t *RBTint32) Find(key int32) interface{} {
-	return t.root.find(key).Data()
-}
-
-// Insert adds key to the tree and associates key with data.
-// If key was already in the tree, it updates the associated data.
-// Insert returns the previous data associated with key,
-// or nil if key was not present.
-// Insert panics if data is nil.
-func (t *RBTint32) Insert(key int32, data interface{}) interface{} {
-	if data == nil {
-		panic("Cannot insert nil data into tree")
-	}
-	n := t.root
-	var newroot *node32
-	if n == nil {
-		n = t.makeNode(key)
-		newroot = n
-	} else {
-		newroot, n = n.insert(key, t)
-	}
-	r := n.data
-	n.data = data
-	t.root = newroot
-	return r
-}
-
-// Min returns the minimum element of t and its associated data.
-// If t is empty, then (0, nil) is returned.
-func (t *RBTint32) Min() (k int32, d interface{}) {
-	return t.root.min().keyAndData()
-}
-
-// Max returns the maximum element of t and its associated data.
-// If t is empty, then (0, nil) is returned.
-func (t *RBTint32) Max() (k int32, d interface{}) {
-	return t.root.max().keyAndData()
-}
-
-// Glb returns the greatest-lower-bound-exclusive of x and its associated
-// data.  If x has no glb in the tree, then (0, nil) is returned.
-func (t *RBTint32) Glb(x int32) (k int32, d interface{}) {
-	return t.root.glb(x, false).keyAndData()
-}
-
-// GlbEq returns the greatest-lower-bound-inclusive of x and its associated
-// data.  If x has no glbEQ in the tree, then (0, nil) is returned.
-func (t *RBTint32) GlbEq(x int32) (k int32, d interface{}) {
-	return t.root.glb(x, true).keyAndData()
-}
-
-// Lub returns the least-upper-bound-exclusive of x and its associated
-// data.  If x has no lub in the tree, then (0, nil) is returned.
-func (t *RBTint32) Lub(x int32) (k int32, d interface{}) {
-	return t.root.lub(x, false).keyAndData()
-}
-
-// LubEq returns the least-upper-bound-inclusive of x and its associated
-// data.  If x has no lubEq in the tree, then (0, nil) is returned.
-func (t *RBTint32) LubEq(x int32) (k int32, d interface{}) {
-	return t.root.lub(x, true).keyAndData()
-}
-
-func (t *node32) isLeaf() bool {
-	return t.left == nil && t.right == nil
-}
-
-func (t *node32) visitInOrder(f func(int32, interface{})) {
-	if t.left != nil {
-		t.left.visitInOrder(f)
-	}
-	f(t.key, t.data)
-	if t.right != nil {
-		t.right.visitInOrder(f)
-	}
-}
-
-func (t *node32) maxChildRank() rbrank {
-	if t.left == nil {
-		if t.right == nil {
-			return rankZero
-		}
-		return t.right.rank
-	}
-	if t.right == nil {
-		return t.left.rank
-	}
-	if t.right.rank > t.left.rank {
-		return t.right.rank
-	}
-	return t.left.rank
-}
-
-func (t *node32) minChildRank() rbrank {
-	if t.left == nil || t.right == nil {
-		return rankZero
-	}
-	if t.right.rank < t.left.rank {
-		return t.right.rank
-	}
-	return t.left.rank
-}
-
-func (t *node32) find(key int32) *node32 {
-	for t != nil {
-		if key < t.key {
-			t = t.left
-		} else if key > t.key {
-			t = t.right
-		} else {
-			return t
-		}
-	}
-	return nil
-}
-
-func (t *node32) min() *node32 {
-	if t == nil {
-		return t
-	}
-	for t.left != nil {
-		t = t.left
-	}
-	return t
-}
-
-func (t *node32) max() *node32 {
-	if t == nil {
-		return t
-	}
-	for t.right != nil {
-		t = t.right
-	}
-	return t
-}
-
-func (t *node32) glb(key int32, allow_eq bool) *node32 {
-	var best *node32 = nil
-	for t != nil {
-		if key <= t.key {
-			if key == t.key && allow_eq {
-				return t
-			}
-			// t is too big, glb is to left.
-			t = t.left
-		} else {
-			// t is a lower bound, record it and seek a better one.
-			best = t
-			t = t.right
-		}
-	}
-	return best
-}
-
-func (t *node32) lub(key int32, allow_eq bool) *node32 {
-	var best *node32 = nil
-	for t != nil {
-		if key >= t.key {
-			if key == t.key && allow_eq {
-				return t
-			}
-			// t is too small, lub is to right.
-			t = t.right
-		} else {
-			// t is an upper bound, record it and seek a better one.
-			best = t
-			t = t.left
-		}
-	}
-	return best
-}
-
-func (t *node32) insert(x int32, w *RBTint32) (newroot, newnode *node32) {
-	// defaults
-	newroot = t
-	newnode = t
-	if x == t.key {
-		return
-	}
-	if x < t.key {
-		if t.left == nil {
-			n := w.makeNode(x)
-			n.parent = t
-			t.left = n
-			newnode = n
-			return
-		}
-		var new_l *node32
-		new_l, newnode = t.left.insert(x, w)
-		t.left = new_l
-		new_l.parent = t
-		newrank := 1 + new_l.maxChildRank()
-		if newrank > t.rank {
-			if newrank > 1+t.right.Rank() { // rotations required
-				if new_l.left.Rank() < new_l.right.Rank() {
-					// double rotation
-					t.left = new_l.rightToRoot()
-				}
-				newroot = t.leftToRoot()
-				return
-			} else {
-				t.rank = newrank
-			}
-		}
-	} else { // x > t.key
-		if t.right == nil {
-			n := w.makeNode(x)
-			n.parent = t
-			t.right = n
-			newnode = n
-			return
-		}
-		var new_r *node32
-		new_r, newnode = t.right.insert(x, w)
-		t.right = new_r
-		new_r.parent = t
-		newrank := 1 + new_r.maxChildRank()
-		if newrank > t.rank {
-			if newrank > 1+t.left.Rank() { // rotations required
-				if new_r.right.Rank() < new_r.left.Rank() {
-					// double rotation
-					t.right = new_r.leftToRoot()
-				}
-				newroot = t.rightToRoot()
-				return
-			} else {
-				t.rank = newrank
-			}
-		}
-	}
-	return
-}
-
-func (t *node32) rightToRoot() *node32 {
-	//    this
-	// left  right
-	//      rl   rr
-	//
-	// becomes
-	//
-	//       right
-	//    this   rr
-	// left  rl
-	//
-	right := t.right
-	rl := right.left
-	right.parent = t.parent
-	right.left = t
-	t.parent = right
-	// parent's child ptr fixed in caller
-	t.right = rl
-	if rl != nil {
-		rl.parent = t
-	}
-	return right
-}
-
-func (t *node32) leftToRoot() *node32 {
-	//     this
-	//  left  right
-	// ll  lr
-	//
-	// becomes
-	//
-	//    left
-	//   ll  this
-	//      lr  right
-	//
-	left := t.left
-	lr := left.right
-	left.parent = t.parent
-	left.right = t
-	t.parent = left
-	// parent's child ptr fixed in caller
-	t.left = lr
-	if lr != nil {
-		lr.parent = t
-	}
-	return left
-}
-
-// next returns the successor of t in a left-to-right
-// walk of the tree in which t is embedded.
-func (t *node32) next() *node32 {
-	// If there is a right child, it is to the right
-	r := t.right
-	if r != nil {
-		return r.min()
-	}
-	// if t is p.left, then p, else repeat.
-	p := t.parent
-	for p != nil {
-		if p.left == t {
-			return p
-		}
-		t = p
-		p = t.parent
-	}
-	return nil
-}
-
-// prev returns the predecessor of t in a left-to-right
-// walk of the tree in which t is embedded.
-func (t *node32) prev() *node32 {
-	// If there is a left child, it is to the left
-	l := t.left
-	if l != nil {
-		return l.max()
-	}
-	// if t is p.right, then p, else repeat.
-	p := t.parent
-	for p != nil {
-		if p.right == t {
-			return p
-		}
-		t = p
-		p = t.parent
-	}
-	return nil
-}
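As a quick orientation to the RBTint32 API deleted above, a usage sketch; the function name below is hypothetical and, since the type lives in the internal ssa package, code like this could only run from inside that package (for example from a test):

func exampleRBTint32() {
	t := &RBTint32{}
	t.Insert(10, "ten")
	t.Insert(20, "twenty")
	t.Insert(30, "thirty")

	_ = t.Find(20)     // "twenty"
	_ = t.Find(25)     // nil: 25 is not a key
	k, _ := t.Glb(25)  // k == 20, the greatest key strictly below 25
	k, _ = t.LubEq(20) // k == 20, the least key >= 20
	k, _ = t.Lub(30)   // k == 0 and nil data: no key strictly above 30
	_ = k
}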
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/redblack32_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/redblack32_test.go
deleted file mode 100644
index 919e395..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/redblack32_test.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/redblack32_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/redblack32_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"testing"
-)
-
-type sstring string
-
-func (s sstring) String() string {
-	return string(s)
-}
-
-// wellFormed ensures that a red-black tree meets
-// all of its invariants and returns a string identifying
-// the first problem encountered. If there is no problem
-// then the returned string is empty. The size is also
-// returned to allow comparison of calculated tree size
-// with expected.
-func (t *RBTint32) wellFormed() (s string, i int) {
-	if t.root == nil {
-		s = ""
-		i = 0
-		return
-	}
-	return t.root.wellFormedSubtree(nil, -0x80000000, 0x7fffffff)
-}
-
-// wellFormedSubtree ensures that a red-black subtree meets
-// all of its invariants and returns a string identifying
-// the first problem encountered. If there is no problem
-// then the returned string is empty. The size is also
-// returned to allow comparison of calculated tree size
-// with expected.
-func (t *node32) wellFormedSubtree(parent *node32, min, max int32) (s string, i int) {
-	i = -1 // initialize to a failing value
-	s = "" // s is the reason for failure; empty means okay.
-
-	if t.parent != parent {
-		s = "t.parent != parent"
-		return
-	}
-
-	if min >= t.key {
-		s = "min >= t.key"
-		return
-	}
-
-	if max <= t.key {
-		s = "max <= t.key"
-		return
-	}
-
-	l := t.left
-	r := t.right
-	if l == nil && r == nil {
-		if t.rank != rankLeaf {
-			s = "leaf rank wrong"
-			return
-		}
-	}
-	if l != nil {
-		if t.rank < l.rank {
-			s = "t.rank < l.rank"
-		} else if t.rank > 1+l.rank {
-			s = "t.rank > 1+l.rank"
-		} else if t.rank <= l.maxChildRank() {
-			s = "t.rank <= l.maxChildRank()"
-		} else if t.key <= l.key {
-			s = "t.key <= l.key"
-		}
-		if s != "" {
-			return
-		}
-	} else {
-		if t.rank != 1 {
-			s = "t w/ left nil has rank != 1"
-			return
-		}
-	}
-	if r != nil {
-		if t.rank < r.rank {
-			s = "t.rank < r.rank"
-		} else if t.rank > 1+r.rank {
-			s = "t.rank > 1+r.rank"
-		} else if t.rank <= r.maxChildRank() {
-			s = "t.rank <= r.maxChildRank()"
-		} else if t.key >= r.key {
-			s = "t.key >= r.key"
-		}
-		if s != "" {
-			return
-		}
-	} else {
-		if t.rank != 1 {
-			s = "t w/ right nil has rank != 1"
-			return
-		}
-	}
-	ii := 1
-	if l != nil {
-		res, il := l.wellFormedSubtree(t, min, t.key)
-		if res != "" {
-			s = "L." + res
-			return
-		}
-		ii += il
-	}
-	if r != nil {
-		res, ir := r.wellFormedSubtree(t, t.key, max)
-		if res != "" {
-			s = "R." + res
-			return
-		}
-		ii += ir
-	}
-	i = ii
-	return
-}
-
-func (t *RBTint32) DebugString() string {
-	if t.root == nil {
-		return ""
-	}
-	return t.root.DebugString()
-}
-
-// DebugString returns a rendering of the tree with nesting information
-// to allow an eyeball check of the tree balance.
-func (t *node32) DebugString() string {
-	s := ""
-	if t.left != nil {
-		s = s + "["
-		s = s + t.left.DebugString()
-		s = s + "]"
-	}
-	s = s + fmt.Sprintf("%v=%v:%d", t.key, t.data, t.rank)
-	if t.right != nil {
-		s = s + "["
-		s = s + t.right.DebugString()
-		s = s + "]"
-	}
-	return s
-}
-
-func allRBT32Ops(te *testing.T, x []int32) {
-	t := &RBTint32{}
-	for i, d := range x {
-		x[i] = d + d // Double everything for glb/lub testing
-	}
-
-	// fmt.Printf("Inserting double of %v", x)
-	k := 0
-	min := int32(0x7fffffff)
-	max := int32(-0x80000000)
-	for _, d := range x {
-		if d < min {
-			min = d
-		}
-
-		if d > max {
-			max = d
-		}
-
-		t.Insert(d, sstring(fmt.Sprintf("%v", d)))
-		k++
-		s, i := t.wellFormed()
-		if i != k {
-			te.Errorf("Wrong tree size %v, expected %v for %v", i, k, t.DebugString())
-		}
-		if s != "" {
-			te.Errorf("Tree consistency problem at %v", s)
-			return
-		} else {
-			// fmt.Printf("%s", t.DebugString())
-		}
-	}
-
-	oops := false
-
-	for _, d := range x {
-		s := fmt.Sprintf("%v", d)
-		f := t.Find(d)
-
-		// data
-		if s != fmt.Sprintf("%v", f) {
-			te.Errorf("s(%v) != f(%v)", s, f)
-			oops = true
-		}
-	}
-
-	if !oops {
-		for _, d := range x {
-			s := fmt.Sprintf("%v", d)
-
-			kg, g := t.Glb(d + 1)
-			kge, ge := t.GlbEq(d)
-			kl, l := t.Lub(d - 1)
-			kle, le := t.LubEq(d)
-
-			// keys
-			if d != kg {
-				te.Errorf("d(%v) != kg(%v)", d, kg)
-			}
-			if d != kl {
-				te.Errorf("d(%v) != kl(%v)", d, kl)
-			}
-			if d != kge {
-				te.Errorf("d(%v) != kge(%v)", d, kge)
-			}
-			if d != kle {
-				te.Errorf("d(%v) != kle(%v)", d, kle)
-			}
-			// data
-			if s != fmt.Sprintf("%v", g) {
-				te.Errorf("s(%v) != g(%v)", s, g)
-			}
-			if s != fmt.Sprintf("%v", l) {
-				te.Errorf("s(%v) != l(%v)", s, l)
-			}
-			if s != fmt.Sprintf("%v", ge) {
-				te.Errorf("s(%v) != ge(%v)", s, ge)
-			}
-			if s != fmt.Sprintf("%v", le) {
-				te.Errorf("s(%v) != le(%v)", s, le)
-			}
-		}
-
-		for _, d := range x {
-			s := fmt.Sprintf("%v", d)
-			kge, ge := t.GlbEq(d + 1)
-			kle, le := t.LubEq(d - 1)
-			if d != kge {
-				te.Errorf("d(%v) != kge(%v)", d, kge)
-			}
-			if d != kle {
-				te.Errorf("d(%v) != kle(%v)", d, kle)
-			}
-			if s != fmt.Sprintf("%v", ge) {
-				te.Errorf("s(%v) != ge(%v)", s, ge)
-			}
-			if s != fmt.Sprintf("%v", le) {
-				te.Errorf("s(%v) != le(%v)", s, le)
-			}
-		}
-
-		kg, g := t.Glb(min)
-		kge, ge := t.GlbEq(min - 1)
-		kl, l := t.Lub(max)
-		kle, le := t.LubEq(max + 1)
-		fmin := t.Find(min - 1)
-		fmax := t.Find(min + 11)
-
-		if kg != 0 || kge != 0 || kl != 0 || kle != 0 {
-			te.Errorf("Got non-zero-key for missing query")
-		}
-
-		if g != nil || ge != nil || l != nil || le != nil || fmin != nil || fmax != nil {
-			te.Errorf("Got non-error-data for missing query")
-		}
-
-	}
-}
-
-func TestAllRBTreeOps(t *testing.T) {
-	allRBT32Ops(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
-	allRBT32Ops(t, []int32{22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 3, 2, 1, 25, 24, 23, 12, 11, 10, 9, 8, 7, 6, 5, 4})
-	allRBT32Ops(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
-	allRBT32Ops(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
-	allRBT32Ops(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
-	allRBT32Ops(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
-}
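The doubling in allRBT32Ops above is what lets the strict Glb/Lub queries be checked against the inserted key itself: with only even keys in the tree, d+1 and d-1 are never keys. A small illustration, assuming keys 2, 4 and 6 were inserted with their string forms:

	t.Glb(5)   // (4, "4"): greatest key strictly below 5
	t.Lub(3)   // (4, "4"): least key strictly above 3
	t.Glb(4)   // (2, "2"): exclusive, so the key 4 itself is skipped
	t.GlbEq(4) // (4, "4"): inclusive, so 4 is returned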
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/regalloc.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/regalloc.go
deleted file mode 100644
index bb90a3c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/regalloc.go
+++ /dev/null
@@ -1,2630 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/regalloc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/regalloc.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Register allocation.
-//
-// We use a version of a linear scan register allocator. We treat the
-// whole function as a single long basic block and run through
-// it using a greedy register allocator. Then all merge edges
-// (those targeting a block with len(Preds)>1) are processed to
-// shuffle data into the place that the target of the edge expects.
-//
-// The greedy allocator moves values into registers just before they
-// are used, spills registers only when necessary, and spills the
-// value whose next use is farthest in the future.
-//
-// The register allocator requires that a block is not scheduled until
-// at least one of its predecessors has been scheduled. The most recent
-// such predecessor provides the starting register state for a block.
-//
-// It also requires that there are no critical edges (critical =
-// comes from a block with >1 successor and goes to a block with >1
-// predecessor).  This makes it easy to add fixup code on merge edges -
-// the source of a merge edge has only one successor, so we can add
-// fixup code to the end of that block.
-
-// Spilling
-//
-// For every value, we generate a spill immediately after the value itself.
-//     x = Op y z    : AX
-//     x2 = StoreReg x
-// While AX still holds x, any uses of x will use that value. When AX is needed
-// for another value, we simply reuse AX.  Spill code has already been generated
-// so there is no code generated at "spill" time. When x is referenced
-// subsequently, we issue a load to restore x to a register using x2 as
-// its argument:
-//    x3 = Restore x2 : CX
-// x3 can then be used wherever x is referenced again.
-// If the spill (x2) is never used, it will be removed at the end of regalloc.
-//
-// Phi values are special, as always. We define two kinds of phis, those
-// where the merge happens in a register (a "register" phi) and those where
-// the merge happens in a stack location (a "stack" phi).
-//
-// A register phi must have the phi and all of its inputs allocated to the
-// same register. Register phis are spilled similarly to regular ops:
-//     b1: y = ... : AX        b2: z = ... : AX
-//         goto b3                 goto b3
-//     b3: x = phi(y, z) : AX
-//         x2 = StoreReg x
-//
-// A stack phi must have the phi and all of its inputs allocated to the same
-// stack location. Stack phis start out life already spilled - each phi
-// input must be a store (using StoreReg) at the end of the corresponding
-// predecessor block.
-//     b1: y = ... : AX        b2: z = ... : BX
-//         y2 = StoreReg y         z2 = StoreReg z
-//         goto b3                 goto b3
-//     b3: x = phi(y2, z2)
-// The stack allocator knows that StoreReg args of stack-allocated phis
-// must be allocated to the same stack slot as the phi that uses them.
-// x is now a spilled value and a restore must appear before its first use.
-
-// TODO
-
-// Use an affinity graph to mark two values which should use the
-// same register. This affinity graph will be used to prefer certain
-// registers for allocation. This affinity helps eliminate moves that
-// are required for phi implementations and helps generate allocations
-// for 2-register architectures.
-
-// Note: regalloc generates a not-quite-SSA output. If we have:
-//
-//             b1: x = ... : AX
-//                 x2 = StoreReg x
-//                 ... AX gets reused for something else ...
-//                 if ... goto b3 else b4
-//
-//   b3: x3 = LoadReg x2 : BX       b4: x4 = LoadReg x2 : CX
-//       ... use x3 ...                 ... use x4 ...
-//
-//             b2: ... use x3 ...
-//
-// If b3 is the primary predecessor of b2, then we use x3 in b2 and
-// add a x4:CX->BX copy at the end of b4.
-// But the definition of x3 doesn't dominate b2.  We should really
-// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
-// SSA form. For now, we ignore this problem as remaining in strict
-// SSA form isn't needed after regalloc. We'll just leave the use
-// of x3 not dominated by the definition of x3, and the CX->BX copy
-// will have no use (so don't run deadcode after regalloc!).
-// TODO: maybe we should introduce these extra phis?
-
-// Additional not-quite-SSA output occurs when spills are sunk out
-// of loops to the targets of exit edges from the loop.  Before sinking,
-// there is one spill site (one StoreReg) targeting stack slot X, after
-// sinking there may be multiple spill sites targeting stack slot X,
-// with no phi functions at any join points reachable by the multiple
-// spill sites.  In addition, uses of the spill from copies of the original
-// will not name the copy in their reference; instead they will name
-// the original, though both will have the same spill location.  The
-// first sunk spill will be the original, but moved, to an exit block,
-// thus ensuring that there is a definition somewhere corresponding to
-// the original spill's uses.
-
-package ssa
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"unsafe"
-)
-
-const (
-	moveSpills = iota
-	logSpills
-	regDebug
-	stackDebug
-)
-
-// distance is a measure of how far into the future values are used.
-// distance is measured in units of instructions.
-const (
-	likelyDistance   = 1
-	normalDistance   = 10
-	unlikelyDistance = 100
-)
-
-// regalloc performs register allocation on f. It sets f.RegAlloc
-// to the resulting allocation.
-func regalloc(f *Func) {
-	var s regAllocState
-	s.init(f)
-	s.regalloc(f)
-}
-
-type register uint8
-
-const noRegister register = 255
-
-type regMask uint64
-
-func (m regMask) String() string {
-	s := ""
-	for r := register(0); m != 0; r++ {
-		if m>>r&1 == 0 {
-			continue
-		}
-		m &^= regMask(1) << r
-		if s != "" {
-			s += " "
-		}
-		s += fmt.Sprintf("r%d", r)
-	}
-	return s
-}
-
-// countRegs returns the number of set bits in the register mask.
-func countRegs(r regMask) int {
-	n := 0
-	for r != 0 {
-		n += int(r & 1)
-		r >>= 1
-	}
-	return n
-}
-
-// pickReg picks an arbitrary register from the register mask.
-func pickReg(r regMask) register {
-	// pick the lowest one
-	if r == 0 {
-		panic("can't pick a register from an empty set")
-	}
-	for i := register(0); ; i++ {
-		if r&1 != 0 {
-			return i
-		}
-		r >>= 1
-	}
-}
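// A quick worked example of the mask helpers above (illustrative only,
// assuming registers are numbered r0, r1, r2, ...): a regMask is a bitset
// of register numbers, countRegs is a popcount, and pickReg returns the
// lowest-numbered register in the set.
//
//	m := regMask(1)<<0 | regMask(1)<<2 // the set {r0, r2}; m.String() == "r0 r2"
//	countRegs(m)                       // 2
//	pickReg(m)                         // register 0, the lowest set bit
//	m &^= regMask(1) << pickReg(m)     // remove r0, leaving {r2}
//	pickReg(m)                         // register 2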
-
-type use struct {
-	dist int32 // distance from start of the block to a use of a value
-	line int32 // line number of the use
-	next *use  // linked list of uses of a value in nondecreasing dist order
-}
-
-type valState struct {
-	regs              regMask // the set of registers holding a Value (usually just one)
-	uses              *use    // list of uses in this block
-	spill             *Value  // spilled copy of the Value
-	spillUsed         bool
-	spillUsedShuffle  bool // true if used in shuffling, after ordinary uses
-	needReg           bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags()
-	rematerializeable bool // cached value of v.rematerializeable()
-}
-
-type regState struct {
-	v *Value // Original (preregalloc) Value stored in this register.
-	c *Value // A Value equal to v which is currently in a register.  Might be v or a copy of it.
-	// If a register is unused, v==c==nil
-}
-
-type regAllocState struct {
-	f *Func
-
-	registers   []Register
-	numRegs     register
-	SPReg       register
-	SBReg       register
-	GReg        register
-	allocatable regMask
-
-	// for each block, its primary predecessor.
-	// A predecessor of b is primary if it is the closest
-	// predecessor that appears before b in the layout order.
-	// We record the index in the Preds list where the primary predecessor sits.
-	primary []int32
-
-	// live values at the end of each block.  live[b.ID] is a list of value IDs
-	// which are live at the end of b, together with a count of how many instructions
-	// forward to the next use.
-	live [][]liveInfo
-	// desired register assignments at the end of each block.
-	// Note that this is a static map computed before allocation occurs. Dynamic
-	// register desires (from partially completed allocations) will trump
-	// this information.
-	desired []desiredState
-
-	// current state of each (preregalloc) Value
-	values []valState
-
-	// For each Value, map from its value ID back to the
-	// preregalloc Value it was derived from.
-	orig []*Value
-
-	// current state of each register
-	regs []regState
-
-	// registers that contain values which can't be kicked out
-	nospill regMask
-
-	// mask of registers currently in use
-	used regMask
-
-	// mask of registers used in the current instruction
-	tmpused regMask
-
-	// current block we're working on
-	curBlock *Block
-
-	// cache of use records
-	freeUseRecords *use
-
-	// endRegs[blockid] is the register state at the end of each block.
-	// encoded as a set of endReg records.
-	endRegs [][]endReg
-
-	// startRegs[blockid] is the register state at the start of merge blocks.
-	// saved state does not include the state of phi ops in the block.
-	startRegs [][]startReg
-
-	// spillLive[blockid] is the set of live spills at the end of each block
-	spillLive [][]ID
-
-	// a set of copies we generated to move things around, and
-	// whether it is used in shuffle. Unused copies will be deleted.
-	copies map[*Value]bool
-
-	loopnest *loopnest
-}
-
-type spillToSink struct {
-	spill *Value // Spill instruction to move (a StoreReg)
-	dests int32  // Bitmask indicating exit blocks from loop in which spill/val is defined. 1<<i set means val is live into loop.exitBlocks[i]
-}
-
-func (sts *spillToSink) spilledValue() *Value {
-	return sts.spill.Args[0]
-}
-
-type endReg struct {
-	r register
-	v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
-	c *Value // cached version of the value
-}
-
-type startReg struct {
-	r    register
-	vid  ID    // pre-regalloc value needed in this register
-	line int32 // line number of use of this register
-}
-
-// freeReg frees up register r. Any current user of r is kicked out.
-func (s *regAllocState) freeReg(r register) {
-	v := s.regs[r].v
-	if v == nil {
-		s.f.Fatalf("tried to free an already free register %d\n", r)
-	}
-
-	// Mark r as unused.
-	if s.f.pass.debug > regDebug {
-		fmt.Printf("freeReg %s (dump %s/%s)\n", s.registers[r].Name(), v, s.regs[r].c)
-	}
-	s.regs[r] = regState{}
-	s.values[v.ID].regs &^= regMask(1) << r
-	s.used &^= regMask(1) << r
-}
-
-// freeRegs frees up all registers listed in m.
-func (s *regAllocState) freeRegs(m regMask) {
-	for m&s.used != 0 {
-		s.freeReg(pickReg(m & s.used))
-	}
-}
-
-// setOrig records that c's original value is the same as
-// v's original value.
-func (s *regAllocState) setOrig(c *Value, v *Value) {
-	for int(c.ID) >= len(s.orig) {
-		s.orig = append(s.orig, nil)
-	}
-	if s.orig[c.ID] != nil {
-		s.f.Fatalf("orig value set twice %s %s", c, v)
-	}
-	s.orig[c.ID] = s.orig[v.ID]
-}
-
-// assignReg assigns register r to hold c, a copy of v.
-// r must be unused.
-func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
-	if s.f.pass.debug > regDebug {
-		fmt.Printf("assignReg %s %s/%s\n", s.registers[r].Name(), v, c)
-	}
-	if s.regs[r].v != nil {
-		s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v)
-	}
-
-	// Update state.
-	s.regs[r] = regState{v, c}
-	s.values[v.ID].regs |= regMask(1) << r
-	s.used |= regMask(1) << r
-	s.f.setHome(c, &s.registers[r])
-}
-
-// allocReg chooses a register from the set of registers in mask.
-// If there is no unused register, a Value will be kicked out of
-// a register to make room.
-func (s *regAllocState) allocReg(mask regMask, v *Value) register {
-	mask &= s.allocatable
-	mask &^= s.nospill
-	if mask == 0 {
-		s.f.Fatalf("no register available for %s", v)
-	}
-
-	// Pick an unused register if one is available.
-	if mask&^s.used != 0 {
-		return pickReg(mask &^ s.used)
-	}
-
-	// Pick a value to spill. Spill the value with the
-	// farthest-in-the-future use.
-	// TODO: Prefer registers with already spilled Values?
-	// TODO: Modify preference using affinity graph.
-	// TODO: if a single value is in multiple registers, spill one of them
-	// before spilling a value in just a single register.
-
-	// Find a register to spill. We spill the register containing the value
-	// whose next use is as far in the future as possible.
-	// https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
-	var r register
-	maxuse := int32(-1)
-	for t := register(0); t < s.numRegs; t++ {
-		if mask>>t&1 == 0 {
-			continue
-		}
-		v := s.regs[t].v
-		if n := s.values[v.ID].uses.dist; n > maxuse {
-			// v's next use is farther in the future than any value
-			// we've seen so far. A new best spill candidate.
-			r = t
-			maxuse = n
-		}
-	}
-	if maxuse == -1 {
-		s.f.Fatalf("couldn't find register to spill")
-	}
-
-	// Try to move it around before kicking out, if there is a free register.
-	// We generate a Copy and record it. It will be deleted if never used.
-	v2 := s.regs[r].v
-	m := s.compatRegs(v2.Type) &^ s.used &^ s.tmpused &^ (regMask(1) << r)
-	if m != 0 && !s.values[v2.ID].rematerializeable && countRegs(s.values[v2.ID].regs) == 1 {
-		r2 := pickReg(m)
-		c := s.curBlock.NewValue1(v2.Line, OpCopy, v2.Type, s.regs[r].c)
-		s.copies[c] = false
-		if s.f.pass.debug > regDebug {
-			fmt.Printf("copy %s to %s : %s\n", v2, c, s.registers[r2].Name())
-		}
-		s.setOrig(c, v2)
-		s.assignReg(r2, v2, c)
-	}
-	s.freeReg(r)
-	return r
-}
-
-// allocValToReg allocates v to a register selected from regMask and
-// returns the register copy of v. Any previous user is kicked out and spilled
-// (if necessary). Load code is added at the current pc. If nospill is set the
-// allocated register is marked nospill so the assignment cannot be
-// undone until the caller allows it by clearing nospill. Returns a
-// *Value which is either v or a copy of v allocated to the chosen register.
-func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, line int32) *Value {
-	vi := &s.values[v.ID]
-
-	// Check if v is already in a requested register.
-	if mask&vi.regs != 0 {
-		r := pickReg(mask & vi.regs)
-		if s.regs[r].v != v || s.regs[r].c == nil {
-			panic("bad register state")
-		}
-		if nospill {
-			s.nospill |= regMask(1) << r
-		}
-		return s.regs[r].c
-	}
-
-	// Allocate a register.
-	r := s.allocReg(mask, v)
-
-	// Allocate v to the new register.
-	var c *Value
-	if vi.regs != 0 {
-		// Copy from a register that v is already in.
-		r2 := pickReg(vi.regs)
-		if s.regs[r2].v != v {
-			panic("bad register state")
-		}
-		c = s.curBlock.NewValue1(line, OpCopy, v.Type, s.regs[r2].c)
-	} else if v.rematerializeable() {
-		// Rematerialize instead of loading from the spill location.
-		c = v.copyInto(s.curBlock)
-	} else {
-		switch {
-		// Load v from its spill location.
-		case vi.spill != nil:
-			if s.f.pass.debug > logSpills {
-				s.f.Config.Warnl(vi.spill.Line, "load spill for %v from %v", v, vi.spill)
-			}
-			c = s.curBlock.NewValue1(line, OpLoadReg, v.Type, vi.spill)
-			vi.spillUsed = true
-		default:
-			s.f.Fatalf("attempt to load unspilled value %v", v.LongString())
-		}
-	}
-	s.setOrig(c, v)
-	s.assignReg(r, v, c)
-	if nospill {
-		s.nospill |= regMask(1) << r
-	}
-	return c
-}
-
-// isLeaf reports whether f is a leaf function, i.e. performs no calls.
-func isLeaf(f *Func) bool {
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if opcodeTable[v.Op].call {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func (s *regAllocState) init(f *Func) {
-	s.f = f
-	s.registers = f.Config.registers
-	if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
-		s.f.Fatalf("bad number of registers: %d", nr)
-	} else {
-		s.numRegs = register(nr)
-	}
-	// Locate SP, SB, and g registers.
-	s.SPReg = noRegister
-	s.SBReg = noRegister
-	s.GReg = noRegister
-	for r := register(0); r < s.numRegs; r++ {
-		switch s.registers[r].Name() {
-		case "SP":
-			s.SPReg = r
-		case "SB":
-			s.SBReg = r
-		case "g":
-			s.GReg = r
-		}
-	}
-	// Make sure we found all required registers.
-	switch noRegister {
-	case s.SPReg:
-		s.f.Fatalf("no SP register found")
-	case s.SBReg:
-		s.f.Fatalf("no SB register found")
-	case s.GReg:
-		if f.Config.hasGReg {
-			s.f.Fatalf("no g register found")
-		}
-	}
-
-	// Figure out which registers we're allowed to use.
-	s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
-	s.allocatable &^= 1 << s.SPReg
-	s.allocatable &^= 1 << s.SBReg
-	if s.f.Config.hasGReg {
-		s.allocatable &^= 1 << s.GReg
-	}
-	if s.f.Config.ctxt.Framepointer_enabled && s.f.Config.FPReg >= 0 {
-		s.allocatable &^= 1 << uint(s.f.Config.FPReg)
-	}
-	if s.f.Config.ctxt.Flag_shared {
-		switch s.f.Config.arch {
-		case "ppc64le": // R2 already reserved.
-			s.allocatable &^= 1 << 12 // R12
-		}
-	}
-	if s.f.Config.LinkReg != -1 {
-		if isLeaf(f) {
-			// Leaf functions don't save/restore the link register.
-			s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
-		}
-		if s.f.Config.arch == "arm" && obj.GOARM == 5 {
-			// On ARMv5 we insert softfloat calls at each FP instruction.
-			// This clobbers LR almost everywhere. Disable allocating LR
-			// on ARMv5.
-			s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
-		}
-	}
-	if s.f.Config.ctxt.Flag_dynlink {
-		switch s.f.Config.arch {
-		case "amd64":
-			s.allocatable &^= 1 << 15 // R15
-		case "arm":
-			s.allocatable &^= 1 << 9 // R9
-		case "ppc64le": // R2 already reserved.
-			s.allocatable &^= 1 << 12 // R12
-		case "arm64":
-			// nothing to do?
-		case "386":
-			// nothing to do.
-			// Note that for Flag_shared (position independent code)
-			// we do need to be careful, but that carefulness is hidden
-			// in the rewrite rules so we always have a free register
-			// available for global load/stores. See gen/386.rules (search for Flag_shared).
-		case "s390x":
-			// nothing to do, R10 & R11 already reserved
-		default:
-			s.f.Config.fe.Fatalf(0, "arch %s not implemented", s.f.Config.arch)
-		}
-	}
-	if s.f.Config.nacl {
-		switch s.f.Config.arch {
-		case "arm":
-			s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
-		case "amd64p32":
-			s.allocatable &^= 1 << 5  // BP - reserved for nacl
-			s.allocatable &^= 1 << 15 // R15 - reserved for nacl
-		}
-	}
-	if s.f.Config.use387 {
-		s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
-	}
-
-	s.regs = make([]regState, s.numRegs)
-	s.values = make([]valState, f.NumValues())
-	s.orig = make([]*Value, f.NumValues())
-	s.copies = make(map[*Value]bool)
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
-				s.values[v.ID].needReg = true
-				s.values[v.ID].rematerializeable = v.rematerializeable()
-				s.orig[v.ID] = v
-			}
-			// Note: needReg is false for values returning Tuple types.
-			// Instead, we mark the corresponding Selects as needReg.
-		}
-	}
-	s.computeLive()
-
-	// Compute block order. This array allows us to distinguish forward edges
-	// from backward edges and compute how far they go.
-	blockOrder := make([]int32, f.NumBlocks())
-	for i, b := range f.Blocks {
-		blockOrder[b.ID] = int32(i)
-	}
-
-	// Compute primary predecessors.
-	s.primary = make([]int32, f.NumBlocks())
-	for _, b := range f.Blocks {
-		best := -1
-		for i, e := range b.Preds {
-			p := e.b
-			if blockOrder[p.ID] >= blockOrder[b.ID] {
-				continue // backward edge
-			}
-			if best == -1 || blockOrder[p.ID] > blockOrder[b.Preds[best].b.ID] {
-				best = i
-			}
-		}
-		s.primary[b.ID] = int32(best)
-	}
-
-	s.endRegs = make([][]endReg, f.NumBlocks())
-	s.startRegs = make([][]startReg, f.NumBlocks())
-	s.spillLive = make([][]ID, f.NumBlocks())
-}
-
-// Adds a use record for id at distance dist from the start of the block.
-// All calls to addUse must happen with nonincreasing dist.
-func (s *regAllocState) addUse(id ID, dist int32, line int32) {
-	r := s.freeUseRecords
-	if r != nil {
-		s.freeUseRecords = r.next
-	} else {
-		r = &use{}
-	}
-	r.dist = dist
-	r.line = line
-	r.next = s.values[id].uses
-	s.values[id].uses = r
-	if r.next != nil && dist > r.next.dist {
-		s.f.Fatalf("uses added in wrong order")
-	}
-}
-
-// advanceUses advances the uses of v's args from the state before v to the state after v.
-// Any values which have no more uses are deallocated from registers.
-func (s *regAllocState) advanceUses(v *Value) {
-	for _, a := range v.Args {
-		if !s.values[a.ID].needReg {
-			continue
-		}
-		ai := &s.values[a.ID]
-		r := ai.uses
-		ai.uses = r.next
-		if r.next == nil {
-			// Value is dead, free all registers that hold it.
-			s.freeRegs(ai.regs)
-		}
-		r.next = s.freeUseRecords
-		s.freeUseRecords = r
-	}
-}
-
-// liveAfterCurrentInstruction reports whether v is live after
-// the current instruction is completed.  v must be used by the
-// current instruction.
-func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
-	u := s.values[v.ID].uses
-	d := u.dist
-	for u != nil && u.dist == d {
-		u = u.next
-	}
-	return u != nil && u.dist > d
-}
-
-// Sets the state of the registers to that encoded in regs.
-func (s *regAllocState) setState(regs []endReg) {
-	s.freeRegs(s.used)
-	for _, x := range regs {
-		s.assignReg(x.r, x.v, x.c)
-	}
-}
-
-// compatRegs returns the set of registers which can store a type t.
-func (s *regAllocState) compatRegs(t Type) regMask {
-	var m regMask
-	if t.IsTuple() || t.IsFlags() {
-		return 0
-	}
-	if t.IsFloat() || t == TypeInt128 {
-		m = s.f.Config.fpRegMask
-	} else {
-		m = s.f.Config.gpRegMask
-	}
-	return m & s.allocatable
-}
-
-// loopForBlock returns the loop containing block b,
-// provided that the loop is "interesting" for purposes
-// of improving register allocation (= is inner, and does
-// not contain a call)
-func (s *regAllocState) loopForBlock(b *Block) *loop {
-	loop := s.loopnest.b2l[b.ID]
-
-	// Minor for-the-time-being optimization: nothing happens
-	// unless a loop is both inner and call-free, therefore
-	// don't bother with other loops.
-	if loop != nil && (loop.containsCall || !loop.isInner) {
-		loop = nil
-	}
-	return loop
-}
-
-func (s *regAllocState) regalloc(f *Func) {
-	liveSet := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(liveSet)
-	var oldSched []*Value
-	var phis []*Value
-	var phiRegs []register
-	var args []*Value
-
-	// statistics
-	var nSpills int               // # of spills remaining
-	var nSpillsInner int          // # of spills remaining in inner loops
-	var nSpillsSunk int           // # of sunk spills remaining
-	var nSpillsChanged int        // # of sunk spills lost because of register use change
-	var nSpillsSunkUnused int     // # of spills not sunk because they were removed completely
-	var nSpillsNotSunkLateUse int // # of spills not sunk because of very late use (in shuffle)
-
-	// Data structure used for computing desired registers.
-	var desired desiredState
-
-	// Desired registers for inputs & outputs for each instruction in the block.
-	type dentry struct {
-		out [4]register    // desired output registers
-		in  [3][4]register // desired input registers (for inputs 0,1, and 2)
-	}
-	var dinfo []dentry
-
-	if f.Entry != f.Blocks[0] {
-		f.Fatalf("entry block must be first")
-	}
-
-	// Get loop nest so that spills in inner loops can be
-	// tracked.  When the last block of a loop is processed,
-	// attempt to move spills out of the loop.
-	s.loopnest.findExits()
-
-	// Spills are moved from one block's slice of values to another's.
-	// This confuses register allocation if it occurs before it is
-	// complete, so candidates are recorded, then rechecked and
-	// moved after all allocation (register and stack) is complete.
-	// Because movement is only within a stack slot's lifetime, it
-	// is safe to do this.
-	var toSink []spillToSink
-	// Will be used to figure out live inputs to exit blocks of inner loops.
-	entryCandidates := newSparseMap(f.NumValues())
-
-	for _, b := range f.Blocks {
-		s.curBlock = b
-		loop := s.loopForBlock(b)
-
-		// Initialize liveSet and uses fields for this block.
-		// Walk backwards through the block doing liveness analysis.
-		liveSet.clear()
-		for _, e := range s.live[b.ID] {
-			s.addUse(e.ID, int32(len(b.Values))+e.dist, e.line) // pseudo-uses from beyond end of block
-			liveSet.add(e.ID)
-		}
-		if v := b.Control; v != nil && s.values[v.ID].needReg {
-			s.addUse(v.ID, int32(len(b.Values)), b.Line) // pseudo-use by control value
-			liveSet.add(v.ID)
-		}
-		for i := len(b.Values) - 1; i >= 0; i-- {
-			v := b.Values[i]
-			liveSet.remove(v.ID)
-			if v.Op == OpPhi {
-				// Remove v from the live set, but don't add
-				// any inputs. This is the state the len(b.Preds)>1
-				// case below desires; it wants to process phis specially.
-				continue
-			}
-			for _, a := range v.Args {
-				if !s.values[a.ID].needReg {
-					continue
-				}
-				s.addUse(a.ID, int32(i), v.Line)
-				liveSet.add(a.ID)
-			}
-		}
-		if s.f.pass.debug > regDebug {
-			fmt.Printf("uses for %s:%s\n", s.f.Name, b)
-			for i := range s.values {
-				vi := &s.values[i]
-				u := vi.uses
-				if u == nil {
-					continue
-				}
-				fmt.Printf("  v%d:", i)
-				for u != nil {
-					fmt.Printf(" %d", u.dist)
-					u = u.next
-				}
-				fmt.Println()
-			}
-		}
-
-		// Make a copy of the block schedule so we can generate a new one in place.
-		// We make a separate copy for phis and regular values.
-		nphi := 0
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				break
-			}
-			nphi++
-		}
-		phis = append(phis[:0], b.Values[:nphi]...)
-		oldSched = append(oldSched[:0], b.Values[nphi:]...)
-		b.Values = b.Values[:0]
-
-		// Initialize start state of block.
-		if b == f.Entry {
-			// Regalloc state is empty to start.
-			if nphi > 0 {
-				f.Fatalf("phis in entry block")
-			}
-		} else if len(b.Preds) == 1 {
-			// Start regalloc state with the end state of the previous block.
-			s.setState(s.endRegs[b.Preds[0].b.ID])
-			if nphi > 0 {
-				f.Fatalf("phis in single-predecessor block")
-			}
-			// Drop any values which are no longer live.
-			// This may happen because at the end of p, a value may be
-			// live but only used by some other successor of p.
-			for r := register(0); r < s.numRegs; r++ {
-				v := s.regs[r].v
-				if v != nil && !liveSet.contains(v.ID) {
-					s.freeReg(r)
-				}
-			}
-		} else {
-			// This is the complicated case. We have more than one predecessor,
-			// which means we may have Phi ops.
-
-			// Copy phi ops into new schedule.
-			b.Values = append(b.Values, phis...)
-
-			// Start with the final register state of the primary predecessor
-			idx := s.primary[b.ID]
-			if idx < 0 {
-				f.Fatalf("block with no primary predecessor %s", b)
-			}
-			p := b.Preds[idx].b
-			s.setState(s.endRegs[p.ID])
-
-			if s.f.pass.debug > regDebug {
-				fmt.Printf("starting merge block %s with end state of %s:\n", b, p)
-				for _, x := range s.endRegs[p.ID] {
-					fmt.Printf("  %s: orig:%s cache:%s\n", s.registers[x.r].Name(), x.v, x.c)
-				}
-			}
-
-			// Decide on registers for phi ops. Use the registers determined
-			// by the primary predecessor if we can.
-			// TODO: pick best of (already processed) predecessors?
-			// Majority vote?  Deepest nesting level?
-			phiRegs = phiRegs[:0]
-			var phiUsed regMask
-			for _, v := range phis {
-				if !s.values[v.ID].needReg {
-					phiRegs = append(phiRegs, noRegister)
-					continue
-				}
-				a := v.Args[idx]
-				// Some instructions target non-allocatable registers.
-				// They're not suitable for further (phi-function) allocation.
-				m := s.values[a.ID].regs &^ phiUsed & s.allocatable
-				if m != 0 {
-					r := pickReg(m)
-					phiUsed |= regMask(1) << r
-					phiRegs = append(phiRegs, r)
-				} else {
-					phiRegs = append(phiRegs, noRegister)
-				}
-			}
-
-			// Second pass - deallocate any phi inputs which are now dead.
-			for i, v := range phis {
-				if !s.values[v.ID].needReg {
-					continue
-				}
-				a := v.Args[idx]
-				if !liveSet.contains(a.ID) {
-					// Input is dead beyond the phi, deallocate
-					// anywhere else it might live.
-					s.freeRegs(s.values[a.ID].regs)
-				} else {
-					// Input is still live.
-					// Try to move it around before kicking out, if there is a free register.
-					// We generate a Copy in the predecessor block and record it. It will be
-					// deleted if never used.
-					r := phiRegs[i]
-					if r == noRegister {
-						continue
-					}
-					// Pick a free register. At this point some registers used in the predecessor
-					// block may have been deallocated. Those are the ones used for Phis. Exclude
-					// them (and they are not going to be helpful anyway).
-					m := s.compatRegs(a.Type) &^ s.used &^ phiUsed
-					if m != 0 && !s.values[a.ID].rematerializeable && countRegs(s.values[a.ID].regs) == 1 {
-						r2 := pickReg(m)
-						c := p.NewValue1(a.Line, OpCopy, a.Type, s.regs[r].c)
-						s.copies[c] = false
-						if s.f.pass.debug > regDebug {
-							fmt.Printf("copy %s to %s : %s\n", a, c, s.registers[r2].Name())
-						}
-						s.setOrig(c, a)
-						s.assignReg(r2, a, c)
-						s.endRegs[p.ID] = append(s.endRegs[p.ID], endReg{r2, a, c})
-					}
-					s.freeReg(r)
-				}
-			}
-
-			// Third pass - pick registers for phis whose inputs
-			// were not in a register.
-			for i, v := range phis {
-				if !s.values[v.ID].needReg {
-					continue
-				}
-				if phiRegs[i] != noRegister {
-					continue
-				}
-				if s.f.Config.use387 && v.Type.IsFloat() {
-					continue // 387 can't handle floats in registers between blocks
-				}
-				m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
-				if m != 0 {
-					r := pickReg(m)
-					phiRegs[i] = r
-					phiUsed |= regMask(1) << r
-				}
-			}
-
-			// Set registers for phis. Add phi spill code.
-			for i, v := range phis {
-				if !s.values[v.ID].needReg {
-					continue
-				}
-				r := phiRegs[i]
-				if r == noRegister {
-					// stack-based phi
-					// Spills will be inserted in all the predecessors below.
-					s.values[v.ID].spill = v        // v starts life spilled
-					s.values[v.ID].spillUsed = true // use is guaranteed
-					continue
-				}
-				// register-based phi
-				s.assignReg(r, v, v)
-				// Spill the phi in case we need to restore it later.
-				spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
-				s.setOrig(spill, v)
-				s.values[v.ID].spill = spill
-				s.values[v.ID].spillUsed = false
-				if loop != nil {
-					loop.spills = append(loop.spills, v)
-					nSpillsInner++
-				}
-				nSpills++
-			}
-
-			// Save the starting state for use by merge edges.
-			var regList []startReg
-			for r := register(0); r < s.numRegs; r++ {
-				v := s.regs[r].v
-				if v == nil {
-					continue
-				}
-				if phiUsed>>r&1 != 0 {
-					// Skip registers that phis used, we'll handle those
-					// specially during merge edge processing.
-					continue
-				}
-				regList = append(regList, startReg{r, v.ID, s.values[v.ID].uses.line})
-			}
-			s.startRegs[b.ID] = regList
-
-			if s.f.pass.debug > regDebug {
-				fmt.Printf("after phis\n")
-				for _, x := range s.startRegs[b.ID] {
-					fmt.Printf("  %s: v%d\n", s.registers[x.r].Name(), x.vid)
-				}
-			}
-		}
-
-		// Allocate space to record the desired registers for each value.
-		dinfo = dinfo[:0]
-		for i := 0; i < len(oldSched); i++ {
-			dinfo = append(dinfo, dentry{})
-		}
-
-		// Load static desired register info at the end of the block.
-		desired.copy(&s.desired[b.ID])
-
-		// Check actual assigned registers at the start of the next block(s).
-		// Dynamically assigned registers will trump the static
-		// desired registers computed during liveness analysis.
-		// Note that we do this phase after startRegs is set above, so that
-		// we get the right behavior for a block which branches to itself.
-		for _, e := range b.Succs {
-			succ := e.b
-			// TODO: prioritize likely successor?
-			for _, x := range s.startRegs[succ.ID] {
-				desired.add(x.vid, x.r)
-			}
-			// Process phi ops in succ.
-			pidx := e.i
-			for _, v := range succ.Values {
-				if v.Op != OpPhi {
-					break
-				}
-				if !s.values[v.ID].needReg {
-					continue
-				}
-				rp, ok := s.f.getHome(v.ID).(*Register)
-				if !ok {
-					continue
-				}
-				desired.add(v.Args[pidx].ID, register(rp.num))
-			}
-		}
-		// Walk values backwards computing desired register info.
-		// See computeLive for more comments.
-		for i := len(oldSched) - 1; i >= 0; i-- {
-			v := oldSched[i]
-			prefs := desired.remove(v.ID)
-			desired.clobber(opcodeTable[v.Op].reg.clobbers)
-			for _, j := range opcodeTable[v.Op].reg.inputs {
-				if countRegs(j.regs) != 1 {
-					continue
-				}
-				desired.clobber(j.regs)
-				desired.add(v.Args[j.idx].ID, pickReg(j.regs))
-			}
-			if opcodeTable[v.Op].resultInArg0 {
-				if opcodeTable[v.Op].commutative {
-					desired.addList(v.Args[1].ID, prefs)
-				}
-				desired.addList(v.Args[0].ID, prefs)
-			}
-			// Save desired registers for this value.
-			dinfo[i].out = prefs
-			for j, a := range v.Args {
-				if j >= len(dinfo[i].in) {
-					break
-				}
-				dinfo[i].in[j] = desired.get(a.ID)
-			}
-		}
-
-		// Process all the non-phi values.
-		for idx, v := range oldSched {
-			if s.f.pass.debug > regDebug {
-				fmt.Printf("  processing %s\n", v.LongString())
-			}
-			regspec := opcodeTable[v.Op].reg
-			if v.Op == OpPhi {
-				f.Fatalf("phi %s not at start of block", v)
-			}
-			if v.Op == OpSP {
-				s.assignReg(s.SPReg, v, v)
-				b.Values = append(b.Values, v)
-				s.advanceUses(v)
-				continue
-			}
-			if v.Op == OpSB {
-				s.assignReg(s.SBReg, v, v)
-				b.Values = append(b.Values, v)
-				s.advanceUses(v)
-				continue
-			}
-			if v.Op == OpSelect0 || v.Op == OpSelect1 {
-				if s.values[v.ID].needReg {
-					var i = 0
-					if v.Op == OpSelect1 {
-						i = 1
-					}
-					s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
-				}
-				b.Values = append(b.Values, v)
-				s.advanceUses(v)
-				goto issueSpill
-			}
-			if v.Op == OpGetG && s.f.Config.hasGReg {
-				// use hardware g register
-				if s.regs[s.GReg].v != nil {
-					s.freeReg(s.GReg) // kick out the old value
-				}
-				s.assignReg(s.GReg, v, v)
-				b.Values = append(b.Values, v)
-				s.advanceUses(v)
-				goto issueSpill
-			}
-			if v.Op == OpArg {
-				// Args are "pre-spilled" values. We don't allocate
-				// any register here. We just set up the spill pointer to
-				// point at itself and any later user will restore it to use it.
-				s.values[v.ID].spill = v
-				s.values[v.ID].spillUsed = true // use is guaranteed
-				b.Values = append(b.Values, v)
-				s.advanceUses(v)
-				continue
-			}
-			if v.Op == OpKeepAlive {
-				// Make sure the argument to v is still live here.
-				s.advanceUses(v)
-				vi := &s.values[v.Args[0].ID]
-				if vi.spillUsed {
-					// Use the spill location.
-					v.SetArg(0, vi.spill)
-				} else {
-					// No need to keep unspilled values live.
-					// These are typically rematerializeable constants like nil,
-					// or values of a variable that were modified since the last call.
-					v.Op = OpCopy
-					v.SetArgs1(v.Args[1])
-				}
-				b.Values = append(b.Values, v)
-				continue
-			}
-			if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
-				// No register allocation required (or none specified yet)
-				s.freeRegs(regspec.clobbers)
-				b.Values = append(b.Values, v)
-				s.advanceUses(v)
-				continue
-			}
-
-			if s.values[v.ID].rematerializeable {
-				// Value is rematerializeable, don't issue it here.
-				// It will get issued just before each use (see
-				// allocValueToReg).
-				for _, a := range v.Args {
-					a.Uses--
-				}
-				s.advanceUses(v)
-				continue
-			}
-
-			if s.f.pass.debug > regDebug {
-				fmt.Printf("value %s\n", v.LongString())
-				fmt.Printf("  out:")
-				for _, r := range dinfo[idx].out {
-					if r != noRegister {
-						fmt.Printf(" %s", s.registers[r].Name())
-					}
-				}
-				fmt.Println()
-				for i := 0; i < len(v.Args) && i < 3; i++ {
-					fmt.Printf("  in%d:", i)
-					for _, r := range dinfo[idx].in[i] {
-						if r != noRegister {
-							fmt.Printf(" %s", s.registers[r].Name())
-						}
-					}
-					fmt.Println()
-				}
-			}
-
-			// Move arguments to registers. Process in an ordering defined
-			// by the register specification (most constrained first).
-			args = append(args[:0], v.Args...)
-			for _, i := range regspec.inputs {
-				mask := i.regs
-				if mask&s.values[args[i.idx].ID].regs == 0 {
-					// Need a new register for the input.
-					mask &= s.allocatable
-					mask &^= s.nospill
-					// Use desired register if available.
-					if i.idx < 3 {
-						for _, r := range dinfo[idx].in[i.idx] {
-							if r != noRegister && (mask&^s.used)>>r&1 != 0 {
-								// Desired register is allowed and unused.
-								mask = regMask(1) << r
-								break
-							}
-						}
-					}
-					// Avoid registers we're saving for other values.
-					if mask&^desired.avoid != 0 {
-						mask &^= desired.avoid
-					}
-				}
-				args[i.idx] = s.allocValToReg(args[i.idx], mask, true, v.Line)
-			}
-
-			// If the output clobbers the input register, make sure we have
-			// at least two copies of the input register so we don't
-			// have to reload the value from the spill location.
-			if opcodeTable[v.Op].resultInArg0 {
-				var m regMask
-				if !s.liveAfterCurrentInstruction(v.Args[0]) {
-					// arg0 is dead.  We can clobber its register.
-					goto ok
-				}
-				if s.values[v.Args[0].ID].rematerializeable {
-					// We can rematerialize the input, don't worry about clobbering it.
-					goto ok
-				}
-				if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
-					// we have at least 2 copies of arg0.  We can afford to clobber one.
-					goto ok
-				}
-				if opcodeTable[v.Op].commutative {
-					if !s.liveAfterCurrentInstruction(v.Args[1]) {
-						args[0], args[1] = args[1], args[0]
-						goto ok
-					}
-					if s.values[v.Args[1].ID].rematerializeable {
-						args[0], args[1] = args[1], args[0]
-						goto ok
-					}
-					if countRegs(s.values[v.Args[1].ID].regs) >= 2 {
-						args[0], args[1] = args[1], args[0]
-						goto ok
-					}
-				}
-
-				// We can't overwrite arg0 (or arg1, if commutative).  So we
-				// need to make a copy of an input so we have a register we can modify.
-
-				// Possible new registers to copy into.
-				m = s.compatRegs(v.Args[0].Type) &^ s.used
-				if m == 0 {
-					// No free registers.  In this case we'll just clobber
-					// an input and future uses of that input must use a restore.
-					// TODO(khr): We should really do this like allocReg does it,
-					// spilling the value with the most distant next use.
-					goto ok
-				}
-
-				// Try to move an input to the desired output.
-				for _, r := range dinfo[idx].out {
-					if r != noRegister && m>>r&1 != 0 {
-						m = regMask(1) << r
-						args[0] = s.allocValToReg(v.Args[0], m, true, v.Line)
-						// Note: we update args[0] so the instruction will
-						// use the register copy we just made.
-						goto ok
-					}
-				}
-				// Try to copy input to its desired location & use its old
-				// location as the result register.
-				for _, r := range dinfo[idx].in[0] {
-					if r != noRegister && m>>r&1 != 0 {
-						m = regMask(1) << r
-						c := s.allocValToReg(v.Args[0], m, true, v.Line)
-						s.copies[c] = false
-						// Note: no update to args[0] so the instruction will
-						// use the original copy.
-						goto ok
-					}
-				}
-				if opcodeTable[v.Op].commutative {
-					for _, r := range dinfo[idx].in[1] {
-						if r != noRegister && m>>r&1 != 0 {
-							m = regMask(1) << r
-							c := s.allocValToReg(v.Args[1], m, true, v.Line)
-							s.copies[c] = false
-							args[0], args[1] = args[1], args[0]
-							goto ok
-						}
-					}
-				}
-				// Avoid future fixed uses if we can.
-				if m&^desired.avoid != 0 {
-					m &^= desired.avoid
-				}
-				// Save input 0 to a new register so we can clobber it.
-				c := s.allocValToReg(v.Args[0], m, true, v.Line)
-				s.copies[c] = false
-			}
-
-		ok:
-			// Now that all args are in regs, we're ready to issue the value itself.
-			// Before we pick a register for the output value, allow input registers
-			// to be deallocated. We do this here so that the output can use the
-			// same register as a dying input.
-			if !opcodeTable[v.Op].resultNotInArgs {
-				s.tmpused = s.nospill
-				s.nospill = 0
-				s.advanceUses(v) // frees any registers holding args that are no longer live
-			}
-
-			// Dump any registers which will be clobbered
-			s.freeRegs(regspec.clobbers)
-			s.tmpused |= regspec.clobbers
-
-			// Pick registers for outputs.
-			{
-				outRegs := [2]register{noRegister, noRegister}
-				var used regMask
-				for _, out := range regspec.outputs {
-					mask := out.regs & s.allocatable &^ used
-					if mask == 0 {
-						continue
-					}
-					if opcodeTable[v.Op].resultInArg0 && out.idx == 0 {
-						if !opcodeTable[v.Op].commutative {
-							// Output must use the same register as input 0.
-							r := register(s.f.getHome(args[0].ID).(*Register).num)
-							mask = regMask(1) << r
-						} else {
-							// Output must use the same register as input 0 or 1.
-							r0 := register(s.f.getHome(args[0].ID).(*Register).num)
-							r1 := register(s.f.getHome(args[1].ID).(*Register).num)
-							// Check r0 and r1 for desired output register.
-							found := false
-							for _, r := range dinfo[idx].out {
-								if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
-									mask = regMask(1) << r
-									found = true
-									if r == r1 {
-										args[0], args[1] = args[1], args[0]
-									}
-									break
-								}
-							}
-							if !found {
-								// Neither are desired, pick r0.
-								mask = regMask(1) << r0
-							}
-						}
-					}
-					for _, r := range dinfo[idx].out {
-						if r != noRegister && (mask&^s.used)>>r&1 != 0 {
-							// Desired register is allowed and unused.
-							mask = regMask(1) << r
-							break
-						}
-					}
-					// Avoid registers we're saving for other values.
-					if mask&^desired.avoid != 0 {
-						mask &^= desired.avoid
-					}
-					r := s.allocReg(mask, v)
-					outRegs[out.idx] = r
-					used |= regMask(1) << r
-					s.tmpused |= regMask(1) << r
-				}
-				// Record register choices
-				if v.Type.IsTuple() {
-					var outLocs LocPair
-					if r := outRegs[0]; r != noRegister {
-						outLocs[0] = &s.registers[r]
-					}
-					if r := outRegs[1]; r != noRegister {
-						outLocs[1] = &s.registers[r]
-					}
-					s.f.setHome(v, outLocs)
-					// Note that subsequent SelectX instructions will do the assignReg calls.
-				} else {
-					if r := outRegs[0]; r != noRegister {
-						s.assignReg(r, v, v)
-					}
-				}
-			}
-
-			// deallocate dead args, if we have not done so
-			if opcodeTable[v.Op].resultNotInArgs {
-				s.nospill = 0
-				s.advanceUses(v) // frees any registers holding args that are no longer live
-			}
-			s.tmpused = 0
-
-			// Issue the Value itself.
-			for i, a := range args {
-				v.SetArg(i, a) // use register version of arguments
-			}
-			b.Values = append(b.Values, v)
-
-			// Issue a spill for this value. We issue spills unconditionally,
-			// then at the end of regalloc delete the ones we never use.
-			// TODO: schedule the spill at a point that dominates all restores.
-			// The restore may be off in an unlikely branch somewhere and it
-			// would be better to have the spill in that unlikely branch as well.
-			// v := ...
-			// if unlikely {
-			//     f()
-			// }
-			// It would be good to have both spill and restore inside the IF.
-		issueSpill:
-			if s.values[v.ID].needReg {
-				spill := b.NewValue1(v.Line, OpStoreReg, v.Type, v)
-				s.setOrig(spill, v)
-				s.values[v.ID].spill = spill
-				s.values[v.ID].spillUsed = false
-				if loop != nil {
-					loop.spills = append(loop.spills, v)
-					nSpillsInner++
-				}
-				nSpills++
-			}
-		}
-
-		// Load control value into reg.
-		if v := b.Control; v != nil && s.values[v.ID].needReg {
-			if s.f.pass.debug > regDebug {
-				fmt.Printf("  processing control %s\n", v.LongString())
-			}
-			// We assume that a control input can be passed in any
-			// type-compatible register. If this turns out not to be true,
-			// we'll need to introduce a regspec for a block's control value.
-			b.Control = s.allocValToReg(v, s.compatRegs(v.Type), false, b.Line)
-			if b.Control != v {
-				v.Uses--
-				b.Control.Uses++
-			}
-			// Remove this use from the uses list.
-			vi := &s.values[v.ID]
-			u := vi.uses
-			vi.uses = u.next
-			if u.next == nil {
-				s.freeRegs(vi.regs) // value is dead
-			}
-			u.next = s.freeUseRecords
-			s.freeUseRecords = u
-		}
-
-		// Spill any values that can't live across basic block boundaries.
-		if s.f.Config.use387 {
-			s.freeRegs(s.f.Config.fpRegMask)
-		}
-
-		// If we are approaching a merge point and we are the primary
-		// predecessor of it, find live values that we use soon after
-		// the merge point and promote them to registers now.
-		if len(b.Succs) == 1 {
-			// For this to be worthwhile, the loop must have no calls in it.
-			top := b.Succs[0].b
-			loop := s.loopnest.b2l[top.ID]
-			if loop == nil || loop.header != top || loop.containsCall {
-				goto badloop
-			}
-
-			// TODO: sort by distance, pick the closest ones?
-			for _, live := range s.live[b.ID] {
-				if live.dist >= unlikelyDistance {
-					// Don't preload anything live after the loop.
-					continue
-				}
-				vid := live.ID
-				vi := &s.values[vid]
-				if vi.regs != 0 {
-					continue
-				}
-				if vi.rematerializeable {
-					continue
-				}
-				v := s.orig[vid]
-				if s.f.Config.use387 && v.Type.IsFloat() {
-					continue // 387 can't handle floats in registers between blocks
-				}
-				m := s.compatRegs(v.Type) &^ s.used
-				if m&^desired.avoid != 0 {
-					m &^= desired.avoid
-				}
-				if m != 0 {
-					s.allocValToReg(v, m, false, b.Line)
-				}
-			}
-		}
-	badloop:
-		;
-
-		// Save end-of-block register state.
-		// First count how many, this cuts allocations in half.
-		k := 0
-		for r := register(0); r < s.numRegs; r++ {
-			v := s.regs[r].v
-			if v == nil {
-				continue
-			}
-			k++
-		}
-		regList := make([]endReg, 0, k)
-		for r := register(0); r < s.numRegs; r++ {
-			v := s.regs[r].v
-			if v == nil {
-				continue
-			}
-			regList = append(regList, endReg{r, v, s.regs[r].c})
-		}
-		s.endRegs[b.ID] = regList
-
-		if checkEnabled {
-			liveSet.clear()
-			for _, x := range s.live[b.ID] {
-				liveSet.add(x.ID)
-			}
-			for r := register(0); r < s.numRegs; r++ {
-				v := s.regs[r].v
-				if v == nil {
-					continue
-				}
-				if !liveSet.contains(v.ID) {
-					s.f.Fatalf("val %s is in reg but not live at end of %s", v, b)
-				}
-			}
-		}
-
-		// If a value is live at the end of the block and
-		// isn't in a register, remember that its spill location
-		// is live. We need to remember this information so that
-		// the liveness analysis in stackalloc is correct.
-		for _, e := range s.live[b.ID] {
-			if s.values[e.ID].regs != 0 {
-				// in a register, we'll use that source for the merge.
-				continue
-			}
-			spill := s.values[e.ID].spill
-			if spill == nil {
-				// rematerializeable values will have spill==nil.
-				continue
-			}
-			s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
-			s.values[e.ID].spillUsed = true
-		}
-
-		// Keep track of values that are spilled in the loop, but whose spill
-		// is not used in the loop.  It may be possible to move ("sink") the
-		// spill out of the loop into one or more exit blocks.
-		if loop != nil {
-			loop.scratch++                    // increment count of blocks in this loop that have been processed
-			if loop.scratch == loop.nBlocks { // just processed last block of loop, if it is an inner loop.
-				// This check is redundant with code at the top of the loop.
-				// This is definitive; the one at the top of the loop is an optimization.
-				if loop.isInner && // Common case, easier, most likely to be profitable
-					!loop.containsCall && // Calls force spills, also lead to puzzling spill info.
-					len(loop.exits) <= 32 { // Almost no inner loops have more than 32 exits,
-					// and this allows use of a bitvector and a sparseMap.
-
-					// TODO: exit calculation is messed up for non-inner loops
-					// because of multilevel exits that are not part of the "exit"
-					// count.
-
-					// Compute the set of spill-movement candidates live at entry to exit blocks.
-					// isLoopSpillCandidate filters for
-					// (1) defined in appropriate loop
-					// (2) needs a register
-					// (3) spill not already used (in the loop)
-					// Condition (3) === "in a register at all loop exits"
-
-					entryCandidates.clear()
-
-					for whichExit, ss := range loop.exits {
-						// Start with live at end.
-						for _, li := range s.live[ss.ID] {
-							if s.isLoopSpillCandidate(loop, s.orig[li.ID]) {
-								// s.live contains original IDs, use s.orig above to map back to *Value
-								entryCandidates.setBit(li.ID, uint(whichExit))
-							}
-						}
-						// Control can also be live.
-						if ss.Control != nil && s.orig[ss.Control.ID] != nil && s.isLoopSpillCandidate(loop, s.orig[ss.Control.ID]) {
-							entryCandidates.setBit(s.orig[ss.Control.ID].ID, uint(whichExit))
-						}
-						// Walk backwards, filling in locally live values, removing those defined.
-						for i := len(ss.Values) - 1; i >= 0; i-- {
-							v := ss.Values[i]
-							vorig := s.orig[v.ID]
-							if vorig != nil {
-								entryCandidates.remove(vorig.ID) // Cannot be an issue, only keeps the sets smaller.
-							}
-							for _, a := range v.Args {
-								aorig := s.orig[a.ID]
-								if aorig != nil && s.isLoopSpillCandidate(loop, aorig) {
-									entryCandidates.setBit(aorig.ID, uint(whichExit))
-								}
-							}
-						}
-					}
-
-					for _, e := range loop.spills {
-						whichblocks := entryCandidates.get(e.ID)
-						oldSpill := s.values[e.ID].spill
-						if whichblocks != 0 && whichblocks != -1 { // -1 = not in map.
-							toSink = append(toSink, spillToSink{spill: oldSpill, dests: whichblocks})
-						}
-					}
-
-				} // loop is inner etc
-				loop.scratch = 0 // Don't leave a mess, just in case.
-				loop.spills = nil
-			} // if scratch == nBlocks
-		} // if loop is not nil
-
-		// Clear any final uses.
-		// All that is left should be the pseudo-uses added for values which
-		// are live at the end of b.
-		for _, e := range s.live[b.ID] {
-			u := s.values[e.ID].uses
-			if u == nil {
-				f.Fatalf("live at end, no uses v%d", e.ID)
-			}
-			if u.next != nil {
-				f.Fatalf("live at end, too many uses v%d", e.ID)
-			}
-			s.values[e.ID].uses = nil
-			u.next = s.freeUseRecords
-			s.freeUseRecords = u
-		}
-	}
-
-	// Erase any spills we never used
-	for i := range s.values {
-		vi := s.values[i]
-		if vi.spillUsed {
-			if s.f.pass.debug > logSpills && vi.spill.Op != OpArg {
-				s.f.Config.Warnl(vi.spill.Line, "spilled value at %v remains", vi.spill)
-			}
-			continue
-		}
-		spill := vi.spill
-		if spill == nil {
-			// Constants, SP, SB, ...
-			continue
-		}
-		loop := s.loopForBlock(spill.Block)
-		if loop != nil {
-			nSpillsInner--
-		}
-
-		spill.Args[0].Uses--
-		f.freeValue(spill)
-		nSpills--
-	}
-
-	for _, b := range f.Blocks {
-		i := 0
-		for _, v := range b.Values {
-			if v.Op == OpInvalid {
-				continue
-			}
-			b.Values[i] = v
-			i++
-		}
-		b.Values = b.Values[:i]
-		// TODO: zero b.Values[i:], recycle Values
-		// Not important now because this is the last phase that manipulates Values
-	}
-
-	// Must clear these out before any potential recycling, though that's
-	// not currently implemented.
-	for i, ts := range toSink {
-		vsp := ts.spill
-		if vsp.Op == OpInvalid { // This spill was completely eliminated
-			toSink[i].spill = nil
-		}
-	}
-
-	// Anything that didn't get a register gets a stack location here.
-	// (StoreReg, stack-based phis, inputs, ...)
-	stacklive := stackalloc(s.f, s.spillLive)
-
-	// Fix up all merge edges.
-	s.shuffle(stacklive)
-
-	// Insert moved spills (that have not been marked invalid above)
-	// at start of appropriate block and remove the originals from their
-	// location within loops.  Notice that this can break SSA form;
-	// if a spill is sunk to multiple exits, there will be no phi for that
-	// spill at a join point downstream of those two exits, though the
-	// two spills will target the same stack slot.  Notice also that this
-	// takes place after stack allocation, so the stack allocator does
-	// not need to process these malformed flow graphs.
-sinking:
-	for _, ts := range toSink {
-		vsp := ts.spill
-		if vsp == nil { // This spill was completely eliminated
-			nSpillsSunkUnused++
-			continue sinking
-		}
-		e := ts.spilledValue()
-		if s.values[e.ID].spillUsedShuffle {
-			nSpillsNotSunkLateUse++
-			continue sinking
-		}
-
-		// move spills to a better (outside of loop) block.
-		// This would be costly if it occurred very often, but it doesn't.
-		b := vsp.Block
-		loop := s.loopnest.b2l[b.ID]
-		dests := ts.dests
-
-		// Pre-check to be sure that spilled value is still in expected register on all exits where live.
-	check_val_still_in_reg:
-		for i := uint(0); i < 32 && dests != 0; i++ {
-
-			if dests&(1<<i) == 0 {
-				continue
-			}
-			dests ^= 1 << i
-			d := loop.exits[i]
-			if len(d.Preds) > 1 {
-				panic("Should be impossible given critical edges removed")
-			}
-			p := d.Preds[0].b // block in loop exiting to d.
-
-			endregs := s.endRegs[p.ID]
-			for _, regrec := range endregs {
-				if regrec.v == e && regrec.r != noRegister && regrec.c == e { // TODO: regrec.c != e implies different spill possible.
-					continue check_val_still_in_reg
-				}
-			}
-			// If here, the register assignment was lost down at least one exit and it can't be sunk
-			if s.f.pass.debug > moveSpills {
-				s.f.Config.Warnl(e.Line, "lost register assignment for spill %v in %v at exit %v to %v",
-					vsp, b, p, d)
-			}
-			nSpillsChanged++
-			continue sinking
-		}
-
-		nSpillsSunk++
-		nSpillsInner--
-		// don't update nSpills, since spill is only moved, and if it is duplicated, the spills-on-a-path is not increased.
-
-		dests = ts.dests
-
-		// remove vsp from b.Values
-		i := 0
-		for _, w := range b.Values {
-			if vsp == w {
-				continue
-			}
-			b.Values[i] = w
-			i++
-		}
-		b.Values = b.Values[:i]
-
-		first := true
-		for i := uint(0); i < 32 && dests != 0; i++ {
-
-			if dests&(1<<i) == 0 {
-				continue
-			}
-
-			dests ^= 1 << i
-
-			d := loop.exits[i]
-			vspnew := vsp // reuse original for first sunk spill, saves tracking down and renaming uses
-			if !first {   // any sunk spills after first must make a copy
-				vspnew = d.NewValue1(e.Line, OpStoreReg, e.Type, e)
-				f.setHome(vspnew, f.getHome(vsp.ID)) // copy stack home
-				if s.f.pass.debug > moveSpills {
-					s.f.Config.Warnl(e.Line, "copied spill %v in %v for %v to %v in %v",
-						vsp, b, e, vspnew, d)
-				}
-			} else {
-				first = false
-				vspnew.Block = d
-				d.Values = append(d.Values, vspnew)
-				if s.f.pass.debug > moveSpills {
-					s.f.Config.Warnl(e.Line, "moved spill %v in %v for %v to %v in %v",
-						vsp, b, e, vspnew, d)
-				}
-			}
-
-			// shuffle vspnew to the beginning of its block
-			copy(d.Values[1:], d.Values[0:len(d.Values)-1])
-			d.Values[0] = vspnew
-
-		}
-	}
-
-	// Erase any copies we never used.
-	// Also, an unused copy might be the only use of another copy,
-	// so continue erasing until we reach a fixed point.
-	for {
-		progress := false
-		for c, used := range s.copies {
-			if !used && c.Uses == 0 {
-				if s.f.pass.debug > regDebug {
-					fmt.Printf("delete copied value %s\n", c.LongString())
-				}
-				c.Args[0].Uses--
-				f.freeValue(c)
-				delete(s.copies, c)
-				progress = true
-			}
-		}
-		if !progress {
-			break
-		}
-	}
-
-	for _, b := range f.Blocks {
-		i := 0
-		for _, v := range b.Values {
-			if v.Op == OpInvalid {
-				continue
-			}
-			b.Values[i] = v
-			i++
-		}
-		b.Values = b.Values[:i]
-	}
-
-	if f.pass.stats > 0 {
-		f.LogStat("spills_info",
-			nSpills, "spills", nSpillsInner, "inner_spills_remaining", nSpillsSunk, "inner_spills_sunk", nSpillsSunkUnused, "inner_spills_unused", nSpillsNotSunkLateUse, "inner_spills_shuffled", nSpillsChanged, "inner_spills_changed")
-	}
-}
-
-// isLoopSpillCandidate indicates whether the spill for v satisfies preliminary
-// spill-sinking conditions just after the last block of loop has been processed.
-// In particular:
-//   v needs a register.
-//   v's spill is not (YET) used.
-//   v's definition is within loop.
-// The spill may be used in the future, either by an outright use
-// in the code, or by shuffling code inserted after stack allocation.
-// Outright uses cause sinking; shuffling (within the loop) inhibits it.
-func (s *regAllocState) isLoopSpillCandidate(loop *loop, v *Value) bool {
-	return s.values[v.ID].needReg && !s.values[v.ID].spillUsed && s.loopnest.b2l[v.Block.ID] == loop
-}
-
-// lateSpillUse notes a late (after stack allocation) use of the spill of value with ID vid.
-// This will inhibit spill sinking.
-func (s *regAllocState) lateSpillUse(vid ID) {
-	// TODO investigate why this is necessary.
-	// It appears that an outside-the-loop use of
-	// an otherwise sinkable spill makes the spill
-	// a candidate for shuffling, when it would not
-	// otherwise have been the case (spillUsed was not
-	// true when isLoopSpillCandidate was called, yet
-	// it was shuffled).  Such shuffling cuts the amount
-	// of spill sinking by more than half (in make.bash)
-	s.values[vid].spillUsedShuffle = true
-}
-
-// shuffle fixes up all the merge edges (those going into blocks of indegree > 1).
-func (s *regAllocState) shuffle(stacklive [][]ID) {
-	var e edgeState
-	e.s = s
-	e.cache = map[ID][]*Value{}
-	e.contents = map[Location]contentRecord{}
-	if s.f.pass.debug > regDebug {
-		fmt.Printf("shuffle %s\n", s.f.Name)
-		fmt.Println(s.f.String())
-	}
-
-	for _, b := range s.f.Blocks {
-		if len(b.Preds) <= 1 {
-			continue
-		}
-		e.b = b
-		for i, edge := range b.Preds {
-			p := edge.b
-			e.p = p
-			e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID])
-			e.process()
-		}
-	}
-}
-
-type edgeState struct {
-	s    *regAllocState
-	p, b *Block // edge goes from p->b.
-
-	// for each pre-regalloc value, a list of equivalent cached values
-	cache      map[ID][]*Value
-	cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
-
-	// map from location to the value it contains
-	contents map[Location]contentRecord
-
-	// desired destination locations
-	destinations []dstRecord
-	extra        []dstRecord
-
-	usedRegs   regMask // registers currently holding something
-	uniqueRegs regMask // registers holding the only copy of a value
-	finalRegs  regMask // registers holding final target
-}
-
-type contentRecord struct {
-	vid   ID     // pre-regalloc value
-	c     *Value // cached value
-	final bool   // this is a satisfied destination
-	line  int32  // line number of use of the value
-}
-
-type dstRecord struct {
-	loc    Location // register or stack slot
-	vid    ID       // pre-regalloc value it should contain
-	splice **Value  // place to store reference to the generating instruction
-	line   int32    // line number of use of this location
-}
-
-// setup initializes the edge state for shuffling.
-func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) {
-	if e.s.f.pass.debug > regDebug {
-		fmt.Printf("edge %s->%s\n", e.p, e.b)
-	}
-
-	// Clear state.
-	for _, vid := range e.cachedVals {
-		delete(e.cache, vid)
-	}
-	e.cachedVals = e.cachedVals[:0]
-	for k := range e.contents {
-		delete(e.contents, k)
-	}
-	e.usedRegs = 0
-	e.uniqueRegs = 0
-	e.finalRegs = 0
-
-	// Live registers can be sources.
-	for _, x := range srcReg {
-		e.set(&e.s.registers[x.r], x.v.ID, x.c, false, 0) // don't care about the line number of the source
-	}
-	// So can all of the spill locations.
-	for _, spillID := range stacklive {
-		v := e.s.orig[spillID]
-		spill := e.s.values[v.ID].spill
-		e.set(e.s.f.getHome(spillID), v.ID, spill, false, 0) // don't care about the line number of the source
-	}
-
-	// Figure out all the destinations we need.
-	dsts := e.destinations[:0]
-	for _, x := range dstReg {
-		dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.vid, nil, x.line})
-	}
-	// Phis need their args to end up in a specific location.
-	for _, v := range e.b.Values {
-		if v.Op != OpPhi {
-			break
-		}
-		loc := e.s.f.getHome(v.ID)
-		if loc == nil {
-			continue
-		}
-		dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx], v.Line})
-	}
-	e.destinations = dsts
-
-	if e.s.f.pass.debug > regDebug {
-		for _, vid := range e.cachedVals {
-			a := e.cache[vid]
-			for _, c := range a {
-				fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID).Name(), vid, c)
-			}
-		}
-		for _, d := range e.destinations {
-			fmt.Printf("dst %s: v%d\n", d.loc.Name(), d.vid)
-		}
-	}
-}
-
-// process generates code to move all the values to the right destination locations.
-func (e *edgeState) process() {
-	dsts := e.destinations
-
-	// Process the destinations until they are all satisfied.
-	for len(dsts) > 0 {
-		i := 0
-		for _, d := range dsts {
-			if !e.processDest(d.loc, d.vid, d.splice, d.line) {
-				// Failed - save for next iteration.
-				dsts[i] = d
-				i++
-			}
-		}
-		if i < len(dsts) {
-			// Made some progress. Go around again.
-			dsts = dsts[:i]
-
-			// Append any extra destinations we generated.
-			dsts = append(dsts, e.extra...)
-			e.extra = e.extra[:0]
-			continue
-		}
-
-		// We made no progress. That means that any
-		// remaining unsatisfied moves are in simple cycles.
-		// For example, A -> B -> C -> D -> A.
-		//   A ----> B
-		//   ^       |
-		//   |       |
-		//   |       v
-		//   D <---- C
-
-		// To break the cycle, we pick an unused register, say R,
-		// and put a copy of B there.
-		//   A ----> B
-		//   ^       |
-		//   |       |
-		//   |       v
-		//   D <---- C <---- R=copyofB
-		// When we resume the outer loop, the A->B move can now proceed,
-		// and eventually the whole cycle completes.
-
-		// Copy any cycle location to a temp register. This duplicates
-		// one of the cycle entries, allowing the just duplicated value
-		// to be overwritten and the cycle to proceed.
-		d := dsts[0]
-		loc := d.loc
-		vid := e.contents[loc].vid
-		c := e.contents[loc].c
-		r := e.findRegFor(c.Type)
-		if e.s.f.pass.debug > regDebug {
-			fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc.Name(), c)
-		}
-		if _, isReg := loc.(*Register); isReg {
-			c = e.p.NewValue1(d.line, OpCopy, c.Type, c)
-		} else {
-			e.s.lateSpillUse(vid)
-			c = e.p.NewValue1(d.line, OpLoadReg, c.Type, c)
-		}
-		e.set(r, vid, c, false, d.line)
-	}
-}
-
-// processDest generates code to put value vid into location loc. Returns true
-// if progress was made.
-func (e *edgeState) processDest(loc Location, vid ID, splice **Value, line int32) bool {
-	occupant := e.contents[loc]
-	if occupant.vid == vid {
-		// Value is already in the correct place.
-		e.contents[loc] = contentRecord{vid, occupant.c, true, line}
-		if splice != nil {
-			(*splice).Uses--
-			*splice = occupant.c
-			occupant.c.Uses++
-			if occupant.c.Op == OpStoreReg {
-				e.s.lateSpillUse(vid)
-			}
-		}
-		// Note: if splice==nil then c will appear dead. This is
-		// non-SSA formed code, so be careful after this pass not to run
-		// deadcode elimination.
-		if _, ok := e.s.copies[occupant.c]; ok {
-			// The copy at occupant.c was used to avoid spill.
-			e.s.copies[occupant.c] = true
-		}
-		return true
-	}
-
-	// Check if we're allowed to clobber the destination location.
-	if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable {
-		// We can't overwrite the last copy
-		// of a value that needs to survive.
-		return false
-	}
-
-	// Copy from a source of v, register preferred.
-	v := e.s.orig[vid]
-	var c *Value
-	var src Location
-	if e.s.f.pass.debug > regDebug {
-		fmt.Printf("moving v%d to %s\n", vid, loc.Name())
-		fmt.Printf("sources of v%d:", vid)
-	}
-	for _, w := range e.cache[vid] {
-		h := e.s.f.getHome(w.ID)
-		if e.s.f.pass.debug > regDebug {
-			fmt.Printf(" %s:%s", h.Name(), w)
-		}
-		_, isreg := h.(*Register)
-		if src == nil || isreg {
-			c = w
-			src = h
-		}
-	}
-	if e.s.f.pass.debug > regDebug {
-		if src != nil {
-			fmt.Printf(" [use %s]\n", src.Name())
-		} else {
-			fmt.Printf(" [no source]\n")
-		}
-	}
-	_, dstReg := loc.(*Register)
-	var x *Value
-	if c == nil {
-		if !e.s.values[vid].rematerializeable {
-			e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString())
-		}
-		if dstReg {
-			x = v.copyInto(e.p)
-		} else {
-			// Rematerialize into stack slot. Need a free
-			// register to accomplish this.
-			e.erase(loc) // see pre-clobber comment below
-			r := e.findRegFor(v.Type)
-			x = v.copyInto(e.p)
-			e.set(r, vid, x, false, line)
-			// Make sure we spill with the size of the slot, not the
-			// size of x (which might be wider due to our dropping
-			// of narrowing conversions).
-			x = e.p.NewValue1(line, OpStoreReg, loc.(LocalSlot).Type, x)
-		}
-	} else {
-		// Emit move from src to dst.
-		_, srcReg := src.(*Register)
-		if srcReg {
-			if dstReg {
-				x = e.p.NewValue1(line, OpCopy, c.Type, c)
-			} else {
-				x = e.p.NewValue1(line, OpStoreReg, loc.(LocalSlot).Type, c)
-			}
-		} else {
-			if dstReg {
-				e.s.lateSpillUse(vid)
-				x = e.p.NewValue1(line, OpLoadReg, c.Type, c)
-			} else {
-				// mem->mem. Use temp register.
-
-				// Pre-clobber destination. This avoids the
-				// following situation:
-				//   - v is currently held in R0 and stacktmp0.
-				//   - We want to copy stacktmp1 to stacktmp0.
-				//   - We choose R0 as the temporary register.
-				// During the copy, both R0 and stacktmp0 are
-				// clobbered, losing both copies of v. Oops!
-				// Erasing the destination early means R0 will not
-				// be chosen as the temp register, as it will then
-				// be the last copy of v.
-				e.erase(loc)
-
-				r := e.findRegFor(c.Type)
-				e.s.lateSpillUse(vid)
-				t := e.p.NewValue1(line, OpLoadReg, c.Type, c)
-				e.set(r, vid, t, false, line)
-				x = e.p.NewValue1(line, OpStoreReg, loc.(LocalSlot).Type, t)
-			}
-		}
-	}
-	e.set(loc, vid, x, true, line)
-	if splice != nil {
-		(*splice).Uses--
-		*splice = x
-		x.Uses++
-	}
-	return true
-}
-
-// set changes the contents of location loc to hold the given value and its cached representative.
-func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, line int32) {
-	e.s.f.setHome(c, loc)
-	e.erase(loc)
-	e.contents[loc] = contentRecord{vid, c, final, line}
-	a := e.cache[vid]
-	if len(a) == 0 {
-		e.cachedVals = append(e.cachedVals, vid)
-	}
-	a = append(a, c)
-	e.cache[vid] = a
-	if r, ok := loc.(*Register); ok {
-		e.usedRegs |= regMask(1) << uint(r.num)
-		if final {
-			e.finalRegs |= regMask(1) << uint(r.num)
-		}
-		if len(a) == 1 {
-			e.uniqueRegs |= regMask(1) << uint(r.num)
-		}
-		if len(a) == 2 {
-			if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
-				e.uniqueRegs &^= regMask(1) << uint(t.num)
-			}
-		}
-	}
-	if e.s.f.pass.debug > regDebug {
-		fmt.Printf("%s\n", c.LongString())
-		fmt.Printf("v%d now available in %s:%s\n", vid, loc.Name(), c)
-	}
-}
-
-// erase removes any user of loc.
-func (e *edgeState) erase(loc Location) {
-	cr := e.contents[loc]
-	if cr.c == nil {
-		return
-	}
-	vid := cr.vid
-
-	if cr.final {
-		// Add a destination to move this value back into place.
-		// Make sure it gets added to the tail of the destination queue
-		// so we make progress on other moves first.
-		e.extra = append(e.extra, dstRecord{loc, cr.vid, nil, cr.line})
-	}
-
-	// Remove c from the list of cached values.
-	a := e.cache[vid]
-	for i, c := range a {
-		if e.s.f.getHome(c.ID) == loc {
-			if e.s.f.pass.debug > regDebug {
-				fmt.Printf("v%d no longer available in %s:%s\n", vid, loc.Name(), c)
-			}
-			a[i], a = a[len(a)-1], a[:len(a)-1]
-			break
-		}
-	}
-	e.cache[vid] = a
-
-	// Update register masks.
-	if r, ok := loc.(*Register); ok {
-		e.usedRegs &^= regMask(1) << uint(r.num)
-		if cr.final {
-			e.finalRegs &^= regMask(1) << uint(r.num)
-		}
-	}
-	if len(a) == 1 {
-		if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
-			e.uniqueRegs |= regMask(1) << uint(r.num)
-		}
-	}
-}
-
-// findRegFor finds a register we can use to make a temp copy of type typ.
-func (e *edgeState) findRegFor(typ Type) Location {
-	// Which registers are possibilities.
-	var m regMask
-	if typ.IsFloat() {
-		m = e.s.compatRegs(e.s.f.Config.fe.TypeFloat64())
-	} else {
-		m = e.s.compatRegs(e.s.f.Config.fe.TypeInt64())
-	}
-
-	// Pick a register. In priority order:
-	// 1) an unused register
-	// 2) a non-unique register not holding a final value
-	// 3) a non-unique register
-	x := m &^ e.usedRegs
-	if x != 0 {
-		return &e.s.registers[pickReg(x)]
-	}
-	x = m &^ e.uniqueRegs &^ e.finalRegs
-	if x != 0 {
-		return &e.s.registers[pickReg(x)]
-	}
-	x = m &^ e.uniqueRegs
-	if x != 0 {
-		return &e.s.registers[pickReg(x)]
-	}
-
-	// No register is available. Allocate a temp location to spill a register to.
-	// The type of the slot is immaterial - it will not be live across
-	// any safepoint. Just use a type big enough to hold any register.
-	typ = e.s.f.Config.fe.TypeInt64()
-	t := LocalSlot{e.s.f.Config.fe.Auto(typ), typ, 0}
-	// TODO: reuse these slots.
-
-	// Pick a register to spill.
-	for _, vid := range e.cachedVals {
-		a := e.cache[vid]
-		for _, c := range a {
-			if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.num)&1 != 0 {
-				x := e.p.NewValue1(c.Line, OpStoreReg, c.Type, c)
-				e.set(t, vid, x, false, c.Line)
-				if e.s.f.pass.debug > regDebug {
-					fmt.Printf("  SPILL %s->%s %s\n", r.Name(), t.Name(), x.LongString())
-				}
-				// r will now be overwritten by the caller. At some point
-				// later, the newly saved value will be moved back to its
-				// final destination in processDest.
-				return r
-			}
-		}
-	}
-
-	fmt.Printf("m:%d unique:%d final:%d\n", m, e.uniqueRegs, e.finalRegs)
-	for _, vid := range e.cachedVals {
-		a := e.cache[vid]
-		for _, c := range a {
-			fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID).Name())
-		}
-	}
-	e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b)
-	return nil
-}
-
-// rematerializeable reports whether the register allocator should recompute
-// a value instead of spilling/restoring it.
-func (v *Value) rematerializeable() bool {
-	if !opcodeTable[v.Op].rematerializeable {
-		return false
-	}
-	for _, a := range v.Args {
-		// SP and SB (generated by OpSP and OpSB) are always available.
-		if a.Op != OpSP && a.Op != OpSB {
-			return false
-		}
-	}
-	return true
-}
-
-type liveInfo struct {
-	ID   ID    // ID of value
-	dist int32 // # of instructions before next use
-	line int32 // line number of next use
-}
-
-// dblock contains information about desired & avoid registers at the end of a block.
-type dblock struct {
-	prefers []desiredStateEntry
-	avoid   regMask
-}
-
-// computeLive computes a map from block ID to a list of value IDs live at the end
-// of that block. Together with the value ID is a count of how many instructions
-// to the next use of that value. The resulting map is stored in s.live.
-// computeLive also computes the desired register information at the end of each block.
-// This desired register information is stored in s.desired.
-// TODO: this could be quadratic if lots of variables are live across lots of
-// basic blocks. Figure out a way to make this function (or, more precisely, the user
-// of this function) require only linear size & time.
-func (s *regAllocState) computeLive() {
-	f := s.f
-	s.live = make([][]liveInfo, f.NumBlocks())
-	s.desired = make([]desiredState, f.NumBlocks())
-	var phis []*Value
-
-	live := newSparseMap(f.NumValues())
-	t := newSparseMap(f.NumValues())
-
-	// Keep track of which value we want in each register.
-	var desired desiredState
-
-	// Instead of iterating over f.Blocks, iterate over their postordering.
-	// Liveness information flows backward, so starting at the end
-	// increases the probability that we will stabilize quickly.
-	// TODO: Do a better job yet. Here's one possibility:
-	// Calculate the dominator tree and locate all strongly connected components.
-	// If a value is live in one block of an SCC, it is live in all.
-	// Walk the dominator tree from end to beginning, just once, treating SCC
-	// components as single blocks, duplicating the calculated liveness information
-	// out to all of them.
-	po := f.postorder()
-	s.loopnest = f.loopnest()
-	for {
-		changed := false
-
-		for _, b := range po {
-			// Start with known live values at the end of the block.
-			// Add len(b.Values) to adjust from end-of-block distance
-			// to beginning-of-block distance.
-			live.clear()
-			for _, e := range s.live[b.ID] {
-				live.set(e.ID, e.dist+int32(len(b.Values)), e.line)
-			}
-
-			// Mark control value as live
-			if b.Control != nil && s.values[b.Control.ID].needReg {
-				live.set(b.Control.ID, int32(len(b.Values)), b.Line)
-			}
-
-			// Propagate backwards to the start of the block
-			// Assumes Values have been scheduled.
-			phis = phis[:0]
-			for i := len(b.Values) - 1; i >= 0; i-- {
-				v := b.Values[i]
-				live.remove(v.ID)
-				if v.Op == OpPhi {
-					// save phi ops for later
-					phis = append(phis, v)
-					continue
-				}
-				if opcodeTable[v.Op].call {
-					c := live.contents()
-					for i := range c {
-						c[i].val += unlikelyDistance
-					}
-				}
-				for _, a := range v.Args {
-					if s.values[a.ID].needReg {
-						live.set(a.ID, int32(i), v.Line)
-					}
-				}
-			}
-			// Propagate desired registers backwards.
-			desired.copy(&s.desired[b.ID])
-			for i := len(b.Values) - 1; i >= 0; i-- {
-				v := b.Values[i]
-				prefs := desired.remove(v.ID)
-				if v.Op == OpPhi {
-					// TODO: if v is a phi, save desired register for phi inputs.
-					// For now, we just drop it and don't propagate
-					// desired registers back though phi nodes.
-					continue
-				}
-				// Cancel desired registers if they get clobbered.
-				desired.clobber(opcodeTable[v.Op].reg.clobbers)
-				// Update desired registers if there are any fixed register inputs.
-				for _, j := range opcodeTable[v.Op].reg.inputs {
-					if countRegs(j.regs) != 1 {
-						continue
-					}
-					desired.clobber(j.regs)
-					desired.add(v.Args[j.idx].ID, pickReg(j.regs))
-				}
-				// Set desired register of input 0 if this is a 2-operand instruction.
-				if opcodeTable[v.Op].resultInArg0 {
-					if opcodeTable[v.Op].commutative {
-						desired.addList(v.Args[1].ID, prefs)
-					}
-					desired.addList(v.Args[0].ID, prefs)
-				}
-			}
-
-			// For each predecessor of b, expand its list of live-at-end values.
-			// invariant: live contains the values live at the start of b (excluding phi inputs)
-			for i, e := range b.Preds {
-				p := e.b
-				// Compute additional distance for the edge.
-				// Note: delta must be at least 1 to distinguish the control
-				// value use from the first user in a successor block.
-				delta := int32(normalDistance)
-				if len(p.Succs) == 2 {
-					if p.Succs[0].b == b && p.Likely == BranchLikely ||
-						p.Succs[1].b == b && p.Likely == BranchUnlikely {
-						delta = likelyDistance
-					}
-					if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
-						p.Succs[1].b == b && p.Likely == BranchLikely {
-						delta = unlikelyDistance
-					}
-				}
-
-				// Update any desired registers at the end of p.
-				s.desired[p.ID].merge(&desired)
-
-				// Start t off with the previously known live values at the end of p.
-				t.clear()
-				for _, e := range s.live[p.ID] {
-					t.set(e.ID, e.dist, e.line)
-				}
-				update := false
-
-				// Add new live values from scanning this block.
-				for _, e := range live.contents() {
-					d := e.val + delta
-					if !t.contains(e.key) || d < t.get(e.key) {
-						update = true
-						t.set(e.key, d, e.aux)
-					}
-				}
-				// Also add the correct arg from the saved phi values.
-				// All phis are at distance delta (we consider them
-				// simultaneously happening at the start of the block).
-				for _, v := range phis {
-					id := v.Args[i].ID
-					if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
-						update = true
-						t.set(id, delta, v.Line)
-					}
-				}
-
-				if !update {
-					continue
-				}
-				// The live set has changed, update it.
-				l := s.live[p.ID][:0]
-				if cap(l) < t.size() {
-					l = make([]liveInfo, 0, t.size())
-				}
-				for _, e := range t.contents() {
-					l = append(l, liveInfo{e.key, e.val, e.aux})
-				}
-				s.live[p.ID] = l
-				changed = true
-			}
-		}
-
-		if !changed {
-			break
-		}
-	}
-	if f.pass.debug > regDebug {
-		fmt.Println("live values at end of each block")
-		for _, b := range f.Blocks {
-			fmt.Printf("  %s:", b)
-			for _, x := range s.live[b.ID] {
-				fmt.Printf(" v%d", x.ID)
-				for _, e := range s.desired[b.ID].entries {
-					if e.ID != x.ID {
-						continue
-					}
-					fmt.Printf("[")
-					first := true
-					for _, r := range e.regs {
-						if r == noRegister {
-							continue
-						}
-						if !first {
-							fmt.Printf(",")
-						}
-						fmt.Print(s.registers[r].Name())
-						first = false
-					}
-					fmt.Printf("]")
-				}
-			}
-			fmt.Printf(" avoid=%x", int64(s.desired[b.ID].avoid))
-			fmt.Println()
-		}
-	}
-}
-
-// A desiredState represents desired register assignments.
-type desiredState struct {
-	// Desired assignments will be small, so we just use a list
-	// of valueID+registers entries.
-	entries []desiredStateEntry
-	// Registers that other values want to be in.  This value will
-	// contain at least the union of the regs fields of entries, but
-	// may contain additional entries for values that were once in
-	// this data structure but are no longer.
-	avoid regMask
-}
-type desiredStateEntry struct {
-	// (pre-regalloc) value
-	ID ID
-	// Registers it would like to be in, in priority order.
-	// Unused slots are filled with noRegister.
-	regs [4]register
-}
-
-func (d *desiredState) clear() {
-	d.entries = d.entries[:0]
-	d.avoid = 0
-}
-
-// get returns a list of desired registers for value vid.
-func (d *desiredState) get(vid ID) [4]register {
-	for _, e := range d.entries {
-		if e.ID == vid {
-			return e.regs
-		}
-	}
-	return [4]register{noRegister, noRegister, noRegister, noRegister}
-}
-
-// add records that we'd like value vid to be in register r.
-func (d *desiredState) add(vid ID, r register) {
-	d.avoid |= regMask(1) << r
-	for i := range d.entries {
-		e := &d.entries[i]
-		if e.ID != vid {
-			continue
-		}
-		if e.regs[0] == r {
-			// Already known and highest priority
-			return
-		}
-		for j := 1; j < len(e.regs); j++ {
-			if e.regs[j] == r {
-				// Move from lower priority to top priority
-				copy(e.regs[1:], e.regs[:j])
-				e.regs[0] = r
-				return
-			}
-		}
-		copy(e.regs[1:], e.regs[:])
-		e.regs[0] = r
-		return
-	}
-	d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}})
-}
-
-func (d *desiredState) addList(vid ID, regs [4]register) {
-	// regs is in priority order, so iterate in reverse order.
-	for i := len(regs) - 1; i >= 0; i-- {
-		r := regs[i]
-		if r != noRegister {
-			d.add(vid, r)
-		}
-	}
-}
-
-// clobber erases any desired registers in the set m.
-func (d *desiredState) clobber(m regMask) {
-	for i := 0; i < len(d.entries); {
-		e := &d.entries[i]
-		j := 0
-		for _, r := range e.regs {
-			if r != noRegister && m>>r&1 == 0 {
-				e.regs[j] = r
-				j++
-			}
-		}
-		if j == 0 {
-			// No more desired registers for this value.
-			d.entries[i] = d.entries[len(d.entries)-1]
-			d.entries = d.entries[:len(d.entries)-1]
-			continue
-		}
-		for ; j < len(e.regs); j++ {
-			e.regs[j] = noRegister
-		}
-		i++
-	}
-	d.avoid &^= m
-}
-
-// copy copies a desired state from another desiredState x.
-func (d *desiredState) copy(x *desiredState) {
-	d.entries = append(d.entries[:0], x.entries...)
-	d.avoid = x.avoid
-}
-
-// remove removes the desired registers for vid and returns them.
-func (d *desiredState) remove(vid ID) [4]register {
-	for i := range d.entries {
-		if d.entries[i].ID == vid {
-			regs := d.entries[i].regs
-			d.entries[i] = d.entries[len(d.entries)-1]
-			d.entries = d.entries[:len(d.entries)-1]
-			return regs
-		}
-	}
-	return [4]register{noRegister, noRegister, noRegister, noRegister}
-}
-
-// merge merges another desired state x into d.
-func (d *desiredState) merge(x *desiredState) {
-	d.avoid |= x.avoid
-	// There should only be a few desired registers, so
-	// linear insert is ok.
-	for _, e := range x.entries {
-		d.addList(e.ID, e.regs)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/regalloc_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/regalloc_test.go
deleted file mode 100644
index 3c88cbd..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/regalloc_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/regalloc_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/regalloc_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-func TestLiveControlOps(t *testing.T) {
-	c := testConfig(t)
-	f := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("x", OpAMD64MOVLconst, TypeInt8, 1, nil),
-			Valu("y", OpAMD64MOVLconst, TypeInt8, 2, nil),
-			Valu("a", OpAMD64TESTB, TypeFlags, 0, nil, "x", "y"),
-			Valu("b", OpAMD64TESTB, TypeFlags, 0, nil, "y", "x"),
-			Eq("a", "if", "exit"),
-		),
-		Bloc("if",
-			Eq("b", "plain", "exit"),
-		),
-		Bloc("plain",
-			Goto("exit"),
-		),
-		Bloc("exit",
-			Exit("mem"),
-		),
-	)
-	flagalloc(f.f)
-	regalloc(f.f)
-	checkFunc(f.f)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite.go
deleted file mode 100644
index d33458e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite.go
+++ /dev/null
@@ -1,523 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewrite.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewrite.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"math"
-	"os"
-	"path/filepath"
-)
-
-func applyRewrite(f *Func, rb func(*Block, *Config) bool, rv func(*Value, *Config) bool) {
-	// repeat rewrites until we find no more rewrites
-	var curb *Block
-	var curv *Value
-	defer func() {
-		if curb != nil {
-			curb.Fatalf("panic during rewrite of block %s\n", curb.LongString())
-		}
-		if curv != nil {
-			curv.Fatalf("panic during rewrite of value %s\n", curv.LongString())
-			// TODO(khr): print source location also
-		}
-	}()
-	config := f.Config
-	for {
-		change := false
-		for _, b := range f.Blocks {
-			if b.Control != nil && b.Control.Op == OpCopy {
-				for b.Control.Op == OpCopy {
-					b.SetControl(b.Control.Args[0])
-				}
-			}
-			curb = b
-			if rb(b, config) {
-				change = true
-			}
-			curb = nil
-			for _, v := range b.Values {
-				change = phielimValue(v) || change
-
-				// Eliminate copy inputs.
-				// If any copy input becomes unused, mark it
-				// as invalid and discard its argument. Repeat
-				// recursively on the discarded argument.
-				// This phase helps remove phantom "dead copy" uses
-				// of a value so that an x.Uses==1 rule condition
-				// fires reliably.
-				for i, a := range v.Args {
-					if a.Op != OpCopy {
-						continue
-					}
-					v.SetArg(i, copySource(a))
-					change = true
-					for a.Uses == 0 {
-						b := a.Args[0]
-						a.reset(OpInvalid)
-						a = b
-					}
-				}
-
-				// apply rewrite function
-				curv = v
-				if rv(v, config) {
-					change = true
-				}
-				curv = nil
-			}
-		}
-		if !change {
-			break
-		}
-	}
-	// remove clobbered values
-	for _, b := range f.Blocks {
-		j := 0
-		for i, v := range b.Values {
-			if v.Op == OpInvalid {
-				f.freeValue(v)
-				continue
-			}
-			if i != j {
-				b.Values[j] = v
-			}
-			j++
-		}
-		if j != len(b.Values) {
-			tail := b.Values[j:]
-			for j := range tail {
-				tail[j] = nil
-			}
-			b.Values = b.Values[:j]
-		}
-	}
-}
-
-// Common functions called from rewriting rules
-
-func is64BitFloat(t Type) bool {
-	return t.Size() == 8 && t.IsFloat()
-}
-
-func is32BitFloat(t Type) bool {
-	return t.Size() == 4 && t.IsFloat()
-}
-
-func is64BitInt(t Type) bool {
-	return t.Size() == 8 && t.IsInteger()
-}
-
-func is32BitInt(t Type) bool {
-	return t.Size() == 4 && t.IsInteger()
-}
-
-func is16BitInt(t Type) bool {
-	return t.Size() == 2 && t.IsInteger()
-}
-
-func is8BitInt(t Type) bool {
-	return t.Size() == 1 && t.IsInteger()
-}
-
-func isPtr(t Type) bool {
-	return t.IsPtrShaped()
-}
-
-func isSigned(t Type) bool {
-	return t.IsSigned()
-}
-
-func typeSize(t Type) int64 {
-	return t.Size()
-}
-
-// mergeSym merges two symbolic offsets. There is no real merging of
-// offsets, we just pick the non-nil one.
-func mergeSym(x, y interface{}) interface{} {
-	if x == nil {
-		return y
-	}
-	if y == nil {
-		return x
-	}
-	panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
-}
-func canMergeSym(x, y interface{}) bool {
-	return x == nil || y == nil
-}
-
-// canMergeLoad reports whether the load can be merged into target without
-// invalidating the schedule.
-func canMergeLoad(target, load *Value) bool {
-	if target.Block.ID != load.Block.ID {
-		// If the load is in a different block do not merge it.
-		return false
-	}
-	mem := load.Args[len(load.Args)-1]
-
-	// We need the load's memory arg to still be alive at target. That
-	// can't be the case if one of target's args depends on a memory
-	// state that is a successor of load's memory arg.
-	//
-	// For example, it would be invalid to merge load into target in
-	// the following situation because newmem has killed oldmem
-	// before target is reached:
-	//     load = read ... oldmem
-	//   newmem = write ... oldmem
-	//     arg0 = read ... newmem
-	//   target = add arg0 load
-	//
-	// If the argument comes from a different block then we can exclude
-	// it immediately because it must dominate load (which is in the
-	// same block as target).
-	var args []*Value
-	for _, a := range target.Args {
-		if a != load && a.Block.ID == target.Block.ID {
-			args = append(args, a)
-		}
-	}
-
-	// memPreds contains memory states known to be predecessors of load's
-	// memory state. It is lazily initialized.
-	var memPreds map[*Value]bool
-search:
-	for i := 0; len(args) > 0; i++ {
-		const limit = 100
-		if i >= limit {
-			// Give up if we have done a lot of iterations.
-			return false
-		}
-		v := args[len(args)-1]
-		args = args[:len(args)-1]
-		if target.Block.ID != v.Block.ID {
-			// Since target and load are in the same block
-			// we can stop searching when we leave the block.
-			continue search
-		}
-		if v.Op == OpPhi {
-			// A Phi implies we have reached the top of the block.
-			continue search
-		}
-		if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
-			// We could handle this situation however it is likely
-			// to be very rare.
-			return false
-		}
-		if v.Type.IsMemory() {
-			if memPreds == nil {
-				// Initialise a map containing memory states
-				// known to be predecessors of load's memory
-				// state.
-				memPreds = make(map[*Value]bool)
-				m := mem
-				const limit = 50
-				for i := 0; i < limit; i++ {
-					if m.Op == OpPhi {
-						break
-					}
-					if m.Block.ID != target.Block.ID {
-						break
-					}
-					if !m.Type.IsMemory() {
-						break
-					}
-					memPreds[m] = true
-					if len(m.Args) == 0 {
-						break
-					}
-					m = m.Args[len(m.Args)-1]
-				}
-			}
-
-			// We can merge if v is a predecessor of mem.
-			//
-			// For example, we can merge load into target in the
-			// following scenario:
-			//      x = read ... v
-			//    mem = write ... v
-			//   load = read ... mem
-			// target = add x load
-			if memPreds[v] {
-				continue search
-			}
-			return false
-		}
-		if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
-			// If v takes mem as an input then we know mem
-			// is valid at this point.
-			continue search
-		}
-		for _, a := range v.Args {
-			if target.Block.ID == a.Block.ID {
-				args = append(args, a)
-			}
-		}
-	}
-	return true
-}
-
-// isArg returns whether s is an arg symbol
-func isArg(s interface{}) bool {
-	_, ok := s.(*ArgSymbol)
-	return ok
-}
-
-// isAuto returns whether s is an auto symbol
-func isAuto(s interface{}) bool {
-	_, ok := s.(*AutoSymbol)
-	return ok
-}
-
-// isSameSym returns whether sym is the same as the given named symbol
-func isSameSym(sym interface{}, name string) bool {
-	s, ok := sym.(fmt.Stringer)
-	return ok && s.String() == name
-}
-
-// nlz returns the number of leading zeros.
-func nlz(x int64) int64 {
-	// log2(0) == -1, so nlz(0) == 64
-	return 63 - log2(x)
-}
-
-// ntz returns the number of trailing zeros.
-func ntz(x int64) int64 {
-	return 64 - nlz(^x&(x-1))
-}
-
-// nlo returns the number of leading ones.
-func nlo(x int64) int64 {
-	return nlz(^x)
-}
-
-// nto returns the number of trailing ones.
-func nto(x int64) int64 {
-	return ntz(^x)
-}
-
-// log2 returns logarithm in base 2 of uint64(n), with log2(0) = -1.
-// Rounds down.
-func log2(n int64) (l int64) {
-	l = -1
-	x := uint64(n)
-	for ; x >= 0x8000; x >>= 16 {
-		l += 16
-	}
-	if x >= 0x80 {
-		x >>= 8
-		l += 8
-	}
-	if x >= 0x8 {
-		x >>= 4
-		l += 4
-	}
-	if x >= 0x2 {
-		x >>= 2
-		l += 2
-	}
-	if x >= 0x1 {
-		l++
-	}
-	return
-}
-
-// isPowerOfTwo reports whether n is a power of 2.
-func isPowerOfTwo(n int64) bool {
-	return n > 0 && n&(n-1) == 0
-}
-
-// is32Bit reports whether n can be represented as a signed 32 bit integer.
-func is32Bit(n int64) bool {
-	return n == int64(int32(n))
-}
-
-// is16Bit reports whether n can be represented as a signed 16 bit integer.
-func is16Bit(n int64) bool {
-	return n == int64(int16(n))
-}
-
-// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
-func isU16Bit(n int64) bool {
-	return n == int64(uint16(n))
-}
-
-// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
-func isU32Bit(n int64) bool {
-	return n == int64(uint32(n))
-}
-
-// is20Bit reports whether n can be represented as a signed 20 bit integer.
-func is20Bit(n int64) bool {
-	return -(1<<19) <= n && n < (1<<19)
-}
-
-// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
-func b2i(b bool) int64 {
-	if b {
-		return 1
-	}
-	return 0
-}
-
-// i2f is used in rules for converting from an AuxInt to a float.
-func i2f(i int64) float64 {
-	return math.Float64frombits(uint64(i))
-}
-
-// i2f32 is used in rules for converting from an AuxInt to a float32.
-func i2f32(i int64) float32 {
-	return float32(math.Float64frombits(uint64(i)))
-}
-
-// f2i is used in the rules for storing a float in AuxInt.
-func f2i(f float64) int64 {
-	return int64(math.Float64bits(f))
-}
-
-// uaddOvf returns true if unsigned a+b would overflow.
-func uaddOvf(a, b int64) bool {
-	return uint64(a)+uint64(b) < uint64(a)
-}
-
-// isSamePtr reports whether p1 and p2 point to the same address.
-func isSamePtr(p1, p2 *Value) bool {
-	if p1 == p2 {
-		return true
-	}
-	if p1.Op != p2.Op {
-		return false
-	}
-	switch p1.Op {
-	case OpOffPtr:
-		return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
-	case OpAddr:
-		// OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
-		// Checking for value equality only works after [z]cse has run.
-		return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
-	case OpAddPtr:
-		return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
-	}
-	return false
-}
-
-// moveSize returns the number of bytes an aligned MOV instruction moves
-func moveSize(align int64, c *Config) int64 {
-	switch {
-	case align%8 == 0 && c.IntSize == 8:
-		return 8
-	case align%4 == 0:
-		return 4
-	case align%2 == 0:
-		return 2
-	}
-	return 1
-}
-
-// mergePoint finds a block among a's blocks which dominates b and is itself
-// dominated by all of a's blocks. Returns nil if it can't find one.
-// Might return nil even if one does exist.
-func mergePoint(b *Block, a ...*Value) *Block {
-	// Walk backward from b looking for one of the a's blocks.
-
-	// Max distance
-	d := 100
-
-	for d > 0 {
-		for _, x := range a {
-			if b == x.Block {
-				goto found
-			}
-		}
-		if len(b.Preds) > 1 {
-			// Don't know which way to go back. Abort.
-			return nil
-		}
-		b = b.Preds[0].b
-		d--
-	}
-	return nil // too far away
-found:
-	// At this point, r is the first value in a that we find by walking backwards.
-	// if we return anything, r will be it.
-	r := b
-
-	// Keep going, counting the other a's that we find. They must all dominate r.
-	na := 0
-	for d > 0 {
-		for _, x := range a {
-			if b == x.Block {
-				na++
-			}
-		}
-		if na == len(a) {
-			// Found all of a in a backwards walk. We can return r.
-			return r
-		}
-		if len(b.Preds) > 1 {
-			return nil
-		}
-		b = b.Preds[0].b
-		d--
-
-	}
-	return nil // too far away
-}
-
-// clobber invalidates v.  Returns true.
-// clobber is used by rewrite rules to:
-//   A) make sure v is really dead and never used again.
-//   B) decrement use counts of v's args.
-func clobber(v *Value) bool {
-	v.reset(OpInvalid)
-	// Note: leave v.Block intact.  The Block field is used after clobber.
-	return true
-}
-
-// noteRule is an easy way to track if a rule is matched when writing
-// new ones.  Make the rule of interest also conditional on
-//     noteRule("note to self: rule of interest matched")
-// and that message will print when the rule matches.
-func noteRule(s string) bool {
-	fmt.Println(s)
-	return true
-}
-
-// warnRule generates a compiler debug output with string s when
-// cond is true and the rule is fired.
-func warnRule(cond bool, v *Value, s string) bool {
-	if cond {
-		v.Block.Func.Config.Warnl(v.Line, s)
-	}
-	return true
-}
-
-// logRule logs the use of the rule s. This will only be enabled if
-// rewrite rules were generated with the -log option, see gen/rulegen.go.
-func logRule(s string) {
-	if ruleFile == nil {
-		// Open a log file to write log to. We open in append
-		// mode because all.bash runs the compiler lots of times,
-		// and we want the concatenation of all of those logs.
-		// This means, of course, that users need to rm the old log
-		// to get fresh data.
-		// TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
-		w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
-			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
-		if err != nil {
-			panic(err)
-		}
-		ruleFile = w
-	}
-	_, err := fmt.Fprintf(ruleFile, "rewrite %s\n", s)
-	if err != nil {
-		panic(err)
-	}
-}
-
-var ruleFile *os.File
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite386.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite386.go
deleted file mode 100644
index 1720606..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite386.go
+++ /dev/null
@@ -1,14790 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewrite386.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewrite386.go:1
-// autogenerated from gen/386.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValue386(v *Value, config *Config) bool {
-	switch v.Op {
-	case Op386ADCL:
-		return rewriteValue386_Op386ADCL(v, config)
-	case Op386ADDL:
-		return rewriteValue386_Op386ADDL(v, config)
-	case Op386ADDLcarry:
-		return rewriteValue386_Op386ADDLcarry(v, config)
-	case Op386ADDLconst:
-		return rewriteValue386_Op386ADDLconst(v, config)
-	case Op386ANDL:
-		return rewriteValue386_Op386ANDL(v, config)
-	case Op386ANDLconst:
-		return rewriteValue386_Op386ANDLconst(v, config)
-	case Op386CMPB:
-		return rewriteValue386_Op386CMPB(v, config)
-	case Op386CMPBconst:
-		return rewriteValue386_Op386CMPBconst(v, config)
-	case Op386CMPL:
-		return rewriteValue386_Op386CMPL(v, config)
-	case Op386CMPLconst:
-		return rewriteValue386_Op386CMPLconst(v, config)
-	case Op386CMPW:
-		return rewriteValue386_Op386CMPW(v, config)
-	case Op386CMPWconst:
-		return rewriteValue386_Op386CMPWconst(v, config)
-	case Op386LEAL:
-		return rewriteValue386_Op386LEAL(v, config)
-	case Op386LEAL1:
-		return rewriteValue386_Op386LEAL1(v, config)
-	case Op386LEAL2:
-		return rewriteValue386_Op386LEAL2(v, config)
-	case Op386LEAL4:
-		return rewriteValue386_Op386LEAL4(v, config)
-	case Op386LEAL8:
-		return rewriteValue386_Op386LEAL8(v, config)
-	case Op386MOVBLSX:
-		return rewriteValue386_Op386MOVBLSX(v, config)
-	case Op386MOVBLSXload:
-		return rewriteValue386_Op386MOVBLSXload(v, config)
-	case Op386MOVBLZX:
-		return rewriteValue386_Op386MOVBLZX(v, config)
-	case Op386MOVBload:
-		return rewriteValue386_Op386MOVBload(v, config)
-	case Op386MOVBloadidx1:
-		return rewriteValue386_Op386MOVBloadidx1(v, config)
-	case Op386MOVBstore:
-		return rewriteValue386_Op386MOVBstore(v, config)
-	case Op386MOVBstoreconst:
-		return rewriteValue386_Op386MOVBstoreconst(v, config)
-	case Op386MOVBstoreconstidx1:
-		return rewriteValue386_Op386MOVBstoreconstidx1(v, config)
-	case Op386MOVBstoreidx1:
-		return rewriteValue386_Op386MOVBstoreidx1(v, config)
-	case Op386MOVLload:
-		return rewriteValue386_Op386MOVLload(v, config)
-	case Op386MOVLloadidx1:
-		return rewriteValue386_Op386MOVLloadidx1(v, config)
-	case Op386MOVLloadidx4:
-		return rewriteValue386_Op386MOVLloadidx4(v, config)
-	case Op386MOVLstore:
-		return rewriteValue386_Op386MOVLstore(v, config)
-	case Op386MOVLstoreconst:
-		return rewriteValue386_Op386MOVLstoreconst(v, config)
-	case Op386MOVLstoreconstidx1:
-		return rewriteValue386_Op386MOVLstoreconstidx1(v, config)
-	case Op386MOVLstoreconstidx4:
-		return rewriteValue386_Op386MOVLstoreconstidx4(v, config)
-	case Op386MOVLstoreidx1:
-		return rewriteValue386_Op386MOVLstoreidx1(v, config)
-	case Op386MOVLstoreidx4:
-		return rewriteValue386_Op386MOVLstoreidx4(v, config)
-	case Op386MOVSDconst:
-		return rewriteValue386_Op386MOVSDconst(v, config)
-	case Op386MOVSDload:
-		return rewriteValue386_Op386MOVSDload(v, config)
-	case Op386MOVSDloadidx1:
-		return rewriteValue386_Op386MOVSDloadidx1(v, config)
-	case Op386MOVSDloadidx8:
-		return rewriteValue386_Op386MOVSDloadidx8(v, config)
-	case Op386MOVSDstore:
-		return rewriteValue386_Op386MOVSDstore(v, config)
-	case Op386MOVSDstoreidx1:
-		return rewriteValue386_Op386MOVSDstoreidx1(v, config)
-	case Op386MOVSDstoreidx8:
-		return rewriteValue386_Op386MOVSDstoreidx8(v, config)
-	case Op386MOVSSconst:
-		return rewriteValue386_Op386MOVSSconst(v, config)
-	case Op386MOVSSload:
-		return rewriteValue386_Op386MOVSSload(v, config)
-	case Op386MOVSSloadidx1:
-		return rewriteValue386_Op386MOVSSloadidx1(v, config)
-	case Op386MOVSSloadidx4:
-		return rewriteValue386_Op386MOVSSloadidx4(v, config)
-	case Op386MOVSSstore:
-		return rewriteValue386_Op386MOVSSstore(v, config)
-	case Op386MOVSSstoreidx1:
-		return rewriteValue386_Op386MOVSSstoreidx1(v, config)
-	case Op386MOVSSstoreidx4:
-		return rewriteValue386_Op386MOVSSstoreidx4(v, config)
-	case Op386MOVWLSX:
-		return rewriteValue386_Op386MOVWLSX(v, config)
-	case Op386MOVWLSXload:
-		return rewriteValue386_Op386MOVWLSXload(v, config)
-	case Op386MOVWLZX:
-		return rewriteValue386_Op386MOVWLZX(v, config)
-	case Op386MOVWload:
-		return rewriteValue386_Op386MOVWload(v, config)
-	case Op386MOVWloadidx1:
-		return rewriteValue386_Op386MOVWloadidx1(v, config)
-	case Op386MOVWloadidx2:
-		return rewriteValue386_Op386MOVWloadidx2(v, config)
-	case Op386MOVWstore:
-		return rewriteValue386_Op386MOVWstore(v, config)
-	case Op386MOVWstoreconst:
-		return rewriteValue386_Op386MOVWstoreconst(v, config)
-	case Op386MOVWstoreconstidx1:
-		return rewriteValue386_Op386MOVWstoreconstidx1(v, config)
-	case Op386MOVWstoreconstidx2:
-		return rewriteValue386_Op386MOVWstoreconstidx2(v, config)
-	case Op386MOVWstoreidx1:
-		return rewriteValue386_Op386MOVWstoreidx1(v, config)
-	case Op386MOVWstoreidx2:
-		return rewriteValue386_Op386MOVWstoreidx2(v, config)
-	case Op386MULL:
-		return rewriteValue386_Op386MULL(v, config)
-	case Op386MULLconst:
-		return rewriteValue386_Op386MULLconst(v, config)
-	case Op386NEGL:
-		return rewriteValue386_Op386NEGL(v, config)
-	case Op386NOTL:
-		return rewriteValue386_Op386NOTL(v, config)
-	case Op386ORL:
-		return rewriteValue386_Op386ORL(v, config)
-	case Op386ORLconst:
-		return rewriteValue386_Op386ORLconst(v, config)
-	case Op386ROLBconst:
-		return rewriteValue386_Op386ROLBconst(v, config)
-	case Op386ROLLconst:
-		return rewriteValue386_Op386ROLLconst(v, config)
-	case Op386ROLWconst:
-		return rewriteValue386_Op386ROLWconst(v, config)
-	case Op386SARB:
-		return rewriteValue386_Op386SARB(v, config)
-	case Op386SARBconst:
-		return rewriteValue386_Op386SARBconst(v, config)
-	case Op386SARL:
-		return rewriteValue386_Op386SARL(v, config)
-	case Op386SARLconst:
-		return rewriteValue386_Op386SARLconst(v, config)
-	case Op386SARW:
-		return rewriteValue386_Op386SARW(v, config)
-	case Op386SARWconst:
-		return rewriteValue386_Op386SARWconst(v, config)
-	case Op386SBBL:
-		return rewriteValue386_Op386SBBL(v, config)
-	case Op386SBBLcarrymask:
-		return rewriteValue386_Op386SBBLcarrymask(v, config)
-	case Op386SETA:
-		return rewriteValue386_Op386SETA(v, config)
-	case Op386SETAE:
-		return rewriteValue386_Op386SETAE(v, config)
-	case Op386SETB:
-		return rewriteValue386_Op386SETB(v, config)
-	case Op386SETBE:
-		return rewriteValue386_Op386SETBE(v, config)
-	case Op386SETEQ:
-		return rewriteValue386_Op386SETEQ(v, config)
-	case Op386SETG:
-		return rewriteValue386_Op386SETG(v, config)
-	case Op386SETGE:
-		return rewriteValue386_Op386SETGE(v, config)
-	case Op386SETL:
-		return rewriteValue386_Op386SETL(v, config)
-	case Op386SETLE:
-		return rewriteValue386_Op386SETLE(v, config)
-	case Op386SETNE:
-		return rewriteValue386_Op386SETNE(v, config)
-	case Op386SHLL:
-		return rewriteValue386_Op386SHLL(v, config)
-	case Op386SHRB:
-		return rewriteValue386_Op386SHRB(v, config)
-	case Op386SHRL:
-		return rewriteValue386_Op386SHRL(v, config)
-	case Op386SHRW:
-		return rewriteValue386_Op386SHRW(v, config)
-	case Op386SUBL:
-		return rewriteValue386_Op386SUBL(v, config)
-	case Op386SUBLcarry:
-		return rewriteValue386_Op386SUBLcarry(v, config)
-	case Op386SUBLconst:
-		return rewriteValue386_Op386SUBLconst(v, config)
-	case Op386XORL:
-		return rewriteValue386_Op386XORL(v, config)
-	case Op386XORLconst:
-		return rewriteValue386_Op386XORLconst(v, config)
-	case OpAdd16:
-		return rewriteValue386_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValue386_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValue386_OpAdd32F(v, config)
-	case OpAdd32carry:
-		return rewriteValue386_OpAdd32carry(v, config)
-	case OpAdd32withcarry:
-		return rewriteValue386_OpAdd32withcarry(v, config)
-	case OpAdd64F:
-		return rewriteValue386_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValue386_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValue386_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValue386_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValue386_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValue386_OpAnd32(v, config)
-	case OpAnd8:
-		return rewriteValue386_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValue386_OpAndB(v, config)
-	case OpBswap32:
-		return rewriteValue386_OpBswap32(v, config)
-	case OpClosureCall:
-		return rewriteValue386_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValue386_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValue386_OpCom32(v, config)
-	case OpCom8:
-		return rewriteValue386_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValue386_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValue386_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValue386_OpConst32F(v, config)
-	case OpConst64F:
-		return rewriteValue386_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValue386_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValue386_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValue386_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValue386_OpConvert(v, config)
-	case OpCvt32Fto32:
-		return rewriteValue386_OpCvt32Fto32(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValue386_OpCvt32Fto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValue386_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValue386_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValue386_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValue386_OpCvt64Fto32F(v, config)
-	case OpDeferCall:
-		return rewriteValue386_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValue386_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValue386_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValue386_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValue386_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValue386_OpDiv32u(v, config)
-	case OpDiv64F:
-		return rewriteValue386_OpDiv64F(v, config)
-	case OpDiv8:
-		return rewriteValue386_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValue386_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValue386_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValue386_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValue386_OpEq32F(v, config)
-	case OpEq64F:
-		return rewriteValue386_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValue386_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValue386_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValue386_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValue386_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValue386_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValue386_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValue386_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValue386_OpGeq32U(v, config)
-	case OpGeq64F:
-		return rewriteValue386_OpGeq64F(v, config)
-	case OpGeq8:
-		return rewriteValue386_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValue386_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValue386_OpGetClosurePtr(v, config)
-	case OpGetG:
-		return rewriteValue386_OpGetG(v, config)
-	case OpGoCall:
-		return rewriteValue386_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValue386_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValue386_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValue386_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValue386_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValue386_OpGreater32U(v, config)
-	case OpGreater64F:
-		return rewriteValue386_OpGreater64F(v, config)
-	case OpGreater8:
-		return rewriteValue386_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValue386_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValue386_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValue386_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValue386_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValue386_OpHmul32u(v, config)
-	case OpHmul8:
-		return rewriteValue386_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValue386_OpHmul8u(v, config)
-	case OpInterCall:
-		return rewriteValue386_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValue386_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValue386_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValue386_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValue386_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValue386_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValue386_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValue386_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValue386_OpLeq32U(v, config)
-	case OpLeq64F:
-		return rewriteValue386_OpLeq64F(v, config)
-	case OpLeq8:
-		return rewriteValue386_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValue386_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValue386_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValue386_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValue386_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValue386_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValue386_OpLess32U(v, config)
-	case OpLess64F:
-		return rewriteValue386_OpLess64F(v, config)
-	case OpLess8:
-		return rewriteValue386_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValue386_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValue386_OpLoad(v, config)
-	case OpLrot16:
-		return rewriteValue386_OpLrot16(v, config)
-	case OpLrot32:
-		return rewriteValue386_OpLrot32(v, config)
-	case OpLrot8:
-		return rewriteValue386_OpLrot8(v, config)
-	case OpLsh16x16:
-		return rewriteValue386_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValue386_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValue386_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValue386_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValue386_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValue386_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValue386_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValue386_OpLsh32x8(v, config)
-	case OpLsh8x16:
-		return rewriteValue386_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValue386_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValue386_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValue386_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValue386_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValue386_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValue386_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValue386_OpMod32u(v, config)
-	case OpMod8:
-		return rewriteValue386_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValue386_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValue386_OpMove(v, config)
-	case OpMul16:
-		return rewriteValue386_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValue386_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValue386_OpMul32F(v, config)
-	case OpMul32uhilo:
-		return rewriteValue386_OpMul32uhilo(v, config)
-	case OpMul64F:
-		return rewriteValue386_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValue386_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValue386_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValue386_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValue386_OpNeg32F(v, config)
-	case OpNeg64F:
-		return rewriteValue386_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValue386_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValue386_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValue386_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValue386_OpNeq32F(v, config)
-	case OpNeq64F:
-		return rewriteValue386_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValue386_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValue386_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValue386_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValue386_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValue386_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValue386_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValue386_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValue386_OpOr32(v, config)
-	case OpOr8:
-		return rewriteValue386_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValue386_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValue386_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValue386_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValue386_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValue386_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValue386_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValue386_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValue386_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValue386_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValue386_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValue386_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValue386_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValue386_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValue386_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValue386_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValue386_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValue386_OpRsh32x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValue386_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValue386_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValue386_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValue386_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValue386_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValue386_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValue386_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValue386_OpRsh8x8(v, config)
-	case OpSignExt16to32:
-		return rewriteValue386_OpSignExt16to32(v, config)
-	case OpSignExt8to16:
-		return rewriteValue386_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValue386_OpSignExt8to32(v, config)
-	case OpSignmask:
-		return rewriteValue386_OpSignmask(v, config)
-	case OpSlicemask:
-		return rewriteValue386_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValue386_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValue386_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValue386_OpStore(v, config)
-	case OpSub16:
-		return rewriteValue386_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValue386_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValue386_OpSub32F(v, config)
-	case OpSub32carry:
-		return rewriteValue386_OpSub32carry(v, config)
-	case OpSub32withcarry:
-		return rewriteValue386_OpSub32withcarry(v, config)
-	case OpSub64F:
-		return rewriteValue386_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValue386_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValue386_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValue386_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValue386_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValue386_OpTrunc32to8(v, config)
-	case OpXor16:
-		return rewriteValue386_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValue386_OpXor32(v, config)
-	case OpXor8:
-		return rewriteValue386_OpXor8(v, config)
-	case OpZero:
-		return rewriteValue386_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValue386_OpZeroExt16to32(v, config)
-	case OpZeroExt8to16:
-		return rewriteValue386_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValue386_OpZeroExt8to32(v, config)
-	case OpZeromask:
-		return rewriteValue386_OpZeromask(v, config)
-	}
-	return false
-}
-func rewriteValue386_Op386ADCL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCL x (MOVLconst [c]) f)
-	// cond:
-	// result: (ADCLconst [c] x f)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		f := v.Args[2]
-		v.reset(Op386ADCLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(f)
-		return true
-	}
-	// match: (ADCL (MOVLconst [c]) x f)
-	// cond:
-	// result: (ADCLconst [c] x f)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		f := v.Args[2]
-		v.reset(Op386ADCLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(f)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ADDL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDL x (MOVLconst [c]))
-	// cond:
-	// result: (ADDLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386ADDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDL (MOVLconst [c]) x)
-	// cond:
-	// result: (ADDLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386ADDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDL x (SHLLconst [3] y))
-	// cond:
-	// result: (LEAL8 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (SHLLconst [2] y))
-	// cond:
-	// result: (LEAL4 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL4)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (SHLLconst [1] y))
-	// cond:
-	// result: (LEAL2 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (ADDL y y))
-	// cond:
-	// result: (LEAL2 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDL {
-			break
-		}
-		y := v_1.Args[0]
-		if y != v_1.Args[1] {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (ADDL x y))
-	// cond:
-	// result: (LEAL2 y x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDL {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(Op386LEAL2)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDL x (ADDL y x))
-	// cond:
-	// result: (LEAL2 y x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDL {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDL (ADDLconst [c] x) y)
-	// cond:
-	// result: (LEAL1 [c] x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(Op386LEAL1)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (ADDLconst [c] y))
-	// cond:
-	// result: (LEAL1 [c] x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(Op386LEAL1)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (LEAL [c] {s} y))
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (LEAL1 [c] {s} x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386LEAL {
-			break
-		}
-		c := v_1.AuxInt
-		s := v_1.Aux
-		y := v_1.Args[0]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL (LEAL [c] {s} x) y)
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (LEAL1 [c] {s} x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		c := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDL x (NEGL y))
-	// cond:
-	// result: (SUBL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386NEGL {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ADDLcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDLcarry x (MOVLconst [c]))
-	// cond:
-	// result: (ADDLconstcarry [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386ADDLconstcarry)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDLcarry (MOVLconst [c]) x)
-	// cond:
-	// result: (ADDLconstcarry [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386ADDLconstcarry)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ADDLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDLconst [c] (ADDL x y))
-	// cond:
-	// result: (LEAL1 [c] x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(Op386LEAL1)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDLconst [c] (LEAL [d] {s} x))
-	// cond: is32Bit(c+d)
-	// result: (LEAL [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(Op386LEAL)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAL1 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAL2 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL2 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAL4 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(Op386LEAL4)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAL8 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL8 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(Op386LEAL8)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDLconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [int64(int32(c+d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = int64(int32(c + d))
-		return true
-	}
-	// match: (ADDLconst [c] (ADDLconst [d] x))
-	// cond:
-	// result: (ADDLconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ADDLconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ANDL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDL x (MOVLconst [c]))
-	// cond:
-	// result: (ANDLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDL (MOVLconst [c]) x)
-	// cond:
-	// result: (ANDLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDL x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ANDLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDLconst [c] (ANDLconst [d] x))
-	// cond:
-	// result: (ANDLconst [c & d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDLconst [c] _)
-	// cond: int32(c)==0
-	// result: (MOVLconst [0])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDLconst [c] x)
-	// cond: int32(c)==-1
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = c & d
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386CMPB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPB x (MOVLconst [c]))
-	// cond:
-	// result: (CMPBconst x [int64(int8(c))])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386CMPBconst)
-		v.AuxInt = int64(int8(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPB (MOVLconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386InvertFlags)
-		v0 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v0.AuxInt = int64(int8(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386CMPBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)==int8(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) == int8(y)) {
-			break
-		}
-		v.reset(Op386FlagEQ)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
-			break
-		}
-		v.reset(Op386FlagLT_UGT)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
-			break
-		}
-		v.reset(Op386FlagGT_ULT)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
-			break
-		}
-		v.reset(Op386FlagGT_UGT)
-		return true
-	}
-	// match: (CMPBconst (ANDLconst _ [m]) [n])
-	// cond: 0 <= int8(m) && int8(m) < int8(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int8(m) && int8(m) < int8(n)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPBconst (ANDL x y) [0])
-	// cond:
-	// result: (TESTB x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(Op386TESTB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPBconst (ANDLconst [c] x) [0])
-	// cond:
-	// result: (TESTBconst [int64(int8(c))] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386TESTBconst)
-		v.AuxInt = int64(int8(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPBconst x [0])
-	// cond:
-	// result: (TESTB x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386TESTB)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386CMPL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPL x (MOVLconst [c]))
-	// cond:
-	// result: (CMPLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386CMPLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPL (MOVLconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPLconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386InvertFlags)
-		v0 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386CMPLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(Op386FlagEQ)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(Op386FlagLT_UGT)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(Op386FlagGT_ULT)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(Op386FlagGT_UGT)
-		return true
-	}
-	// match: (CMPLconst (SHRLconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386SHRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPLconst (ANDLconst _ [m]) [n])
-	// cond: 0 <= int32(m) && int32(m) < int32(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int32(m) && int32(m) < int32(n)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPLconst (ANDL x y) [0])
-	// cond:
-	// result: (TESTL x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(Op386TESTL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPLconst (ANDLconst [c] x) [0])
-	// cond:
-	// result: (TESTLconst [c] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386TESTLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPLconst x [0])
-	// cond:
-	// result: (TESTL x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386TESTL)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386CMPW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPW x (MOVLconst [c]))
-	// cond:
-	// result: (CMPWconst x [int64(int16(c))])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386CMPWconst)
-		v.AuxInt = int64(int16(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPW (MOVLconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386InvertFlags)
-		v0 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v0.AuxInt = int64(int16(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386CMPWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)==int16(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) == int16(y)) {
-			break
-		}
-		v.reset(Op386FlagEQ)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
-			break
-		}
-		v.reset(Op386FlagLT_UGT)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
-			break
-		}
-		v.reset(Op386FlagGT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
-			break
-		}
-		v.reset(Op386FlagGT_UGT)
-		return true
-	}
-	// match: (CMPWconst (ANDLconst _ [m]) [n])
-	// cond: 0 <= int16(m) && int16(m) < int16(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int16(m) && int16(m) < int16(n)) {
-			break
-		}
-		v.reset(Op386FlagLT_ULT)
-		return true
-	}
-	// match: (CMPWconst (ANDL x y) [0])
-	// cond:
-	// result: (TESTW x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(Op386TESTW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPWconst (ANDLconst [c] x) [0])
-	// cond:
-	// result: (TESTWconst [int64(int16(c))] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386TESTWconst)
-		v.AuxInt = int64(int16(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPWconst x [0])
-	// cond:
-	// result: (TESTW x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386TESTW)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386LEAL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAL [c] {s} (ADDLconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (LEAL [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(Op386LEAL)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAL [c] {s} (ADDL x y))
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (LEAL1 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386LEAL)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL2 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386LEAL4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386LEAL8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386LEAL1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAL1 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL1 [c] {s} x (ADDLconst [d] y))
-	// cond: is32Bit(c+d)   && y.Op != OpSB
-	// result: (LEAL1 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
-	// cond:
-	// result: (LEAL2 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL2)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL1 [c] {s} (SHLLconst [1] x) y)
-	// cond:
-	// result: (LEAL2 [c] {s} y x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386SHLLconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(Op386LEAL2)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
-	// cond:
-	// result: (LEAL4 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL4)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL1 [c] {s} (SHLLconst [2] x) y)
-	// cond:
-	// result: (LEAL4 [c] {s} y x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386SHLLconst {
-			break
-		}
-		if v_0.AuxInt != 2 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(Op386LEAL4)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
-	// cond:
-	// result: (LEAL8 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL1 [c] {s} (SHLLconst [3] x) y)
-	// cond:
-	// result: (LEAL8 [c] {s} y x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386SHLLconst {
-			break
-		}
-		if v_0.AuxInt != 3 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(Op386LEAL8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
-	// result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386LEAL {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		y := v_1.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386LEAL2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAL2 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
-	// cond: is32Bit(c+2*d) && y.Op != OpSB
-	// result: (LEAL2 [c+2*d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+2*d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AuxInt = c + 2*d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
-	// cond:
-	// result: (LEAL4 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL4)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
-	// cond:
-	// result: (LEAL8 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386LEAL4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAL4 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL4)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
-	// cond: is32Bit(c+4*d) && y.Op != OpSB
-	// result: (LEAL4 [c+4*d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+4*d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL4)
-		v.AuxInt = c + 4*d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
-	// cond:
-	// result: (LEAL8 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386LEAL8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386LEAL8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAL8 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL8)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
-	// cond: is32Bit(c+8*d) && y.Op != OpSB
-	// result: (LEAL8 [c+8*d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+8*d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL8)
-		v.AuxInt = c + 8*d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(Op386LEAL8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBLSX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVBload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVBLSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBLSX (ANDLconst [c] x))
-	// cond: c & 0x80 == 0
-	// result: (ANDLconst [c & 0x7f] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x80 == 0) {
-			break
-		}
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c & 0x7f
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBLSXload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVBLSXload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBLZX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVBload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVBload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVBloadidx1 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVBloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBLZX (ANDLconst [c] x))
-	// cond:
-	// result: (ANDLconst [c & 0xff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c & 0xff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off] {sym} (ADDL ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVBloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVBLSX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVBLZX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
-	// cond: validOff(off)
-	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off)) {
-			break
-		}
-		v.reset(Op386MOVBstoreconst)
-		v.AuxInt = makeValAndOff(int64(int8(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-1] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHRLconst {
-			break
-		}
-		if v_1.AuxInt != 8 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != Op386MOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVWstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-1] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHRLconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != Op386MOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != Op386SHRLconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVWstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(Op386MOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)   && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVBstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem)
-	// cond:
-	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(Op386MOVBstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != Op386MOVBstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconst)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVBstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
-	// cond:
-	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVBstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != Op386MOVBstoreconstidx1 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconstidx1)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(i)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVBstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVBstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != Op386SHRLconst {
-			break
-		}
-		if v_2.AuxInt != 8 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != Op386MOVBstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != Op386SHRLconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != Op386MOVBstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != Op386SHRLconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVLload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVLloadidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload [off] {sym} (ADDL ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
-	// cond:
-	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVLloadidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVLloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLloadidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVLloadidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVLloadidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
-	// cond: validOff(off)
-	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = makeValAndOff(int64(int32(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)   && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconstidx4)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(Op386MOVLstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVLstoreconstidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVLstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVLstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLstoreconstidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVLstoreconstidx4)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVLstoreconstidx4)
-		v.AuxInt = ValAndOff(x).add(4 * c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem)
-	// cond:
-	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVLstoreidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVLstoreidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVLstoreidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVLstoreidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDconst [c])
-	// cond: config.ctxt.Flag_shared
-	// result: (MOVSDconst2 (MOVSDconst1 [c]))
-	for {
-		c := v.AuxInt
-		if !(config.ctxt.Flag_shared) {
-			break
-		}
-		v.reset(Op386MOVSDconst2)
-		v0 := b.NewValue0(v.Line, Op386MOVSDconst1, config.fe.TypeUInt32())
-		v0.AuxInt = c
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSDloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSDloadidx8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off] {sym} (ADDL ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVSDloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVSDloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVSDloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDloadidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVSDloadidx8)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVSDloadidx8)
-		v.AuxInt = c + 8*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVSDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVSDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSDstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSDstoreidx8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVSDstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSDstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSDstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSDstoreidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSDstoreidx8)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSDstoreidx8)
-		v.AuxInt = c + 8*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSconst [c])
-	// cond: config.ctxt.Flag_shared
-	// result: (MOVSSconst2 (MOVSSconst1 [c]))
-	for {
-		c := v.AuxInt
-		if !(config.ctxt.Flag_shared) {
-			break
-		}
-		v.reset(Op386MOVSSconst2)
-		v0 := b.NewValue0(v.Line, Op386MOVSSconst1, config.fe.TypeUInt32())
-		v0.AuxInt = c
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSSload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSSloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSSloadidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off] {sym} (ADDL ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVSSloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVSSloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVSSloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSloadidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVSSloadidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVSSloadidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVSSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVSSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSSstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVSSstoreidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVSSstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSSstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSSstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVSSstoreidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSSstoreidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVSSstoreidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWLSX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVWload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVWLSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWLSX (ANDLconst [c] x))
-	// cond: c & 0x8000 == 0
-	// result: (ANDLconst [c & 0x7fff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x8000 == 0) {
-			break
-		}
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c & 0x7fff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWLSXload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVWLSXload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWLZX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVWload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVWload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVWloadidx1 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != Op386MOVWloadidx2 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, Op386MOVWloadidx2, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWLZX (ANDLconst [c] x))
-	// cond:
-	// result: (ANDLconst [c & 0xffff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ANDLconst)
-		v.AuxInt = c & 0xffff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVWstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL2 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVWloadidx2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off] {sym} (ADDL ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
-	// cond:
-	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWloadidx2)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWloadidx2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVWloadidx2)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem)
-	// cond:
-	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWloadidx2)
-		v.AuxInt = c + 2*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVWLSX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVWLZX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(Op386MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
-	// cond: validOff(off)
-	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconst)
-		v.AuxInt = makeValAndOff(int64(int16(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)   && (base.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL2 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVWstoreidx2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstore [i-2] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHRLconst {
-			break
-		}
-		if v_1.AuxInt != 16 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != Op386MOVWstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstore [i-2] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHRLconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != Op386MOVWstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != Op386SHRLconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)   && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386LEAL2 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(Op386MOVWstoreconstidx2)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDL {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(Op386MOVWstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != Op386MOVWstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstoreconstidx2)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != Op386MOVWstoreconstidx1 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconstidx1)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(i)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWstoreconstidx2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstoreconstidx2)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstoreconstidx2)
-		v.AuxInt = ValAndOff(x).add(2 * c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst <i.Type> [1] i) mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != Op386MOVWstoreconstidx2 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreconstidx1)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, i.Type)
-		v0.AuxInt = 1
-		v0.AddArg(i)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386SHLLconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVWstoreidx2)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVWstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != Op386SHRLconst {
-			break
-		}
-		if v_2.AuxInt != 16 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != Op386MOVWstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != Op386SHRLconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != Op386MOVWstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != Op386SHRLconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MOVWstoreidx2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVWstoreidx2)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ADDLconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(Op386MOVWstoreidx2)
-		v.AuxInt = c + 2*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != Op386SHRLconst {
-			break
-		}
-		if v_2.AuxInt != 16 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != Op386MOVWstoreidx2 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
-		v0.AuxInt = 1
-		v0.AddArg(idx)
-		v.AddArg(v0)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst <idx.Type> [1] idx) w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != Op386SHRLconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != Op386MOVWstoreidx2 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != Op386SHRLconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(Op386MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, idx.Type)
-		v0.AuxInt = 1
-		v0.AddArg(idx)
-		v.AddArg(v0)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MULL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULL x (MOVLconst [c]))
-	// cond:
-	// result: (MULLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386MULLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULL (MOVLconst [c]) x)
-	// cond:
-	// result: (MULLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386MULLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386MULLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULLconst [c] (MULLconst [d] x))
-	// cond:
-	// result: (MULLconst [int64(int32(c * d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MULLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386MULLconst)
-		v.AuxInt = int64(int32(c * d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [-1] x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386NEGL)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [0] _)
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MULLconst [1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [3] x)
-	// cond:
-	// result: (LEAL2 x x)
-	for {
-		if v.AuxInt != 3 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL2)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [5] x)
-	// cond:
-	// result: (LEAL4 x x)
-	for {
-		if v.AuxInt != 5 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL4)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [7] x)
-	// cond:
-	// result: (LEAL8 (NEGL <v.Type> x) x)
-	for {
-		if v.AuxInt != 7 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL8)
-		v0 := b.NewValue0(v.Line, Op386NEGL, v.Type)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [9] x)
-	// cond:
-	// result: (LEAL8 x x)
-	for {
-		if v.AuxInt != 9 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL8)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [11] x)
-	// cond:
-	// result: (LEAL2 x (LEAL4 <v.Type> x x))
-	for {
-		if v.AuxInt != 11 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL2)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [13] x)
-	// cond:
-	// result: (LEAL4 x (LEAL2 <v.Type> x x))
-	for {
-		if v.AuxInt != 13 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL4)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [21] x)
-	// cond:
-	// result: (LEAL4 x (LEAL4 <v.Type> x x))
-	for {
-		if v.AuxInt != 21 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL4)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [25] x)
-	// cond:
-	// result: (LEAL8 x (LEAL2 <v.Type> x x))
-	for {
-		if v.AuxInt != 25 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL8)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [37] x)
-	// cond:
-	// result: (LEAL4 x (LEAL8 <v.Type> x x))
-	for {
-		if v.AuxInt != 37 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL4)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [41] x)
-	// cond:
-	// result: (LEAL8 x (LEAL4 <v.Type> x x))
-	for {
-		if v.AuxInt != 41 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL8)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [73] x)
-	// cond:
-	// result: (LEAL8 x (LEAL8 <v.Type> x x))
-	for {
-		if v.AuxInt != 73 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(Op386LEAL8)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: isPowerOfTwo(c)
-	// result: (SHLLconst [log2(c)] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: isPowerOfTwo(c+1) && c >= 15
-	// result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c+1) && c >= 15) {
-			break
-		}
-		v.reset(Op386SUBL)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-		v0.AuxInt = log2(c + 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: isPowerOfTwo(c-1) && c >= 17
-	// result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-1) && c >= 17) {
-			break
-		}
-		v.reset(Op386LEAL1)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-		v0.AuxInt = log2(c - 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: isPowerOfTwo(c-2) && c >= 34
-	// result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-2) && c >= 34) {
-			break
-		}
-		v.reset(Op386LEAL2)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-		v0.AuxInt = log2(c - 2)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: isPowerOfTwo(c-4) && c >= 68
-	// result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-4) && c >= 68) {
-			break
-		}
-		v.reset(Op386LEAL4)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-		v0.AuxInt = log2(c - 4)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: isPowerOfTwo(c-8) && c >= 136
-	// result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-8) && c >= 136) {
-			break
-		}
-		v.reset(Op386LEAL8)
-		v0 := b.NewValue0(v.Line, Op386SHLLconst, v.Type)
-		v0.AuxInt = log2(c - 8)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3)
-	// result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, Op386LEAL2, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5)
-	// result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, Op386LEAL4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [c] x)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9)
-	// result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, Op386LEAL8, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [int64(int32(c*d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = int64(int32(c * d))
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386NEGL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEGL (MOVLconst [c]))
-	// cond:
-	// result: (MOVLconst [int64(int32(-c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = int64(int32(-c))
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386NOTL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOTL (MOVLconst [c]))
-	// cond:
-	// result: (MOVLconst [^c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = ^c
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ORL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORL x (MOVLconst [c]))
-	// cond:
-	// result: (ORLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386ORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORL (MOVLconst [c]) x)
-	// cond:
-	// result: (ORLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386ORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORL x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
-	for {
-		x0 := v.Args[0]
-		if x0.Op != Op386MOVBload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := v.Args[1]
-		if s0.Op != Op386SHLLconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != Op386MOVBload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL o0:(ORL                        x0:(MOVWload [i]   {s} p mem)     s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem)))     s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != Op386ORL {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != Op386MOVWload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o0.Args[1]
-		if s0.Op != Op386SHLLconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != Op386MOVBload {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != Op386SHLLconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != Op386MOVBload {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
-	for {
-		x0 := v.Args[0]
-		if x0.Op != Op386MOVBloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := v.Args[1]
-		if s0.Op != Op386SHLLconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != Op386MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, Op386MOVWloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL o0:(ORL                        x0:(MOVWloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem)))     s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != Op386ORL {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != Op386MOVWloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o0.Args[1]
-		if s0.Op != Op386SHLLconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != Op386MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != Op386SHLLconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != Op386MOVBloadidx1 {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, Op386MOVLloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ORLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORLconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORLconst [c] _)
-	// cond: int32(c)==-1
-	// result: (MOVLconst [-1])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = c | d
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ROLBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLBconst [c] (ROLBconst [d] x))
-	// cond:
-	// result: (ROLBconst [(c+d)& 7] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ROLBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ROLBconst)
-		v.AuxInt = (c + d) & 7
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLBconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ROLLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLLconst [c] (ROLLconst [d] x))
-	// cond:
-	// result: (ROLLconst [(c+d)&31] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ROLLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ROLLconst)
-		v.AuxInt = (c + d) & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLLconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386ROLWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLWconst [c] (ROLWconst [d] x))
-	// cond:
-	// result: (ROLWconst [(c+d)&15] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386ROLWconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386ROLWconst)
-		v.AuxInt = (c + d) & 15
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLWconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SARB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARB x (MOVLconst [c]))
-	// cond:
-	// result: (SARBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SARBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARB x (MOVLconst [c]))
-	// cond:
-	// result: (SARBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SARBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SARBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARBconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SARL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARL x (MOVLconst [c]))
-	// cond:
-	// result: (SARLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SARLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARL x (MOVLconst [c]))
-	// cond:
-	// result: (SARLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SARLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARL x (ANDLconst [31] y))
-	// cond:
-	// result: (SARL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ANDLconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386SARL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SARLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SARW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARW x (MOVLconst [c]))
-	// cond:
-	// result: (SARWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SARWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARW x (MOVLconst [c]))
-	// cond:
-	// result: (SARWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SARWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SARWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARWconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SBBL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBBL x (MOVLconst [c]) f)
-	// cond:
-	// result: (SBBLconst [c] x f)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		f := v.Args[2]
-		v.reset(Op386SBBLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(f)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SBBLcarrymask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBBLcarrymask (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SBBLcarrymask (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SBBLcarrymask (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SBBLcarrymask (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SBBLcarrymask (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETA (InvertFlags x))
-	// cond:
-	// result: (SETB x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETB)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETA (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETA (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETA (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETA (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETA (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETAE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETAE (InvertFlags x))
-	// cond:
-	// result: (SETBE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETBE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETAE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETAE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETAE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETAE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETAE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETB (InvertFlags x))
-	// cond:
-	// result: (SETA x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETA)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETB (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETB (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETB (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETB (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETB (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETBE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETBE (InvertFlags x))
-	// cond:
-	// result: (SETAE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETAE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETBE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETBE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETBE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETBE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETBE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETEQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETEQ (InvertFlags x))
-	// cond:
-	// result: (SETEQ x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETEQ)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETEQ (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETEQ (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETEQ (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETEQ (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETEQ (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETG (InvertFlags x))
-	// cond:
-	// result: (SETL x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETL)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETG (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETG (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETG (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETG (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETG (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETGE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETGE (InvertFlags x))
-	// cond:
-	// result: (SETLE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETLE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETGE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETGE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETGE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETGE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETGE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETL (InvertFlags x))
-	// cond:
-	// result: (SETG x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETL (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETL (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETL (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETL (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETL (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETLE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETLE (InvertFlags x))
-	// cond:
-	// result: (SETGE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETGE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETLE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETLE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETLE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETLE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETLE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SETNE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETNE (InvertFlags x))
-	// cond:
-	// result: (SETNE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(Op386SETNE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETNE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagEQ {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETNE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETNE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagLT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETNE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_ULT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETNE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386FlagGT_UGT {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SHLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHLL x (MOVLconst [c]))
-	// cond:
-	// result: (SHLLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLL x (MOVLconst [c]))
-	// cond:
-	// result: (SHLLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLL x (ANDLconst [31] y))
-	// cond:
-	// result: (SHLL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ANDLconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386SHLL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SHRB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRB x (MOVLconst [c]))
-	// cond:
-	// result: (SHRBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHRBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRB x (MOVLconst [c]))
-	// cond:
-	// result: (SHRBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHRBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SHRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRL x (MOVLconst [c]))
-	// cond:
-	// result: (SHRLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRL x (MOVLconst [c]))
-	// cond:
-	// result: (SHRLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRL x (ANDLconst [31] y))
-	// cond:
-	// result: (SHRL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386ANDLconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(Op386SHRL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SHRW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRW x (MOVLconst [c]))
-	// cond:
-	// result: (SHRWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHRWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRW x (MOVLconst [c]))
-	// cond:
-	// result: (SHRWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SHRWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SUBL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBL x (MOVLconst [c]))
-	// cond:
-	// result: (SUBLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SUBLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBL (MOVLconst [c]) x)
-	// cond:
-	// result: (NEGL (SUBLconst <v.Type> x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386NEGL)
-		v0 := b.NewValue0(v.Line, Op386SUBLconst, v.Type)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBL x x)
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SUBLcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBLcarry x (MOVLconst [c]))
-	// cond:
-	// result: (SUBLconstcarry [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386SUBLconstcarry)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386SUBLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBLconst [c] x)
-	// cond: int32(c) == 0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBLconst [c] x)
-	// cond:
-	// result: (ADDLconst [int64(int32(-c))] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(Op386ADDLconst)
-		v.AuxInt = int64(int32(-c))
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_Op386XORL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORL x (MOVLconst [c]))
-	// cond:
-	// result: (XORLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != Op386MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(Op386XORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORL (MOVLconst [c]) x)
-	// cond:
-	// result: (XORLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(Op386XORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORL x x)
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_Op386XORLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORLconst [c] (XORLconst [d] x))
-	// cond:
-	// result: (XORLconst [c ^ d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386XORLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(Op386XORLconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORLconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != Op386MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16  x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32  x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (ADDSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAdd32carry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32carry x y)
-	// cond:
-	// result: (ADDLcarry x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDLcarry)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAdd32withcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32withcarry x y c)
-	// cond:
-	// result: (ADCL x y c)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		c := v.Args[2]
-		v.reset(Op386ADCL)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(c)
-		return true
-	}
-}
-func rewriteValue386_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (ADDSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8   x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (LEAL {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(Op386LEAL)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValue386_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8  x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpBswap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap32 x)
-	// cond:
-	// result: (BSWAPL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386BSWAPL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386CALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (NOTL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386NOTL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (NOTL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386NOTL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8  x)
-	// cond:
-	// result: (NOTL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386NOTL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16  [val])
-	// cond:
-	// result: (MOVLconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValue386_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32  [val])
-	// cond:
-	// result: (MOVLconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValue386_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (MOVSSconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(Op386MOVSSconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValue386_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (MOVSDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(Op386MOVSDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValue386_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8   [val])
-	// cond:
-	// result: (MOVLconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValue386_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVLconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(Op386MOVLconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValue386_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v.reset(Op386MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValue386_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert <t> x mem)
-	// cond:
-	// result: (MOVLconvert <t> x mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(Op386MOVLconvert)
-		v.Type = t
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (CVTTSS2SL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386CVTTSS2SL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (CVTSS2SD x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386CVTSS2SD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (CVTSL2SS x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386CVTSL2SS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (CVTSL2SD x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386CVTSL2SD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (CVTTSD2SL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386CVTTSD2SL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (CVTSD2SS x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386CVTSD2SS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(Op386CALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16  x y)
-	// cond:
-	// result: (DIVW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (DIVWU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32  x y)
-	// cond:
-	// result: (DIVL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (DIVSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (DIVLU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVLU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (DIVSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8   x y)
-	// cond:
-	// result: (DIVW  (SignExt8to16 x) (SignExt8to16 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u  x y)
-	// cond:
-	// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386DIVWU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16  x y)
-	// cond:
-	// result: (SETEQ (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQ)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32  x y)
-	// cond:
-	// result: (SETEQ (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQ)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (SETEQF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (SETEQF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8   x y)
-	// cond:
-	// result: (SETEQ (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQ)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB   x y)
-	// cond:
-	// result: (SETEQ (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQ)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (SETEQ (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETEQ)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16  x y)
-	// cond:
-	// result: (SETGE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGE)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (SETAE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETAE)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32  x y)
-	// cond:
-	// result: (SETGE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (SETGEF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGEF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (SETAE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETAE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (SETGEF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGEF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8   x y)
-	// cond:
-	// result: (SETGE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGE)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U  x y)
-	// cond:
-	// result: (SETAE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETAE)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(Op386LoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValue386_OpGetG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetG mem)
-	// cond:
-	// result: (LoweredGetG mem)
-	for {
-		mem := v.Args[0]
-		v.reset(Op386LoweredGetG)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(Op386CALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16  x y)
-	// cond:
-	// result: (SETG (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETG)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (SETA (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETA)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32  x y)
-	// cond:
-	// result: (SETG (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETG)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (SETGF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (SETA (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETA)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (SETGF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8   x y)
-	// cond:
-	// result: (SETG (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETG)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U  x y)
-	// cond:
-	// result: (SETA (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETA)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16  x y)
-	// cond:
-	// result: (HMULW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386HMULW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (HMULWU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386HMULWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32  x y)
-	// cond:
-	// result: (HMULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386HMULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (HMULLU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386HMULLU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8   x y)
-	// cond:
-	// result: (HMULB  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386HMULB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u  x y)
-	// cond:
-	// result: (HMULBU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386HMULBU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(Op386CALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (SETB (CMPL idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(Op386SETB)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil p)
-	// cond:
-	// result: (SETNE (TESTL p p))
-	for {
-		p := v.Args[0]
-		v.reset(Op386SETNE)
-		v0 := b.NewValue0(v.Line, Op386TESTL, TypeFlags)
-		v0.AddArg(p)
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (SETBE (CMPL idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(Op386SETBE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16  x y)
-	// cond:
-	// result: (SETLE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETLE)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (SETBE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETBE)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32  x y)
-	// cond:
-	// result: (SETLE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETLE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (SETGEF (UCOMISS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGEF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (SETBE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETBE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (SETGEF (UCOMISD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGEF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8   x y)
-	// cond:
-	// result: (SETLE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETLE)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U  x y)
-	// cond:
-	// result: (SETBE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETBE)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16  x y)
-	// cond:
-	// result: (SETL (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETL)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (SETB (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETB)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32  x y)
-	// cond:
-	// result: (SETL (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETL)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (SETGF (UCOMISS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (SETB (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETB)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (SETGF (UCOMISD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETGF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8   x y)
-	// cond:
-	// result: (SETL (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETL)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U  x y)
-	// cond:
-	// result: (SETB (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETB)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) || isPtr(t))
-	// result: (MOVLload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(Op386MOVLload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is16BitInt(t)
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t)) {
-			break
-		}
-		v.reset(Op386MOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (t.IsBoolean() || is8BitInt(t))
-	// result: (MOVBload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean() || is8BitInt(t)) {
-			break
-		}
-		v.reset(Op386MOVBload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (MOVSSload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(Op386MOVSSload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (MOVSDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(Op386MOVSDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpLrot16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot16 <t> x [c])
-	// cond:
-	// result: (ROLWconst <t> [c&15] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(Op386ROLWconst)
-		v.Type = t
-		v.AuxInt = c & 15
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpLrot32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot32 <t> x [c])
-	// cond:
-	// result: (ROLLconst <t> [c&31] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(Op386ROLLconst)
-		v.Type = t
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpLrot8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot8  <t> x [c])
-	// cond:
-	// result: (ROLBconst <t> [c&7] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(Op386ROLBconst)
-		v.Type = t
-		v.AuxInt = c & 7
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SHLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64 _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (Const16 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8  <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SHLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64 _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8  <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SHLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(Op386SHLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64 _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (Const8 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8  <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16  x y)
-	// cond:
-	// result: (MODW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MODW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (MODWU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MODWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32  x y)
-	// cond:
-	// result: (MODL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MODL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (MODLU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MODLU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8   x y)
-	// cond:
-	// result: (MODW  (SignExt8to16 x) (SignExt8to16 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MODW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u  x y)
-	// cond:
-	// result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MODWU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(Op386MOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVWstore dst (MOVWload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(Op386MOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVLstore dst (MOVLload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBload [2] src mem) 		(MOVWstore dst (MOVWload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(Op386MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstore [4] dst (MOVBload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(Op386MOVBstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVBload, config.fe.TypeUInt8())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVWstore [4] dst (MOVWload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(Op386MOVWstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVWload, config.fe.TypeUInt16())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVLstore [3] dst (MOVLload [3] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AuxInt = 3
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v0.AuxInt = 3
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVLstore [4] dst (MOVLload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(Op386MOVLstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0
-	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] 		(ADDLconst <dst.Type> dst [SizeAndAlign(s).Size()%4]) 		(ADDLconst <src.Type> src [SizeAndAlign(s).Size()%4]) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size()%4 != 0) {
-			break
-		}
-		v.reset(OpMove)
-		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
-		v0 := b.NewValue0(v.Line, Op386ADDLconst, dst.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() % 4
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386ADDLconst, src.Type)
-		v1.AuxInt = SizeAndAlign(s).Size() % 4
-		v1.AddArg(src)
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, Op386MOVLstore, TypeMem)
-		v2.AddArg(dst)
-		v3 := b.NewValue0(v.Line, Op386MOVLload, config.fe.TypeUInt32())
-		v3.AddArg(src)
-		v3.AddArg(mem)
-		v2.AddArg(v3)
-		v2.AddArg(mem)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 	&& !config.noDuffDevice
-	// result: (DUFFCOPY [10*(128-SizeAndAlign(s).Size()/4)] dst src mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(Op386DUFFCOPY)
-		v.AuxInt = 10 * (128 - SizeAndAlign(s).Size()/4)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: (SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0
-	// result: (REPMOVSL dst src (MOVLconst [SizeAndAlign(s).Size()/4]) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !((SizeAndAlign(s).Size() > 4*128 || config.noDuffDevice) && SizeAndAlign(s).Size()%4 == 0) {
-			break
-		}
-		v.reset(Op386REPMOVSL)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
-		v0.AuxInt = SizeAndAlign(s).Size() / 4
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16  x y)
-	// cond:
-	// result: (MULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32  x y)
-	// cond:
-	// result: (MULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (MULSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MULSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMul32uhilo(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32uhilo x y)
-	// cond:
-	// result: (MULLQU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MULLQU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (MULSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MULSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8   x y)
-	// cond:
-	// result: (MULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386MULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16  x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386NEGL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32  x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386NEGL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond: !config.use387
-	// result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-	for {
-		x := v.Args[0]
-		if !(!config.use387) {
-			break
-		}
-		v.reset(Op386PXOR)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386MOVSSconst, config.Frontend().TypeFloat32())
-		v0.AuxInt = f2i(math.Copysign(0, -1))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Neg32F x)
-	// cond: config.use387
-	// result: (FCHS x)
-	for {
-		x := v.Args[0]
-		if !(config.use387) {
-			break
-		}
-		v.reset(Op386FCHS)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond: !config.use387
-	// result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
-	for {
-		x := v.Args[0]
-		if !(!config.use387) {
-			break
-		}
-		v.reset(Op386PXOR)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386MOVSDconst, config.Frontend().TypeFloat64())
-		v0.AuxInt = f2i(math.Copysign(0, -1))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Neg64F x)
-	// cond: config.use387
-	// result: (FCHS x)
-	for {
-		x := v.Args[0]
-		if !(config.use387) {
-			break
-		}
-		v.reset(Op386FCHS)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8   x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386NEGL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16  x y)
-	// cond:
-	// result: (SETNE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNE)
-		v0 := b.NewValue0(v.Line, Op386CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32  x y)
-	// cond:
-	// result: (SETNE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (SETNEF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNEF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (SETNEF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNEF)
-		v0 := b.NewValue0(v.Line, Op386UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8   x y)
-	// cond:
-	// result: (SETNE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNE)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB   x y)
-	// cond:
-	// result: (SETNE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNE)
-		v0 := b.NewValue0(v.Line, Op386CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (SETNE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SETNE)
-		v0 := b.NewValue0(v.Line, Op386CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(Op386LoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORLconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386XORLconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADDLconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(Op386ADDLconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValue386_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8  x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SHRWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(Op386SHRWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh16Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (Const16 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SARWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(Op386SARWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (SARWconst x [15])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(Op386SARWconst)
-		v.AuxInt = 15
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8  <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SHRLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(Op386SHRLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SARLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(Op386SARLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (SARLconst x [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(Op386SARLconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8  <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SHRBconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(Op386SHRBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh8Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (Const8 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386ANDL)
-		v0 := b.NewValue0(v.Line, Op386SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValue386_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPWconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SARBconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(Op386SARBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh8x64 x (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (SARBconst x [7])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(Op386SARBconst)
-		v.AuxInt = 7
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8  <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, Op386ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, Op386NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, Op386SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, Op386CMPBconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVWLSX x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386MOVWLSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16  x)
-	// cond:
-	// result: (MOVBLSX x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386MOVBLSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32  x)
-	// cond:
-	// result: (MOVBLSX x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386MOVBLSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpSignmask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Signmask x)
-	// cond:
-	// result: (SARLconst x [31])
-	for {
-		x := v.Args[0]
-		v.reset(Op386SARLconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (XORLconst [-1] (SARLconst <t> (SUBLconst <t> x [1]) [31]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(Op386XORLconst)
-		v.AuxInt = -1
-		v0 := b.NewValue0(v.Line, Op386SARLconst, t)
-		v0.AuxInt = 31
-		v1 := b.NewValue0(v.Line, Op386SUBLconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValue386_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (SQRTSD x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386SQRTSD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(Op386CALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValue386_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (MOVSDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(Op386MOVSDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (MOVSSstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(Op386MOVSSstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond:
-	// result: (MOVLstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVLstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(Op386MOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16  x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32  x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (SUBSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpSub32carry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32carry x y)
-	// cond:
-	// result: (SUBLcarry x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBLcarry)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpSub32withcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32withcarry x y c)
-	// cond:
-	// result: (SBBL x y c)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		c := v.Args[2]
-		v.reset(Op386SBBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(c)
-		return true
-	}
-}
-func rewriteValue386_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (SUBSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8   x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386XORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386XORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8  x y)
-	// cond:
-	// result: (XORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(Op386XORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValue386_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(Op386MOVBstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVWstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(Op386MOVWstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVLstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr 		(MOVWstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(Op386MOVBstoreconst)
-		v.AuxInt = makeValAndOff(0, 2)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVWstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(Op386MOVBstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(Op386MOVWstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = makeValAndOff(0, 3)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size()%4 != 0 && SizeAndAlign(s).Size() > 4
-	// result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%4] (ADDLconst destptr [SizeAndAlign(s).Size()%4]) 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%4 != 0 && SizeAndAlign(s).Size() > 4) {
-			break
-		}
-		v.reset(OpZero)
-		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%4
-		v0 := b.NewValue0(v.Line, Op386ADDLconst, config.fe.TypeUInt32())
-		v0.AuxInt = SizeAndAlign(s).Size() % 4
-		v0.AddArg(destptr)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(destptr)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVLstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 12
-	// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr 		(MOVLstoreconst [makeValAndOff(0,4)] destptr 			(MOVLstoreconst [0] destptr mem)))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 12) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = makeValAndOff(0, 8)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v0.AuxInt = makeValAndOff(0, 4)
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(destptr)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 16
-	// result: (MOVLstoreconst [makeValAndOff(0,12)] destptr 		(MOVLstoreconst [makeValAndOff(0,8)] destptr 			(MOVLstoreconst [makeValAndOff(0,4)] destptr 				(MOVLstoreconst [0] destptr mem))))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 16) {
-			break
-		}
-		v.reset(Op386MOVLstoreconst)
-		v.AuxInt = makeValAndOff(0, 12)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v0.AuxInt = makeValAndOff(0, 8)
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v1.AuxInt = makeValAndOff(0, 4)
-		v1.AddArg(destptr)
-		v2 := b.NewValue0(v.Line, Op386MOVLstoreconst, TypeMem)
-		v2.AuxInt = 0
-		v2.AddArg(destptr)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() > 16   && SizeAndAlign(s).Size() <= 4*128   && SizeAndAlign(s).Size()%4 == 0   && !config.noDuffDevice
-	// result: (DUFFZERO [1*(128-SizeAndAlign(s).Size()/4)] destptr (MOVLconst [0]) mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size() <= 4*128 && SizeAndAlign(s).Size()%4 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(Op386DUFFZERO)
-		v.AuxInt = 1 * (128 - SizeAndAlign(s).Size()/4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: (SizeAndAlign(s).Size() > 4*128 || (config.noDuffDevice && SizeAndAlign(s).Size() > 16))   && SizeAndAlign(s).Size()%4 == 0
-	// result: (REPSTOSL destptr (MOVLconst [SizeAndAlign(s).Size()/4]) (MOVLconst [0]) mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !((SizeAndAlign(s).Size() > 4*128 || (config.noDuffDevice && SizeAndAlign(s).Size() > 16)) && SizeAndAlign(s).Size()%4 == 0) {
-			break
-		}
-		v.reset(Op386REPSTOSL)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
-		v0.AuxInt = SizeAndAlign(s).Size() / 4
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, Op386MOVLconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValue386_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVWLZX x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386MOVWLZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16  x)
-	// cond:
-	// result: (MOVBLZX x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386MOVBLZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32  x)
-	// cond:
-	// result: (MOVBLZX x)
-	for {
-		x := v.Args[0]
-		v.reset(Op386MOVBLZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValue386_OpZeromask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zeromask <t> x)
-	// cond:
-	// result: (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(Op386XORLconst)
-		v.AuxInt = -1
-		v0 := b.NewValue0(v.Line, Op386SBBLcarrymask, t)
-		v1 := b.NewValue0(v.Line, Op386CMPLconst, TypeFlags)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteBlock386(b *Block, config *Config) bool {
-	switch b.Kind {
-	case Block386EQ:
-		// match: (EQ (InvertFlags cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case Block386GE:
-		// match: (GE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case Block386GT:
-		// match: (GT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockIf:
-		// match: (If (SETL  cmp) yes no)
-		// cond:
-		// result: (LT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETL {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETLE cmp) yes no)
-		// cond:
-		// result: (LE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETLE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETG  cmp) yes no)
-		// cond:
-		// result: (GT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETG {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETGE cmp) yes no)
-		// cond:
-		// result: (GE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETGE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETEQ cmp) yes no)
-		// cond:
-		// result: (EQ  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETEQ {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETNE cmp) yes no)
-		// cond:
-		// result: (NE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETNE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETB  cmp) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETB {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETBE cmp) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETBE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETA  cmp) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETA {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETAE cmp) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETAE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETGF  cmp) yes no)
-		// cond:
-		// result: (UGT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETGF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETGEF cmp) yes no)
-		// cond:
-		// result: (UGE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETGEF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETEQF cmp) yes no)
-		// cond:
-		// result: (EQF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETEQF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386EQF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETNEF cmp) yes no)
-		// cond:
-		// result: (NEF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386SETNEF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386NEF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE (TESTB cond cond) yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386NE
-			v0 := b.NewValue0(v.Line, Op386TESTB, TypeFlags)
-			v0.AddArg(cond)
-			v0.AddArg(cond)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-	case Block386LE:
-		// match: (LE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case Block386LT:
-		// match: (LT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case Block386NE:
-		// match: (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no)
-		// cond:
-		// result: (LT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETL {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETL {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
-		// cond:
-		// result: (LE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETLE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETLE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no)
-		// cond:
-		// result: (GT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETG {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETG {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
-		// cond:
-		// result: (GE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETGE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETGE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
-		// cond:
-		// result: (EQ  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETEQ {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETEQ {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
-		// cond:
-		// result: (NE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETNE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETNE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETB {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETB {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETBE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETBE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETA {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETA {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETAE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETAE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no)
-		// cond:
-		// result: (UGT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETGF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETGF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
-		// cond:
-		// result: (UGE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETGEF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETGEF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
-		// cond:
-		// result: (EQF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETEQF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETEQF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386EQF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
-		// cond:
-		// result: (NEF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != Op386SETNEF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != Op386SETNEF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386NEF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case Block386UGE:
-		// match: (UGE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case Block386UGT:
-		// match: (UGT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case Block386ULE:
-		// match: (ULE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case Block386ULT:
-		// match: (ULT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = Block386UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != Op386FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteAMD64.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteAMD64.go
deleted file mode 100644
index d016986..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteAMD64.go
+++ /dev/null
@@ -1,21833 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteAMD64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteAMD64.go:1
-// autogenerated from gen/AMD64.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueAMD64(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAMD64ADDL:
-		return rewriteValueAMD64_OpAMD64ADDL(v, config)
-	case OpAMD64ADDLconst:
-		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
-	case OpAMD64ADDQ:
-		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
-	case OpAMD64ADDQconst:
-		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
-	case OpAMD64ANDL:
-		return rewriteValueAMD64_OpAMD64ANDL(v, config)
-	case OpAMD64ANDLconst:
-		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
-	case OpAMD64ANDQ:
-		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
-	case OpAMD64ANDQconst:
-		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
-	case OpAMD64CMPB:
-		return rewriteValueAMD64_OpAMD64CMPB(v, config)
-	case OpAMD64CMPBconst:
-		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
-	case OpAMD64CMPL:
-		return rewriteValueAMD64_OpAMD64CMPL(v, config)
-	case OpAMD64CMPLconst:
-		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
-	case OpAMD64CMPQ:
-		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
-	case OpAMD64CMPQconst:
-		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
-	case OpAMD64CMPW:
-		return rewriteValueAMD64_OpAMD64CMPW(v, config)
-	case OpAMD64CMPWconst:
-		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
-	case OpAMD64CMPXCHGLlock:
-		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v, config)
-	case OpAMD64CMPXCHGQlock:
-		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v, config)
-	case OpAMD64LEAL:
-		return rewriteValueAMD64_OpAMD64LEAL(v, config)
-	case OpAMD64LEAQ:
-		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
-	case OpAMD64LEAQ1:
-		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
-	case OpAMD64LEAQ2:
-		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
-	case OpAMD64LEAQ4:
-		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
-	case OpAMD64LEAQ8:
-		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
-	case OpAMD64MOVBQSX:
-		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
-	case OpAMD64MOVBQSXload:
-		return rewriteValueAMD64_OpAMD64MOVBQSXload(v, config)
-	case OpAMD64MOVBQZX:
-		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
-	case OpAMD64MOVBload:
-		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
-	case OpAMD64MOVBloadidx1:
-		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
-	case OpAMD64MOVBstore:
-		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
-	case OpAMD64MOVBstoreconst:
-		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
-	case OpAMD64MOVBstoreconstidx1:
-		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
-	case OpAMD64MOVBstoreidx1:
-		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
-	case OpAMD64MOVLQSX:
-		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
-	case OpAMD64MOVLQSXload:
-		return rewriteValueAMD64_OpAMD64MOVLQSXload(v, config)
-	case OpAMD64MOVLQZX:
-		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
-	case OpAMD64MOVLatomicload:
-		return rewriteValueAMD64_OpAMD64MOVLatomicload(v, config)
-	case OpAMD64MOVLload:
-		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
-	case OpAMD64MOVLloadidx1:
-		return rewriteValueAMD64_OpAMD64MOVLloadidx1(v, config)
-	case OpAMD64MOVLloadidx4:
-		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
-	case OpAMD64MOVLstore:
-		return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
-	case OpAMD64MOVLstoreconst:
-		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
-	case OpAMD64MOVLstoreconstidx1:
-		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v, config)
-	case OpAMD64MOVLstoreconstidx4:
-		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
-	case OpAMD64MOVLstoreidx1:
-		return rewriteValueAMD64_OpAMD64MOVLstoreidx1(v, config)
-	case OpAMD64MOVLstoreidx4:
-		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
-	case OpAMD64MOVOload:
-		return rewriteValueAMD64_OpAMD64MOVOload(v, config)
-	case OpAMD64MOVOstore:
-		return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
-	case OpAMD64MOVQatomicload:
-		return rewriteValueAMD64_OpAMD64MOVQatomicload(v, config)
-	case OpAMD64MOVQload:
-		return rewriteValueAMD64_OpAMD64MOVQload(v, config)
-	case OpAMD64MOVQloadidx1:
-		return rewriteValueAMD64_OpAMD64MOVQloadidx1(v, config)
-	case OpAMD64MOVQloadidx8:
-		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
-	case OpAMD64MOVQstore:
-		return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
-	case OpAMD64MOVQstoreconst:
-		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
-	case OpAMD64MOVQstoreconstidx1:
-		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v, config)
-	case OpAMD64MOVQstoreconstidx8:
-		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
-	case OpAMD64MOVQstoreidx1:
-		return rewriteValueAMD64_OpAMD64MOVQstoreidx1(v, config)
-	case OpAMD64MOVQstoreidx8:
-		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
-	case OpAMD64MOVSDload:
-		return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
-	case OpAMD64MOVSDloadidx1:
-		return rewriteValueAMD64_OpAMD64MOVSDloadidx1(v, config)
-	case OpAMD64MOVSDloadidx8:
-		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
-	case OpAMD64MOVSDstore:
-		return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
-	case OpAMD64MOVSDstoreidx1:
-		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v, config)
-	case OpAMD64MOVSDstoreidx8:
-		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
-	case OpAMD64MOVSSload:
-		return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
-	case OpAMD64MOVSSloadidx1:
-		return rewriteValueAMD64_OpAMD64MOVSSloadidx1(v, config)
-	case OpAMD64MOVSSloadidx4:
-		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
-	case OpAMD64MOVSSstore:
-		return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
-	case OpAMD64MOVSSstoreidx1:
-		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v, config)
-	case OpAMD64MOVSSstoreidx4:
-		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
-	case OpAMD64MOVWQSX:
-		return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
-	case OpAMD64MOVWQSXload:
-		return rewriteValueAMD64_OpAMD64MOVWQSXload(v, config)
-	case OpAMD64MOVWQZX:
-		return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
-	case OpAMD64MOVWload:
-		return rewriteValueAMD64_OpAMD64MOVWload(v, config)
-	case OpAMD64MOVWloadidx1:
-		return rewriteValueAMD64_OpAMD64MOVWloadidx1(v, config)
-	case OpAMD64MOVWloadidx2:
-		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
-	case OpAMD64MOVWstore:
-		return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
-	case OpAMD64MOVWstoreconst:
-		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
-	case OpAMD64MOVWstoreconstidx1:
-		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v, config)
-	case OpAMD64MOVWstoreconstidx2:
-		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
-	case OpAMD64MOVWstoreidx1:
-		return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config)
-	case OpAMD64MOVWstoreidx2:
-		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
-	case OpAMD64MULL:
-		return rewriteValueAMD64_OpAMD64MULL(v, config)
-	case OpAMD64MULLconst:
-		return rewriteValueAMD64_OpAMD64MULLconst(v, config)
-	case OpAMD64MULQ:
-		return rewriteValueAMD64_OpAMD64MULQ(v, config)
-	case OpAMD64MULQconst:
-		return rewriteValueAMD64_OpAMD64MULQconst(v, config)
-	case OpAMD64NEGL:
-		return rewriteValueAMD64_OpAMD64NEGL(v, config)
-	case OpAMD64NEGQ:
-		return rewriteValueAMD64_OpAMD64NEGQ(v, config)
-	case OpAMD64NOTL:
-		return rewriteValueAMD64_OpAMD64NOTL(v, config)
-	case OpAMD64NOTQ:
-		return rewriteValueAMD64_OpAMD64NOTQ(v, config)
-	case OpAMD64ORL:
-		return rewriteValueAMD64_OpAMD64ORL(v, config)
-	case OpAMD64ORLconst:
-		return rewriteValueAMD64_OpAMD64ORLconst(v, config)
-	case OpAMD64ORQ:
-		return rewriteValueAMD64_OpAMD64ORQ(v, config)
-	case OpAMD64ORQconst:
-		return rewriteValueAMD64_OpAMD64ORQconst(v, config)
-	case OpAMD64ROLBconst:
-		return rewriteValueAMD64_OpAMD64ROLBconst(v, config)
-	case OpAMD64ROLLconst:
-		return rewriteValueAMD64_OpAMD64ROLLconst(v, config)
-	case OpAMD64ROLQconst:
-		return rewriteValueAMD64_OpAMD64ROLQconst(v, config)
-	case OpAMD64ROLWconst:
-		return rewriteValueAMD64_OpAMD64ROLWconst(v, config)
-	case OpAMD64SARB:
-		return rewriteValueAMD64_OpAMD64SARB(v, config)
-	case OpAMD64SARBconst:
-		return rewriteValueAMD64_OpAMD64SARBconst(v, config)
-	case OpAMD64SARL:
-		return rewriteValueAMD64_OpAMD64SARL(v, config)
-	case OpAMD64SARLconst:
-		return rewriteValueAMD64_OpAMD64SARLconst(v, config)
-	case OpAMD64SARQ:
-		return rewriteValueAMD64_OpAMD64SARQ(v, config)
-	case OpAMD64SARQconst:
-		return rewriteValueAMD64_OpAMD64SARQconst(v, config)
-	case OpAMD64SARW:
-		return rewriteValueAMD64_OpAMD64SARW(v, config)
-	case OpAMD64SARWconst:
-		return rewriteValueAMD64_OpAMD64SARWconst(v, config)
-	case OpAMD64SBBLcarrymask:
-		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
-	case OpAMD64SBBQcarrymask:
-		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
-	case OpAMD64SETA:
-		return rewriteValueAMD64_OpAMD64SETA(v, config)
-	case OpAMD64SETAE:
-		return rewriteValueAMD64_OpAMD64SETAE(v, config)
-	case OpAMD64SETB:
-		return rewriteValueAMD64_OpAMD64SETB(v, config)
-	case OpAMD64SETBE:
-		return rewriteValueAMD64_OpAMD64SETBE(v, config)
-	case OpAMD64SETEQ:
-		return rewriteValueAMD64_OpAMD64SETEQ(v, config)
-	case OpAMD64SETG:
-		return rewriteValueAMD64_OpAMD64SETG(v, config)
-	case OpAMD64SETGE:
-		return rewriteValueAMD64_OpAMD64SETGE(v, config)
-	case OpAMD64SETL:
-		return rewriteValueAMD64_OpAMD64SETL(v, config)
-	case OpAMD64SETLE:
-		return rewriteValueAMD64_OpAMD64SETLE(v, config)
-	case OpAMD64SETNE:
-		return rewriteValueAMD64_OpAMD64SETNE(v, config)
-	case OpAMD64SHLL:
-		return rewriteValueAMD64_OpAMD64SHLL(v, config)
-	case OpAMD64SHLQ:
-		return rewriteValueAMD64_OpAMD64SHLQ(v, config)
-	case OpAMD64SHRB:
-		return rewriteValueAMD64_OpAMD64SHRB(v, config)
-	case OpAMD64SHRL:
-		return rewriteValueAMD64_OpAMD64SHRL(v, config)
-	case OpAMD64SHRQ:
-		return rewriteValueAMD64_OpAMD64SHRQ(v, config)
-	case OpAMD64SHRW:
-		return rewriteValueAMD64_OpAMD64SHRW(v, config)
-	case OpAMD64SUBL:
-		return rewriteValueAMD64_OpAMD64SUBL(v, config)
-	case OpAMD64SUBLconst:
-		return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
-	case OpAMD64SUBQ:
-		return rewriteValueAMD64_OpAMD64SUBQ(v, config)
-	case OpAMD64SUBQconst:
-		return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
-	case OpAMD64XADDLlock:
-		return rewriteValueAMD64_OpAMD64XADDLlock(v, config)
-	case OpAMD64XADDQlock:
-		return rewriteValueAMD64_OpAMD64XADDQlock(v, config)
-	case OpAMD64XCHGL:
-		return rewriteValueAMD64_OpAMD64XCHGL(v, config)
-	case OpAMD64XCHGQ:
-		return rewriteValueAMD64_OpAMD64XCHGQ(v, config)
-	case OpAMD64XORL:
-		return rewriteValueAMD64_OpAMD64XORL(v, config)
-	case OpAMD64XORLconst:
-		return rewriteValueAMD64_OpAMD64XORLconst(v, config)
-	case OpAMD64XORQ:
-		return rewriteValueAMD64_OpAMD64XORQ(v, config)
-	case OpAMD64XORQconst:
-		return rewriteValueAMD64_OpAMD64XORQconst(v, config)
-	case OpAdd16:
-		return rewriteValueAMD64_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValueAMD64_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValueAMD64_OpAdd32F(v, config)
-	case OpAdd64:
-		return rewriteValueAMD64_OpAdd64(v, config)
-	case OpAdd64F:
-		return rewriteValueAMD64_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValueAMD64_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValueAMD64_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValueAMD64_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValueAMD64_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValueAMD64_OpAnd32(v, config)
-	case OpAnd64:
-		return rewriteValueAMD64_OpAnd64(v, config)
-	case OpAnd8:
-		return rewriteValueAMD64_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValueAMD64_OpAndB(v, config)
-	case OpAtomicAdd32:
-		return rewriteValueAMD64_OpAtomicAdd32(v, config)
-	case OpAtomicAdd64:
-		return rewriteValueAMD64_OpAtomicAdd64(v, config)
-	case OpAtomicAnd8:
-		return rewriteValueAMD64_OpAtomicAnd8(v, config)
-	case OpAtomicCompareAndSwap32:
-		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v, config)
-	case OpAtomicCompareAndSwap64:
-		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v, config)
-	case OpAtomicExchange32:
-		return rewriteValueAMD64_OpAtomicExchange32(v, config)
-	case OpAtomicExchange64:
-		return rewriteValueAMD64_OpAtomicExchange64(v, config)
-	case OpAtomicLoad32:
-		return rewriteValueAMD64_OpAtomicLoad32(v, config)
-	case OpAtomicLoad64:
-		return rewriteValueAMD64_OpAtomicLoad64(v, config)
-	case OpAtomicLoadPtr:
-		return rewriteValueAMD64_OpAtomicLoadPtr(v, config)
-	case OpAtomicOr8:
-		return rewriteValueAMD64_OpAtomicOr8(v, config)
-	case OpAtomicStore32:
-		return rewriteValueAMD64_OpAtomicStore32(v, config)
-	case OpAtomicStore64:
-		return rewriteValueAMD64_OpAtomicStore64(v, config)
-	case OpAtomicStorePtrNoWB:
-		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v, config)
-	case OpAvg64u:
-		return rewriteValueAMD64_OpAvg64u(v, config)
-	case OpBswap32:
-		return rewriteValueAMD64_OpBswap32(v, config)
-	case OpBswap64:
-		return rewriteValueAMD64_OpBswap64(v, config)
-	case OpClosureCall:
-		return rewriteValueAMD64_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValueAMD64_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValueAMD64_OpCom32(v, config)
-	case OpCom64:
-		return rewriteValueAMD64_OpCom64(v, config)
-	case OpCom8:
-		return rewriteValueAMD64_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValueAMD64_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValueAMD64_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValueAMD64_OpConst32F(v, config)
-	case OpConst64:
-		return rewriteValueAMD64_OpConst64(v, config)
-	case OpConst64F:
-		return rewriteValueAMD64_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValueAMD64_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValueAMD64_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValueAMD64_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValueAMD64_OpConvert(v, config)
-	case OpCtz32:
-		return rewriteValueAMD64_OpCtz32(v, config)
-	case OpCtz64:
-		return rewriteValueAMD64_OpCtz64(v, config)
-	case OpCvt32Fto32:
-		return rewriteValueAMD64_OpCvt32Fto32(v, config)
-	case OpCvt32Fto64:
-		return rewriteValueAMD64_OpCvt32Fto64(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValueAMD64_OpCvt32Fto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValueAMD64_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValueAMD64_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValueAMD64_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValueAMD64_OpCvt64Fto32F(v, config)
-	case OpCvt64Fto64:
-		return rewriteValueAMD64_OpCvt64Fto64(v, config)
-	case OpCvt64to32F:
-		return rewriteValueAMD64_OpCvt64to32F(v, config)
-	case OpCvt64to64F:
-		return rewriteValueAMD64_OpCvt64to64F(v, config)
-	case OpDeferCall:
-		return rewriteValueAMD64_OpDeferCall(v, config)
-	case OpDiv128u:
-		return rewriteValueAMD64_OpDiv128u(v, config)
-	case OpDiv16:
-		return rewriteValueAMD64_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValueAMD64_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValueAMD64_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValueAMD64_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValueAMD64_OpDiv32u(v, config)
-	case OpDiv64:
-		return rewriteValueAMD64_OpDiv64(v, config)
-	case OpDiv64F:
-		return rewriteValueAMD64_OpDiv64F(v, config)
-	case OpDiv64u:
-		return rewriteValueAMD64_OpDiv64u(v, config)
-	case OpDiv8:
-		return rewriteValueAMD64_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValueAMD64_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValueAMD64_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValueAMD64_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValueAMD64_OpEq32F(v, config)
-	case OpEq64:
-		return rewriteValueAMD64_OpEq64(v, config)
-	case OpEq64F:
-		return rewriteValueAMD64_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValueAMD64_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValueAMD64_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValueAMD64_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValueAMD64_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValueAMD64_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValueAMD64_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValueAMD64_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValueAMD64_OpGeq32U(v, config)
-	case OpGeq64:
-		return rewriteValueAMD64_OpGeq64(v, config)
-	case OpGeq64F:
-		return rewriteValueAMD64_OpGeq64F(v, config)
-	case OpGeq64U:
-		return rewriteValueAMD64_OpGeq64U(v, config)
-	case OpGeq8:
-		return rewriteValueAMD64_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValueAMD64_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValueAMD64_OpGetClosurePtr(v, config)
-	case OpGetG:
-		return rewriteValueAMD64_OpGetG(v, config)
-	case OpGoCall:
-		return rewriteValueAMD64_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValueAMD64_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValueAMD64_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValueAMD64_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValueAMD64_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValueAMD64_OpGreater32U(v, config)
-	case OpGreater64:
-		return rewriteValueAMD64_OpGreater64(v, config)
-	case OpGreater64F:
-		return rewriteValueAMD64_OpGreater64F(v, config)
-	case OpGreater64U:
-		return rewriteValueAMD64_OpGreater64U(v, config)
-	case OpGreater8:
-		return rewriteValueAMD64_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValueAMD64_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValueAMD64_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValueAMD64_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValueAMD64_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValueAMD64_OpHmul32u(v, config)
-	case OpHmul64:
-		return rewriteValueAMD64_OpHmul64(v, config)
-	case OpHmul64u:
-		return rewriteValueAMD64_OpHmul64u(v, config)
-	case OpHmul8:
-		return rewriteValueAMD64_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValueAMD64_OpHmul8u(v, config)
-	case OpInt64Hi:
-		return rewriteValueAMD64_OpInt64Hi(v, config)
-	case OpInterCall:
-		return rewriteValueAMD64_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValueAMD64_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValueAMD64_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValueAMD64_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValueAMD64_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValueAMD64_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValueAMD64_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValueAMD64_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValueAMD64_OpLeq32U(v, config)
-	case OpLeq64:
-		return rewriteValueAMD64_OpLeq64(v, config)
-	case OpLeq64F:
-		return rewriteValueAMD64_OpLeq64F(v, config)
-	case OpLeq64U:
-		return rewriteValueAMD64_OpLeq64U(v, config)
-	case OpLeq8:
-		return rewriteValueAMD64_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValueAMD64_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValueAMD64_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValueAMD64_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValueAMD64_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValueAMD64_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValueAMD64_OpLess32U(v, config)
-	case OpLess64:
-		return rewriteValueAMD64_OpLess64(v, config)
-	case OpLess64F:
-		return rewriteValueAMD64_OpLess64F(v, config)
-	case OpLess64U:
-		return rewriteValueAMD64_OpLess64U(v, config)
-	case OpLess8:
-		return rewriteValueAMD64_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValueAMD64_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValueAMD64_OpLoad(v, config)
-	case OpLrot16:
-		return rewriteValueAMD64_OpLrot16(v, config)
-	case OpLrot32:
-		return rewriteValueAMD64_OpLrot32(v, config)
-	case OpLrot64:
-		return rewriteValueAMD64_OpLrot64(v, config)
-	case OpLrot8:
-		return rewriteValueAMD64_OpLrot8(v, config)
-	case OpLsh16x16:
-		return rewriteValueAMD64_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValueAMD64_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValueAMD64_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValueAMD64_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValueAMD64_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValueAMD64_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValueAMD64_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValueAMD64_OpLsh32x8(v, config)
-	case OpLsh64x16:
-		return rewriteValueAMD64_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValueAMD64_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValueAMD64_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValueAMD64_OpLsh64x8(v, config)
-	case OpLsh8x16:
-		return rewriteValueAMD64_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValueAMD64_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValueAMD64_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValueAMD64_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValueAMD64_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValueAMD64_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValueAMD64_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValueAMD64_OpMod32u(v, config)
-	case OpMod64:
-		return rewriteValueAMD64_OpMod64(v, config)
-	case OpMod64u:
-		return rewriteValueAMD64_OpMod64u(v, config)
-	case OpMod8:
-		return rewriteValueAMD64_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValueAMD64_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValueAMD64_OpMove(v, config)
-	case OpMul16:
-		return rewriteValueAMD64_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValueAMD64_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValueAMD64_OpMul32F(v, config)
-	case OpMul64:
-		return rewriteValueAMD64_OpMul64(v, config)
-	case OpMul64F:
-		return rewriteValueAMD64_OpMul64F(v, config)
-	case OpMul64uhilo:
-		return rewriteValueAMD64_OpMul64uhilo(v, config)
-	case OpMul8:
-		return rewriteValueAMD64_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValueAMD64_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValueAMD64_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValueAMD64_OpNeg32F(v, config)
-	case OpNeg64:
-		return rewriteValueAMD64_OpNeg64(v, config)
-	case OpNeg64F:
-		return rewriteValueAMD64_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValueAMD64_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValueAMD64_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValueAMD64_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValueAMD64_OpNeq32F(v, config)
-	case OpNeq64:
-		return rewriteValueAMD64_OpNeq64(v, config)
-	case OpNeq64F:
-		return rewriteValueAMD64_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValueAMD64_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValueAMD64_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValueAMD64_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValueAMD64_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValueAMD64_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValueAMD64_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValueAMD64_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValueAMD64_OpOr32(v, config)
-	case OpOr64:
-		return rewriteValueAMD64_OpOr64(v, config)
-	case OpOr8:
-		return rewriteValueAMD64_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValueAMD64_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValueAMD64_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValueAMD64_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValueAMD64_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValueAMD64_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValueAMD64_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValueAMD64_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValueAMD64_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValueAMD64_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValueAMD64_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValueAMD64_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValueAMD64_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValueAMD64_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValueAMD64_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValueAMD64_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValueAMD64_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValueAMD64_OpRsh32x8(v, config)
-	case OpRsh64Ux16:
-		return rewriteValueAMD64_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValueAMD64_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValueAMD64_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValueAMD64_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValueAMD64_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValueAMD64_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValueAMD64_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValueAMD64_OpRsh64x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValueAMD64_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValueAMD64_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValueAMD64_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValueAMD64_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValueAMD64_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValueAMD64_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValueAMD64_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValueAMD64_OpRsh8x8(v, config)
-	case OpSelect0:
-		return rewriteValueAMD64_OpSelect0(v, config)
-	case OpSelect1:
-		return rewriteValueAMD64_OpSelect1(v, config)
-	case OpSignExt16to32:
-		return rewriteValueAMD64_OpSignExt16to32(v, config)
-	case OpSignExt16to64:
-		return rewriteValueAMD64_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValueAMD64_OpSignExt32to64(v, config)
-	case OpSignExt8to16:
-		return rewriteValueAMD64_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValueAMD64_OpSignExt8to32(v, config)
-	case OpSignExt8to64:
-		return rewriteValueAMD64_OpSignExt8to64(v, config)
-	case OpSlicemask:
-		return rewriteValueAMD64_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValueAMD64_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValueAMD64_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValueAMD64_OpStore(v, config)
-	case OpSub16:
-		return rewriteValueAMD64_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValueAMD64_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValueAMD64_OpSub32F(v, config)
-	case OpSub64:
-		return rewriteValueAMD64_OpSub64(v, config)
-	case OpSub64F:
-		return rewriteValueAMD64_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValueAMD64_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValueAMD64_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValueAMD64_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValueAMD64_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValueAMD64_OpTrunc32to8(v, config)
-	case OpTrunc64to16:
-		return rewriteValueAMD64_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValueAMD64_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValueAMD64_OpTrunc64to8(v, config)
-	case OpXor16:
-		return rewriteValueAMD64_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValueAMD64_OpXor32(v, config)
-	case OpXor64:
-		return rewriteValueAMD64_OpXor64(v, config)
-	case OpXor8:
-		return rewriteValueAMD64_OpXor8(v, config)
-	case OpZero:
-		return rewriteValueAMD64_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValueAMD64_OpZeroExt16to32(v, config)
-	case OpZeroExt16to64:
-		return rewriteValueAMD64_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValueAMD64_OpZeroExt32to64(v, config)
-	case OpZeroExt8to16:
-		return rewriteValueAMD64_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValueAMD64_OpZeroExt8to32(v, config)
-	case OpZeroExt8to64:
-		return rewriteValueAMD64_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDL x (MOVLconst [c]))
-	// cond:
-	// result: (ADDLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDL (MOVLconst [c]) x)
-	// cond:
-	// result: (ADDLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDL x (NEGL y))
-	// cond:
-	// result: (SUBL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64NEGL {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDLconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [int64(int32(c+d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int64(int32(c + d))
-		return true
-	}
-	// match: (ADDLconst [c] (ADDLconst [d] x))
-	// cond:
-	// result: (ADDLconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDLconst [c] (LEAL [d] {s} x))
-	// cond: is32Bit(c+d)
-	// result: (LEAL [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (ADDQconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64ADDQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ADDQconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64ADDQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDQ x (SHLQconst [3] y))
-	// cond:
-	// result: (LEAQ8 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (SHLQconst [2] y))
-	// cond:
-	// result: (LEAQ4 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (SHLQconst [1] y))
-	// cond:
-	// result: (LEAQ2 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (ADDQ y y))
-	// cond:
-	// result: (LEAQ2 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQ {
-			break
-		}
-		y := v_1.Args[0]
-		if y != v_1.Args[1] {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (ADDQ x y))
-	// cond:
-	// result: (LEAQ2 y x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQ {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpAMD64LEAQ2)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDQ x (ADDQ y x))
-	// cond:
-	// result: (LEAQ2 y x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQ {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDQ (ADDQconst [c] x) y)
-	// cond:
-	// result: (LEAQ1 [c] x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (ADDQconst [c] y))
-	// cond:
-	// result: (LEAQ1 [c] x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (LEAQ [c] {s} y))
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (LEAQ1 [c] {s} x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64LEAQ {
-			break
-		}
-		c := v_1.AuxInt
-		s := v_1.Aux
-		y := v_1.Args[0]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ (LEAQ [c] {s} x) y)
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (LEAQ1 [c] {s} x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		c := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQ x (NEGQ y))
-	// cond:
-	// result: (SUBQ x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64NEGQ {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SUBQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDQconst [c] (ADDQ x y))
-	// cond:
-	// result: (LEAQ1 [c] x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQconst [c] (LEAQ [d] {s} x))
-	// cond: is32Bit(c+d)
-	// result: (LEAQ [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAQ1 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAQ2 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ2 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAQ4 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
-	// cond: is32Bit(c+d)
-	// result: (LEAQ8 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDQconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDQconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [c+d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = c + d
-		return true
-	}
-	// match: (ADDQconst [c] (ADDQconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (ADDQconst [c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64ADDQconst)
-		v.AuxInt = c + d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDL x (MOVLconst [c]))
-	// cond:
-	// result: (ANDLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDL (MOVLconst [c]) x)
-	// cond:
-	// result: (ANDLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDL x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDLconst [c] (ANDLconst [d] x))
-	// cond:
-	// result: (ANDLconst [c & d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDLconst [0xFF] x)
-	// cond:
-	// result: (MOVBQZX x)
-	for {
-		if v.AuxInt != 0xFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQZX)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDLconst [0xFFFF] x)
-	// cond:
-	// result: (MOVWQZX x)
-	for {
-		if v.AuxInt != 0xFFFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64MOVWQZX)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDLconst [c] _)
-	// cond: int32(c)==0
-	// result: (MOVLconst [0])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDLconst [c] x)
-	// cond: int32(c)==-1
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = c & d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (ANDQconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64ANDQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ANDQconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64ANDQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQ x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDQconst [c] (ANDQconst [d] x))
-	// cond:
-	// result: (ANDQconst [c & d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDQconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQconst [0xFF] x)
-	// cond:
-	// result: (MOVBQZX x)
-	for {
-		if v.AuxInt != 0xFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQZX)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQconst [0xFFFF] x)
-	// cond:
-	// result: (MOVWQZX x)
-	for {
-		if v.AuxInt != 0xFFFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64MOVWQZX)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQconst [0xFFFFFFFF] x)
-	// cond:
-	// result: (MOVLQZX x)
-	for {
-		if v.AuxInt != 0xFFFFFFFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64MOVLQZX)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQconst [0] _)
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDQconst [-1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDQconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = c & d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPB x (MOVLconst [c]))
-	// cond:
-	// result: (CMPBconst x [int64(int8(c))])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64CMPBconst)
-		v.AuxInt = int64(int8(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPB (MOVLconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v0.AuxInt = int64(int8(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)==int8(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) == int8(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagEQ)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_UGT)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_ULT)
-		return true
-	}
-	// match: (CMPBconst (MOVLconst [x]) [y])
-	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_UGT)
-		return true
-	}
-	// match: (CMPBconst (ANDLconst _ [m]) [n])
-	// cond: 0 <= int8(m) && int8(m) < int8(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int8(m) && int8(m) < int8(n)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPBconst (ANDL x y) [0])
-	// cond:
-	// result: (TESTB x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpAMD64TESTB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPBconst (ANDLconst [c] x) [0])
-	// cond:
-	// result: (TESTBconst [int64(int8(c))] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64TESTBconst)
-		v.AuxInt = int64(int8(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPBconst x [0])
-	// cond:
-	// result: (TESTB x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64TESTB)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPL x (MOVLconst [c]))
-	// cond:
-	// result: (CMPLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64CMPLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPL (MOVLconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPLconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagEQ)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_UGT)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_ULT)
-		return true
-	}
-	// match: (CMPLconst (MOVLconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_UGT)
-		return true
-	}
-	// match: (CMPLconst (SHRLconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SHRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPLconst (ANDLconst _ [m]) [n])
-	// cond: 0 <= int32(m) && int32(m) < int32(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int32(m) && int32(m) < int32(n)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPLconst (ANDL x y) [0])
-	// cond:
-	// result: (TESTL x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpAMD64TESTL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPLconst (ANDLconst [c] x) [0])
-	// cond:
-	// result: (TESTLconst [c] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64TESTLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPLconst x [0])
-	// cond:
-	// result: (TESTL x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64TESTL)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (CMPQconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64CMPQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (InvertFlags (CMPQconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPQconst (MOVQconst [x]) [y])
-	// cond: x==y
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x == y) {
-			break
-		}
-		v.reset(OpAMD64FlagEQ)
-		return true
-	}
-	// match: (CMPQconst (MOVQconst [x]) [y])
-	// cond: x<y && uint64(x)<uint64(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x < y && uint64(x) < uint64(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPQconst (MOVQconst [x]) [y])
-	// cond: x<y && uint64(x)>uint64(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x < y && uint64(x) > uint64(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_UGT)
-		return true
-	}
-	// match: (CMPQconst (MOVQconst [x]) [y])
-	// cond: x>y && uint64(x)<uint64(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x > y && uint64(x) < uint64(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_ULT)
-		return true
-	}
-	// match: (CMPQconst (MOVQconst [x]) [y])
-	// cond: x>y && uint64(x)>uint64(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x > y && uint64(x) > uint64(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_UGT)
-		return true
-	}
-	// match: (CMPQconst (MOVBQZX _) [c])
-	// cond: 0xFF < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVBQZX {
-			break
-		}
-		if !(0xFF < c) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPQconst (MOVWQZX _) [c])
-	// cond: 0xFFFF < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVWQZX {
-			break
-		}
-		if !(0xFFFF < c) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPQconst (MOVLQZX _) [c])
-	// cond: 0xFFFFFFFF < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLQZX {
-			break
-		}
-		if !(0xFFFFFFFF < c) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPQconst (SHRQconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SHRQconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPQconst (ANDQconst _ [m]) [n])
-	// cond: 0 <= m && m < n
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDQconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= m && m < n) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPQconst (ANDQ x y) [0])
-	// cond:
-	// result: (TESTQ x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDQ {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpAMD64TESTQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPQconst (ANDQconst [c] x) [0])
-	// cond:
-	// result: (TESTQconst [c] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64TESTQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPQconst x [0])
-	// cond:
-	// result: (TESTQ x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64TESTQ)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPW x (MOVLconst [c]))
-	// cond:
-	// result: (CMPWconst x [int64(int16(c))])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64CMPWconst)
-		v.AuxInt = int64(int16(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPW (MOVLconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPWconst x [int64(int16(c))]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v0.AuxInt = int64(int16(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)==int16(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) == int16(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagEQ)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_UGT)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVLconst [x]) [y])
-	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
-			break
-		}
-		v.reset(OpAMD64FlagGT_UGT)
-		return true
-	}
-	// match: (CMPWconst (ANDLconst _ [m]) [n])
-	// cond: 0 <= int16(m) && int16(m) < int16(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int16(m) && int16(m) < int16(n)) {
-			break
-		}
-		v.reset(OpAMD64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPWconst (ANDL x y) [0])
-	// cond:
-	// result: (TESTW x y)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpAMD64TESTW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPWconst (ANDLconst [c] x) [0])
-	// cond:
-	// result: (TESTWconst [int64(int16(c))] x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64TESTWconst)
-		v.AuxInt = int64(int16(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPWconst x [0])
-	// cond:
-	// result: (TESTW x x)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64TESTW)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
-	// cond: is32Bit(off1+off2)
-	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64CMPXCHGLlock)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
-	// cond: is32Bit(off1+off2)
-	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64CMPXCHGQlock)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64LEAL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAL [c] {s} (ADDLconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (LEAL [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAQ [c] {s} (ADDQconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (LEAQ [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAQ [c] {s} (ADDQ x y))
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (LEAQ1 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ2 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAQ1 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
-	// cond: is32Bit(c+d)   && y.Op != OpSB
-	// result: (LEAQ1 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
-	// cond:
-	// result: (LEAQ2 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} (SHLQconst [1] x) y)
-	// cond:
-	// result: (LEAQ2 [c] {s} y x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
-	// cond:
-	// result: (LEAQ4 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} (SHLQconst [2] x) y)
-	// cond:
-	// result: (LEAQ4 [c] {s} y x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_0.AuxInt != 2 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
-	// cond:
-	// result: (LEAQ8 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ1 [c] {s} (SHLQconst [3] x) y)
-	// cond:
-	// result: (LEAQ8 [c] {s} y x)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_0.AuxInt != 3 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
-	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		y := v_1.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAQ2 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
-	// cond: is32Bit(c+2*d) && y.Op != OpSB
-	// result: (LEAQ2 [c+2*d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+2*d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = c + 2*d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
-	// cond:
-	// result: (LEAQ4 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
-	// cond:
-	// result: (LEAQ8 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAQ4 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
-	// cond: is32Bit(c+4*d) && y.Op != OpSB
-	// result: (LEAQ4 [c+4*d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+4*d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = c + 4*d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
-	// cond:
-	// result: (LEAQ8 [c] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
-	// cond: is32Bit(c+d)   && x.Op != OpSB
-	// result: (LEAQ8 [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
-	// cond: is32Bit(c+8*d) && y.Op != OpSB
-	// result: (LEAQ8 [c+8*d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is32Bit(c+8*d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = c + 8*d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64LEAQ8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVBload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVWload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVQload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQSX (ANDLconst [c] x))
-	// cond: c & 0x80 == 0
-	// result: (ANDLconst [c & 0x7f] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x80 == 0) {
-			break
-		}
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c & 0x7f
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBQSXload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVBload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVWload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVQload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBQZX (ANDLconst [c] x))
-	// cond:
-	// result: (ANDLconst [c & 0xff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c & 0xff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVBloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVBloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVBload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBQSX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVBQZX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
-	// cond: validOff(off)
-	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = makeValAndOff(int64(int8(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p w   x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)   x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)   x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && clobber(x0)   && clobber(x1)   && clobber(x2)
-	// result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w := v.Args[1]
-		x2 := v.Args[2]
-		if x2.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x2.AuxInt != i-1 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		x2_1 := x2.Args[1]
-		if x2_1.Op != OpAMD64SHRLconst {
-			break
-		}
-		if x2_1.AuxInt != 8 {
-			break
-		}
-		if w != x2_1.Args[0] {
-			break
-		}
-		x1 := x2.Args[2]
-		if x1.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x1.AuxInt != i-2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		x1_1 := x1.Args[1]
-		if x1_1.Op != OpAMD64SHRLconst {
-			break
-		}
-		if x1_1.AuxInt != 16 {
-			break
-		}
-		if w != x1_1.Args[0] {
-			break
-		}
-		x0 := x1.Args[2]
-		if x0.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x0.AuxInt != i-3 {
-			break
-		}
-		if x0.Aux != s {
-			break
-		}
-		if p != x0.Args[0] {
-			break
-		}
-		x0_1 := x0.Args[1]
-		if x0_1.Op != OpAMD64SHRLconst {
-			break
-		}
-		if x0_1.AuxInt != 24 {
-			break
-		}
-		if w != x0_1.Args[0] {
-			break
-		}
-		mem := x0.Args[2]
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = i - 3
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, w.Type)
-		v0.AddArg(w)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p w   x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)   x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)   x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)   x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)   x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)   x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)   x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)
-	// result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w := v.Args[1]
-		x6 := v.Args[2]
-		if x6.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x6.AuxInt != i-1 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		x6_1 := x6.Args[1]
-		if x6_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x6_1.AuxInt != 8 {
-			break
-		}
-		if w != x6_1.Args[0] {
-			break
-		}
-		x5 := x6.Args[2]
-		if x5.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x5.AuxInt != i-2 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		x5_1 := x5.Args[1]
-		if x5_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x5_1.AuxInt != 16 {
-			break
-		}
-		if w != x5_1.Args[0] {
-			break
-		}
-		x4 := x5.Args[2]
-		if x4.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x4.AuxInt != i-3 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		x4_1 := x4.Args[1]
-		if x4_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x4_1.AuxInt != 24 {
-			break
-		}
-		if w != x4_1.Args[0] {
-			break
-		}
-		x3 := x4.Args[2]
-		if x3.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x3.AuxInt != i-4 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		x3_1 := x3.Args[1]
-		if x3_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x3_1.AuxInt != 32 {
-			break
-		}
-		if w != x3_1.Args[0] {
-			break
-		}
-		x2 := x3.Args[2]
-		if x2.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x2.AuxInt != i-5 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		x2_1 := x2.Args[1]
-		if x2_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x2_1.AuxInt != 40 {
-			break
-		}
-		if w != x2_1.Args[0] {
-			break
-		}
-		x1 := x2.Args[2]
-		if x1.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x1.AuxInt != i-6 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		x1_1 := x1.Args[1]
-		if x1_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x1_1.AuxInt != 48 {
-			break
-		}
-		if w != x1_1.Args[0] {
-			break
-		}
-		x0 := x1.Args[2]
-		if x0.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x0.AuxInt != i-7 {
-			break
-		}
-		if x0.Aux != s {
-			break
-		}
-		if p != x0.Args[0] {
-			break
-		}
-		x0_1 := x0.Args[1]
-		if x0_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if x0_1.AuxInt != 56 {
-			break
-		}
-		if w != x0_1.Args[0] {
-			break
-		}
-		mem := x0.Args[2]
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = i - 7
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, w.Type)
-		v0.AddArg(w)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-1] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_1.AuxInt != 8 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-1] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
-	// cond:
-	// result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(OpAMD64MOVBstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != OpAMD64MOVBstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVBstoreconstidx1 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconstidx1)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(i)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVBstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVBstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_2.AuxInt != 8 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVBstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreidx1)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVBstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreidx1)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVQload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVLQSX (ANDLconst [c] x))
-	// cond: c & 0x80000000 == 0
-	// result: (ANDLconst [c & 0x7fffffff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x80000000 == 0) {
-			break
-		}
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c & 0x7fffffff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLQSXload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVQload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLloadidx1 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLloadidx4 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx4, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVLQZX (ANDLconst [c] x))
-	// cond:
-	// result: (ANDLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLatomicload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLatomicload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVLload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLloadidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVLloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVLloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVLload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
-	// cond:
-	// result: (MOVLloadidx4 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLloadidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLloadidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLloadidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
-	// cond:
-	// result: (MOVLstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLQSX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
-	// cond:
-	// result: (MOVLstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLQZX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
-	// cond: validOff(off)
-	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = makeValAndOff(int64(int32(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVQstore [i-4] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_1.AuxInt != 32 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVLstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVQstore [i-4] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVLstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-32 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVLstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVLstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconstidx4)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(OpAMD64MOVLstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != OpAMD64MOVLstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = ValAndOff(a).Off()
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstoreconstidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVLstoreconstidx1 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = ValAndOff(a).Off()
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(i)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstoreconstidx4)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstoreconstidx4)
-		v.AuxInt = ValAndOff(x).add(4 * c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVLstoreconstidx4 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = ValAndOff(a).Off()
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
-		v0.AuxInt = 2
-		v0.AddArg(i)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
-		v.AddArg(v1)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
-	// cond:
-	// result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVLstoreidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_2.AuxInt != 32 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVLstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVLstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-32 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVLstoreidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVLstoreidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_2.AuxInt != 32 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVLstoreidx4 {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-		v0.AuxInt = 2
-		v0.AddArg(idx)
-		v.AddArg(v0)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVLstoreidx4 {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-32 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-		v0.AuxInt = 2
-		v0.AddArg(idx)
-		v.AddArg(v0)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVOload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVOload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVOload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVOload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVOstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVOstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVOstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVOstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQatomicload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQatomicload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVQload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVQload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQloadidx8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVQloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVQload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVQload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
-	// cond:
-	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQloadidx8)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQloadidx8)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQloadidx8)
-		v.AuxInt = c + 8*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVQstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
-	// cond: validValAndOff(c,off)
-	// result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validValAndOff(c, off)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = makeValAndOff(c, off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVQstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVQstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconstidx8)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
-	// cond:
-	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(OpAMD64MOVQstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
-	// cond:
-	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQstoreconstidx8)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQstoreconstidx8)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQstoreconstidx8)
-		v.AuxInt = ValAndOff(x).add(8 * c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
-	// cond:
-	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVQstoreidx8)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVQstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVQstoreidx8)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVQstoreidx8)
-		v.AuxInt = c + 8*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDloadidx8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVSDloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
-	// cond:
-	// result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSDloadidx8)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSDloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSDloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSDloadidx8)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSDloadidx8)
-		v.AuxInt = c + 8*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ8 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDstoreidx8)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVSDstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
-	// cond:
-	// result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 3 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSDstoreidx8)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSDstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSDstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSDstoreidx8)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSDstoreidx8)
-		v.AuxInt = c + 8*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSSload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSloadidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVSSloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
-	// cond:
-	// result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSSloadidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSSloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSSloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSSloadidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVSSloadidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ4 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSstoreidx4)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVSSstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
-	// cond:
-	// result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 2 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSSstoreidx4)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSSstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSSstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSSstoreidx4)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVSSstoreidx4)
-		v.AuxInt = c + 4*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVWload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVQload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQSX (ANDLconst [c] x))
-	// cond: c & 0x8000 == 0
-	// result: (ANDLconst [c & 0x7fff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x8000 == 0) {
-			break
-		}
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c & 0x7fff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWQSXload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVWload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVLload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVQload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVWloadidx1 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpAMD64MOVWloadidx2 {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx2, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWQZX (ANDLconst [c] x))
-	// cond:
-	// result: (ANDLconst [c & 0xffff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ANDLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ANDLconst)
-		v.AuxInt = c & 0xffff
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWload  [off1] {sym} (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload  [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWloadidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ2 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWloadidx2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVWloadidx1 [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVWloadidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload  [off1] {sym1} (LEAL [off2] {sym2} base) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVWload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload  [off1] {sym} (ADDLconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
-	// cond:
-	// result: (MOVWloadidx2 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWloadidx2)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWloadidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWloadidx2)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
-	// cond:
-	// result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWloadidx2)
-		v.AuxInt = c + 2*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWQSX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVWQZX {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym} (ADDQconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
-	// cond: validOff(off)
-	// result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = makeValAndOff(int64(int16(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreidx1)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ2 {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreidx2)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreidx1)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstore [i-2] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_1.AuxInt != 16 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVWstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstore [i-2] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVWstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym} (ADDLconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ1 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
-	// cond: canMergeSym(sym1, sym2)
-	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAQ2 {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconstidx2)
-		v.AuxInt = ValAndOff(x).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQ {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		v.reset(OpAMD64MOVWstoreconstidx1)
-		v.AuxInt = x
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != OpAMD64MOVWstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64LEAL {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDLconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstoreconstidx2)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstoreconstidx1)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVWstoreconstidx1 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconstidx1)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(i)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
-	// cond:
-	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstoreconstidx2)
-		v.AuxInt = ValAndOff(x).add(c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
-	// cond:
-	// result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)
-	for {
-		x := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstoreconstidx2)
-		v.AuxInt = ValAndOff(x).add(2 * c)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
-	// cond: x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		i := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpAMD64MOVWstoreconstidx2 {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if i != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconstidx1)
-		v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type)
-		v0.AuxInt = 1
-		v0.AddArg(i)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVWstoreidx2)
-		v.AuxInt = c
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVWstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVWstoreidx1)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_2.AuxInt != 16 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVWstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVWstoreidx1 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVWstoreidx2)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64MOVWstoreidx2)
-		v.AuxInt = c + 2*d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		if v_2.AuxInt != 16 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVWstoreidx2 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-		v0.AuxInt = 1
-		v0.AddArg(idx)
-		v.AddArg(v0)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpAMD64SHRQconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpAMD64MOVWstoreidx2 {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpAMD64SHRQconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreidx1)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type)
-		v0.AuxInt = 1
-		v0.AddArg(idx)
-		v.AddArg(v0)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULL x (MOVLconst [c]))
-	// cond:
-	// result: (MULLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64MULLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULL (MOVLconst [c]) x)
-	// cond:
-	// result: (MULLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64MULLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULLconst [c] (MULLconst [d] x))
-	// cond:
-	// result: (MULLconst [int64(int32(c * d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MULLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64MULLconst)
-		v.AuxInt = int64(int32(c * d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [int64(int32(c*d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int64(int32(c * d))
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (MULQconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64MULQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (MULQconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64MULQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULQconst [c] (MULQconst [d] x))
-	// cond: is32Bit(c*d)
-	// result: (MULQconst [c * d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MULQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c * d)) {
-			break
-		}
-		v.reset(OpAMD64MULQconst)
-		v.AuxInt = c * d
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [-1] x)
-	// cond:
-	// result: (NEGQ x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64NEGQ)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [0] _)
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MULQconst [1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [3] x)
-	// cond:
-	// result: (LEAQ2 x x)
-	for {
-		if v.AuxInt != 3 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ2)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [5] x)
-	// cond:
-	// result: (LEAQ4 x x)
-	for {
-		if v.AuxInt != 5 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [7] x)
-	// cond:
-	// result: (LEAQ8 (NEGQ <v.Type> x) x)
-	for {
-		if v.AuxInt != 7 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v0 := b.NewValue0(v.Line, OpAMD64NEGQ, v.Type)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [9] x)
-	// cond:
-	// result: (LEAQ8 x x)
-	for {
-		if v.AuxInt != 9 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [11] x)
-	// cond:
-	// result: (LEAQ2 x (LEAQ4 <v.Type> x x))
-	for {
-		if v.AuxInt != 11 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ2)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [13] x)
-	// cond:
-	// result: (LEAQ4 x (LEAQ2 <v.Type> x x))
-	for {
-		if v.AuxInt != 13 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [21] x)
-	// cond:
-	// result: (LEAQ4 x (LEAQ4 <v.Type> x x))
-	for {
-		if v.AuxInt != 21 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [25] x)
-	// cond:
-	// result: (LEAQ8 x (LEAQ2 <v.Type> x x))
-	for {
-		if v.AuxInt != 25 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [37] x)
-	// cond:
-	// result: (LEAQ4 x (LEAQ8 <v.Type> x x))
-	for {
-		if v.AuxInt != 37 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ4)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [41] x)
-	// cond:
-	// result: (LEAQ8 x (LEAQ4 <v.Type> x x))
-	for {
-		if v.AuxInt != 41 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [73] x)
-	// cond:
-	// result: (LEAQ8 x (LEAQ8 <v.Type> x x))
-	for {
-		if v.AuxInt != 73 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpAMD64LEAQ8)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: isPowerOfTwo(c)
-	// result: (SHLQconst [log2(c)] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: isPowerOfTwo(c+1) && c >= 15
-	// result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c+1) && c >= 15) {
-			break
-		}
-		v.reset(OpAMD64SUBQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-		v0.AuxInt = log2(c + 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: isPowerOfTwo(c-1) && c >= 17
-	// result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-1) && c >= 17) {
-			break
-		}
-		v.reset(OpAMD64LEAQ1)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-		v0.AuxInt = log2(c - 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: isPowerOfTwo(c-2) && c >= 34
-	// result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-2) && c >= 34) {
-			break
-		}
-		v.reset(OpAMD64LEAQ2)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-		v0.AuxInt = log2(c - 2)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: isPowerOfTwo(c-4) && c >= 68
-	// result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-4) && c >= 68) {
-			break
-		}
-		v.reset(OpAMD64LEAQ4)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-		v0.AuxInt = log2(c - 4)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: isPowerOfTwo(c-8) && c >= 136
-	// result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-8) && c >= 136) {
-			break
-		}
-		v.reset(OpAMD64LEAQ8)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, v.Type)
-		v0.AuxInt = log2(c - 8)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3)
-	// result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
-			break
-		}
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ2, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5)
-	// result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
-			break
-		}
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ4, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [c] x)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9)
-	// result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
-			break
-		}
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpAMD64LEAQ8, v.Type)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULQconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [c*d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = c * d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEGL (MOVLconst [c]))
-	// cond:
-	// result: (MOVLconst [int64(int32(-c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = int64(int32(-c))
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEGQ (MOVQconst [c]))
-	// cond:
-	// result: (MOVQconst [-c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = -c
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOTL (MOVLconst [c]))
-	// cond:
-	// result: (MOVLconst [^c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = ^c
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOTQ (MOVQconst [c]))
-	// cond:
-	// result: (MOVQconst [^c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = ^c
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORL x (MOVLconst [c]))
-	// cond:
-	// result: (ORLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64ORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORL (MOVLconst [c]) x)
-	// cond:
-	// result: (ORLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64ORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORL x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORL                  x0:(MOVBload [i]   {s} p mem)     s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem)
-	for {
-		x0 := v.Args[0]
-		if x0.Op != OpAMD64MOVBload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := v.Args[1]
-		if s0.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL o0:(ORL                        x0:(MOVWload [i]   {s} p mem)     s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem)))     s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpAMD64ORL {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpAMD64MOVWload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o0.Args[1]
-		if s0.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBload {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBload {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL                  x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i] {s} p idx mem)
-	for {
-		x0 := v.Args[0]
-		if x0.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := v.Args[1]
-		if s0.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL o0:(ORL                        x0:(MOVWloadidx1 [i]   {s} p idx mem)     s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem)))     s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 <v.Type> [i] {s} p idx mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpAMD64ORL {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpAMD64MOVWloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o0.Args[1]
-		if s0.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORL o1:(ORL o0:(ORL                        x0:(MOVBload [i] {s} p mem)     s0:(SHLLconst [8]  x1:(MOVBload [i-1] {s} p mem)))     s1:(SHLLconst [16] x2:(MOVBload [i-2] {s} p mem)))     s2:(SHLLconst [24] x3:(MOVBload [i-3] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
-	// result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLload [i-3] {s} p mem))
-	for {
-		o1 := v.Args[0]
-		if o1.Op != OpAMD64ORL {
-			break
-		}
-		o0 := o1.Args[0]
-		if o0.Op != OpAMD64ORL {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpAMD64MOVBload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o0.Args[1]
-		if s0.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := o1.Args[1]
-		if s1.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		s2 := v.Args[1]
-		if s2.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpAMD64MOVBload {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3)
-		v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v1.AuxInt = i - 3
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORL o1:(ORL o0:(ORL                        x0:(MOVBloadidx1 [i] {s} p idx mem)     s0:(SHLLconst [8]  x1:(MOVBloadidx1 [i-1] {s} p idx mem)))     s1:(SHLLconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem)))     s2:(SHLLconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && mergePoint(b,x0,x1,x2,x3) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(o0)   && clobber(o1)
-	// result: @mergePoint(b,x0,x1,x2,x3) (BSWAPL <v.Type> (MOVLloadidx1 <v.Type> [i-3] {s} p idx mem))
-	for {
-		o1 := v.Args[0]
-		if o1.Op != OpAMD64ORL {
-			break
-		}
-		o0 := o1.Args[0]
-		if o0.Op != OpAMD64ORL {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o0.Args[1]
-		if s0.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := o1.Args[1]
-		if s1.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		s2 := v.Args[1]
-		if s2.Op != OpAMD64SHLLconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if idx != x3.Args[1] {
-			break
-		}
-		if mem != x3.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3)
-		v0 := b.NewValue0(v.Line, OpAMD64BSWAPL, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVLloadidx1, v.Type)
-		v1.AuxInt = i - 3
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(idx)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORLconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORLconst [c] _)
-	// cond: int32(c)==-1
-	// result: (MOVLconst [-1])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = c | d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (ORQconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64ORQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ORQconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64ORQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORQ x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBload [i]   {s} p mem)     s0:(SHLQconst [8]  x1:(MOVBload [i+1] {s} p mem)))     s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem)))     s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem)))     s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem)))     s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem)))     s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem)))     s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpAMD64ORQ {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpAMD64ORQ {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpAMD64ORQ {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpAMD64ORQ {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpAMD64ORQ {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpAMD64ORQ {
-			break
-		}
-		x0 := o5.Args[0]
-		if x0.Op != OpAMD64MOVBload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o5.Args[1]
-		if s0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := o4.Args[1]
-		if s1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBload {
-			break
-		}
-		if x2.AuxInt != i+2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		s2 := o3.Args[1]
-		if s2.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpAMD64MOVBload {
-			break
-		}
-		if x3.AuxInt != i+3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		s3 := o2.Args[1]
-		if s3.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpAMD64MOVBload {
-			break
-		}
-		if x4.AuxInt != i+4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		s4 := o1.Args[1]
-		if s4.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpAMD64MOVBload {
-			break
-		}
-		if x5.AuxInt != i+5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if mem != x5.Args[1] {
-			break
-		}
-		s5 := o0.Args[1]
-		if s5.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpAMD64MOVBload {
-			break
-		}
-		if x6.AuxInt != i+6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if mem != x6.Args[1] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpAMD64MOVBload {
-			break
-		}
-		if x7.AuxInt != i+7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if mem != x7.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ                        x0:(MOVBloadidx1 [i]   {s} p idx mem)     s0:(SHLQconst [8]  x1:(MOVBloadidx1 [i+1] {s} p idx mem)))     s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem)))     s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem)))     s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem)))     s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem)))     s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem)))     s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 <v.Type> [i] {s} p idx mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpAMD64ORQ {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpAMD64ORQ {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpAMD64ORQ {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpAMD64ORQ {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpAMD64ORQ {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpAMD64ORQ {
-			break
-		}
-		x0 := o5.Args[0]
-		if x0.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o5.Args[1]
-		if s0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := o4.Args[1]
-		if s1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x2.AuxInt != i+2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		s2 := o3.Args[1]
-		if s2.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x3.AuxInt != i+3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if idx != x3.Args[1] {
-			break
-		}
-		if mem != x3.Args[2] {
-			break
-		}
-		s3 := o2.Args[1]
-		if s3.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x4.AuxInt != i+4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if idx != x4.Args[1] {
-			break
-		}
-		if mem != x4.Args[2] {
-			break
-		}
-		s4 := o1.Args[1]
-		if s4.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x5.AuxInt != i+5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if idx != x5.Args[1] {
-			break
-		}
-		if mem != x5.Args[2] {
-			break
-		}
-		s5 := o0.Args[1]
-		if s5.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x6.AuxInt != i+6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if idx != x6.Args[1] {
-			break
-		}
-		if mem != x6.Args[2] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x7.AuxInt != i+7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if idx != x7.Args[1] {
-			break
-		}
-		if mem != x7.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ                        x0:(MOVBload [i] {s} p mem)     s0:(SHLQconst [8]  x1:(MOVBload [i-1] {s} p mem)))     s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem)))     s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem)))     s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem)))     s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem)))     s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem)))     s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQload [i-7] {s} p mem))
-	for {
-		o5 := v.Args[0]
-		if o5.Op != OpAMD64ORQ {
-			break
-		}
-		o4 := o5.Args[0]
-		if o4.Op != OpAMD64ORQ {
-			break
-		}
-		o3 := o4.Args[0]
-		if o3.Op != OpAMD64ORQ {
-			break
-		}
-		o2 := o3.Args[0]
-		if o2.Op != OpAMD64ORQ {
-			break
-		}
-		o1 := o2.Args[0]
-		if o1.Op != OpAMD64ORQ {
-			break
-		}
-		o0 := o1.Args[0]
-		if o0.Op != OpAMD64ORQ {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpAMD64MOVBload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o0.Args[1]
-		if s0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := o1.Args[1]
-		if s1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		s2 := o2.Args[1]
-		if s2.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpAMD64MOVBload {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		s3 := o3.Args[1]
-		if s3.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpAMD64MOVBload {
-			break
-		}
-		if x4.AuxInt != i-4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		s4 := o4.Args[1]
-		if s4.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpAMD64MOVBload {
-			break
-		}
-		if x5.AuxInt != i-5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if mem != x5.Args[1] {
-			break
-		}
-		s5 := o5.Args[1]
-		if s5.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpAMD64MOVBload {
-			break
-		}
-		if x6.AuxInt != i-6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if mem != x6.Args[1] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpAMD64MOVBload {
-			break
-		}
-		if x7.AuxInt != i-7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if mem != x7.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-		v1.AuxInt = i - 7
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ                        x0:(MOVBloadidx1 [i] {s} p idx mem)     s0:(SHLQconst [8]  x1:(MOVBloadidx1 [i-1] {s} p idx mem)))     s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem)))     s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem)))     s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem)))     s4:(SHLQconst [40] x5:(MOVBloadidx1 [i-5] {s} p idx mem)))     s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem)))     s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ <v.Type> (MOVQloadidx1 <v.Type> [i-7] {s} p idx mem))
-	for {
-		o5 := v.Args[0]
-		if o5.Op != OpAMD64ORQ {
-			break
-		}
-		o4 := o5.Args[0]
-		if o4.Op != OpAMD64ORQ {
-			break
-		}
-		o3 := o4.Args[0]
-		if o3.Op != OpAMD64ORQ {
-			break
-		}
-		o2 := o3.Args[0]
-		if o2.Op != OpAMD64ORQ {
-			break
-		}
-		o1 := o2.Args[0]
-		if o1.Op != OpAMD64ORQ {
-			break
-		}
-		o0 := o1.Args[0]
-		if o0.Op != OpAMD64ORQ {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o0.Args[1]
-		if s0.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := o1.Args[1]
-		if s1.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		s2 := o2.Args[1]
-		if s2.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if idx != x3.Args[1] {
-			break
-		}
-		if mem != x3.Args[2] {
-			break
-		}
-		s3 := o3.Args[1]
-		if s3.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x4.AuxInt != i-4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if idx != x4.Args[1] {
-			break
-		}
-		if mem != x4.Args[2] {
-			break
-		}
-		s4 := o4.Args[1]
-		if s4.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x5.AuxInt != i-5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if idx != x5.Args[1] {
-			break
-		}
-		if mem != x5.Args[2] {
-			break
-		}
-		s5 := o5.Args[1]
-		if s5.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x6.AuxInt != i-6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if idx != x6.Args[1] {
-			break
-		}
-		if mem != x6.Args[2] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpAMD64SHLQconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpAMD64MOVBloadidx1 {
-			break
-		}
-		if x7.AuxInt != i-7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if idx != x7.Args[1] {
-			break
-		}
-		if mem != x7.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpAMD64BSWAPQ, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQloadidx1, v.Type)
-		v1.AuxInt = i - 7
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(idx)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORQconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORQconst [-1] _)
-	// cond:
-	// result: (MOVQconst [-1])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORQconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = c | d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ROLBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLBconst [c] (ROLBconst [d] x))
-	// cond:
-	// result: (ROLBconst [(c+d)& 7] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ROLBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ROLBconst)
-		v.AuxInt = (c + d) & 7
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLBconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ROLLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLLconst [c] (ROLLconst [d] x))
-	// cond:
-	// result: (ROLLconst [(c+d)&31] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ROLLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ROLLconst)
-		v.AuxInt = (c + d) & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLLconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ROLQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLQconst [c] (ROLQconst [d] x))
-	// cond:
-	// result: (ROLQconst [(c+d)&63] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ROLQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ROLQconst)
-		v.AuxInt = (c + d) & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLQconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64ROLWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ROLWconst [c] (ROLWconst [d] x))
-	// cond:
-	// result: (ROLWconst [(c+d)&15] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64ROLWconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64ROLWconst)
-		v.AuxInt = (c + d) & 15
-		v.AddArg(x)
-		return true
-	}
-	// match: (ROLWconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARB x (MOVQconst [c]))
-	// cond:
-	// result: (SARBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARB x (MOVLconst [c]))
-	// cond:
-	// result: (SARBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARBconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARL x (MOVQconst [c]))
-	// cond:
-	// result: (SARLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARL x (MOVLconst [c]))
-	// cond:
-	// result: (SARLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARL x (ANDLconst [31] y))
-	// cond:
-	// result: (SARL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDLconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SARL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARLconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARQ x (MOVQconst [c]))
-	// cond:
-	// result: (SARQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARQ x (MOVLconst [c]))
-	// cond:
-	// result: (SARQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARQ x (ANDQconst [63] y))
-	// cond:
-	// result: (SARQ x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDQconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SARQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARQconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARW x (MOVQconst [c]))
-	// cond:
-	// result: (SARWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SARW x (MOVLconst [c]))
-	// cond:
-	// result: (SARWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SARWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SARWconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBBLcarrymask (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SBBLcarrymask (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SBBLcarrymask (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SBBLcarrymask (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SBBLcarrymask (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBBQcarrymask (FlagEQ))
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SBBQcarrymask (FlagLT_ULT))
-	// cond:
-	// result: (MOVQconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SBBQcarrymask (FlagLT_UGT))
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SBBQcarrymask (FlagGT_ULT))
-	// cond:
-	// result: (MOVQconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SBBQcarrymask (FlagGT_UGT))
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETA (InvertFlags x))
-	// cond:
-	// result: (SETB x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETB)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETA (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETA (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETA (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETA (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETA (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETAE (InvertFlags x))
-	// cond:
-	// result: (SETBE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETBE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETAE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETAE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETAE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETAE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETAE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETB (InvertFlags x))
-	// cond:
-	// result: (SETA x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETA)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETB (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETB (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETB (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETB (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETB (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETBE (InvertFlags x))
-	// cond:
-	// result: (SETAE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETAE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETBE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETBE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETBE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETBE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETBE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETEQ (InvertFlags x))
-	// cond:
-	// result: (SETEQ x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETEQ)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETEQ (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETEQ (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETEQ (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETEQ (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETEQ (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETG (InvertFlags x))
-	// cond:
-	// result: (SETL x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETL)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETG (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETG (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETG (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETG (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETG (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETGE (InvertFlags x))
-	// cond:
-	// result: (SETLE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETLE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETGE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETGE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETGE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETGE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETGE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETL (InvertFlags x))
-	// cond:
-	// result: (SETG x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETL (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETL (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETL (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETL (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETL (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETLE (InvertFlags x))
-	// cond:
-	// result: (SETGE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETGE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETLE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETLE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETLE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETLE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETLE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SETNE (InvertFlags x))
-	// cond:
-	// result: (SETNE x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpAMD64SETNE)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SETNE (FlagEQ))
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagEQ {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SETNE (FlagLT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETNE (FlagLT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagLT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETNE (FlagGT_ULT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_ULT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SETNE (FlagGT_UGT))
-	// cond:
-	// result: (MOVLconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64FlagGT_UGT {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHLL x (MOVQconst [c]))
-	// cond:
-	// result: (SHLLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLL x (MOVLconst [c]))
-	// cond:
-	// result: (SHLLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLL x (ANDLconst [31] y))
-	// cond:
-	// result: (SHLL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDLconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHLL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHLQ x (MOVQconst [c]))
-	// cond:
-	// result: (SHLQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLQ x (MOVLconst [c]))
-	// cond:
-	// result: (SHLQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHLQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHLQ x (ANDQconst [63] y))
-	// cond:
-	// result: (SHLQ x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDQconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHLQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRB x (MOVQconst [c]))
-	// cond:
-	// result: (SHRBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRB x (MOVLconst [c]))
-	// cond:
-	// result: (SHRBconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRBconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRL x (MOVQconst [c]))
-	// cond:
-	// result: (SHRLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRL x (MOVLconst [c]))
-	// cond:
-	// result: (SHRLconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRL x (ANDLconst [31] y))
-	// cond:
-	// result: (SHRL x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDLconst {
-			break
-		}
-		if v_1.AuxInt != 31 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHRL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRQ x (MOVQconst [c]))
-	// cond:
-	// result: (SHRQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRQ x (MOVLconst [c]))
-	// cond:
-	// result: (SHRQconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRQconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRQ x (ANDQconst [63] y))
-	// cond:
-	// result: (SHRQ x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ANDQconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpAMD64SHRQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SHRW x (MOVQconst [c]))
-	// cond:
-	// result: (SHRWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SHRW x (MOVLconst [c]))
-	// cond:
-	// result: (SHRWconst [c&31] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SHRWconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBL x (MOVLconst [c]))
-	// cond:
-	// result: (SUBLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64SUBLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBL (MOVLconst [c]) x)
-	// cond:
-	// result: (NEGL (SUBLconst <v.Type> x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64NEGL)
-		v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBL x x)
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBLconst [c] x)
-	// cond: int32(c) == 0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBLconst [c] x)
-	// cond:
-	// result: (ADDLconst [int64(int32(-c))] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = int64(int32(-c))
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (SUBQconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64SUBQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (NEGQ (SUBQconst <v.Type> x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64NEGQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBQ x x)
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBQconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBQconst [c] x)
-	// cond: c != -(1<<31)
-	// result: (ADDQconst [-c] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c != -(1 << 31)) {
-			break
-		}
-		v.reset(OpAMD64ADDQconst)
-		v.AuxInt = -c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBQconst (MOVQconst [d]) [c])
-	// cond:
-	// result: (MOVQconst [d-c])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = d - c
-		return true
-	}
-	// match: (SUBQconst (SUBQconst x [d]) [c])
-	// cond: is32Bit(-c-d)
-	// result: (ADDQconst [-c-d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64SUBQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(-c - d)) {
-			break
-		}
-		v.reset(OpAMD64ADDQconst)
-		v.AuxInt = -c - d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XADDLlock(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (XADDLlock [off1+off2] {sym} val ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		val := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_1.AuxInt
-		ptr := v_1.Args[0]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64XADDLlock)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XADDQlock(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (XADDQlock [off1+off2] {sym} val ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		val := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_1.AuxInt
-		ptr := v_1.Args[0]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64XADDQlock)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XCHGL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (XCHGL [off1+off2] {sym} val ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		val := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_1.AuxInt
-		ptr := v_1.Args[0]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64XCHGL)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
-	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		val := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr := v_1.Args[0]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64XCHGL)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XCHGQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		val := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64ADDQconst {
-			break
-		}
-		off2 := v_1.AuxInt
-		ptr := v_1.Args[0]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpAMD64XCHGQ)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
-	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		val := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64LEAQ {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr := v_1.Args[0]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpAMD64XCHGQ)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORL x (MOVLconst [c]))
-	// cond:
-	// result: (XORLconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORL (MOVLconst [c]) x)
-	// cond:
-	// result: (XORLconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORL x x)
-	// cond:
-	// result: (MOVLconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORLconst [c] (XORLconst [d] x))
-	// cond:
-	// result: (XORLconst [c ^ d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64XORLconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORLconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORLconst [c] (MOVLconst [d]))
-	// cond:
-	// result: (MOVLconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVLconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
-	// result: (XORQconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORQ (MOVQconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (XORQconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORQ x x)
-	// cond:
-	// result: (MOVQconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORQconst [c] (XORQconst [d] x))
-	// cond:
-	// result: (XORQconst [c ^ d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64XORQconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORQconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORQconst [c] (MOVQconst [d]))
-	// cond:
-	// result: (MOVQconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64MOVQconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16  x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32  x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (ADDSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ADDSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64  x y)
-	// cond:
-	// result: (ADDQ  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ADDQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (ADDSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ADDSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8   x y)
-	// cond:
-	// result: (ADDL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond: config.PtrSize == 8
-	// result: (ADDQ x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64ADDQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AddPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (ADDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64ADDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond: config.PtrSize == 8
-	// result: (LEAQ {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64LEAQ)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-	// match: (Addr {sym} base)
-	// cond: config.PtrSize == 4
-	// result: (LEAL {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64LEAL)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x y)
-	// cond:
-	// result: (ANDQ x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8  x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (ANDL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd32 ptr val mem)
-	// cond:
-	// result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64AddTupleFirst32)
-		v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(val)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd64 ptr val mem)
-	// cond:
-	// result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64AddTupleFirst64)
-		v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(val)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAnd8 ptr val mem)
-	// cond:
-	// result: (ANDBlock ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64ANDBlock)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
-	// cond:
-	// result: (CMPXCHGLlock ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64CMPXCHGLlock)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
-	// cond:
-	// result: (CMPXCHGQlock ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpAMD64CMPXCHGQlock)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicExchange32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange32 ptr val mem)
-	// cond:
-	// result: (XCHGL val ptr mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64XCHGL)
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicExchange64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange64 ptr val mem)
-	// cond:
-	// result: (XCHGQ val ptr mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64XCHGQ)
-		v.AddArg(val)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicLoad32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad32 ptr mem)
-	// cond:
-	// result: (MOVLatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpAMD64MOVLatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicLoad64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad64 ptr mem)
-	// cond:
-	// result: (MOVQatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpAMD64MOVQatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicLoadPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoadPtr ptr mem)
-	// cond: config.PtrSize == 8
-	// result: (MOVQatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64MOVQatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (AtomicLoadPtr ptr mem)
-	// cond: config.PtrSize == 4
-	// result: (MOVLatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAtomicOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicOr8 ptr val mem)
-	// cond:
-	// result: (ORBlock ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64ORBlock)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicStore32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore32 ptr val mem)
-	// cond:
-	// result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeUInt32(),TypeMem)> val ptr mem))
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeUInt32(), TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicStore64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore64 ptr val mem)
-	// cond:
-	// result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeUInt64(),TypeMem)> val ptr mem))
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeUInt64(), TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond: config.PtrSize == 8
-	// result: (Select1 (XCHGQ <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64XCHGQ, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond: config.PtrSize == 4
-	// result: (Select1 (XCHGL <MakeTuple(config.Frontend().TypeBytePtr(),TypeMem)> val ptr mem))
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64XCHGL, MakeTuple(config.Frontend().TypeBytePtr(), TypeMem))
-		v0.AddArg(val)
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Avg64u x y)
-	// cond:
-	// result: (AVGQU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64AVGQU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpBswap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap32 x)
-	// cond:
-	// result: (BSWAPL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64BSWAPL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpBswap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap64 x)
-	// cond:
-	// result: (BSWAPQ x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64BSWAPQ)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64CALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (NOTL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NOTL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (NOTL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NOTL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 x)
-	// cond:
-	// result: (NOTQ x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NOTQ)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8  x)
-	// cond:
-	// result: (NOTL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NOTL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16  [val])
-	// cond:
-	// result: (MOVLconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32  [val])
-	// cond:
-	// result: (MOVLconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (MOVSSconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpAMD64MOVSSconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64  [val])
-	// cond:
-	// result: (MOVQconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (MOVSDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpAMD64MOVSDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8   [val])
-	// cond:
-	// result: (MOVLconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVLconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond: config.PtrSize == 8
-	// result: (MOVQconst [0])
-	for {
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64MOVQconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ConstNil)
-	// cond: config.PtrSize == 4
-	// result: (MOVLconst [0])
-	for {
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert <t> x mem)
-	// cond: config.PtrSize == 8
-	// result: (MOVQconvert <t> x mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		mem := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64MOVQconvert)
-		v.Type = t
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Convert <t> x mem)
-	// cond: config.PtrSize == 4
-	// result: (MOVLconvert <t> x mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		mem := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLconvert)
-		v.Type = t
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpCtz32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz32 <t> x)
-	// cond:
-	// result: (CMOVLEQ (Select0 <t> (BSFL x)) (MOVLconst <t> [32]) (Select1 <TypeFlags> (BSFL x)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpAMD64CMOVLEQ)
-		v0 := b.NewValue0(v.Line, OpSelect0, t)
-		v1 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVLconst, t)
-		v2.AuxInt = 32
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
-		v4 := b.NewValue0(v.Line, OpAMD64BSFL, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCtz64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz64 <t> x)
-	// cond:
-	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpAMD64CMOVQEQ)
-		v0 := b.NewValue0(v.Line, OpSelect0, t)
-		v1 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, t)
-		v2.AuxInt = 64
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
-		v4 := b.NewValue0(v.Line, OpAMD64BSFQ, MakeTuple(config.fe.TypeUInt64(), TypeFlags))
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (CVTTSS2SL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTTSS2SL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64 x)
-	// cond:
-	// result: (CVTTSS2SQ x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTTSS2SQ)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (CVTSS2SD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTSS2SD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (CVTSL2SS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTSL2SS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (CVTSL2SD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTSL2SD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (CVTTSD2SL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTTSD2SL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (CVTSD2SS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTSD2SS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto64 x)
-	// cond:
-	// result: (CVTTSD2SQ x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTTSD2SQ)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to32F x)
-	// cond:
-	// result: (CVTSQ2SS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTSQ2SS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to64F x)
-	// cond:
-	// result: (CVTSQ2SD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64CVTSQ2SD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpAMD64CALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv128u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div128u xhi xlo y)
-	// cond:
-	// result: (DIVQU2 xhi xlo y)
-	for {
-		xhi := v.Args[0]
-		xlo := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpAMD64DIVQU2)
-		v.AddArg(xhi)
-		v.AddArg(xlo)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16  x y)
-	// cond:
-	// result: (Select0 (DIVW  x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (Select0 (DIVWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32  x y)
-	// cond:
-	// result: (Select0 (DIVL  x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (DIVSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64DIVSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (Select0 (DIVLU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64  x y)
-	// cond:
-	// result: (Select0 (DIVQ  x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (DIVSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64DIVSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64u x y)
-	// cond:
-	// result: (Select0 (DIVQU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8   x y)
-	// cond:
-	// result: (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u  x y)
-	// cond:
-	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16  x y)
-	// cond:
-	// result: (SETEQ (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32  x y)
-	// cond:
-	// result: (SETEQ (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (SETEQF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64  x y)
-	// cond:
-	// result: (SETEQ (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (SETEQF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8   x y)
-	// cond:
-	// result: (SETEQ (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB   x y)
-	// cond:
-	// result: (SETEQ (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond: config.PtrSize == 8
-	// result: (SETEQ (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (EqPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SETEQ (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETEQ)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16  x y)
-	// cond:
-	// result: (SETGE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (SETAE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETAE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32  x y)
-	// cond:
-	// result: (SETGE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (SETGEF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGEF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (SETAE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETAE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64  x y)
-	// cond:
-	// result: (SETGE (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (SETGEF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGEF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U x y)
-	// cond:
-	// result: (SETAE (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETAE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8   x y)
-	// cond:
-	// result: (SETGE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U  x y)
-	// cond:
-	// result: (SETAE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETAE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpAMD64LoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetG mem)
-	// cond:
-	// result: (LoweredGetG mem)
-	for {
-		mem := v.Args[0]
-		v.reset(OpAMD64LoweredGetG)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpAMD64CALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16  x y)
-	// cond:
-	// result: (SETG (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETG)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (SETA (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETA)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32  x y)
-	// cond:
-	// result: (SETG (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETG)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (SETGF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (SETA (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETA)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64  x y)
-	// cond:
-	// result: (SETG (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETG)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (SETGF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U x y)
-	// cond:
-	// result: (SETA (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETA)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8   x y)
-	// cond:
-	// result: (SETG (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETG)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U  x y)
-	// cond:
-	// result: (SETA (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETA)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16  x y)
-	// cond:
-	// result: (HMULW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (HMULWU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32  x y)
-	// cond:
-	// result: (HMULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (HMULLU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULLU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64  x y)
-	// cond:
-	// result: (HMULQ  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64u x y)
-	// cond:
-	// result: (HMULQU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULQU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8   x y)
-	// cond:
-	// result: (HMULB  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u  x y)
-	// cond:
-	// result: (HMULBU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64HMULBU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpInt64Hi(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Int64Hi x)
-	// cond:
-	// result: (SHRQconst [32] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64SHRQconst)
-		v.AuxInt = 32
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpAMD64CALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (SETB (CMPQ idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil p)
-	// cond: config.PtrSize == 8
-	// result: (SETNE (TESTQ p p))
-	for {
-		p := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
-		v0.AddArg(p)
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (IsNonNil p)
-	// cond: config.PtrSize == 4
-	// result: (SETNE (TESTL p p))
-	for {
-		p := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64TESTL, TypeFlags)
-		v0.AddArg(p)
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (SETBE (CMPQ idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16  x y)
-	// cond:
-	// result: (SETLE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETLE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (SETBE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32  x y)
-	// cond:
-	// result: (SETLE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETLE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (SETGEF (UCOMISS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGEF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (SETBE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64  x y)
-	// cond:
-	// result: (SETLE (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETLE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (SETGEF (UCOMISD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGEF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U x y)
-	// cond:
-	// result: (SETBE (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8   x y)
-	// cond:
-	// result: (SETLE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETLE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U  x y)
-	// cond:
-	// result: (SETBE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETBE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16  x y)
-	// cond:
-	// result: (SETL (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETL)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (SETB (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32  x y)
-	// cond:
-	// result: (SETL (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETL)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (SETGF (UCOMISS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (SETB (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64  x y)
-	// cond:
-	// result: (SETL (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETL)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (SETGF (UCOMISD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETGF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U x y)
-	// cond:
-	// result: (SETB (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8   x y)
-	// cond:
-	// result: (SETL (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETL)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U  x y)
-	// cond:
-	// result: (SETB (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETB)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
-	// result: (MOVQload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64MOVQload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
-	// result: (MOVLload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is16BitInt(t)
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (t.IsBoolean() || is8BitInt(t))
-	// result: (MOVBload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean() || is8BitInt(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVBload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (MOVSSload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (MOVSDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot16 <t> x [c])
-	// cond:
-	// result: (ROLWconst <t> [c&15] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ROLWconst)
-		v.Type = t
-		v.AuxInt = c & 15
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot32 <t> x [c])
-	// cond:
-	// result: (ROLLconst <t> [c&31] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ROLLconst)
-		v.Type = t
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot64 <t> x [c])
-	// cond:
-	// result: (ROLQconst <t> [c&63] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ROLQconst)
-		v.Type = t
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot8  <t> x [c])
-	// cond:
-	// result: (ROLBconst <t> [c&7] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpAMD64ROLBconst)
-		v.Type = t
-		v.AuxInt = c & 7
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8  <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8  <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16 <t> x y)
-	// cond:
-	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32 <t> x y)
-	// cond:
-	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64 <t> x y)
-	// cond:
-	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8  <t> x y)
-	// cond:
-	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8  <t> x y)
-	// cond:
-	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16  x y)
-	// cond:
-	// result: (Select1 (DIVW  x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (Select1 (DIVWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32  x y)
-	// cond:
-	// result: (Select1 (DIVL  x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVL, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (Select1 (DIVLU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVLU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64  x y)
-	// cond:
-	// result: (Select1 (DIVQ  x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVQ, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64u x y)
-	// cond:
-	// result: (Select1 (DIVQU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVQU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8   x y)
-	// cond:
-	// result: (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVW, MakeTuple(config.fe.TypeInt16(), config.fe.TypeInt16()))
-		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u  x y)
-	// cond:
-	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpAMD64DIVWU, MakeTuple(config.fe.TypeUInt16(), config.fe.TypeUInt16()))
-		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVWstore dst (MOVWload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVLstore dst (MOVLload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVQstore dst (MOVQload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 16
-	// result: (MOVOstore dst (MOVOload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 16) {
-			break
-		}
-		v.reset(OpAMD64MOVOstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBload [2] src mem) 		(MOVWstore dst (MOVWload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstore [4] dst (MOVBload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(OpAMD64MOVBstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVWstore [4] dst (MOVWload [4] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(OpAMD64MOVWstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVLstore [3] dst (MOVLload [3] src mem) 		(MOVLstore dst (MOVLload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(OpAMD64MOVLstore)
-		v.AuxInt = 3
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v0.AuxInt = 3
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16
-	// result: (MOVQstore [SizeAndAlign(s).Size()-8] dst (MOVQload [SizeAndAlign(s).Size()-8] src mem) 		(MOVQstore dst (MOVQload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 8 && SizeAndAlign(s).Size() < 16) {
-			break
-		}
-		v.reset(OpAMD64MOVQstore)
-		v.AuxInt = SizeAndAlign(s).Size() - 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-		v0.AuxInt = SizeAndAlign(s).Size() - 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8
-	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] 		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) 		(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) 		(MOVQstore dst (MOVQload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 <= 8) {
-			break
-		}
-		v.reset(OpMove)
-		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
-		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() % 16
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
-		v1.AuxInt = SizeAndAlign(s).Size() % 16
-		v1.AddArg(src)
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
-		v2.AddArg(dst)
-		v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64())
-		v3.AddArg(src)
-		v3.AddArg(mem)
-		v2.AddArg(v3)
-		v2.AddArg(mem)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8
-	// result: (Move [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%16] 		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()%16]) 		(OffPtr <src.Type> src [SizeAndAlign(s).Size()%16]) 		(MOVOstore dst (MOVOload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 16 && SizeAndAlign(s).Size()%16 != 0 && SizeAndAlign(s).Size()%16 > 8) {
-			break
-		}
-		v.reset(OpMove)
-		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%16
-		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() % 16
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
-		v1.AuxInt = SizeAndAlign(s).Size() % 16
-		v1.AddArg(src)
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem)
-		v2.AddArg(dst)
-		v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128)
-		v3.AddArg(src)
-		v3.AddArg(mem)
-		v2.AddArg(v3)
-		v2.AddArg(mem)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 	&& !config.noDuffDevice
-	// result: (DUFFCOPY [14*(64-SizeAndAlign(s).Size()/16)] dst src mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() >= 32 && SizeAndAlign(s).Size() <= 16*64 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpAMD64DUFFCOPY)
-		v.AuxInt = 14 * (64 - SizeAndAlign(s).Size()/16)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: (SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0
-	// result: (REPMOVSQ dst src (MOVQconst [SizeAndAlign(s).Size()/8]) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !((SizeAndAlign(s).Size() > 16*64 || config.noDuffDevice) && SizeAndAlign(s).Size()%8 == 0) {
-			break
-		}
-		v.reset(OpAMD64REPMOVSQ)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v0.AuxInt = SizeAndAlign(s).Size() / 8
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16  x y)
-	// cond:
-	// result: (MULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32  x y)
-	// cond:
-	// result: (MULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (MULSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64  x y)
-	// cond:
-	// result: (MULQ  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (MULSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMul64uhilo(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64uhilo x y)
-	// cond:
-	// result: (MULQU2 x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULQU2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8   x y)
-	// cond:
-	// result: (MULL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64MULL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16  x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NEGL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32  x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NEGL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64PXOR)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32())
-		v0.AuxInt = f2i(math.Copysign(0, -1))
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64  x)
-	// cond:
-	// result: (NEGQ x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NEGQ)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))]))
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64PXOR)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64())
-		v0.AuxInt = f2i(math.Copysign(0, -1))
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8   x)
-	// cond:
-	// result: (NEGL x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64NEGL)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16  x y)
-	// cond:
-	// result: (SETNE (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32  x y)
-	// cond:
-	// result: (SETNE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (SETNEF (UCOMISS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNEF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64  x y)
-	// cond:
-	// result: (SETNE (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (SETNEF (UCOMISD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNEF)
-		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8   x y)
-	// cond:
-	// result: (SETNE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB   x y)
-	// cond:
-	// result: (SETNE (CMPB x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond: config.PtrSize == 8
-	// result: (SETNE (CMPQ x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (NeqPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SETNE (CMPL x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SETNE)
-		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpAMD64LoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORLconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64XORLconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 8 && is32Bit(off)
-	// result: (ADDQconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if !(config.PtrSize == 8 && is32Bit(off)) {
-			break
-		}
-		v.reset(OpAMD64ADDQconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 8
-	// result: (ADDQ (MOVQconst [off]) ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64ADDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v0.AuxInt = off
-		v.AddArg(v0)
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond: config.PtrSize == 4
-	// result: (ADDLconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64ADDLconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x y)
-	// cond:
-	// result: (ORQ x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ORQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8  x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (ORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  <t> x y)
-	// cond:
-	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8  <t> x y)
-	// cond:
-	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v3.AuxInt = 16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  <t> x y)
-	// cond:
-	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8  <t> x y)
-	// cond:
-	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARL)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 <t> x y)
-	// cond:
-	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 <t> x y)
-	// cond:
-	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 <t> x y)
-	// cond:
-	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8  <t> x y)
-	// cond:
-	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDQ)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16 <t> x y)
-	// cond:
-	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARQ)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32 <t> x y)
-	// cond:
-	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARQ)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64 <t> x y)
-	// cond:
-	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARQ)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8  <t> x y)
-	// cond:
-	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARQ)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  <t> x y)
-	// cond:
-	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Line, OpAMD64SHRB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v2.AuxInt = 8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8  <t> x y)
-	// cond:
-	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SARB)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type)
-		v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
-		v3.AuxInt = 8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select0 <t> (AddTupleFirst32 tuple val))
-	// cond:
-	// result: (ADDL val (Select0 <t> tuple))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64AddTupleFirst32 {
-			break
-		}
-		tuple := v_0.Args[0]
-		val := v_0.Args[1]
-		v.reset(OpAMD64ADDL)
-		v.AddArg(val)
-		v0 := b.NewValue0(v.Line, OpSelect0, t)
-		v0.AddArg(tuple)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Select0 <t> (AddTupleFirst64 tuple val))
-	// cond:
-	// result: (ADDQ val (Select0 <t> tuple))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64AddTupleFirst64 {
-			break
-		}
-		tuple := v_0.Args[0]
-		val := v_0.Args[1]
-		v.reset(OpAMD64ADDQ)
-		v.AddArg(val)
-		v0 := b.NewValue0(v.Line, OpSelect0, t)
-		v0.AddArg(tuple)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select1     (AddTupleFirst32 tuple _  ))
-	// cond:
-	// result: (Select1 tuple)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64AddTupleFirst32 {
-			break
-		}
-		tuple := v_0.Args[0]
-		v.reset(OpSelect1)
-		v.AddArg(tuple)
-		return true
-	}
-	// match: (Select1     (AddTupleFirst64 tuple _  ))
-	// cond:
-	// result: (Select1 tuple)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAMD64AddTupleFirst64 {
-			break
-		}
-		tuple := v_0.Args[0]
-		v.reset(OpSelect1)
-		v.AddArg(tuple)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVWQSX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVWQSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 x)
-	// cond:
-	// result: (MOVWQSX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVWQSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 x)
-	// cond:
-	// result: (MOVLQSX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVLQSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16  x)
-	// cond:
-	// result: (MOVBQSX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32  x)
-	// cond:
-	// result: (MOVBQSX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64  x)
-	// cond:
-	// result: (MOVBQSX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQSX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (XORQconst [-1] (SARQconst <t> (SUBQconst <t> x [1]) [63]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpAMD64XORQconst)
-		v.AuxInt = -1
-		v0 := b.NewValue0(v.Line, OpAMD64SARQconst, t)
-		v0.AuxInt = 63
-		v1 := b.NewValue0(v.Line, OpAMD64SUBQconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (SQRTSD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64SQRTSD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpAMD64CALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueAMD64_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (MOVSDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (MOVSSstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond:
-	// result: (MOVQstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVQstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond:
-	// result: (MOVLstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVLstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpAMD64MOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16  x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32  x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (SUBSS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SUBSS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64  x y)
-	// cond:
-	// result: (SUBQ  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SUBQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (SUBSD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SUBSD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8   x y)
-	// cond:
-	// result: (SUBL  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond: config.PtrSize == 8
-	// result: (SUBQ x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAMD64SUBQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SubPtr x y)
-	// cond: config.PtrSize == 4
-	// result: (SUBL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAMD64SUBL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64XORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64XORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x y)
-	// cond:
-	// result: (XORQ x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64XORQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8  x y)
-	// cond:
-	// result: (XORL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAMD64XORL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueAMD64_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVWstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVLstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVQstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr 		(MOVWstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = makeValAndOff(0, 2)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(OpAMD64MOVBstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(OpAMD64MOVWstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr 		(MOVLstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(OpAMD64MOVLstoreconst)
-		v.AuxInt = makeValAndOff(0, 3)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
-	// result: (Zero [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8] (OffPtr <destptr.Type> destptr [SizeAndAlign(s).Size()%8]) 		(MOVQstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) {
-			break
-		}
-		v.reset(OpZero)
-		v.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
-		v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() % 8
-		v0.AddArg(destptr)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(destptr)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 16
-	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr 		(MOVQstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 16) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = makeValAndOff(0, 8)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 24
-	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr 		(MOVQstoreconst [makeValAndOff(0,8)] destptr 			(MOVQstoreconst [0] destptr mem)))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 24) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = makeValAndOff(0, 16)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v0.AuxInt = makeValAndOff(0, 8)
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(destptr)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 32
-	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr 		(MOVQstoreconst [makeValAndOff(0,16)] destptr 			(MOVQstoreconst [makeValAndOff(0,8)] destptr 				(MOVQstoreconst [0] destptr mem))))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 32) {
-			break
-		}
-		v.reset(OpAMD64MOVQstoreconst)
-		v.AuxInt = makeValAndOff(0, 24)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v0.AuxInt = makeValAndOff(0, 16)
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v1.AuxInt = makeValAndOff(0, 8)
-		v1.AddArg(destptr)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem)
-		v2.AuxInt = 0
-		v2.AddArg(destptr)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 	&& !config.noDuffDevice
-	// result: (Zero [SizeAndAlign(s).Size()-8] (OffPtr <destptr.Type> [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size()%16 != 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpZero)
-		v.AuxInt = SizeAndAlign(s).Size() - 8
-		v0 := b.NewValue0(v.Line, OpOffPtr, destptr.Type)
-		v0.AuxInt = 8
-		v0.AddArg(destptr)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem)
-		v1.AddArg(destptr)
-		v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice
-	// result: (DUFFZERO [SizeAndAlign(s).Size()] destptr (MOVOconst [0]) mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() <= 1024 && SizeAndAlign(s).Size()%16 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpAMD64DUFFZERO)
-		v.AuxInt = SizeAndAlign(s).Size()
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128)
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: (SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) 	&& SizeAndAlign(s).Size()%8 == 0
-	// result: (REPSTOSQ destptr (MOVQconst [SizeAndAlign(s).Size()/8]) (MOVQconst [0]) mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !((SizeAndAlign(s).Size() > 1024 || (config.noDuffDevice && SizeAndAlign(s).Size() > 32)) && SizeAndAlign(s).Size()%8 == 0) {
-			break
-		}
-		v.reset(OpAMD64REPSTOSQ)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v0.AuxInt = SizeAndAlign(s).Size() / 8
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVWQZX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVWQZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 x)
-	// cond:
-	// result: (MOVWQZX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVWQZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 x)
-	// cond:
-	// result: (MOVLQZX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVLQZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16  x)
-	// cond:
-	// result: (MOVBQZX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32  x)
-	// cond:
-	// result: (MOVBQZX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64  x)
-	// cond:
-	// result: (MOVBQZX x)
-	for {
-		x := v.Args[0]
-		v.reset(OpAMD64MOVBQZX)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteBlockAMD64(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockAMD64EQ:
-		// match: (EQ (InvertFlags cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockAMD64GE:
-		// match: (GE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockAMD64GT:
-		// match: (GT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockIf:
-		// match: (If (SETL  cmp) yes no)
-		// cond:
-		// result: (LT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETL {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETLE cmp) yes no)
-		// cond:
-		// result: (LE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETLE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETG  cmp) yes no)
-		// cond:
-		// result: (GT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETG {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETGE cmp) yes no)
-		// cond:
-		// result: (GE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETGE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETEQ cmp) yes no)
-		// cond:
-		// result: (EQ  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETEQ {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETNE cmp) yes no)
-		// cond:
-		// result: (NE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETNE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETB  cmp) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETB {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETBE cmp) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETBE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETA  cmp) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETA {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETAE cmp) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETAE {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETGF  cmp) yes no)
-		// cond:
-		// result: (UGT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETGF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETGEF cmp) yes no)
-		// cond:
-		// result: (UGE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETGEF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETEQF cmp) yes no)
-		// cond:
-		// result: (EQF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETEQF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64EQF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (SETNEF cmp) yes no)
-		// cond:
-		// result: (NEF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64SETNEF {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64NEF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE (TESTB cond cond) yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64NE
-			v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags)
-			v0.AddArg(cond)
-			v0.AddArg(cond)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockAMD64LE:
-		// match: (LE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockAMD64LT:
-		// match: (LT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockAMD64NE:
-		// match: (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no)
-		// cond:
-		// result: (LT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETL {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETL {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
-		// cond:
-		// result: (LE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETLE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETLE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no)
-		// cond:
-		// result: (GT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETG {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETG {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
-		// cond:
-		// result: (GE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETGE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETGE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
-		// cond:
-		// result: (EQ  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETEQ {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETEQ {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
-		// cond:
-		// result: (NE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETNE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETNE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETB {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETB {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETBE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETBE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETA {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETA {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETAE {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETAE {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no)
-		// cond:
-		// result: (UGT  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETGF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETGF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
-		// cond:
-		// result: (UGE  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETGEF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETGEF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
-		// cond:
-		// result: (EQF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETEQF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETEQF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64EQF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
-		// cond:
-		// result: (NEF  cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64TESTB {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpAMD64SETNEF {
-				break
-			}
-			cmp := v_0.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpAMD64SETNEF {
-				break
-			}
-			if cmp != v_1.Args[0] {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64NEF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockAMD64UGE:
-		// match: (UGE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockAMD64UGT:
-		// match: (UGT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockAMD64ULE:
-		// match: (ULE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockAMD64ULT:
-		// match: (ULT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockAMD64UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpAMD64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteARM.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteARM.go
deleted file mode 100644
index 3dae890..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteARM.go
+++ /dev/null
@@ -1,18626 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteARM.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteARM.go:1
-// autogenerated from gen/ARM.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueARM(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpARMADC:
-		return rewriteValueARM_OpARMADC(v, config)
-	case OpARMADCconst:
-		return rewriteValueARM_OpARMADCconst(v, config)
-	case OpARMADCshiftLL:
-		return rewriteValueARM_OpARMADCshiftLL(v, config)
-	case OpARMADCshiftLLreg:
-		return rewriteValueARM_OpARMADCshiftLLreg(v, config)
-	case OpARMADCshiftRA:
-		return rewriteValueARM_OpARMADCshiftRA(v, config)
-	case OpARMADCshiftRAreg:
-		return rewriteValueARM_OpARMADCshiftRAreg(v, config)
-	case OpARMADCshiftRL:
-		return rewriteValueARM_OpARMADCshiftRL(v, config)
-	case OpARMADCshiftRLreg:
-		return rewriteValueARM_OpARMADCshiftRLreg(v, config)
-	case OpARMADD:
-		return rewriteValueARM_OpARMADD(v, config)
-	case OpARMADDS:
-		return rewriteValueARM_OpARMADDS(v, config)
-	case OpARMADDSshiftLL:
-		return rewriteValueARM_OpARMADDSshiftLL(v, config)
-	case OpARMADDSshiftLLreg:
-		return rewriteValueARM_OpARMADDSshiftLLreg(v, config)
-	case OpARMADDSshiftRA:
-		return rewriteValueARM_OpARMADDSshiftRA(v, config)
-	case OpARMADDSshiftRAreg:
-		return rewriteValueARM_OpARMADDSshiftRAreg(v, config)
-	case OpARMADDSshiftRL:
-		return rewriteValueARM_OpARMADDSshiftRL(v, config)
-	case OpARMADDSshiftRLreg:
-		return rewriteValueARM_OpARMADDSshiftRLreg(v, config)
-	case OpARMADDconst:
-		return rewriteValueARM_OpARMADDconst(v, config)
-	case OpARMADDshiftLL:
-		return rewriteValueARM_OpARMADDshiftLL(v, config)
-	case OpARMADDshiftLLreg:
-		return rewriteValueARM_OpARMADDshiftLLreg(v, config)
-	case OpARMADDshiftRA:
-		return rewriteValueARM_OpARMADDshiftRA(v, config)
-	case OpARMADDshiftRAreg:
-		return rewriteValueARM_OpARMADDshiftRAreg(v, config)
-	case OpARMADDshiftRL:
-		return rewriteValueARM_OpARMADDshiftRL(v, config)
-	case OpARMADDshiftRLreg:
-		return rewriteValueARM_OpARMADDshiftRLreg(v, config)
-	case OpARMAND:
-		return rewriteValueARM_OpARMAND(v, config)
-	case OpARMANDconst:
-		return rewriteValueARM_OpARMANDconst(v, config)
-	case OpARMANDshiftLL:
-		return rewriteValueARM_OpARMANDshiftLL(v, config)
-	case OpARMANDshiftLLreg:
-		return rewriteValueARM_OpARMANDshiftLLreg(v, config)
-	case OpARMANDshiftRA:
-		return rewriteValueARM_OpARMANDshiftRA(v, config)
-	case OpARMANDshiftRAreg:
-		return rewriteValueARM_OpARMANDshiftRAreg(v, config)
-	case OpARMANDshiftRL:
-		return rewriteValueARM_OpARMANDshiftRL(v, config)
-	case OpARMANDshiftRLreg:
-		return rewriteValueARM_OpARMANDshiftRLreg(v, config)
-	case OpARMBIC:
-		return rewriteValueARM_OpARMBIC(v, config)
-	case OpARMBICconst:
-		return rewriteValueARM_OpARMBICconst(v, config)
-	case OpARMBICshiftLL:
-		return rewriteValueARM_OpARMBICshiftLL(v, config)
-	case OpARMBICshiftLLreg:
-		return rewriteValueARM_OpARMBICshiftLLreg(v, config)
-	case OpARMBICshiftRA:
-		return rewriteValueARM_OpARMBICshiftRA(v, config)
-	case OpARMBICshiftRAreg:
-		return rewriteValueARM_OpARMBICshiftRAreg(v, config)
-	case OpARMBICshiftRL:
-		return rewriteValueARM_OpARMBICshiftRL(v, config)
-	case OpARMBICshiftRLreg:
-		return rewriteValueARM_OpARMBICshiftRLreg(v, config)
-	case OpARMCMOVWHSconst:
-		return rewriteValueARM_OpARMCMOVWHSconst(v, config)
-	case OpARMCMOVWLSconst:
-		return rewriteValueARM_OpARMCMOVWLSconst(v, config)
-	case OpARMCMP:
-		return rewriteValueARM_OpARMCMP(v, config)
-	case OpARMCMPD:
-		return rewriteValueARM_OpARMCMPD(v, config)
-	case OpARMCMPF:
-		return rewriteValueARM_OpARMCMPF(v, config)
-	case OpARMCMPconst:
-		return rewriteValueARM_OpARMCMPconst(v, config)
-	case OpARMCMPshiftLL:
-		return rewriteValueARM_OpARMCMPshiftLL(v, config)
-	case OpARMCMPshiftLLreg:
-		return rewriteValueARM_OpARMCMPshiftLLreg(v, config)
-	case OpARMCMPshiftRA:
-		return rewriteValueARM_OpARMCMPshiftRA(v, config)
-	case OpARMCMPshiftRAreg:
-		return rewriteValueARM_OpARMCMPshiftRAreg(v, config)
-	case OpARMCMPshiftRL:
-		return rewriteValueARM_OpARMCMPshiftRL(v, config)
-	case OpARMCMPshiftRLreg:
-		return rewriteValueARM_OpARMCMPshiftRLreg(v, config)
-	case OpARMEqual:
-		return rewriteValueARM_OpARMEqual(v, config)
-	case OpARMGreaterEqual:
-		return rewriteValueARM_OpARMGreaterEqual(v, config)
-	case OpARMGreaterEqualU:
-		return rewriteValueARM_OpARMGreaterEqualU(v, config)
-	case OpARMGreaterThan:
-		return rewriteValueARM_OpARMGreaterThan(v, config)
-	case OpARMGreaterThanU:
-		return rewriteValueARM_OpARMGreaterThanU(v, config)
-	case OpARMLessEqual:
-		return rewriteValueARM_OpARMLessEqual(v, config)
-	case OpARMLessEqualU:
-		return rewriteValueARM_OpARMLessEqualU(v, config)
-	case OpARMLessThan:
-		return rewriteValueARM_OpARMLessThan(v, config)
-	case OpARMLessThanU:
-		return rewriteValueARM_OpARMLessThanU(v, config)
-	case OpARMMOVBUload:
-		return rewriteValueARM_OpARMMOVBUload(v, config)
-	case OpARMMOVBUreg:
-		return rewriteValueARM_OpARMMOVBUreg(v, config)
-	case OpARMMOVBload:
-		return rewriteValueARM_OpARMMOVBload(v, config)
-	case OpARMMOVBreg:
-		return rewriteValueARM_OpARMMOVBreg(v, config)
-	case OpARMMOVBstore:
-		return rewriteValueARM_OpARMMOVBstore(v, config)
-	case OpARMMOVDload:
-		return rewriteValueARM_OpARMMOVDload(v, config)
-	case OpARMMOVDstore:
-		return rewriteValueARM_OpARMMOVDstore(v, config)
-	case OpARMMOVFload:
-		return rewriteValueARM_OpARMMOVFload(v, config)
-	case OpARMMOVFstore:
-		return rewriteValueARM_OpARMMOVFstore(v, config)
-	case OpARMMOVHUload:
-		return rewriteValueARM_OpARMMOVHUload(v, config)
-	case OpARMMOVHUreg:
-		return rewriteValueARM_OpARMMOVHUreg(v, config)
-	case OpARMMOVHload:
-		return rewriteValueARM_OpARMMOVHload(v, config)
-	case OpARMMOVHreg:
-		return rewriteValueARM_OpARMMOVHreg(v, config)
-	case OpARMMOVHstore:
-		return rewriteValueARM_OpARMMOVHstore(v, config)
-	case OpARMMOVWload:
-		return rewriteValueARM_OpARMMOVWload(v, config)
-	case OpARMMOVWloadidx:
-		return rewriteValueARM_OpARMMOVWloadidx(v, config)
-	case OpARMMOVWloadshiftLL:
-		return rewriteValueARM_OpARMMOVWloadshiftLL(v, config)
-	case OpARMMOVWloadshiftRA:
-		return rewriteValueARM_OpARMMOVWloadshiftRA(v, config)
-	case OpARMMOVWloadshiftRL:
-		return rewriteValueARM_OpARMMOVWloadshiftRL(v, config)
-	case OpARMMOVWreg:
-		return rewriteValueARM_OpARMMOVWreg(v, config)
-	case OpARMMOVWstore:
-		return rewriteValueARM_OpARMMOVWstore(v, config)
-	case OpARMMOVWstoreidx:
-		return rewriteValueARM_OpARMMOVWstoreidx(v, config)
-	case OpARMMOVWstoreshiftLL:
-		return rewriteValueARM_OpARMMOVWstoreshiftLL(v, config)
-	case OpARMMOVWstoreshiftRA:
-		return rewriteValueARM_OpARMMOVWstoreshiftRA(v, config)
-	case OpARMMOVWstoreshiftRL:
-		return rewriteValueARM_OpARMMOVWstoreshiftRL(v, config)
-	case OpARMMUL:
-		return rewriteValueARM_OpARMMUL(v, config)
-	case OpARMMULA:
-		return rewriteValueARM_OpARMMULA(v, config)
-	case OpARMMVN:
-		return rewriteValueARM_OpARMMVN(v, config)
-	case OpARMMVNshiftLL:
-		return rewriteValueARM_OpARMMVNshiftLL(v, config)
-	case OpARMMVNshiftLLreg:
-		return rewriteValueARM_OpARMMVNshiftLLreg(v, config)
-	case OpARMMVNshiftRA:
-		return rewriteValueARM_OpARMMVNshiftRA(v, config)
-	case OpARMMVNshiftRAreg:
-		return rewriteValueARM_OpARMMVNshiftRAreg(v, config)
-	case OpARMMVNshiftRL:
-		return rewriteValueARM_OpARMMVNshiftRL(v, config)
-	case OpARMMVNshiftRLreg:
-		return rewriteValueARM_OpARMMVNshiftRLreg(v, config)
-	case OpARMNotEqual:
-		return rewriteValueARM_OpARMNotEqual(v, config)
-	case OpARMOR:
-		return rewriteValueARM_OpARMOR(v, config)
-	case OpARMORconst:
-		return rewriteValueARM_OpARMORconst(v, config)
-	case OpARMORshiftLL:
-		return rewriteValueARM_OpARMORshiftLL(v, config)
-	case OpARMORshiftLLreg:
-		return rewriteValueARM_OpARMORshiftLLreg(v, config)
-	case OpARMORshiftRA:
-		return rewriteValueARM_OpARMORshiftRA(v, config)
-	case OpARMORshiftRAreg:
-		return rewriteValueARM_OpARMORshiftRAreg(v, config)
-	case OpARMORshiftRL:
-		return rewriteValueARM_OpARMORshiftRL(v, config)
-	case OpARMORshiftRLreg:
-		return rewriteValueARM_OpARMORshiftRLreg(v, config)
-	case OpARMRSB:
-		return rewriteValueARM_OpARMRSB(v, config)
-	case OpARMRSBSshiftLL:
-		return rewriteValueARM_OpARMRSBSshiftLL(v, config)
-	case OpARMRSBSshiftLLreg:
-		return rewriteValueARM_OpARMRSBSshiftLLreg(v, config)
-	case OpARMRSBSshiftRA:
-		return rewriteValueARM_OpARMRSBSshiftRA(v, config)
-	case OpARMRSBSshiftRAreg:
-		return rewriteValueARM_OpARMRSBSshiftRAreg(v, config)
-	case OpARMRSBSshiftRL:
-		return rewriteValueARM_OpARMRSBSshiftRL(v, config)
-	case OpARMRSBSshiftRLreg:
-		return rewriteValueARM_OpARMRSBSshiftRLreg(v, config)
-	case OpARMRSBconst:
-		return rewriteValueARM_OpARMRSBconst(v, config)
-	case OpARMRSBshiftLL:
-		return rewriteValueARM_OpARMRSBshiftLL(v, config)
-	case OpARMRSBshiftLLreg:
-		return rewriteValueARM_OpARMRSBshiftLLreg(v, config)
-	case OpARMRSBshiftRA:
-		return rewriteValueARM_OpARMRSBshiftRA(v, config)
-	case OpARMRSBshiftRAreg:
-		return rewriteValueARM_OpARMRSBshiftRAreg(v, config)
-	case OpARMRSBshiftRL:
-		return rewriteValueARM_OpARMRSBshiftRL(v, config)
-	case OpARMRSBshiftRLreg:
-		return rewriteValueARM_OpARMRSBshiftRLreg(v, config)
-	case OpARMRSCconst:
-		return rewriteValueARM_OpARMRSCconst(v, config)
-	case OpARMRSCshiftLL:
-		return rewriteValueARM_OpARMRSCshiftLL(v, config)
-	case OpARMRSCshiftLLreg:
-		return rewriteValueARM_OpARMRSCshiftLLreg(v, config)
-	case OpARMRSCshiftRA:
-		return rewriteValueARM_OpARMRSCshiftRA(v, config)
-	case OpARMRSCshiftRAreg:
-		return rewriteValueARM_OpARMRSCshiftRAreg(v, config)
-	case OpARMRSCshiftRL:
-		return rewriteValueARM_OpARMRSCshiftRL(v, config)
-	case OpARMRSCshiftRLreg:
-		return rewriteValueARM_OpARMRSCshiftRLreg(v, config)
-	case OpARMSBC:
-		return rewriteValueARM_OpARMSBC(v, config)
-	case OpARMSBCconst:
-		return rewriteValueARM_OpARMSBCconst(v, config)
-	case OpARMSBCshiftLL:
-		return rewriteValueARM_OpARMSBCshiftLL(v, config)
-	case OpARMSBCshiftLLreg:
-		return rewriteValueARM_OpARMSBCshiftLLreg(v, config)
-	case OpARMSBCshiftRA:
-		return rewriteValueARM_OpARMSBCshiftRA(v, config)
-	case OpARMSBCshiftRAreg:
-		return rewriteValueARM_OpARMSBCshiftRAreg(v, config)
-	case OpARMSBCshiftRL:
-		return rewriteValueARM_OpARMSBCshiftRL(v, config)
-	case OpARMSBCshiftRLreg:
-		return rewriteValueARM_OpARMSBCshiftRLreg(v, config)
-	case OpARMSLL:
-		return rewriteValueARM_OpARMSLL(v, config)
-	case OpARMSLLconst:
-		return rewriteValueARM_OpARMSLLconst(v, config)
-	case OpARMSRA:
-		return rewriteValueARM_OpARMSRA(v, config)
-	case OpARMSRAcond:
-		return rewriteValueARM_OpARMSRAcond(v, config)
-	case OpARMSRAconst:
-		return rewriteValueARM_OpARMSRAconst(v, config)
-	case OpARMSRL:
-		return rewriteValueARM_OpARMSRL(v, config)
-	case OpARMSRLconst:
-		return rewriteValueARM_OpARMSRLconst(v, config)
-	case OpARMSUB:
-		return rewriteValueARM_OpARMSUB(v, config)
-	case OpARMSUBS:
-		return rewriteValueARM_OpARMSUBS(v, config)
-	case OpARMSUBSshiftLL:
-		return rewriteValueARM_OpARMSUBSshiftLL(v, config)
-	case OpARMSUBSshiftLLreg:
-		return rewriteValueARM_OpARMSUBSshiftLLreg(v, config)
-	case OpARMSUBSshiftRA:
-		return rewriteValueARM_OpARMSUBSshiftRA(v, config)
-	case OpARMSUBSshiftRAreg:
-		return rewriteValueARM_OpARMSUBSshiftRAreg(v, config)
-	case OpARMSUBSshiftRL:
-		return rewriteValueARM_OpARMSUBSshiftRL(v, config)
-	case OpARMSUBSshiftRLreg:
-		return rewriteValueARM_OpARMSUBSshiftRLreg(v, config)
-	case OpARMSUBconst:
-		return rewriteValueARM_OpARMSUBconst(v, config)
-	case OpARMSUBshiftLL:
-		return rewriteValueARM_OpARMSUBshiftLL(v, config)
-	case OpARMSUBshiftLLreg:
-		return rewriteValueARM_OpARMSUBshiftLLreg(v, config)
-	case OpARMSUBshiftRA:
-		return rewriteValueARM_OpARMSUBshiftRA(v, config)
-	case OpARMSUBshiftRAreg:
-		return rewriteValueARM_OpARMSUBshiftRAreg(v, config)
-	case OpARMSUBshiftRL:
-		return rewriteValueARM_OpARMSUBshiftRL(v, config)
-	case OpARMSUBshiftRLreg:
-		return rewriteValueARM_OpARMSUBshiftRLreg(v, config)
-	case OpARMXOR:
-		return rewriteValueARM_OpARMXOR(v, config)
-	case OpARMXORconst:
-		return rewriteValueARM_OpARMXORconst(v, config)
-	case OpARMXORshiftLL:
-		return rewriteValueARM_OpARMXORshiftLL(v, config)
-	case OpARMXORshiftLLreg:
-		return rewriteValueARM_OpARMXORshiftLLreg(v, config)
-	case OpARMXORshiftRA:
-		return rewriteValueARM_OpARMXORshiftRA(v, config)
-	case OpARMXORshiftRAreg:
-		return rewriteValueARM_OpARMXORshiftRAreg(v, config)
-	case OpARMXORshiftRL:
-		return rewriteValueARM_OpARMXORshiftRL(v, config)
-	case OpARMXORshiftRLreg:
-		return rewriteValueARM_OpARMXORshiftRLreg(v, config)
-	case OpARMXORshiftRR:
-		return rewriteValueARM_OpARMXORshiftRR(v, config)
-	case OpAdd16:
-		return rewriteValueARM_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValueARM_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValueARM_OpAdd32F(v, config)
-	case OpAdd32carry:
-		return rewriteValueARM_OpAdd32carry(v, config)
-	case OpAdd32withcarry:
-		return rewriteValueARM_OpAdd32withcarry(v, config)
-	case OpAdd64F:
-		return rewriteValueARM_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValueARM_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValueARM_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValueARM_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValueARM_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValueARM_OpAnd32(v, config)
-	case OpAnd8:
-		return rewriteValueARM_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValueARM_OpAndB(v, config)
-	case OpBswap32:
-		return rewriteValueARM_OpBswap32(v, config)
-	case OpClosureCall:
-		return rewriteValueARM_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValueARM_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValueARM_OpCom32(v, config)
-	case OpCom8:
-		return rewriteValueARM_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValueARM_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValueARM_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValueARM_OpConst32F(v, config)
-	case OpConst64F:
-		return rewriteValueARM_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValueARM_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValueARM_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValueARM_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValueARM_OpConvert(v, config)
-	case OpCtz32:
-		return rewriteValueARM_OpCtz32(v, config)
-	case OpCvt32Fto32:
-		return rewriteValueARM_OpCvt32Fto32(v, config)
-	case OpCvt32Fto32U:
-		return rewriteValueARM_OpCvt32Fto32U(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValueARM_OpCvt32Fto64F(v, config)
-	case OpCvt32Uto32F:
-		return rewriteValueARM_OpCvt32Uto32F(v, config)
-	case OpCvt32Uto64F:
-		return rewriteValueARM_OpCvt32Uto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValueARM_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValueARM_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValueARM_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValueARM_OpCvt64Fto32F(v, config)
-	case OpCvt64Fto32U:
-		return rewriteValueARM_OpCvt64Fto32U(v, config)
-	case OpDeferCall:
-		return rewriteValueARM_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValueARM_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValueARM_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValueARM_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValueARM_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValueARM_OpDiv32u(v, config)
-	case OpDiv64F:
-		return rewriteValueARM_OpDiv64F(v, config)
-	case OpDiv8:
-		return rewriteValueARM_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValueARM_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValueARM_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValueARM_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValueARM_OpEq32F(v, config)
-	case OpEq64F:
-		return rewriteValueARM_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValueARM_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValueARM_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValueARM_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValueARM_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValueARM_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValueARM_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValueARM_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValueARM_OpGeq32U(v, config)
-	case OpGeq64F:
-		return rewriteValueARM_OpGeq64F(v, config)
-	case OpGeq8:
-		return rewriteValueARM_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValueARM_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValueARM_OpGetClosurePtr(v, config)
-	case OpGoCall:
-		return rewriteValueARM_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValueARM_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValueARM_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValueARM_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValueARM_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValueARM_OpGreater32U(v, config)
-	case OpGreater64F:
-		return rewriteValueARM_OpGreater64F(v, config)
-	case OpGreater8:
-		return rewriteValueARM_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValueARM_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValueARM_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValueARM_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValueARM_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValueARM_OpHmul32u(v, config)
-	case OpHmul8:
-		return rewriteValueARM_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValueARM_OpHmul8u(v, config)
-	case OpInterCall:
-		return rewriteValueARM_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValueARM_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValueARM_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValueARM_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValueARM_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValueARM_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValueARM_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValueARM_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValueARM_OpLeq32U(v, config)
-	case OpLeq64F:
-		return rewriteValueARM_OpLeq64F(v, config)
-	case OpLeq8:
-		return rewriteValueARM_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValueARM_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValueARM_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValueARM_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValueARM_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValueARM_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValueARM_OpLess32U(v, config)
-	case OpLess64F:
-		return rewriteValueARM_OpLess64F(v, config)
-	case OpLess8:
-		return rewriteValueARM_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValueARM_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValueARM_OpLoad(v, config)
-	case OpLrot16:
-		return rewriteValueARM_OpLrot16(v, config)
-	case OpLrot32:
-		return rewriteValueARM_OpLrot32(v, config)
-	case OpLrot8:
-		return rewriteValueARM_OpLrot8(v, config)
-	case OpLsh16x16:
-		return rewriteValueARM_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValueARM_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValueARM_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValueARM_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValueARM_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValueARM_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValueARM_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValueARM_OpLsh32x8(v, config)
-	case OpLsh8x16:
-		return rewriteValueARM_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValueARM_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValueARM_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValueARM_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValueARM_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValueARM_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValueARM_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValueARM_OpMod32u(v, config)
-	case OpMod8:
-		return rewriteValueARM_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValueARM_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValueARM_OpMove(v, config)
-	case OpMul16:
-		return rewriteValueARM_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValueARM_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValueARM_OpMul32F(v, config)
-	case OpMul32uhilo:
-		return rewriteValueARM_OpMul32uhilo(v, config)
-	case OpMul64F:
-		return rewriteValueARM_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValueARM_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValueARM_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValueARM_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValueARM_OpNeg32F(v, config)
-	case OpNeg64F:
-		return rewriteValueARM_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValueARM_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValueARM_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValueARM_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValueARM_OpNeq32F(v, config)
-	case OpNeq64F:
-		return rewriteValueARM_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValueARM_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValueARM_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValueARM_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValueARM_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValueARM_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValueARM_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValueARM_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValueARM_OpOr32(v, config)
-	case OpOr8:
-		return rewriteValueARM_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValueARM_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValueARM_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValueARM_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValueARM_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValueARM_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValueARM_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValueARM_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValueARM_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValueARM_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValueARM_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValueARM_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValueARM_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValueARM_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValueARM_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValueARM_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValueARM_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValueARM_OpRsh32x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValueARM_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValueARM_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValueARM_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValueARM_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValueARM_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValueARM_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValueARM_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValueARM_OpRsh8x8(v, config)
-	case OpSelect0:
-		return rewriteValueARM_OpSelect0(v, config)
-	case OpSelect1:
-		return rewriteValueARM_OpSelect1(v, config)
-	case OpSignExt16to32:
-		return rewriteValueARM_OpSignExt16to32(v, config)
-	case OpSignExt8to16:
-		return rewriteValueARM_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValueARM_OpSignExt8to32(v, config)
-	case OpSignmask:
-		return rewriteValueARM_OpSignmask(v, config)
-	case OpSlicemask:
-		return rewriteValueARM_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValueARM_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValueARM_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValueARM_OpStore(v, config)
-	case OpSub16:
-		return rewriteValueARM_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValueARM_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValueARM_OpSub32F(v, config)
-	case OpSub32carry:
-		return rewriteValueARM_OpSub32carry(v, config)
-	case OpSub32withcarry:
-		return rewriteValueARM_OpSub32withcarry(v, config)
-	case OpSub64F:
-		return rewriteValueARM_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValueARM_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValueARM_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValueARM_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValueARM_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValueARM_OpTrunc32to8(v, config)
-	case OpXor16:
-		return rewriteValueARM_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValueARM_OpXor32(v, config)
-	case OpXor8:
-		return rewriteValueARM_OpXor8(v, config)
-	case OpZero:
-		return rewriteValueARM_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValueARM_OpZeroExt16to32(v, config)
-	case OpZeroExt8to16:
-		return rewriteValueARM_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValueARM_OpZeroExt8to32(v, config)
-	case OpZeromask:
-		return rewriteValueARM_OpZeromask(v, config)
-	}
-	return false
-}
-func rewriteValueARM_OpARMADC(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADC (MOVWconst [c]) x flags)
-	// cond:
-	// result: (ADCconst [c] x flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (MOVWconst [c]) flags)
-	// cond:
-	// result: (ADCconst [c] x flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (SLLconst [c] y) flags)
-	// cond:
-	// result: (ADCshiftLL x y [c] flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC (SLLconst [c] y) x flags)
-	// cond:
-	// result: (ADCshiftLL x y [c] flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (SRLconst [c] y) flags)
-	// cond:
-	// result: (ADCshiftRL x y [c] flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC (SRLconst [c] y) x flags)
-	// cond:
-	// result: (ADCshiftRL x y [c] flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (SRAconst [c] y) flags)
-	// cond:
-	// result: (ADCshiftRA x y [c] flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC (SRAconst [c] y) x flags)
-	// cond:
-	// result: (ADCshiftRA x y [c] flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (SLL y z) flags)
-	// cond:
-	// result: (ADCshiftLLreg x y z flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC (SLL y z) x flags)
-	// cond:
-	// result: (ADCshiftLLreg x y z flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (SRL y z) flags)
-	// cond:
-	// result: (ADCshiftRLreg x y z flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC (SRL y z) x flags)
-	// cond:
-	// result: (ADCshiftRLreg x y z flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC x (SRA y z) flags)
-	// cond:
-	// result: (ADCshiftRAreg x y z flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADC (SRA y z) x flags)
-	// cond:
-	// result: (ADCshiftRAreg x y z flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCconst [c] (ADDconst [d] x) flags)
-	// cond:
-	// result: (ADCconst [int64(int32(c+d))] x flags)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		flags := v.Args[1]
-		v.reset(OpARMADCconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCconst [c] (SUBconst [d] x) flags)
-	// cond:
-	// result: (ADCconst [int64(int32(c-d))] x flags)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		flags := v.Args[1]
-		v.reset(OpARMADCconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (ADCconst x [int64(uint32(c)<<uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (ADCconst [c] (SLL <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (ADCshiftLL x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMADCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (ADCconst x [int64(int32(c)>>uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (ADCconst [c] (SRA <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (ADCshiftRA x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMADCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (ADCconst x [int64(uint32(c)>>uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMADCconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADCshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (ADCconst [c] (SRL <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMADCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (ADCshiftRL x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMADCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADD (MOVWconst [c]) x)
-	// cond:
-	// result: (ADDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (MOVWconst [c]))
-	// cond:
-	// result: (ADDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (SLLconst [c] y))
-	// cond:
-	// result: (ADDshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMADDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (SLLconst [c] y) x)
-	// cond:
-	// result: (ADDshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMADDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (SRLconst [c] y))
-	// cond:
-	// result: (ADDshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMADDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (SRLconst [c] y) x)
-	// cond:
-	// result: (ADDshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMADDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (SRAconst [c] y))
-	// cond:
-	// result: (ADDshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMADDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (SRAconst [c] y) x)
-	// cond:
-	// result: (ADDshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMADDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (SLL y z))
-	// cond:
-	// result: (ADDshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMADDshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADD (SLL y z) x)
-	// cond:
-	// result: (ADDshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMADDshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADD x (SRL y z))
-	// cond:
-	// result: (ADDshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMADDshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADD (SRL y z) x)
-	// cond:
-	// result: (ADDshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMADDshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADD x (SRA y z))
-	// cond:
-	// result: (ADDshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMADDshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADD (SRA y z) x)
-	// cond:
-	// result: (ADDshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMADDshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADD x (RSBconst [0] y))
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMRSBconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpARMSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (RSBconst [0] y) x)
-	// cond:
-	// result: (SUB x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMRSBconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (MUL x y) a)
-	// cond:
-	// result: (MULA x y a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMUL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		a := v.Args[1]
-		v.reset(OpARMMULA)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(a)
-		return true
-	}
-	// match: (ADD a (MUL x y))
-	// cond:
-	// result: (MULA x y a)
-	for {
-		a := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMUL {
-			break
-		}
-		x := v_1.Args[0]
-		y := v_1.Args[1]
-		v.reset(OpARMMULA)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(a)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDS (MOVWconst [c]) x)
-	// cond:
-	// result: (ADDSconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDS x (MOVWconst [c]))
-	// cond:
-	// result: (ADDSconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDS x (SLLconst [c] y))
-	// cond:
-	// result: (ADDSshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMADDSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDS (SLLconst [c] y) x)
-	// cond:
-	// result: (ADDSshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMADDSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDS x (SRLconst [c] y))
-	// cond:
-	// result: (ADDSshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMADDSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDS (SRLconst [c] y) x)
-	// cond:
-	// result: (ADDSshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMADDSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDS x (SRAconst [c] y))
-	// cond:
-	// result: (ADDSshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMADDSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDS (SRAconst [c] y) x)
-	// cond:
-	// result: (ADDSshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMADDSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDS x (SLL y z))
-	// cond:
-	// result: (ADDSshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMADDSshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADDS (SLL y z) x)
-	// cond:
-	// result: (ADDSshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMADDSshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADDS x (SRL y z))
-	// cond:
-	// result: (ADDSshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMADDSshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADDS (SRL y z) x)
-	// cond:
-	// result: (ADDSshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMADDSshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADDS x (SRA y z))
-	// cond:
-	// result: (ADDSshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMADDSshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (ADDS (SRA y z) x)
-	// cond:
-	// result: (ADDSshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMADDSshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDSshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDSshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDSshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ADDSconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDSconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDSshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDSshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ADDSconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDSshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ADDSshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMADDSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDSshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDSshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDSshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ADDSconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDSconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDSshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDSshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ADDSconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDSshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ADDSshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMADDSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDSshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDSshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDSshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ADDSconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDSconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDSshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDSshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ADDSconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMADDSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDSshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ADDSshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMADDSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
-	// cond:
-	// result: (MOVWaddr [off1+off2] {sym} ptr)
-	for {
-		off1 := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym := v_0.Aux
-		ptr := v_0.Args[0]
-		v.reset(OpARMMOVWaddr)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (ADDconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(c+d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int32(c + d))
-		return true
-	}
-	// match: (ADDconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(c-d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (RSBconst [d] x))
-	// cond:
-	// result: (RSBconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMRSBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ADDconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ADDconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ADDshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMADDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ADDconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ADDconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ADDshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMADDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ADDconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMADDshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ADDconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ADDshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMADDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMAND(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AND (MOVWconst [c]) x)
-	// cond:
-	// result: (ANDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVWconst [c]))
-	// cond:
-	// result: (ANDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (SLLconst [c] y))
-	// cond:
-	// result: (ANDshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMANDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (SLLconst [c] y) x)
-	// cond:
-	// result: (ANDshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMANDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (SRLconst [c] y))
-	// cond:
-	// result: (ANDshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMANDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (SRLconst [c] y) x)
-	// cond:
-	// result: (ANDshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMANDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (SRAconst [c] y))
-	// cond:
-	// result: (ANDshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMANDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (SRAconst [c] y) x)
-	// cond:
-	// result: (ANDshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMANDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (SLL y z))
-	// cond:
-	// result: (ANDshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMANDshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (AND (SLL y z) x)
-	// cond:
-	// result: (ANDshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMANDshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (AND x (SRL y z))
-	// cond:
-	// result: (ANDshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMANDshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (AND (SRL y z) x)
-	// cond:
-	// result: (ANDshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMANDshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (AND x (SRA y z))
-	// cond:
-	// result: (ANDshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMANDshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (AND (SRA y z) x)
-	// cond:
-	// result: (ANDshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMANDshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (AND x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MVN y))
-	// cond:
-	// result: (BIC x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMVN {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpARMBIC)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (MVN y) x)
-	// cond:
-	// result: (BIC x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMVN {
-			break
-		}
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMBIC)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (MVNshiftLL y [c]))
-	// cond:
-	// result: (BICshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMVNshiftLL {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMBICshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (MVNshiftLL y [c]) x)
-	// cond:
-	// result: (BICshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMVNshiftLL {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMBICshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (MVNshiftRL y [c]))
-	// cond:
-	// result: (BICshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMVNshiftRL {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMBICshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (MVNshiftRL y [c]) x)
-	// cond:
-	// result: (BICshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMVNshiftRL {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMBICshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (MVNshiftRA y [c]))
-	// cond:
-	// result: (BICshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMVNshiftRA {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMBICshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (MVNshiftRA y [c]) x)
-	// cond:
-	// result: (BICshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMVNshiftRA {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMBICshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDconst [0] _)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDconst [c] x)
-	// cond: int32(c)==-1
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c & d
-		return true
-	}
-	// match: (ANDconst [c] (ANDconst [d] x))
-	// cond:
-	// result: (ANDconst [c&d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMANDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ANDconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMANDconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDshiftLL x y:(SLLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARMSLLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ANDconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ANDshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMANDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ANDconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMANDconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDshiftRA x y:(SRAconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARMSRAconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ANDconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ANDshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMANDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ANDconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMANDconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDshiftRL x y:(SRLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARMSRLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMANDshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ANDconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ANDshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMANDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BIC x (MOVWconst [c]))
-	// cond:
-	// result: (BICconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMBICconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (BIC x (SLLconst [c] y))
-	// cond:
-	// result: (BICshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMBICshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (BIC x (SRLconst [c] y))
-	// cond:
-	// result: (BICshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMBICshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (BIC x (SRAconst [c] y))
-	// cond:
-	// result: (BICshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMBICshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (BIC x (SLL y z))
-	// cond:
-	// result: (BICshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMBICshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (BIC x (SRL y z))
-	// cond:
-	// result: (BICshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMBICshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (BIC x (SRA y z))
-	// cond:
-	// result: (BICshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMBICshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (BIC x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICconst [c] _)
-	// cond: int32(c)==-1
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (BICconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [d&^c])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = d &^ c
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (BICconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMBICconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (BICshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMBICshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (BICconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMBICconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (BICshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMBICshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (BICconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMBICconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMBICshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (BICshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMBICshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMOVWHSconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMOVWHSconst _ (FlagEQ) [c])
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		c := v.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	// match: (CMOVWHSconst x (FlagLT_ULT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMOVWHSconst _ (FlagLT_UGT) [c])
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		c := v.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	// match: (CMOVWHSconst x (FlagGT_ULT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMOVWHSconst _ (FlagGT_UGT) [c])
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		c := v.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	// match: (CMOVWHSconst x (InvertFlags flags) [c])
-	// cond:
-	// result: (CMOVWLSconst x flags [c])
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMInvertFlags {
-			break
-		}
-		flags := v_1.Args[0]
-		v.reset(OpARMCMOVWLSconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMOVWLSconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMOVWLSconst _ (FlagEQ) [c])
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		c := v.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	// match: (CMOVWLSconst _ (FlagLT_ULT) [c])
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		c := v.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	// match: (CMOVWLSconst x (FlagLT_UGT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMOVWLSconst _ (FlagGT_ULT) [c])
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		c := v.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	// match: (CMOVWLSconst x (FlagGT_UGT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMOVWLSconst x (InvertFlags flags) [c])
-	// cond:
-	// result: (CMOVWHSconst x flags [c])
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMInvertFlags {
-			break
-		}
-		flags := v_1.Args[0]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMP x (MOVWconst [c]))
-	// cond:
-	// result: (CMPconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMCMPconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMP (MOVWconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPconst [c] x))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SLLconst [c] y))
-	// cond:
-	// result: (CMPshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMCMPshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMP (SLLconst [c] y) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftLL x y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPshiftLL, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SRLconst [c] y))
-	// cond:
-	// result: (CMPshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMCMPshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMP (SRLconst [c] y) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftRL x y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPshiftRL, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SRAconst [c] y))
-	// cond:
-	// result: (CMPshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMCMPshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMP (SRAconst [c] y) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftRA x y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPshiftRA, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SLL y z))
-	// cond:
-	// result: (CMPshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMCMPshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (CMP (SLL y z) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftLLreg x y z))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPshiftLLreg, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SRL y z))
-	// cond:
-	// result: (CMPshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMCMPshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (CMP (SRL y z) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftRLreg x y z))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPshiftRLreg, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SRA y z))
-	// cond:
-	// result: (CMPshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMCMPshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (CMP (SRA y z) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftRAreg x y z))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPshiftRAreg, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v0.AddArg(z)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPD x (MOVDconst [0]))
-	// cond:
-	// result: (CMPD0 x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARMCMPD0)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPF(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPF x (MOVFconst [0]))
-	// cond:
-	// result: (CMPF0 x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVFconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARMCMPF0)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPconst (MOVWconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(OpARMFlagEQ)
-		return true
-	}
-	// match: (CMPconst (MOVWconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpARMFlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (MOVWconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpARMFlagLT_UGT)
-		return true
-	}
-	// match: (CMPconst (MOVWconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpARMFlagGT_ULT)
-		return true
-	}
-	// match: (CMPconst (MOVWconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpARMFlagGT_UGT)
-		return true
-	}
-	// match: (CMPconst (MOVBUreg _) [c])
-	// cond: 0xff < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVBUreg {
-			break
-		}
-		if !(0xff < c) {
-			break
-		}
-		v.reset(OpARMFlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (MOVHUreg _) [c])
-	// cond: 0xffff < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVHUreg {
-			break
-		}
-		if !(0xffff < c) {
-			break
-		}
-		v.reset(OpARMFlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (ANDconst _ [m]) [n])
-	// cond: 0 <= int32(m) && int32(m) < int32(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int32(m) && int32(m) < int32(n)) {
-			break
-		}
-		v.reset(OpARMFlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (SRLconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
-			break
-		}
-		v.reset(OpARMFlagLT_ULT)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v1.AuxInt = d
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (CMPconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMCMPconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (CMPshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMCMPshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v1.AuxInt = d
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (CMPconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMCMPconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (CMPshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMCMPshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v1.AuxInt = d
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (CMPconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMCMPconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMCMPshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMInvertFlags)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (CMPshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMCMPshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Equal (FlagEQ))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Equal (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (InvertFlags x))
-	// cond:
-	// result: (Equal x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMGreaterEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterEqual (FlagEQ))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqual (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqual (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (InvertFlags x))
-	// cond:
-	// result: (LessEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMLessEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterEqualU (FlagEQ))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqualU (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqualU (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqualU (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqualU (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqualU (InvertFlags x))
-	// cond:
-	// result: (LessEqualU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMLessEqualU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterThan (FlagEQ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThan (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThan (InvertFlags x))
-	// cond:
-	// result: (LessThan x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMLessThan)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterThanU (FlagEQ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThanU (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThanU (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThanU (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThanU (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThanU (InvertFlags x))
-	// cond:
-	// result: (LessThanU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMLessThanU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessEqual (FlagEQ))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqual (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqual (InvertFlags x))
-	// cond:
-	// result: (GreaterEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMGreaterEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessEqualU (FlagEQ))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqualU (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqualU (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqualU (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqualU (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqualU (InvertFlags x))
-	// cond:
-	// result: (GreaterEqualU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMGreaterEqualU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessThan (FlagEQ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThan (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThan (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (InvertFlags x))
-	// cond:
-	// result: (GreaterThan x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMGreaterThan)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessThanU (FlagEQ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThanU (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThanU (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThanU (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThanU (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThanU (InvertFlags x))
-	// cond:
-	// result: (GreaterThanU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMGreaterThanU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVBUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBUload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg (ANDconst [c] x))
-	// cond:
-	// result: (ANDconst [c&0xff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c & 0xff
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBUreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(uint8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(uint8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVBload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg (ANDconst [c] x))
-	// cond: c & 0x80 == 0
-	// result: (ANDconst [c&0x7f] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x80 == 0) {
-			break
-		}
-		v.reset(OpARMANDconst)
-		v.AuxInt = c & 0x7f
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
-	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVBreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVBUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVDstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
-	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVFload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVFload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVFload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVFstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
-	// result: (MOVFstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVFstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVFstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVHUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVHstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBUload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVHUload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg (ANDconst [c] x))
-	// cond:
-	// result: (ANDconst [c&0xffff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMANDconst)
-		v.AuxInt = c & 0xffff
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBUreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVHUreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(uint16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(uint16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVHload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVHstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBUload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVHload {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg (ANDconst [c] x))
-	// cond: c & 0x8000 == 0
-	// result: (ANDconst [c&0x7fff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x8000 == 0) {
-			break
-		}
-		v.reset(OpARMANDconst)
-		v.AuxInt = c & 0x7fff
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVBUreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARMMOVHreg {
-			break
-		}
-		v.reset(OpARMMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
-	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVWload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWload [0] {sym} (ADD ptr idx) mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWloadidx ptr idx mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWloadidx)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWloadshiftLL ptr idx [c] mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDshiftLL {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWloadshiftLL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWloadshiftRL ptr idx [c] mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDshiftRL {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWloadshiftRL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWloadshiftRA ptr idx [c] mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDshiftRA {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWloadshiftRA)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
-	// cond: isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		ptr := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWstoreidx {
-			break
-		}
-		ptr2 := v_2.Args[0]
-		if idx != v_2.Args[1] {
-			break
-		}
-		x := v_2.Args[2]
-		if !(isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWloadidx ptr (MOVWconst [c]) mem)
-	// cond:
-	// result: (MOVWload [c] ptr mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		v.reset(OpARMMOVWload)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx (MOVWconst [c]) ptr mem)
-	// cond:
-	// result: (MOVWload [c] ptr mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWload)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
-	// cond:
-	// result: (MOVWloadshiftLL ptr idx [c] mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWloadshiftLL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
-	// cond:
-	// result: (MOVWloadshiftLL ptr idx [c] mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		idx := v_0.Args[0]
-		ptr := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWloadshiftLL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
-	// cond:
-	// result: (MOVWloadshiftRL ptr idx [c] mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWloadshiftRL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
-	// cond:
-	// result: (MOVWloadshiftRL ptr idx [c] mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		idx := v_0.Args[0]
-		ptr := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWloadshiftRL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
-	// cond:
-	// result: (MOVWloadshiftRA ptr idx [c] mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWloadshiftRA)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
-	// cond:
-	// result: (MOVWloadshiftRA ptr idx [c] mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		idx := v_0.Args[0]
-		ptr := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWloadshiftRA)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
-	// cond: c==d && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		c := v.AuxInt
-		ptr := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWstoreshiftLL {
-			break
-		}
-		d := v_2.AuxInt
-		ptr2 := v_2.Args[0]
-		if idx != v_2.Args[1] {
-			break
-		}
-		x := v_2.Args[2]
-		if !(c == d && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
-	// cond:
-	// result: (MOVWload [int64(uint32(c)<<uint64(d))] ptr mem)
-	for {
-		d := v.AuxInt
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		v.reset(OpARMMOVWload)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
-	// cond: c==d && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		c := v.AuxInt
-		ptr := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWstoreshiftRA {
-			break
-		}
-		d := v_2.AuxInt
-		ptr2 := v_2.Args[0]
-		if idx != v_2.Args[1] {
-			break
-		}
-		x := v_2.Args[2]
-		if !(c == d && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
-	// cond:
-	// result: (MOVWload [int64(int32(c)>>uint64(d))] ptr mem)
-	for {
-		d := v.AuxInt
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		v.reset(OpARMMOVWload)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
-	// cond: c==d && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		c := v.AuxInt
-		ptr := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWstoreshiftRL {
-			break
-		}
-		d := v_2.AuxInt
-		ptr2 := v_2.Args[0]
-		if idx != v_2.Args[1] {
-			break
-		}
-		x := v_2.Args[2]
-		if !(c == d && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
-	// cond:
-	// result: (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem)
-	for {
-		d := v.AuxInt
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		v.reset(OpARMMOVWload)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWreg x)
-	// cond: x.Uses == 1
-	// result: (MOVWnop x)
-	for {
-		x := v.Args[0]
-		if !(x.Uses == 1) {
-			break
-		}
-		v.reset(OpARMMOVWnop)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
-	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWstoreidx ptr idx val mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWstoreidx)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWstoreshiftLL ptr idx [c] val mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDshiftLL {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWstoreshiftLL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWstoreshiftRL ptr idx [c] val mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDshiftRL {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWstoreshiftRL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
-	// cond: sym == nil && !config.nacl
-	// result: (MOVWstoreshiftRA ptr idx [c] val mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDshiftRA {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(sym == nil && !config.nacl) {
-			break
-		}
-		v.reset(OpARMMOVWstoreshiftRA)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
-	// cond:
-	// result: (MOVWstore [c] ptr val mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
-	// cond:
-	// result: (MOVWstore [c] ptr val mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		ptr := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
-	// cond:
-	// result: (MOVWstoreshiftLL ptr idx [c] val mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstoreshiftLL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
-	// cond:
-	// result: (MOVWstoreshiftLL ptr idx [c] val mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		idx := v_0.Args[0]
-		ptr := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstoreshiftLL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
-	// cond:
-	// result: (MOVWstoreshiftRL ptr idx [c] val mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstoreshiftRL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
-	// cond:
-	// result: (MOVWstoreshiftRL ptr idx [c] val mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		idx := v_0.Args[0]
-		ptr := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstoreshiftRL)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
-	// cond:
-	// result: (MOVWstoreshiftRA ptr idx [c] val mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstoreshiftRA)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
-	// cond:
-	// result: (MOVWstoreshiftRA ptr idx [c] val mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		idx := v_0.Args[0]
-		ptr := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstoreshiftRA)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
-	// cond:
-	// result: (MOVWstore [int64(uint32(c)<<uint64(d))] ptr val mem)
-	for {
-		d := v.AuxInt
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
-	// cond:
-	// result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem)
-	for {
-		d := v.AuxInt
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
-	// cond:
-	// result: (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem)
-	for {
-		d := v.AuxInt
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARMMOVWstore)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MUL x (MOVWconst [c]))
-	// cond: int32(c) == -1
-	// result: (RSBconst [0] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARMRSBconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL _ (MOVWconst [0]))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MUL x (MOVWconst [1]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: isPowerOfTwo(c-1) && int32(c) >= 3
-	// result: (ADDshiftLL x x [log2(c-1)])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-			break
-		}
-		v.reset(OpARMADDshiftLL)
-		v.AuxInt = log2(c - 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: isPowerOfTwo(c+1) && int32(c) >= 7
-	// result: (RSBshiftLL x x [log2(c+1)])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-			break
-		}
-		v.reset(OpARMRSBshiftLL)
-		v.AuxInt = log2(c + 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-	// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-	// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = 2
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-	// result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 7)
-		v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL x (MOVWconst [c]))
-	// cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-	// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: int32(c) == -1
-	// result: (RSBconst [0] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARMRSBconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [0]) _)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MUL (MOVWconst [1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: isPowerOfTwo(c-1) && int32(c) >= 3
-	// result: (ADDshiftLL x x [log2(c-1)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-			break
-		}
-		v.reset(OpARMADDshiftLL)
-		v.AuxInt = log2(c - 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: isPowerOfTwo(c+1) && int32(c) >= 7
-	// result: (RSBshiftLL x x [log2(c+1)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-			break
-		}
-		v.reset(OpARMRSBshiftLL)
-		v.AuxInt = log2(c + 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-	// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-	// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = 2
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-	// result: (SLLconst [log2(c/7)] (RSBshiftLL <x.Type> x x [3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 7)
-		v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-	// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(c*d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int32(c * d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: int32(c) == -1
-	// result: (SUB a x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARMSUB)
-		v.AddArg(a)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULA _ (MOVWconst [0]) a)
-	// cond:
-	// result: a
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		a := v.Args[2]
-		v.reset(OpCopy)
-		v.Type = a.Type
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [1]) a)
-	// cond:
-	// result: (ADD x a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		a := v.Args[2]
-		v.reset(OpARMADD)
-		v.AddArg(x)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: isPowerOfTwo(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: isPowerOfTwo(c-1) && int32(c) >= 3
-	// result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = log2(c - 1)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: isPowerOfTwo(c+1) && int32(c) >= 7
-	// result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-		v0.AuxInt = log2(c + 1)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 3)
-		v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 5)
-		v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v1.AuxInt = 2
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 7)
-		v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-		v1.AuxInt = 3
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA x (MOVWconst [c]) a)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		a := v.Args[2]
-		if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 9)
-		v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v1.AuxInt = 3
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: int32(c) == -1
-	// result: (SUB a x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARMSUB)
-		v.AddArg(a)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULA (MOVWconst [0]) _ a)
-	// cond:
-	// result: a
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		a := v.Args[2]
-		v.reset(OpCopy)
-		v.Type = a.Type
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [1]) x a)
-	// cond:
-	// result: (ADD x a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v.Args[1]
-		a := v.Args[2]
-		v.reset(OpARMADD)
-		v.AddArg(x)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: isPowerOfTwo(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c)] x) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: isPowerOfTwo(c-1) && int32(c) >= 3
-	// result: (ADD (ADDshiftLL <x.Type> x x [log2(c-1)]) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v0.AuxInt = log2(c - 1)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: isPowerOfTwo(c+1) && int32(c) >= 7
-	// result: (ADD (RSBshiftLL <x.Type> x x [log2(c+1)]) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-		v0.AuxInt = log2(c + 1)
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/3)] (ADDshiftLL <x.Type> x x [1])) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 3)
-		v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/5)] (ADDshiftLL <x.Type> x x [2])) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 5)
-		v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v1.AuxInt = 2
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/7)] (RSBshiftLL <x.Type> x x [3])) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 7)
-		v1 := b.NewValue0(v.Line, OpARMRSBshiftLL, x.Type)
-		v1.AuxInt = 3
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) x a)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-	// result: (ADD (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])) a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		a := v.Args[2]
-		if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARMADD)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = log2(c / 9)
-		v1 := b.NewValue0(v.Line, OpARMADDshiftLL, x.Type)
-		v1.AuxInt = 3
-		v1.AddArg(x)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v.AddArg(a)
-		return true
-	}
-	// match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
-	// cond:
-	// result: (ADDconst [int64(int32(c*d))] a)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_1.AuxInt
-		a := v.Args[2]
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(c * d))
-		v.AddArg(a)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVN (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [^c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = ^c
-		return true
-	}
-	// match: (MVN (SLLconst [c] x))
-	// cond:
-	// result: (MVNshiftLL x [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMMVNshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MVN (SRLconst [c] x))
-	// cond:
-	// result: (MVNshiftRL x [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMMVNshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MVN (SRAconst [c] x))
-	// cond:
-	// result: (MVNshiftRA x [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMMVNshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MVN (SLL x y))
-	// cond:
-	// result: (MVNshiftLLreg x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpARMMVNshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (MVN (SRL x y))
-	// cond:
-	// result: (MVNshiftRLreg x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpARMMVNshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (MVN (SRA x y))
-	// cond:
-	// result: (MVNshiftRAreg x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpARMMVNshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVNshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVNshiftLL (MOVWconst [c]) [d])
-	// cond:
-	// result: (MOVWconst [^int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = ^int64(uint32(c) << uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVNshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVNshiftLLreg x (MOVWconst [c]))
-	// cond:
-	// result: (MVNshiftLL x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMMVNshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVNshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVNshiftRA (MOVWconst [c]) [d])
-	// cond:
-	// result: (MOVWconst [^int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = ^int64(int32(c) >> uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVNshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVNshiftRAreg x (MOVWconst [c]))
-	// cond:
-	// result: (MVNshiftRA x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMMVNshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVNshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVNshiftRL (MOVWconst [c]) [d])
-	// cond:
-	// result: (MOVWconst [^int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = ^int64(uint32(c) >> uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMMVNshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVNshiftRLreg x (MOVWconst [c]))
-	// cond:
-	// result: (MVNshiftRL x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMMVNshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NotEqual (FlagEQ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (NotEqual (FlagLT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagLT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagGT_ULT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagGT_UGT))
-	// cond:
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (InvertFlags x))
-	// cond:
-	// result: (NotEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMInvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARMNotEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OR (MOVWconst [c]) x)
-	// cond:
-	// result: (ORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR x (MOVWconst [c]))
-	// cond:
-	// result: (ORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR x (SLLconst [c] y))
-	// cond:
-	// result: (ORshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR (SLLconst [c] y) x)
-	// cond:
-	// result: (ORshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR x (SRLconst [c] y))
-	// cond:
-	// result: (ORshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR (SRLconst [c] y) x)
-	// cond:
-	// result: (ORshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR x (SRAconst [c] y))
-	// cond:
-	// result: (ORshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR (SRAconst [c] y) x)
-	// cond:
-	// result: (ORshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR x (SLL y z))
-	// cond:
-	// result: (ORshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMORshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (OR (SLL y z) x)
-	// cond:
-	// result: (ORshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMORshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (OR x (SRL y z))
-	// cond:
-	// result: (ORshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMORshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (OR (SRL y z) x)
-	// cond:
-	// result: (ORshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMORshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (OR x (SRA y z))
-	// cond:
-	// result: (ORshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMORshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (OR (SRA y z) x)
-	// cond:
-	// result: (ORshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMORshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (OR x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORconst [c] _)
-	// cond: int32(c)==-1
-	// result: (MOVWconst [-1])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c | d
-		return true
-	}
-	// match: (ORconst [c] (ORconst [d] x))
-	// cond:
-	// result: (ORconst [c|d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMORconst)
-		v.AuxInt = c | d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ORconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ORconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMORconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORshiftLL x y:(SLLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARMSLLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ORconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ORshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ORconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ORconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMORconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORshiftRA x y:(SRAconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARMSRAconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ORconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ORshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (ORconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (ORconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMORconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORshiftRL x y:(SRLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARMSRLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMORshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (ORconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (ORshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSB (MOVWconst [c]) x)
-	// cond:
-	// result: (SUBconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSB x (MOVWconst [c]))
-	// cond:
-	// result: (RSBconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSB x (SLLconst [c] y))
-	// cond:
-	// result: (RSBshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMRSBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (RSB (SLLconst [c] y) x)
-	// cond:
-	// result: (SUBshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMSUBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (RSB x (SRLconst [c] y))
-	// cond:
-	// result: (RSBshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMRSBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (RSB (SRLconst [c] y) x)
-	// cond:
-	// result: (SUBshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMSUBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (RSB x (SRAconst [c] y))
-	// cond:
-	// result: (RSBshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMRSBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (RSB (SRAconst [c] y) x)
-	// cond:
-	// result: (SUBshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMSUBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (RSB x (SLL y z))
-	// cond:
-	// result: (RSBshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMRSBshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (RSB (SLL y z) x)
-	// cond:
-	// result: (SUBshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMSUBshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (RSB x (SRL y z))
-	// cond:
-	// result: (RSBshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMRSBshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (RSB (SRL y z) x)
-	// cond:
-	// result: (SUBshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMSUBshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (RSB x (SRA y z))
-	// cond:
-	// result: (RSBshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMRSBshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (RSB (SRA y z) x)
-	// cond:
-	// result: (SUBshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMSUBshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (RSB x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBSshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBSshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBSshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (RSBSconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBSshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (SUBSconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBSshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (RSBSshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMRSBSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBSshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBSshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBSshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (RSBSconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBSshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (SUBSconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBSshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (RSBSshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMRSBSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBSshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBSshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBSshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (RSBSconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBSshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (SUBSconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBSshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (RSBSshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMRSBSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(c-d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int32(c - d))
-		return true
-	}
-	// match: (RSBconst [c] (RSBconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(c-d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMRSBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSBconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (RSBconst [int64(int32(c-d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSBconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (RSBconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (SUBconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (RSBconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSBshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (SUBconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (RSBshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMRSBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (SUBconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (RSBconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSBshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (SUBconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (RSBshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMRSBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (SUBconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (RSBconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (RSBshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSBshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSBshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (SUBconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (RSBshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (RSBshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMRSBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCconst [c] (ADDconst [d] x) flags)
-	// cond:
-	// result: (RSCconst [int64(int32(c-d))] x flags)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		flags := v.Args[1]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCconst [c] (SUBconst [d] x) flags)
-	// cond:
-	// result: (RSCconst [int64(int32(c+d))] x flags)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		flags := v.Args[1]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (RSCconst x [int64(uint32(c)<<uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (SBCconst [c] (SLL <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (RSCshiftLL x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMRSCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (RSCconst x [int64(int32(c)>>uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (SBCconst [c] (SRA <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (RSCshiftRA x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMRSCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (RSCconst x [int64(uint32(c)>>uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMRSCshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (SBCconst [c] (SRL <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (RSCshiftRL x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMRSCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBC (MOVWconst [c]) x flags)
-	// cond:
-	// result: (RSCconst [c] x flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (MOVWconst [c]) flags)
-	// cond:
-	// result: (SBCconst [c] x flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (SLLconst [c] y) flags)
-	// cond:
-	// result: (SBCshiftLL x y [c] flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		flags := v.Args[2]
-		v.reset(OpARMSBCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC (SLLconst [c] y) x flags)
-	// cond:
-	// result: (RSCshiftLL x y [c] flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (SRLconst [c] y) flags)
-	// cond:
-	// result: (SBCshiftRL x y [c] flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		flags := v.Args[2]
-		v.reset(OpARMSBCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC (SRLconst [c] y) x flags)
-	// cond:
-	// result: (RSCshiftRL x y [c] flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (SRAconst [c] y) flags)
-	// cond:
-	// result: (SBCshiftRA x y [c] flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		flags := v.Args[2]
-		v.reset(OpARMSBCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC (SRAconst [c] y) x flags)
-	// cond:
-	// result: (RSCshiftRA x y [c] flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (SLL y z) flags)
-	// cond:
-	// result: (SBCshiftLLreg x y z flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMSBCshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC (SLL y z) x flags)
-	// cond:
-	// result: (RSCshiftLLreg x y z flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (SRL y z) flags)
-	// cond:
-	// result: (SBCshiftRLreg x y z flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMSBCshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC (SRL y z) x flags)
-	// cond:
-	// result: (RSCshiftRLreg x y z flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC x (SRA y z) flags)
-	// cond:
-	// result: (SBCshiftRAreg x y z flags)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMSBCshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBC (SRA y z) x flags)
-	// cond:
-	// result: (RSCshiftRAreg x y z flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCconst [c] (ADDconst [d] x) flags)
-	// cond:
-	// result: (SBCconst [int64(int32(c-d))] x flags)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		flags := v.Args[1]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCconst [c] (SUBconst [d] x) flags)
-	// cond:
-	// result: (SBCconst [int64(int32(c+d))] x flags)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		flags := v.Args[1]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (SBCconst x [int64(uint32(c)<<uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (RSCconst [c] (SLL <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (SBCshiftLL x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMSBCshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (SBCconst x [int64(int32(c)>>uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (RSCconst [c] (SRA <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (SBCshiftRA x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMSBCshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
-	// cond:
-	// result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		flags := v.Args[2]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
-	// cond:
-	// result: (SBCconst x [int64(uint32(c)>>uint64(d))] flags)
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		flags := v.Args[2]
-		v.reset(OpARMSBCconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSBCshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
-	// cond:
-	// result: (RSCconst [c] (SRL <x.Type> x y) flags)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		flags := v.Args[3]
-		v.reset(OpARMRSCconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(flags)
-		return true
-	}
-	// match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
-	// cond:
-	// result: (SBCshiftRL x y [c] flags)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		flags := v.Args[3]
-		v.reset(OpARMSBCshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(flags)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLL x (MOVWconst [c]))
-	// cond:
-	// result: (SLLconst x [c&31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSLLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLLconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(uint32(d)<<uint64(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(uint32(d) << uint64(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRA x (MOVWconst [c]))
-	// cond:
-	// result: (SRAconst x [c&31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSRAconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSRAcond(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAcond x _ (FlagEQ))
-	// cond:
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMFlagEQ {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRAcond x y (FlagLT_ULT))
-	// cond:
-	// result: (SRA x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMFlagLT_ULT {
-			break
-		}
-		v.reset(OpARMSRA)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SRAcond x _ (FlagLT_UGT))
-	// cond:
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMFlagLT_UGT {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRAcond x y (FlagGT_ULT))
-	// cond:
-	// result: (SRA x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMFlagGT_ULT {
-			break
-		}
-		v.reset(OpARMSRA)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SRAcond x _ (FlagGT_UGT))
-	// cond:
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMFlagGT_UGT {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(d)>>uint64(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int32(d) >> uint64(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRL x (MOVWconst [c]))
-	// cond:
-	// result: (SRLconst x [c&31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSRLconst)
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRLconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(uint32(d)>>uint64(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(uint32(d) >> uint64(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUB (MOVWconst [c]) x)
-	// cond:
-	// result: (RSBconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUB x (MOVWconst [c]))
-	// cond:
-	// result: (SUBconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUB x (SLLconst [c] y))
-	// cond:
-	// result: (SUBshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMSUBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB (SLLconst [c] y) x)
-	// cond:
-	// result: (RSBshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMRSBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB x (SRLconst [c] y))
-	// cond:
-	// result: (SUBshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMSUBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB (SRLconst [c] y) x)
-	// cond:
-	// result: (RSBshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMRSBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB x (SRAconst [c] y))
-	// cond:
-	// result: (SUBshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMSUBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB (SRAconst [c] y) x)
-	// cond:
-	// result: (RSBshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMRSBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB x (SLL y z))
-	// cond:
-	// result: (SUBshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMSUBshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUB (SLL y z) x)
-	// cond:
-	// result: (RSBshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMRSBshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUB x (SRL y z))
-	// cond:
-	// result: (SUBshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMSUBshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUB (SRL y z) x)
-	// cond:
-	// result: (RSBshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMRSBshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUB x (SRA y z))
-	// cond:
-	// result: (SUBshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMSUBshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUB (SRA y z) x)
-	// cond:
-	// result: (RSBshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMRSBshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUB x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBS (MOVWconst [c]) x)
-	// cond:
-	// result: (RSBSconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBS x (MOVWconst [c]))
-	// cond:
-	// result: (SUBSconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBS x (SLLconst [c] y))
-	// cond:
-	// result: (SUBSshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMSUBSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUBS (SLLconst [c] y) x)
-	// cond:
-	// result: (RSBSshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMRSBSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUBS x (SRLconst [c] y))
-	// cond:
-	// result: (SUBSshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMSUBSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUBS (SRLconst [c] y) x)
-	// cond:
-	// result: (RSBSshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMRSBSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUBS x (SRAconst [c] y))
-	// cond:
-	// result: (SUBSshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMSUBSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUBS (SRAconst [c] y) x)
-	// cond:
-	// result: (RSBSshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMRSBSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUBS x (SLL y z))
-	// cond:
-	// result: (SUBSshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMSUBSshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUBS (SLL y z) x)
-	// cond:
-	// result: (RSBSshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMRSBSshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUBS x (SRL y z))
-	// cond:
-	// result: (SUBSshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMSUBSshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUBS (SRL y z) x)
-	// cond:
-	// result: (RSBSshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMRSBSshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUBS x (SRA y z))
-	// cond:
-	// result: (SUBSshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMSUBSshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (SUBS (SRA y z) x)
-	// cond:
-	// result: (RSBSshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMRSBSshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBSshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBSshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBSshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (SUBSconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBSshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (RSBSconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBSshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (SUBSshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMSUBSshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBSshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBSshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBSshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (SUBSconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBSshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (RSBSconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBSshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (SUBSshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMSUBSshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBSshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBSshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBSshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (SUBSconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBSconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBSshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (RSBSconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMRSBSconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBSshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (SUBSshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMSUBSshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(d-c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(int32(d - c))
-		return true
-	}
-	// match: (SUBconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(-c-d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(-c - d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(-c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMADDconst)
-		v.AuxInt = int64(int32(-c + d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (RSBconst [d] x))
-	// cond:
-	// result: (RSBconst [int64(int32(-c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMRSBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = int64(int32(-c + d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (RSBconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (SUBconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (RSBconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (SUBshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMSUBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (RSBconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (SUBconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (RSBconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (SUBshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMSUBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (RSBconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (SUBconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMSUBconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMSUBshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (RSBconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (SUBshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMSUBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XOR (MOVWconst [c]) x)
-	// cond:
-	// result: (XORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x (MOVWconst [c]))
-	// cond:
-	// result: (XORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x (SLLconst [c] y))
-	// cond:
-	// result: (XORshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMXORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SLLconst [c] y) x)
-	// cond:
-	// result: (XORshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR x (SRLconst [c] y))
-	// cond:
-	// result: (XORshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMXORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SRLconst [c] y) x)
-	// cond:
-	// result: (XORshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR x (SRAconst [c] y))
-	// cond:
-	// result: (XORshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMXORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SRAconst [c] y) x)
-	// cond:
-	// result: (XORshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR x (SRRconst [c] y))
-	// cond:
-	// result: (XORshiftRR x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRRconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARMXORshiftRR)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SRRconst [c] y) x)
-	// cond:
-	// result: (XORshiftRR x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRRconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftRR)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR x (SLL y z))
-	// cond:
-	// result: (XORshiftLLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMXORshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (XOR (SLL y z) x)
-	// cond:
-	// result: (XORshiftLLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSLL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftLLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (XOR x (SRL y z))
-	// cond:
-	// result: (XORshiftRLreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRL {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMXORshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (XOR (SRL y z) x)
-	// cond:
-	// result: (XORshiftRLreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRL {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftRLreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (XOR x (SRA y z))
-	// cond:
-	// result: (XORshiftRAreg x y z)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRA {
-			break
-		}
-		y := v_1.Args[0]
-		z := v_1.Args[1]
-		v.reset(OpARMXORshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (XOR (SRA y z) x)
-	// cond:
-	// result: (XORshiftRAreg x y z)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMSRA {
-			break
-		}
-		y := v_0.Args[0]
-		z := v_0.Args[1]
-		x := v.Args[1]
-		v.reset(OpARMXORshiftRAreg)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(z)
-		return true
-	}
-	// match: (XOR x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	// match: (XORconst [c] (XORconst [d] x))
-	// cond:
-	// result: (XORconst [c^d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMXORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftLL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftLL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(uint32(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMXORconst)
-		v.AuxInt = int64(uint32(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftLLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftLLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (XORconst [c] (SLL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftLLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (XORshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMXORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRA (MOVWconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRA x (MOVWconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(int32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMXORconst)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftRAreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRAreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (XORconst [c] (SRA <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRA, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRAreg x y (MOVWconst [c]))
-	// cond:
-	// result: (XORshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMXORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRL (MOVWconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRL x (MOVWconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(uint32(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMXORconst)
-		v.AuxInt = int64(uint32(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVWconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMSRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftRLreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRLreg (MOVWconst [c]) x y)
-	// cond:
-	// result: (XORconst [c] (SRL <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		y := v.Args[2]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRLreg x y (MOVWconst [c]))
-	// cond:
-	// result: (XORshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		v.reset(OpARMXORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpARMXORshiftRR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRR (MOVWconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SRRconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARMXORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARMSRRconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRR x (MOVWconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARMXORconst)
-		v.AuxInt = int64(uint32(c)>>uint64(d) | uint32(c)<<uint64(32-d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (ADDF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADDF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAdd32carry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32carry x y)
-	// cond:
-	// result: (ADDS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADDS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAdd32withcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32withcarry x y c)
-	// cond:
-	// result: (ADC x y c)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		c := v.Args[2]
-		v.reset(OpARMADC)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(c)
-		return true
-	}
-}
-func rewriteValueARM_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (ADDD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADDD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (MOVWaddr {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(OpARMMOVWaddr)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValueARM_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpBswap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap32 <t> x)
-	// cond:
-	// result: (XOR <t> 		(SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) 		(SRRconst <t> x [8]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpARMXOR)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpARMSRLconst, t)
-		v0.AuxInt = 8
-		v1 := b.NewValue0(v.Line, OpARMBICconst, t)
-		v1.AuxInt = 0xff0000
-		v2 := b.NewValue0(v.Line, OpARMXOR, t)
-		v2.AddArg(x)
-		v3 := b.NewValue0(v.Line, OpARMSRRconst, t)
-		v3.AuxInt = 16
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpARMSRRconst, t)
-		v4.AuxInt = 8
-		v4.AddArg(x)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMCALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16 [val])
-	// cond:
-	// result: (MOVWconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32 [val])
-	// cond:
-	// result: (MOVWconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (MOVFconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARMMOVFconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARMMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8 [val])
-	// cond:
-	// result: (MOVWconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVWconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValueARM_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValueARM_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert x mem)
-	// cond:
-	// result: (MOVWconvert x mem)
-	for {
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMMOVWconvert)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpCtz32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz32 <t> x)
-	// cond:
-	// result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpARMCLZ, t)
-		v1 := b.NewValue0(v.Line, OpARMSUBconst, t)
-		v1.AuxInt = 1
-		v2 := b.NewValue0(v.Line, OpARMAND, t)
-		v2.AddArg(x)
-		v3 := b.NewValue0(v.Line, OpARMRSBconst, t)
-		v3.AuxInt = 0
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (MOVFW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVFW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32Fto32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32U x)
-	// cond:
-	// result: (MOVFWU x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVFWU)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (MOVFD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVFD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32Uto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Uto32F x)
-	// cond:
-	// result: (MOVWUF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVWUF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32Uto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Uto64F x)
-	// cond:
-	// result: (MOVWUD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVWUD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (MOVWF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVWF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (MOVWD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVWD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (MOVDW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVDW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (MOVDF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVDF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpCvt64Fto32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32U x)
-	// cond:
-	// result: (MOVDWU x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVDWU)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpARMCALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16 x y)
-	// cond:
-	// result: (Div32 (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpDiv32)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpDiv32u)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32 x y)
-	// cond:
-	// result: (SUB (XOR <config.fe.TypeUInt32()> 		(Select0 <config.fe.TypeUInt32()> (UDIVrtcall 			(SUB <config.fe.TypeUInt32()> (XOR x <config.fe.TypeUInt32()> (Signmask x)) (Signmask x)) 			(SUB <config.fe.TypeUInt32()> (XOR y <config.fe.TypeUInt32()> (Signmask y)) (Signmask y)))) 		(Signmask (XOR <config.fe.TypeUInt32()> x y))) (Signmask (XOR <config.fe.TypeUInt32()> x y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUB)
-		v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpSelect0, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpARMUDIVrtcall, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v3 := b.NewValue0(v.Line, OpARMSUB, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v3.AddArg(v4)
-		v6 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v6.AddArg(x)
-		v3.AddArg(v6)
-		v2.AddArg(v3)
-		v7 := b.NewValue0(v.Line, OpARMSUB, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v8.AddArg(y)
-		v9 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v9.AddArg(y)
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v10 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v10.AddArg(y)
-		v7.AddArg(v10)
-		v2.AddArg(v7)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v11 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v12 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v12.AddArg(x)
-		v12.AddArg(y)
-		v11.AddArg(v12)
-		v0.AddArg(v11)
-		v.AddArg(v0)
-		v13 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v14 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v14.AddArg(x)
-		v14.AddArg(y)
-		v13.AddArg(v14)
-		v.AddArg(v13)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (DIVF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMDIVF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (Select0 <config.fe.TypeUInt32()> (UDIVrtcall x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v.Type = config.fe.TypeUInt32()
-		v0 := b.NewValue0(v.Line, OpARMUDIVrtcall, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (DIVD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMDIVD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8 x y)
-	// cond:
-	// result: (Div32 (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpDiv32)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u x y)
-	// cond:
-	// result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpDiv32u)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16 x y)
-	// cond:
-	// result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32 x y)
-	// cond:
-	// result: (Equal (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (Equal (CMPF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (Equal (CMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8 x y)
-	// cond:
-	// result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB x y)
-	// cond:
-	// result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeBool())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (Equal (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16 x y)
-	// cond:
-	// result: (GreaterEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (GreaterEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32 x y)
-	// cond:
-	// result: (GreaterEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (GreaterEqual (CMPF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (GreaterEqualU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (GreaterEqual (CMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8 x y)
-	// cond:
-	// result: (GreaterEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U x y)
-	// cond:
-	// result: (GreaterEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpARMLoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValueARM_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpARMCALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16 x y)
-	// cond:
-	// result: (GreaterThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (GreaterThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32 x y)
-	// cond:
-	// result: (GreaterThan (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (GreaterThan (CMPF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (GreaterThanU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (GreaterThan (CMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8 x y)
-	// cond:
-	// result: (GreaterThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U x y)
-	// cond:
-	// result: (GreaterThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16 x y)
-	// cond:
-	// result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRLconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32 x y)
-	// cond:
-	// result: (HMUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMHMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (HMULU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMHMULU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8 x y)
-	// cond:
-	// result: (SRAconst (MUL <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16())
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u x y)
-	// cond:
-	// result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRLconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMCALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (LessThanU (CMP idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpARMLessThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil ptr)
-	// cond:
-	// result: (NotEqual (CMPconst [0] ptr))
-	for {
-		ptr := v.Args[0]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = 0
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (LessEqualU (CMP idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpARMLessEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16 x y)
-	// cond:
-	// result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32 x y)
-	// cond:
-	// result: (LessEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (GreaterEqual (CMPF y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (LessEqualU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (GreaterEqual (CMPD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8 x y)
-	// cond:
-	// result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U x y)
-	// cond:
-	// result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessEqualU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16 x y)
-	// cond:
-	// result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessThan)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32 x y)
-	// cond:
-	// result: (LessThan (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessThan)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (GreaterThan (CMPF y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (LessThanU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (GreaterThan (CMPD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMGreaterThan)
-		v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8 x y)
-	// cond:
-	// result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessThan)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U x y)
-	// cond:
-	// result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMLessThanU)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: t.IsBoolean()
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean()) {
-			break
-		}
-		v.reset(OpARMMOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && isSigned(t))
-	// result: (MOVBload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpARMMOVBload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && !isSigned(t))
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpARMMOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && isSigned(t))
-	// result: (MOVHload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpARMMOVHload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && !isSigned(t))
-	// result: (MOVHUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpARMMOVHUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) || isPtr(t))
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpARMMOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (MOVFload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpARMMOVFload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (MOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpARMMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpLrot16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot16 <t> x [c])
-	// cond:
-	// result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> x [16-c&15]))
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARMOR)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
-		v0.AuxInt = c & 15
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
-		v1.AuxInt = 16 - c&15
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpLrot32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot32 x [c])
-	// cond:
-	// result: (SRRconst x [32-c&31])
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARMSRRconst)
-		v.AuxInt = 32 - c&31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpLrot8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot8 <t> x [c])
-	// cond:
-	// result: (OR (SLLconst <t> x [c&7]) (SRLconst <t> x [8-c&7]))
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARMOR)
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, t)
-		v0.AuxInt = c & 7
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMSRLconst, t)
-		v1.AuxInt = 8 - c&7
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 x y)
-	// cond:
-	// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 x y)
-	// cond:
-	// result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64 _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (Const16 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8  x y)
-	// cond:
-	// result: (SLL x (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSLL)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 x y)
-	// cond:
-	// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 x y)
-	// cond:
-	// result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64 _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8  x y)
-	// cond:
-	// result: (SLL x (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSLL)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 x y)
-	// cond:
-	// result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 x y)
-	// cond:
-	// result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSLL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpARMSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64 _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (Const8 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8  x y)
-	// cond:
-	// result: (SLL x (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSLL)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16 x y)
-	// cond:
-	// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32u)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32 x y)
-	// cond:
-	// result: (SUB (XOR <config.fe.TypeUInt32()> 		(Select1 <config.fe.TypeUInt32()> (UDIVrtcall 			(SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> x (Signmask x)) (Signmask x)) 			(SUB <config.fe.TypeUInt32()> (XOR <config.fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) 		(Signmask x)) (Signmask x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUB)
-		v0 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpARMUDIVrtcall, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v3 := b.NewValue0(v.Line, OpARMSUB, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v3.AddArg(v4)
-		v6 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v6.AddArg(x)
-		v3.AddArg(v6)
-		v2.AddArg(v3)
-		v7 := b.NewValue0(v.Line, OpARMSUB, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpARMXOR, config.fe.TypeUInt32())
-		v8.AddArg(y)
-		v9 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v9.AddArg(y)
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v10 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v10.AddArg(y)
-		v7.AddArg(v10)
-		v2.AddArg(v7)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v11 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v11.AddArg(x)
-		v0.AddArg(v11)
-		v.AddArg(v0)
-		v12 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v12.AddArg(x)
-		v.AddArg(v12)
-		return true
-	}
-}
-func rewriteValueARM_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (Select1 <config.fe.TypeUInt32()> (UDIVrtcall x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v.Type = config.fe.TypeUInt32()
-		v0 := b.NewValue0(v.Line, OpARMUDIVrtcall, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8 x y)
-	// cond:
-	// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u x y)
-	// cond:
-	// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32u)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore dst (MOVHUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpARMMOVHstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] dst (MOVBUload [1] src mem) 		(MOVBstore dst (MOVBUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = 1
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 1
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore dst (MOVWload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpARMMOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] dst (MOVHUload [2] src mem) 		(MOVHstore dst (MOVHUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpARMMOVHstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] dst (MOVBUload [3] src mem) 		(MOVBstore [2] dst (MOVBUload [2] src mem) 			(MOVBstore [1] dst (MOVBUload [1] src mem) 				(MOVBstore dst (MOVBUload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = 3
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 3
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v2.AuxInt = 2
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v4.AuxInt = 1
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBUload [2] src mem) 		(MOVBstore [1] dst (MOVBUload [1] src mem) 			(MOVBstore dst (MOVBUload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v2.AuxInt = 1
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 	&& SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
-	// result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpARMDUFFCOPY)
-		v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4))
-		v.AddArg(dst)
-		v.AddArg(src)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
-	// result: (LoweredMove [SizeAndAlign(s).Align()] 		dst 		src 		(ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
-			break
-		}
-		v.reset(OpARMLoweredMove)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(src)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (MULF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMMULF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32uhilo x y)
-	// cond:
-	// result: (MULLU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMMULLU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (MULD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMMULD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16 x)
-	// cond:
-	// result: (RSBconst [0] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32 x)
-	// cond:
-	// result: (RSBconst [0] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (NEGF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMNEGF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (NEGD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMNEGD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8 x)
-	// cond:
-	// result: (RSBconst [0] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMRSBconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16 x y)
-	// cond:
-	// result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32 x y)
-	// cond:
-	// result: (NotEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (NotEqual (CMPF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (NotEqual (CMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8 x y)
-	// cond:
-	// result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (NotEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMNotEqual)
-		v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARMLoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMXORconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr:(SP))
-	// cond:
-	// result: (MOVWaddr [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if ptr.Op != OpSP {
-			break
-		}
-		v.reset(OpARMMOVWaddr)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADDconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(OpARMADDconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValueARM_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 x y)
-	// cond:
-	// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v3.AuxInt = 256
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 x y)
-	// cond:
-	// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpARMSRLconst)
-		v.AuxInt = c + 16
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 16
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (Const16 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  x y)
-	// cond:
-	// result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRL)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 x y)
-	// cond:
-	// result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAcond)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 x y)
-	// cond:
-	// result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAcond)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = c + 16
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 16
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 16
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8  x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 x y)
-	// cond:
-	// result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 x y)
-	// cond:
-	// result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SRLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpARMSRLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  x y)
-	// cond:
-	// result: (SRL x (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRL)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 x y)
-	// cond:
-	// result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAcond)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 x y)
-	// cond:
-	// result: (SRAcond x y (CMPconst [256] y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAcond)
-		v.AddArg(x)
-		v.AddArg(y)
-		v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v0.AuxInt = 256
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SRAconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8  x y)
-	// cond:
-	// result: (SRA x (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 x y)
-	// cond:
-	// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v3.AuxInt = 256
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 x y)
-	// cond:
-	// result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMCMOVWHSconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpARMSRL, x.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpARMSRLconst)
-		v.AuxInt = c + 24
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 24
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (Const8 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  x y)
-	// cond:
-	// result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRL)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 x y)
-	// cond:
-	// result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAcond)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v2.AuxInt = 256
-		v3 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 x y)
-	// cond:
-	// result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRAcond)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-		v1.AuxInt = 256
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = c + 24
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 24
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64 x (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 24
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8  x y)
-	// cond:
-	// result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM_OpSelect0(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select0 (UDIVrtcall x (MOVWconst [1])))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMUDIVrtcall {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpARMMOVWconst {
-			break
-		}
-		if v_0_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select0 (UDIVrtcall x (MOVWconst [c])))
-	// cond: isPowerOfTwo(c)
-	// result: (SRLconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMUDIVrtcall {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARMSRLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select0 (UDIVrtcall (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(uint32(c)/uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMUDIVrtcall {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(uint32(c) / uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpSelect1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select1 (UDIVrtcall _ (MOVWconst [1])))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMUDIVrtcall {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpARMMOVWconst {
-			break
-		}
-		if v_0_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select1 (UDIVrtcall x (MOVWconst [c])))
-	// cond: isPowerOfTwo(c)
-	// result: (ANDconst [c-1] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMUDIVrtcall {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARMANDconst)
-		v.AuxInt = c - 1
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (UDIVrtcall (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(uint32(c)%uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARMUDIVrtcall {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpARMMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpARMMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpARMMOVWconst)
-		v.AuxInt = int64(uint32(c) % uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpSignmask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Signmask x)
-	// cond:
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (MVN (SRAconst <t> (SUBconst <t> x [1]) [31]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpARMMVN)
-		v0 := b.NewValue0(v.Line, OpARMSRAconst, t)
-		v0.AuxInt = 31
-		v1 := b.NewValue0(v.Line, OpARMSUBconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (SQRTD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMSQRTD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpARMCALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVHstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARMMOVHstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: !is32BitFloat(val.Type)
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARMMOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (MOVFstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARMMOVFstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (MOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARMMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (SUBF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUBF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32carry x y)
-	// cond:
-	// result: (SUBS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUBS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32withcarry x y c)
-	// cond:
-	// result: (SBC x y c)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		c := v.Args[2]
-		v.reset(OpARMSBC)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(c)
-		return true
-	}
-}
-func rewriteValueARM_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (SUBD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUBD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARMXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpARMMOVHstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] ptr (MOVWconst [0]) 		(MOVBstore [0] ptr (MOVWconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = 1
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpARMMOVWstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] ptr (MOVWconst [0]) 		(MOVHstore [0] ptr (MOVWconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpARMMOVHstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] ptr (MOVWconst [0]) 		(MOVBstore [2] ptr (MOVWconst [0]) 			(MOVBstore [1] ptr (MOVWconst [0]) 				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = 3
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v5.AuxInt = 0
-		v5.AddArg(ptr)
-		v6 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v6.AuxInt = 0
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] ptr (MOVWconst [0]) 		(MOVBstore [1] ptr (MOVWconst [0]) 			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpARMMOVBstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 	&& SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice
-	// result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpARMDUFFZERO)
-		v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/4))
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0
-	// result: (LoweredZero [SizeAndAlign(s).Align()] 		ptr 		(ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		(MOVWconst [0]) 		mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%4 != 0) {
-			break
-		}
-		v.reset(OpARMLoweredZero)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVHUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVHUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARMMOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM_OpZeromask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zeromask x)
-	// cond:
-	// result: (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31])
-	for {
-		x := v.Args[0]
-		v.reset(OpARMSRAconst)
-		v.AuxInt = 31
-		v0 := b.NewValue0(v.Line, OpARMRSBshiftRL, config.fe.TypeInt32())
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteBlockARM(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockARMEQ:
-		// match: (EQ (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (InvertFlags cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMGE:
-		// match: (GE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMLE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMGT:
-		// match: (GT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMLT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockIf:
-		// match: (If (Equal cc) yes no)
-		// cond:
-		// result: (EQ cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMEQ
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (NotEqual cc) yes no)
-		// cond:
-		// result: (NE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMNotEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMNE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessThan cc) yes no)
-		// cond:
-		// result: (LT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMLessThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMLT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessThanU cc) yes no)
-		// cond:
-		// result: (ULT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMLessThanU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMULT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessEqual cc) yes no)
-		// cond:
-		// result: (LE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMLessEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMLE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessEqualU cc) yes no)
-		// cond:
-		// result: (ULE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMLessEqualU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMULE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterThan cc) yes no)
-		// cond:
-		// result: (GT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMGreaterThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterThanU cc) yes no)
-		// cond:
-		// result: (UGT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMGreaterThanU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMUGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterEqual cc) yes no)
-		// cond:
-		// result: (GE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMGreaterEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterEqualU cc) yes no)
-		// cond:
-		// result: (UGE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMGreaterEqualU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMUGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE (CMPconst [0] cond) yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMNE
-			v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
-			v0.AuxInt = 0
-			v0.AddArg(cond)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMLE:
-		// match: (LE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMLT:
-		// match: (LT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMNE:
-		// match: (NE (CMPconst [0] (Equal cc)) yes no)
-		// cond:
-		// result: (EQ cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMEQ
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (NotEqual cc)) yes no)
-		// cond:
-		// result: (NE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMNotEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMNE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (LessThan cc)) yes no)
-		// cond:
-		// result: (LT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMLessThan {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMLT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (LessThanU cc)) yes no)
-		// cond:
-		// result: (ULT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMLessThanU {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMULT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (LessEqual cc)) yes no)
-		// cond:
-		// result: (LE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMLessEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMLE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
-		// cond:
-		// result: (ULE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMLessEqualU {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMULE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
-		// cond:
-		// result: (GT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMGreaterThan {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
-		// cond:
-		// result: (UGT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMGreaterThanU {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMUGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
-		// cond:
-		// result: (GE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMGreaterEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
-		// cond:
-		// result: (UGE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMCMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpARMGreaterEqualU {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMUGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMUGE:
-		// match: (UGE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMUGT:
-		// match: (UGT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMULE:
-		// match: (ULE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMUGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARMULT:
-		// match: (ULT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARMFlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARMInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARMUGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteARM64.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteARM64.go
deleted file mode 100644
index 7e609b3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteARM64.go
+++ /dev/null
@@ -1,16706 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteARM64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteARM64.go:1
-// autogenerated from gen/ARM64.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueARM64(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpARM64ADD:
-		return rewriteValueARM64_OpARM64ADD(v, config)
-	case OpARM64ADDconst:
-		return rewriteValueARM64_OpARM64ADDconst(v, config)
-	case OpARM64ADDshiftLL:
-		return rewriteValueARM64_OpARM64ADDshiftLL(v, config)
-	case OpARM64ADDshiftRA:
-		return rewriteValueARM64_OpARM64ADDshiftRA(v, config)
-	case OpARM64ADDshiftRL:
-		return rewriteValueARM64_OpARM64ADDshiftRL(v, config)
-	case OpARM64AND:
-		return rewriteValueARM64_OpARM64AND(v, config)
-	case OpARM64ANDconst:
-		return rewriteValueARM64_OpARM64ANDconst(v, config)
-	case OpARM64ANDshiftLL:
-		return rewriteValueARM64_OpARM64ANDshiftLL(v, config)
-	case OpARM64ANDshiftRA:
-		return rewriteValueARM64_OpARM64ANDshiftRA(v, config)
-	case OpARM64ANDshiftRL:
-		return rewriteValueARM64_OpARM64ANDshiftRL(v, config)
-	case OpARM64BIC:
-		return rewriteValueARM64_OpARM64BIC(v, config)
-	case OpARM64BICconst:
-		return rewriteValueARM64_OpARM64BICconst(v, config)
-	case OpARM64BICshiftLL:
-		return rewriteValueARM64_OpARM64BICshiftLL(v, config)
-	case OpARM64BICshiftRA:
-		return rewriteValueARM64_OpARM64BICshiftRA(v, config)
-	case OpARM64BICshiftRL:
-		return rewriteValueARM64_OpARM64BICshiftRL(v, config)
-	case OpARM64CMP:
-		return rewriteValueARM64_OpARM64CMP(v, config)
-	case OpARM64CMPW:
-		return rewriteValueARM64_OpARM64CMPW(v, config)
-	case OpARM64CMPWconst:
-		return rewriteValueARM64_OpARM64CMPWconst(v, config)
-	case OpARM64CMPconst:
-		return rewriteValueARM64_OpARM64CMPconst(v, config)
-	case OpARM64CMPshiftLL:
-		return rewriteValueARM64_OpARM64CMPshiftLL(v, config)
-	case OpARM64CMPshiftRA:
-		return rewriteValueARM64_OpARM64CMPshiftRA(v, config)
-	case OpARM64CMPshiftRL:
-		return rewriteValueARM64_OpARM64CMPshiftRL(v, config)
-	case OpARM64CSELULT:
-		return rewriteValueARM64_OpARM64CSELULT(v, config)
-	case OpARM64CSELULT0:
-		return rewriteValueARM64_OpARM64CSELULT0(v, config)
-	case OpARM64DIV:
-		return rewriteValueARM64_OpARM64DIV(v, config)
-	case OpARM64DIVW:
-		return rewriteValueARM64_OpARM64DIVW(v, config)
-	case OpARM64Equal:
-		return rewriteValueARM64_OpARM64Equal(v, config)
-	case OpARM64FMOVDload:
-		return rewriteValueARM64_OpARM64FMOVDload(v, config)
-	case OpARM64FMOVDstore:
-		return rewriteValueARM64_OpARM64FMOVDstore(v, config)
-	case OpARM64FMOVSload:
-		return rewriteValueARM64_OpARM64FMOVSload(v, config)
-	case OpARM64FMOVSstore:
-		return rewriteValueARM64_OpARM64FMOVSstore(v, config)
-	case OpARM64GreaterEqual:
-		return rewriteValueARM64_OpARM64GreaterEqual(v, config)
-	case OpARM64GreaterEqualU:
-		return rewriteValueARM64_OpARM64GreaterEqualU(v, config)
-	case OpARM64GreaterThan:
-		return rewriteValueARM64_OpARM64GreaterThan(v, config)
-	case OpARM64GreaterThanU:
-		return rewriteValueARM64_OpARM64GreaterThanU(v, config)
-	case OpARM64LessEqual:
-		return rewriteValueARM64_OpARM64LessEqual(v, config)
-	case OpARM64LessEqualU:
-		return rewriteValueARM64_OpARM64LessEqualU(v, config)
-	case OpARM64LessThan:
-		return rewriteValueARM64_OpARM64LessThan(v, config)
-	case OpARM64LessThanU:
-		return rewriteValueARM64_OpARM64LessThanU(v, config)
-	case OpARM64MOD:
-		return rewriteValueARM64_OpARM64MOD(v, config)
-	case OpARM64MODW:
-		return rewriteValueARM64_OpARM64MODW(v, config)
-	case OpARM64MOVBUload:
-		return rewriteValueARM64_OpARM64MOVBUload(v, config)
-	case OpARM64MOVBUreg:
-		return rewriteValueARM64_OpARM64MOVBUreg(v, config)
-	case OpARM64MOVBload:
-		return rewriteValueARM64_OpARM64MOVBload(v, config)
-	case OpARM64MOVBreg:
-		return rewriteValueARM64_OpARM64MOVBreg(v, config)
-	case OpARM64MOVBstore:
-		return rewriteValueARM64_OpARM64MOVBstore(v, config)
-	case OpARM64MOVBstorezero:
-		return rewriteValueARM64_OpARM64MOVBstorezero(v, config)
-	case OpARM64MOVDload:
-		return rewriteValueARM64_OpARM64MOVDload(v, config)
-	case OpARM64MOVDreg:
-		return rewriteValueARM64_OpARM64MOVDreg(v, config)
-	case OpARM64MOVDstore:
-		return rewriteValueARM64_OpARM64MOVDstore(v, config)
-	case OpARM64MOVDstorezero:
-		return rewriteValueARM64_OpARM64MOVDstorezero(v, config)
-	case OpARM64MOVHUload:
-		return rewriteValueARM64_OpARM64MOVHUload(v, config)
-	case OpARM64MOVHUreg:
-		return rewriteValueARM64_OpARM64MOVHUreg(v, config)
-	case OpARM64MOVHload:
-		return rewriteValueARM64_OpARM64MOVHload(v, config)
-	case OpARM64MOVHreg:
-		return rewriteValueARM64_OpARM64MOVHreg(v, config)
-	case OpARM64MOVHstore:
-		return rewriteValueARM64_OpARM64MOVHstore(v, config)
-	case OpARM64MOVHstorezero:
-		return rewriteValueARM64_OpARM64MOVHstorezero(v, config)
-	case OpARM64MOVWUload:
-		return rewriteValueARM64_OpARM64MOVWUload(v, config)
-	case OpARM64MOVWUreg:
-		return rewriteValueARM64_OpARM64MOVWUreg(v, config)
-	case OpARM64MOVWload:
-		return rewriteValueARM64_OpARM64MOVWload(v, config)
-	case OpARM64MOVWreg:
-		return rewriteValueARM64_OpARM64MOVWreg(v, config)
-	case OpARM64MOVWstore:
-		return rewriteValueARM64_OpARM64MOVWstore(v, config)
-	case OpARM64MOVWstorezero:
-		return rewriteValueARM64_OpARM64MOVWstorezero(v, config)
-	case OpARM64MUL:
-		return rewriteValueARM64_OpARM64MUL(v, config)
-	case OpARM64MULW:
-		return rewriteValueARM64_OpARM64MULW(v, config)
-	case OpARM64MVN:
-		return rewriteValueARM64_OpARM64MVN(v, config)
-	case OpARM64NEG:
-		return rewriteValueARM64_OpARM64NEG(v, config)
-	case OpARM64NotEqual:
-		return rewriteValueARM64_OpARM64NotEqual(v, config)
-	case OpARM64OR:
-		return rewriteValueARM64_OpARM64OR(v, config)
-	case OpARM64ORconst:
-		return rewriteValueARM64_OpARM64ORconst(v, config)
-	case OpARM64ORshiftLL:
-		return rewriteValueARM64_OpARM64ORshiftLL(v, config)
-	case OpARM64ORshiftRA:
-		return rewriteValueARM64_OpARM64ORshiftRA(v, config)
-	case OpARM64ORshiftRL:
-		return rewriteValueARM64_OpARM64ORshiftRL(v, config)
-	case OpARM64SLL:
-		return rewriteValueARM64_OpARM64SLL(v, config)
-	case OpARM64SLLconst:
-		return rewriteValueARM64_OpARM64SLLconst(v, config)
-	case OpARM64SRA:
-		return rewriteValueARM64_OpARM64SRA(v, config)
-	case OpARM64SRAconst:
-		return rewriteValueARM64_OpARM64SRAconst(v, config)
-	case OpARM64SRL:
-		return rewriteValueARM64_OpARM64SRL(v, config)
-	case OpARM64SRLconst:
-		return rewriteValueARM64_OpARM64SRLconst(v, config)
-	case OpARM64SUB:
-		return rewriteValueARM64_OpARM64SUB(v, config)
-	case OpARM64SUBconst:
-		return rewriteValueARM64_OpARM64SUBconst(v, config)
-	case OpARM64SUBshiftLL:
-		return rewriteValueARM64_OpARM64SUBshiftLL(v, config)
-	case OpARM64SUBshiftRA:
-		return rewriteValueARM64_OpARM64SUBshiftRA(v, config)
-	case OpARM64SUBshiftRL:
-		return rewriteValueARM64_OpARM64SUBshiftRL(v, config)
-	case OpARM64UDIV:
-		return rewriteValueARM64_OpARM64UDIV(v, config)
-	case OpARM64UDIVW:
-		return rewriteValueARM64_OpARM64UDIVW(v, config)
-	case OpARM64UMOD:
-		return rewriteValueARM64_OpARM64UMOD(v, config)
-	case OpARM64UMODW:
-		return rewriteValueARM64_OpARM64UMODW(v, config)
-	case OpARM64XOR:
-		return rewriteValueARM64_OpARM64XOR(v, config)
-	case OpARM64XORconst:
-		return rewriteValueARM64_OpARM64XORconst(v, config)
-	case OpARM64XORshiftLL:
-		return rewriteValueARM64_OpARM64XORshiftLL(v, config)
-	case OpARM64XORshiftRA:
-		return rewriteValueARM64_OpARM64XORshiftRA(v, config)
-	case OpARM64XORshiftRL:
-		return rewriteValueARM64_OpARM64XORshiftRL(v, config)
-	case OpAdd16:
-		return rewriteValueARM64_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValueARM64_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValueARM64_OpAdd32F(v, config)
-	case OpAdd64:
-		return rewriteValueARM64_OpAdd64(v, config)
-	case OpAdd64F:
-		return rewriteValueARM64_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValueARM64_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValueARM64_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValueARM64_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValueARM64_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValueARM64_OpAnd32(v, config)
-	case OpAnd64:
-		return rewriteValueARM64_OpAnd64(v, config)
-	case OpAnd8:
-		return rewriteValueARM64_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValueARM64_OpAndB(v, config)
-	case OpAtomicAdd32:
-		return rewriteValueARM64_OpAtomicAdd32(v, config)
-	case OpAtomicAdd64:
-		return rewriteValueARM64_OpAtomicAdd64(v, config)
-	case OpAtomicAnd8:
-		return rewriteValueARM64_OpAtomicAnd8(v, config)
-	case OpAtomicCompareAndSwap32:
-		return rewriteValueARM64_OpAtomicCompareAndSwap32(v, config)
-	case OpAtomicCompareAndSwap64:
-		return rewriteValueARM64_OpAtomicCompareAndSwap64(v, config)
-	case OpAtomicExchange32:
-		return rewriteValueARM64_OpAtomicExchange32(v, config)
-	case OpAtomicExchange64:
-		return rewriteValueARM64_OpAtomicExchange64(v, config)
-	case OpAtomicLoad32:
-		return rewriteValueARM64_OpAtomicLoad32(v, config)
-	case OpAtomicLoad64:
-		return rewriteValueARM64_OpAtomicLoad64(v, config)
-	case OpAtomicLoadPtr:
-		return rewriteValueARM64_OpAtomicLoadPtr(v, config)
-	case OpAtomicOr8:
-		return rewriteValueARM64_OpAtomicOr8(v, config)
-	case OpAtomicStore32:
-		return rewriteValueARM64_OpAtomicStore32(v, config)
-	case OpAtomicStore64:
-		return rewriteValueARM64_OpAtomicStore64(v, config)
-	case OpAtomicStorePtrNoWB:
-		return rewriteValueARM64_OpAtomicStorePtrNoWB(v, config)
-	case OpAvg64u:
-		return rewriteValueARM64_OpAvg64u(v, config)
-	case OpBswap32:
-		return rewriteValueARM64_OpBswap32(v, config)
-	case OpBswap64:
-		return rewriteValueARM64_OpBswap64(v, config)
-	case OpClosureCall:
-		return rewriteValueARM64_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValueARM64_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValueARM64_OpCom32(v, config)
-	case OpCom64:
-		return rewriteValueARM64_OpCom64(v, config)
-	case OpCom8:
-		return rewriteValueARM64_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValueARM64_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValueARM64_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValueARM64_OpConst32F(v, config)
-	case OpConst64:
-		return rewriteValueARM64_OpConst64(v, config)
-	case OpConst64F:
-		return rewriteValueARM64_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValueARM64_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValueARM64_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValueARM64_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValueARM64_OpConvert(v, config)
-	case OpCtz32:
-		return rewriteValueARM64_OpCtz32(v, config)
-	case OpCtz64:
-		return rewriteValueARM64_OpCtz64(v, config)
-	case OpCvt32Fto32:
-		return rewriteValueARM64_OpCvt32Fto32(v, config)
-	case OpCvt32Fto32U:
-		return rewriteValueARM64_OpCvt32Fto32U(v, config)
-	case OpCvt32Fto64:
-		return rewriteValueARM64_OpCvt32Fto64(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValueARM64_OpCvt32Fto64F(v, config)
-	case OpCvt32Fto64U:
-		return rewriteValueARM64_OpCvt32Fto64U(v, config)
-	case OpCvt32Uto32F:
-		return rewriteValueARM64_OpCvt32Uto32F(v, config)
-	case OpCvt32Uto64F:
-		return rewriteValueARM64_OpCvt32Uto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValueARM64_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValueARM64_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValueARM64_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValueARM64_OpCvt64Fto32F(v, config)
-	case OpCvt64Fto32U:
-		return rewriteValueARM64_OpCvt64Fto32U(v, config)
-	case OpCvt64Fto64:
-		return rewriteValueARM64_OpCvt64Fto64(v, config)
-	case OpCvt64Fto64U:
-		return rewriteValueARM64_OpCvt64Fto64U(v, config)
-	case OpCvt64Uto32F:
-		return rewriteValueARM64_OpCvt64Uto32F(v, config)
-	case OpCvt64Uto64F:
-		return rewriteValueARM64_OpCvt64Uto64F(v, config)
-	case OpCvt64to32F:
-		return rewriteValueARM64_OpCvt64to32F(v, config)
-	case OpCvt64to64F:
-		return rewriteValueARM64_OpCvt64to64F(v, config)
-	case OpDeferCall:
-		return rewriteValueARM64_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValueARM64_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValueARM64_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValueARM64_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValueARM64_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValueARM64_OpDiv32u(v, config)
-	case OpDiv64:
-		return rewriteValueARM64_OpDiv64(v, config)
-	case OpDiv64F:
-		return rewriteValueARM64_OpDiv64F(v, config)
-	case OpDiv64u:
-		return rewriteValueARM64_OpDiv64u(v, config)
-	case OpDiv8:
-		return rewriteValueARM64_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValueARM64_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValueARM64_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValueARM64_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValueARM64_OpEq32F(v, config)
-	case OpEq64:
-		return rewriteValueARM64_OpEq64(v, config)
-	case OpEq64F:
-		return rewriteValueARM64_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValueARM64_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValueARM64_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValueARM64_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValueARM64_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValueARM64_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValueARM64_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValueARM64_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValueARM64_OpGeq32U(v, config)
-	case OpGeq64:
-		return rewriteValueARM64_OpGeq64(v, config)
-	case OpGeq64F:
-		return rewriteValueARM64_OpGeq64F(v, config)
-	case OpGeq64U:
-		return rewriteValueARM64_OpGeq64U(v, config)
-	case OpGeq8:
-		return rewriteValueARM64_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValueARM64_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValueARM64_OpGetClosurePtr(v, config)
-	case OpGoCall:
-		return rewriteValueARM64_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValueARM64_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValueARM64_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValueARM64_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValueARM64_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValueARM64_OpGreater32U(v, config)
-	case OpGreater64:
-		return rewriteValueARM64_OpGreater64(v, config)
-	case OpGreater64F:
-		return rewriteValueARM64_OpGreater64F(v, config)
-	case OpGreater64U:
-		return rewriteValueARM64_OpGreater64U(v, config)
-	case OpGreater8:
-		return rewriteValueARM64_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValueARM64_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValueARM64_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValueARM64_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValueARM64_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValueARM64_OpHmul32u(v, config)
-	case OpHmul64:
-		return rewriteValueARM64_OpHmul64(v, config)
-	case OpHmul64u:
-		return rewriteValueARM64_OpHmul64u(v, config)
-	case OpHmul8:
-		return rewriteValueARM64_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValueARM64_OpHmul8u(v, config)
-	case OpInterCall:
-		return rewriteValueARM64_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValueARM64_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValueARM64_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValueARM64_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValueARM64_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValueARM64_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValueARM64_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValueARM64_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValueARM64_OpLeq32U(v, config)
-	case OpLeq64:
-		return rewriteValueARM64_OpLeq64(v, config)
-	case OpLeq64F:
-		return rewriteValueARM64_OpLeq64F(v, config)
-	case OpLeq64U:
-		return rewriteValueARM64_OpLeq64U(v, config)
-	case OpLeq8:
-		return rewriteValueARM64_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValueARM64_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValueARM64_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValueARM64_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValueARM64_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValueARM64_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValueARM64_OpLess32U(v, config)
-	case OpLess64:
-		return rewriteValueARM64_OpLess64(v, config)
-	case OpLess64F:
-		return rewriteValueARM64_OpLess64F(v, config)
-	case OpLess64U:
-		return rewriteValueARM64_OpLess64U(v, config)
-	case OpLess8:
-		return rewriteValueARM64_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValueARM64_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValueARM64_OpLoad(v, config)
-	case OpLrot16:
-		return rewriteValueARM64_OpLrot16(v, config)
-	case OpLrot32:
-		return rewriteValueARM64_OpLrot32(v, config)
-	case OpLrot64:
-		return rewriteValueARM64_OpLrot64(v, config)
-	case OpLrot8:
-		return rewriteValueARM64_OpLrot8(v, config)
-	case OpLsh16x16:
-		return rewriteValueARM64_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValueARM64_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValueARM64_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValueARM64_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValueARM64_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValueARM64_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValueARM64_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValueARM64_OpLsh32x8(v, config)
-	case OpLsh64x16:
-		return rewriteValueARM64_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValueARM64_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValueARM64_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValueARM64_OpLsh64x8(v, config)
-	case OpLsh8x16:
-		return rewriteValueARM64_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValueARM64_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValueARM64_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValueARM64_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValueARM64_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValueARM64_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValueARM64_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValueARM64_OpMod32u(v, config)
-	case OpMod64:
-		return rewriteValueARM64_OpMod64(v, config)
-	case OpMod64u:
-		return rewriteValueARM64_OpMod64u(v, config)
-	case OpMod8:
-		return rewriteValueARM64_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValueARM64_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValueARM64_OpMove(v, config)
-	case OpMul16:
-		return rewriteValueARM64_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValueARM64_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValueARM64_OpMul32F(v, config)
-	case OpMul64:
-		return rewriteValueARM64_OpMul64(v, config)
-	case OpMul64F:
-		return rewriteValueARM64_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValueARM64_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValueARM64_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValueARM64_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValueARM64_OpNeg32F(v, config)
-	case OpNeg64:
-		return rewriteValueARM64_OpNeg64(v, config)
-	case OpNeg64F:
-		return rewriteValueARM64_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValueARM64_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValueARM64_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValueARM64_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValueARM64_OpNeq32F(v, config)
-	case OpNeq64:
-		return rewriteValueARM64_OpNeq64(v, config)
-	case OpNeq64F:
-		return rewriteValueARM64_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValueARM64_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValueARM64_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValueARM64_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValueARM64_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValueARM64_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValueARM64_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValueARM64_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValueARM64_OpOr32(v, config)
-	case OpOr64:
-		return rewriteValueARM64_OpOr64(v, config)
-	case OpOr8:
-		return rewriteValueARM64_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValueARM64_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValueARM64_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValueARM64_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValueARM64_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValueARM64_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValueARM64_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValueARM64_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValueARM64_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValueARM64_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValueARM64_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValueARM64_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValueARM64_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValueARM64_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValueARM64_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValueARM64_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValueARM64_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValueARM64_OpRsh32x8(v, config)
-	case OpRsh64Ux16:
-		return rewriteValueARM64_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValueARM64_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValueARM64_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValueARM64_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValueARM64_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValueARM64_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValueARM64_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValueARM64_OpRsh64x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValueARM64_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValueARM64_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValueARM64_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValueARM64_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValueARM64_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValueARM64_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValueARM64_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValueARM64_OpRsh8x8(v, config)
-	case OpSignExt16to32:
-		return rewriteValueARM64_OpSignExt16to32(v, config)
-	case OpSignExt16to64:
-		return rewriteValueARM64_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValueARM64_OpSignExt32to64(v, config)
-	case OpSignExt8to16:
-		return rewriteValueARM64_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValueARM64_OpSignExt8to32(v, config)
-	case OpSignExt8to64:
-		return rewriteValueARM64_OpSignExt8to64(v, config)
-	case OpSlicemask:
-		return rewriteValueARM64_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValueARM64_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValueARM64_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValueARM64_OpStore(v, config)
-	case OpSub16:
-		return rewriteValueARM64_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValueARM64_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValueARM64_OpSub32F(v, config)
-	case OpSub64:
-		return rewriteValueARM64_OpSub64(v, config)
-	case OpSub64F:
-		return rewriteValueARM64_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValueARM64_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValueARM64_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValueARM64_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValueARM64_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValueARM64_OpTrunc32to8(v, config)
-	case OpTrunc64to16:
-		return rewriteValueARM64_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValueARM64_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValueARM64_OpTrunc64to8(v, config)
-	case OpXor16:
-		return rewriteValueARM64_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValueARM64_OpXor32(v, config)
-	case OpXor64:
-		return rewriteValueARM64_OpXor64(v, config)
-	case OpXor8:
-		return rewriteValueARM64_OpXor8(v, config)
-	case OpZero:
-		return rewriteValueARM64_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValueARM64_OpZeroExt16to32(v, config)
-	case OpZeroExt16to64:
-		return rewriteValueARM64_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValueARM64_OpZeroExt32to64(v, config)
-	case OpZeroExt8to16:
-		return rewriteValueARM64_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValueARM64_OpZeroExt8to32(v, config)
-	case OpZeroExt8to64:
-		return rewriteValueARM64_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ADD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADD (MOVDconst [c]) x)
-	// cond:
-	// result: (ADDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (MOVDconst [c]))
-	// cond:
-	// result: (ADDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (NEG y))
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64NEG {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (NEG y) x)
-	// cond:
-	// result: (SUB x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64NEG {
-			break
-		}
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (SLLconst [c] y))
-	// cond:
-	// result: (ADDshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (SLLconst [c] y) x)
-	// cond:
-	// result: (ADDshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (SRLconst [c] y))
-	// cond:
-	// result: (ADDshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ADDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (SRLconst [c] y) x)
-	// cond:
-	// result: (ADDshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ADDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (SRAconst [c] y))
-	// cond:
-	// result: (ADDshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ADDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (SRAconst [c] y) x)
-	// cond:
-	// result: (ADDshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ADDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ADDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
-	// cond:
-	// result: (MOVDaddr [off1+off2] {sym} ptr)
-	for {
-		off1 := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym := v_0.Aux
-		ptr := v_0.Args[0]
-		v.reset(OpARM64MOVDaddr)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (ADDconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c+d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = c + d
-		return true
-	}
-	// match: (ADDconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (ADDconst [c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c + d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (ADDconst [c-d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c - d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ADDshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftLL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ADDconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftLL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ADDconst x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ADDshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftRA (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ADDconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftRA x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ADDconst x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ADDshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDshiftRL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ADDconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ADDshiftRL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ADDconst x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64AND(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AND (MOVDconst [c]) x)
-	// cond:
-	// result: (ANDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVDconst [c]))
-	// cond:
-	// result: (ANDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MVN y))
-	// cond:
-	// result: (BIC x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MVN {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpARM64BIC)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (SLLconst [c] y))
-	// cond:
-	// result: (ANDshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ANDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (SLLconst [c] y) x)
-	// cond:
-	// result: (ANDshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ANDshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (SRLconst [c] y))
-	// cond:
-	// result: (ANDshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ANDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (SRLconst [c] y) x)
-	// cond:
-	// result: (ANDshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ANDshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND x (SRAconst [c] y))
-	// cond:
-	// result: (ANDshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ANDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (SRAconst [c] y) x)
-	// cond:
-	// result: (ANDshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ANDshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ANDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDconst [0]  _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDconst [-1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = c & d
-		return true
-	}
-	// match: (ANDconst [c] (ANDconst [d] x))
-	// cond:
-	// result: (ANDconst [c&d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ANDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ANDshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftLL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ANDconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftLL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ANDconst x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDshiftLL x y:(SLLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARM64SLLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ANDshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftRA (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ANDconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftRA x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ANDconst x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDshiftRA x y:(SRAconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARM64SRAconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ANDshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDshiftRL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ANDconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ANDshiftRL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ANDconst x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDshiftRL x y:(SRLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARM64SRLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64BIC(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BIC x (MOVDconst [c]))
-	// cond:
-	// result: (BICconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64BICconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (BIC x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (BIC x (SLLconst [c] y))
-	// cond:
-	// result: (BICshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64BICshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (BIC x (SRLconst [c] y))
-	// cond:
-	// result: (BICshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64BICshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (BIC x (SRAconst [c] y))
-	// cond:
-	// result: (BICshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64BICshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64BICconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICconst [-1] _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (BICconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [d&^c])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = d &^ c
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64BICshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftLL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (BICconst x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64BICconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64BICshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftRA x (MOVDconst [c]) [d])
-	// cond:
-	// result: (BICconst x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64BICconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64BICshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (BICshiftRL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (BICconst x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64BICconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (BICshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMP(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMP x (MOVDconst [c]))
-	// cond:
-	// result: (CMPconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64CMPconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMP (MOVDconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPconst [c] x))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SLLconst [c] y))
-	// cond:
-	// result: (CMPshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64CMPshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMP (SLLconst [c] y) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftLL x y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPshiftLL, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SRLconst [c] y))
-	// cond:
-	// result: (CMPshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64CMPshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMP (SRLconst [c] y) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftRL x y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPshiftRL, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMP x (SRAconst [c] y))
-	// cond:
-	// result: (CMPshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64CMPshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMP (SRAconst [c] y) x)
-	// cond:
-	// result: (InvertFlags (CMPshiftRA x y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPshiftRA, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMPW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPW x (MOVDconst [c]))
-	// cond:
-	// result: (CMPWconst [int64(int32(c))] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64CMPWconst)
-		v.AuxInt = int64(int32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPW (MOVDconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPWconst [int64(int32(c))] x))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPWconst, TypeFlags)
-		v0.AuxInt = int64(int32(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMPWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(OpARM64FlagEQ)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_UGT)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpARM64FlagGT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpARM64FlagGT_UGT)
-		return true
-	}
-	// match: (CMPWconst (MOVBUreg _) [c])
-	// cond: 0xff < int32(c)
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVBUreg {
-			break
-		}
-		if !(0xff < int32(c)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPWconst (MOVHUreg _) [c])
-	// cond: 0xffff < int32(c)
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVHUreg {
-			break
-		}
-		if !(0xffff < int32(c)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMPconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPconst  (MOVDconst [x]) [y])
-	// cond: x==y
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x == y) {
-			break
-		}
-		v.reset(OpARM64FlagEQ)
-		return true
-	}
-	// match: (CMPconst  (MOVDconst [x]) [y])
-	// cond: int64(x)<int64(y) && uint64(x)<uint64(y)
-	// result: (FlagLT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) < int64(y) && uint64(x) < uint64(y)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst  (MOVDconst [x]) [y])
-	// cond: int64(x)<int64(y) && uint64(x)>uint64(y)
-	// result: (FlagLT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) < int64(y) && uint64(x) > uint64(y)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_UGT)
-		return true
-	}
-	// match: (CMPconst  (MOVDconst [x]) [y])
-	// cond: int64(x)>int64(y) && uint64(x)<uint64(y)
-	// result: (FlagGT_ULT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) > int64(y) && uint64(x) < uint64(y)) {
-			break
-		}
-		v.reset(OpARM64FlagGT_ULT)
-		return true
-	}
-	// match: (CMPconst  (MOVDconst [x]) [y])
-	// cond: int64(x)>int64(y) && uint64(x)>uint64(y)
-	// result: (FlagGT_UGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) > int64(y) && uint64(x) > uint64(y)) {
-			break
-		}
-		v.reset(OpARM64FlagGT_UGT)
-		return true
-	}
-	// match: (CMPconst (MOVBUreg _) [c])
-	// cond: 0xff < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVBUreg {
-			break
-		}
-		if !(0xff < c) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (MOVHUreg _) [c])
-	// cond: 0xffff < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVHUreg {
-			break
-		}
-		if !(0xffff < c) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (MOVWUreg _) [c])
-	// cond: 0xffffffff < c
-	// result: (FlagLT_ULT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVWUreg {
-			break
-		}
-		if !(0xffffffff < c) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (ANDconst _ [m]) [n])
-	// cond: 0 <= m && m < n
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= m && m < n) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	// match: (CMPconst (SRLconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)
-	// result: (FlagLT_ULT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) {
-			break
-		}
-		v.reset(OpARM64FlagLT_ULT)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMPshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftLL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
-		v1.AuxInt = d
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftLL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (CMPconst x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64CMPconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMPshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftRA (MOVDconst [c]) x [d])
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
-		v1.AuxInt = d
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftRA x (MOVDconst [c]) [d])
-	// cond:
-	// result: (CMPconst x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64CMPconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CMPshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPshiftRL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v0.AuxInt = c
-		v1 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
-		v1.AuxInt = d
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (CMPshiftRL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (CMPconst x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64CMPconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CSELULT(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CSELULT x (MOVDconst [0]) flag)
-	// cond:
-	// result: (CSELULT0 x flag)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		flag := v.Args[2]
-		v.reset(OpARM64CSELULT0)
-		v.AddArg(x)
-		v.AddArg(flag)
-		return true
-	}
-	// match: (CSELULT _ y (FlagEQ))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (CSELULT x _ (FlagLT_ULT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CSELULT _ y (FlagLT_UGT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (CSELULT x _ (FlagGT_ULT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CSELULT _ y (FlagGT_UGT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64CSELULT0(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CSELULT0 _ (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (CSELULT0 x (FlagLT_ULT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CSELULT0 _ (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (CSELULT0 x (FlagGT_ULT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (CSELULT0 _ (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64DIV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DIV   (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(c)/int64(d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(c) / int64(d)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64DIVW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DIVW  (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c)/int32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(int32(c) / int32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64Equal(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Equal (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Equal (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (InvertFlags x))
-	// cond:
-	// result: (Equal x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64Equal)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64FMOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (FMOVDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64FMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64FMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64FMOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64FMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64FMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64FMOVSload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (FMOVSload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64FMOVSload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64FMOVSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64FMOVSstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64FMOVSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64FMOVSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64GreaterEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterEqual (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqual (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqual (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (InvertFlags x))
-	// cond:
-	// result: (LessEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64LessEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64GreaterEqualU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterEqualU (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqualU (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqualU (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqualU (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqualU (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqualU (InvertFlags x))
-	// cond:
-	// result: (LessEqualU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64LessEqualU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64GreaterThan(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterThan (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThan (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThan (InvertFlags x))
-	// cond:
-	// result: (LessThan x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64LessThan)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64GreaterThanU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterThanU (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThanU (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThanU (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThanU (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThanU (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThanU (InvertFlags x))
-	// cond:
-	// result: (LessThanU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64LessThanU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64LessEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessEqual (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqual (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqual (InvertFlags x))
-	// cond:
-	// result: (GreaterEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64GreaterEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64LessEqualU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessEqualU (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqualU (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqualU (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqualU (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqualU (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqualU (InvertFlags x))
-	// cond:
-	// result: (GreaterEqualU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64GreaterEqualU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64LessThan(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessThan (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThan (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThan (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (InvertFlags x))
-	// cond:
-	// result: (GreaterThan x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64GreaterThan)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64LessThanU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessThanU (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThanU (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThanU (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThanU (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThanU (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThanU (InvertFlags x))
-	// cond:
-	// result: (GreaterThanU x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64GreaterThanU)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOD   (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(c)%int64(d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(c) % int64(d)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MODW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MODW  (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c)%int32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(int32(c) % int32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVBUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVBUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64MOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARM64MOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVBstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVBUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVBload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARM64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVBstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVBreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg  (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
-	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
-	// cond:
-	// result: (MOVBstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVBreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVBUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVBstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
-	// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64MOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpARM64MOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVDreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDreg x)
-	// cond: x.Uses == 1
-	// result: (MOVDnop x)
-	for {
-		x := v.Args[0]
-		if !(x.Uses == 1) {
-			break
-		}
-		v.reset(OpARM64MOVDnop)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDreg  (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
-	// cond:
-	// result: (MOVDstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpARM64MOVDstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVDstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVDstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVDstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVHUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVHUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVHstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVHUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVHload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVHload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVHstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVHreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg  (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVHstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
-	// cond:
-	// result: (MOVHstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpARM64MOVHstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVHstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVWUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVWUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVWUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVWUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVWUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVWUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVWUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVHUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVWUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVWUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint32(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVWload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: (MOVDconst [0])
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWstorezero {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVWreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHUload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVWload _ _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVWload {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVBUreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVHreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVWreg _))
-	// cond:
-	// result: (MOVDreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpARM64MOVWreg {
-			break
-		}
-		v.reset(OpARM64MOVDreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg  (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(int32(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
-	// cond:
-	// result: (MOVWstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpARM64MOVWstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVWUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MOVWstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
-	// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
-			break
-		}
-		v.reset(OpARM64MOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
-	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
-			break
-		}
-		v.reset(OpARM64MOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MUL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MUL x (MOVDconst [-1]))
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != -1 {
-			break
-		}
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL _ (MOVDconst [0]))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MUL x (MOVDconst [1]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c-1) && c >= 3
-	// result: (ADDshiftLL x x [log2(c-1)])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c-1) && c >= 3) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c - 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c+1) && c >= 7
-	// result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c+1) && c >= 7) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c + 1)
-		v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: c%3 == 0 && isPowerOfTwo(c/3)
-	// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: c%5 == 0 && isPowerOfTwo(c/5)
-	// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 2
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: c%7 == 0 && isPowerOfTwo(c/7)
-	// result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%7 == 0 && isPowerOfTwo(c/7)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 7)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL x (MOVDconst [c]))
-	// cond: c%9 == 0 && isPowerOfTwo(c/9)
-	// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVDconst [-1]) x)
-	// cond:
-	// result: (NEG x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVDconst [0]) _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MUL (MOVDconst [1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c-1) && c >= 3
-	// result: (ADDshiftLL x x [log2(c-1)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c-1) && c >= 3) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c - 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c+1) && c >= 7
-	// result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c+1) && c >= 7) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c + 1)
-		v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3)
-	// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%3 == 0 && isPowerOfTwo(c/3)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5)
-	// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%5 == 0 && isPowerOfTwo(c/5)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 2
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: c%7 == 0 && isPowerOfTwo(c/7)
-	// result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%7 == 0 && isPowerOfTwo(c/7)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 7)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL (MOVDconst [c]) x)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9)
-	// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%9 == 0 && isPowerOfTwo(c/9)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MUL   (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c*d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = c * d
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MULW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULW x (MOVDconst [c]))
-	// cond: int32(c)==-1
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW _ (MOVDconst [c]))
-	// cond: int32(c)==0
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: int32(c)==1
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(int32(c) == 1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c-1) && int32(c) >= 3
-	// result: (ADDshiftLL x x [log2(c-1)])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c - 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c+1) && int32(c) >= 7
-	// result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c + 1)
-		v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-	// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-	// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 2
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-	// result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 7)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW x (MOVDconst [c]))
-	// cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-	// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: int32(c)==-1
-	// result: (NEG x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) _)
-	// cond: int32(c)==0
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: int32(c)==1
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(int32(c) == 1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c)
-	// result: (SLLconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c-1) && int32(c) >= 3
-	// result: (ADDshiftLL x x [log2(c-1)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c - 1)
-		v.AddArg(x)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: isPowerOfTwo(c+1) && int32(c) >= 7
-	// result: (ADDshiftLL (NEG <x.Type> x) x [log2(c+1)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
-			break
-		}
-		v.reset(OpARM64ADDshiftLL)
-		v.AuxInt = log2(c + 1)
-		v0 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
-	// result: (SLLconst [log2(c/3)] (ADDshiftLL <x.Type> x x [1]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 3)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 1
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
-	// result: (SLLconst [log2(c/5)] (ADDshiftLL <x.Type> x x [2]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 5)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 2
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
-	// result: (SLLconst [log2(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 7)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v1 := b.NewValue0(v.Line, OpARM64NEG, x.Type)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW (MOVDconst [c]) x)
-	// cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
-	// result: (SLLconst [log2(c/9)] (ADDshiftLL <x.Type> x x [3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = log2(c / 9)
-		v0 := b.NewValue0(v.Line, OpARM64ADDshiftLL, x.Type)
-		v0.AuxInt = 3
-		v0.AddArg(x)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (MULW  (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c)*int32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(int32(c) * int32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64MVN(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MVN (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [^c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = ^c
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64NEG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEG (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [-c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -c
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64NotEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NotEqual (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagEQ {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (NotEqual (FlagLT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagLT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagLT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagGT_ULT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_ULT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagGT_UGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64FlagGT_UGT {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (InvertFlags x))
-	// cond:
-	// result: (NotEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpARM64NotEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64OR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OR  (MOVDconst [c]) x)
-	// cond:
-	// result: (ORconst  [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x (MOVDconst [c]))
-	// cond:
-	// result: (ORconst  [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x s:(SLLconst [c] y))
-	// cond: s.Uses == 1 && clobber(s)
-	// result: (ORshiftLL  x y [c])
-	for {
-		x := v.Args[0]
-		s := v.Args[1]
-		if s.Op != OpARM64SLLconst {
-			break
-		}
-		c := s.AuxInt
-		y := s.Args[0]
-		if !(s.Uses == 1 && clobber(s)) {
-			break
-		}
-		v.reset(OpARM64ORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  s:(SLLconst [c] y) x)
-	// cond: s.Uses == 1 && clobber(s)
-	// result: (ORshiftLL  x y [c])
-	for {
-		s := v.Args[0]
-		if s.Op != OpARM64SLLconst {
-			break
-		}
-		c := s.AuxInt
-		y := s.Args[0]
-		x := v.Args[1]
-		if !(s.Uses == 1 && clobber(s)) {
-			break
-		}
-		v.reset(OpARM64ORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  x (SLLconst [c] y))
-	// cond:
-	// result: (ORshiftLL  x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  (SLLconst [c] y) x)
-	// cond:
-	// result: (ORshiftLL  x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  x (SRLconst [c] y))
-	// cond:
-	// result: (ORshiftRL  x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  (SRLconst [c] y) x)
-	// cond:
-	// result: (ORshiftRL  x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  x (SRAconst [c] y))
-	// cond:
-	// result: (ORshiftRA  x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64ORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR  (SRAconst [c] y) x)
-	// cond:
-	// result: (ORshiftRA  x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64ORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] 	y0:(MOVDnop x0:(MOVBUload [i]   {s} p mem))) 	y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) 	y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 	&& mergePoint(b,x0,x1,x2,x3) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) 	&& clobber(o0) && clobber(o1) && clobber(s0)
-	// result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [i-3] p) mem)
-	for {
-		t := v.Type
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 8 {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o1.AuxInt != 16 {
-			break
-		}
-		s0 := o1.Args[0]
-		if s0.Op != OpARM64SLLconst {
-			break
-		}
-		if s0.AuxInt != 24 {
-			break
-		}
-		y0 := s0.Args[0]
-		if y0.Op != OpARM64MOVDnop {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o1.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := o0.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		y3 := v.Args[1]
-		if y3.Op != OpARM64MOVDnop {
-			break
-		}
-		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3)
-		v0 := b.NewValue0(v.Line, OpARM64MOVWUload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.Aux = s
-		v1 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v1.AuxInt = i - 3
-		v1.AddArg(p)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] 	y0:(MOVDnop x0:(MOVBUload [i]   {s} p mem))) 	y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) 	y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) 	y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem))) 	y5:(MOVDnop x5:(MOVBUload [i-5] {s} p mem))) 	y6:(MOVDnop x6:(MOVBUload [i-6] {s} p mem))) 	y7:(MOVDnop x7:(MOVBUload [i-7] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 	&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 	&& y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 	&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) 	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) 	&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) 	&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) 	&& clobber(o4) && clobber(o5) && clobber(s0)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i-7] p) mem))
-	for {
-		t := v.Type
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 8 {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o1.AuxInt != 16 {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o2.AuxInt != 24 {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o3.AuxInt != 32 {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o4.AuxInt != 40 {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o5.AuxInt != 48 {
-			break
-		}
-		s0 := o5.Args[0]
-		if s0.Op != OpARM64SLLconst {
-			break
-		}
-		if s0.AuxInt != 56 {
-			break
-		}
-		y0 := s0.Args[0]
-		if y0.Op != OpARM64MOVDnop {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o5.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := o4.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		y3 := o3.Args[1]
-		if y3.Op != OpARM64MOVDnop {
-			break
-		}
-		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		y4 := o2.Args[1]
-		if y4.Op != OpARM64MOVDnop {
-			break
-		}
-		x4 := y4.Args[0]
-		if x4.Op != OpARM64MOVBUload {
-			break
-		}
-		if x4.AuxInt != i-4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		y5 := o1.Args[1]
-		if y5.Op != OpARM64MOVDnop {
-			break
-		}
-		x5 := y5.Args[0]
-		if x5.Op != OpARM64MOVBUload {
-			break
-		}
-		if x5.AuxInt != i-5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if mem != x5.Args[1] {
-			break
-		}
-		y6 := o0.Args[1]
-		if y6.Op != OpARM64MOVDnop {
-			break
-		}
-		x6 := y6.Args[0]
-		if x6.Op != OpARM64MOVBUload {
-			break
-		}
-		if x6.AuxInt != i-6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if mem != x6.Args[1] {
-			break
-		}
-		y7 := v.Args[1]
-		if y7.Op != OpARM64MOVDnop {
-			break
-		}
-		x7 := y7.Args[0]
-		if x7.Op != OpARM64MOVBUload {
-			break
-		}
-		if x7.AuxInt != i-7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if mem != x7.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpARM64REV, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDload, t)
-		v1.Aux = s
-		v2 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v2.AuxInt = i - 7
-		v2.AddArg(p)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] 	y0:(MOVDnop x0:(MOVBUload [i]   {s} p mem))) 	y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem))) 	y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 	&& o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 	&& mergePoint(b,x0,x1,x2,x3) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) 	&& clobber(o0) && clobber(o1) && clobber(s0)
-	// result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i] p) mem))
-	for {
-		t := v.Type
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 8 {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o1.AuxInt != 16 {
-			break
-		}
-		s0 := o1.Args[0]
-		if s0.Op != OpARM64SLLconst {
-			break
-		}
-		if s0.AuxInt != 24 {
-			break
-		}
-		y0 := s0.Args[0]
-		if y0.Op != OpARM64MOVDnop {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o1.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := o0.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i+2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		y3 := v.Args[1]
-		if y3.Op != OpARM64MOVDnop {
-			break
-		}
-		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload {
-			break
-		}
-		if x3.AuxInt != i+3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3)
-		v0 := b.NewValue0(v.Line, OpARM64REVW, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVWUload, t)
-		v1.Aux = s
-		v2 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v2.AuxInt = i
-		v2.AddArg(p)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] 	y0:(MOVDnop x0:(MOVBUload [i]   {s} p mem))) 	y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem))) 	y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem))) 	y4:(MOVDnop x4:(MOVBUload [i+4] {s} p mem))) 	y5:(MOVDnop x5:(MOVBUload [i+5] {s} p mem))) 	y6:(MOVDnop x6:(MOVBUload [i+6] {s} p mem))) 	y7:(MOVDnop x7:(MOVBUload [i+7] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 	&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 	&& y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 	&& o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 	&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 	&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) 	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) 	&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) 	&& clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) 	&& clobber(o4) && clobber(o5) && clobber(s0)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i] p) mem))
-	for {
-		t := v.Type
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 8 {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o1.AuxInt != 16 {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o2.AuxInt != 24 {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o3.AuxInt != 32 {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o4.AuxInt != 40 {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o5.AuxInt != 48 {
-			break
-		}
-		s0 := o5.Args[0]
-		if s0.Op != OpARM64SLLconst {
-			break
-		}
-		if s0.AuxInt != 56 {
-			break
-		}
-		y0 := s0.Args[0]
-		if y0.Op != OpARM64MOVDnop {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o5.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := o4.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i+2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		y3 := o3.Args[1]
-		if y3.Op != OpARM64MOVDnop {
-			break
-		}
-		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload {
-			break
-		}
-		if x3.AuxInt != i+3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		y4 := o2.Args[1]
-		if y4.Op != OpARM64MOVDnop {
-			break
-		}
-		x4 := y4.Args[0]
-		if x4.Op != OpARM64MOVBUload {
-			break
-		}
-		if x4.AuxInt != i+4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		y5 := o1.Args[1]
-		if y5.Op != OpARM64MOVDnop {
-			break
-		}
-		x5 := y5.Args[0]
-		if x5.Op != OpARM64MOVBUload {
-			break
-		}
-		if x5.AuxInt != i+5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if mem != x5.Args[1] {
-			break
-		}
-		y6 := o0.Args[1]
-		if y6.Op != OpARM64MOVDnop {
-			break
-		}
-		x6 := y6.Args[0]
-		if x6.Op != OpARM64MOVBUload {
-			break
-		}
-		if x6.AuxInt != i+6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if mem != x6.Args[1] {
-			break
-		}
-		y7 := v.Args[1]
-		if y7.Op != OpARM64MOVDnop {
-			break
-		}
-		x7 := y7.Args[0]
-		if x7.Op != OpARM64MOVBUload {
-			break
-		}
-		if x7.AuxInt != i+7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if mem != x7.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpARM64REV, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDload, t)
-		v1.Aux = s
-		v2 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v2.AuxInt = i
-		v2.AddArg(p)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORconst  [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORconst  [-1] _)
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORconst  [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = c | d
-		return true
-	}
-	// match: (ORconst  [c] (ORconst [d] x))
-	// cond:
-	// result: (ORconst [c|d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64ORconst)
-		v.AuxInt = c | d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ORshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftLL  (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ORconst  [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftLL  x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ORconst  x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ORconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORshiftLL  x y:(SLLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARM64SLLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (ORshiftLL <t> [8] 	y0:(MOVDnop x0:(MOVBUload [i]   {s} p mem)) 	y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 	&& mergePoint(b,x0,x1) != nil 	&& clobber(x0) && clobber(x1) 	&& clobber(y0) && clobber(y1)
-	// result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [i] p) mem)
-	for {
-		t := v.Type
-		if v.AuxInt != 8 {
-			break
-		}
-		y0 := v.Args[0]
-		if y0.Op != OpARM64MOVDnop {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := v.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpARM64MOVHUload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.Aux = s
-		v1 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v1.AuxInt = i
-		v1.AddArg(p)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] 	            x0:(MOVHUload [i]   {s} p mem) 	y1:(MOVDnop x1:(MOVBUload [i+2] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i+3] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 	&& y1.Uses == 1 && y2.Uses == 1 	&& o0.Uses == 1 	&& mergePoint(b,x0,x1,x2) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) 	&& clobber(y1) && clobber(y2) 	&& clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [i] p) mem)
-	for {
-		t := v.Type
-		if v.AuxInt != 24 {
-			break
-		}
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 16 {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpARM64MOVHUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o0.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := v.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpARM64MOVWUload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.Aux = s
-		v1 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v1.AuxInt = i
-		v1.AddArg(p)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] 	            x0:(MOVWUload [i]   {s} p mem) 	y1:(MOVDnop x1:(MOVBUload [i+4] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i+5] {s} p mem))) 	y3:(MOVDnop x3:(MOVBUload [i+6] {s} p mem))) 	y4:(MOVDnop x4:(MOVBUload [i+7] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 	&& y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 	&& mergePoint(b,x0,x1,x2,x3,x4) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) 	&& clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) 	&& clobber(o0) && clobber(o1) && clobber(o2)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [i] p) mem)
-	for {
-		t := v.Type
-		if v.AuxInt != 56 {
-			break
-		}
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 48 {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o1.AuxInt != 40 {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o2.AuxInt != 32 {
-			break
-		}
-		x0 := o2.Args[0]
-		if x0.Op != OpARM64MOVWUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o2.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i+4 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := o1.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i+5 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		y3 := o0.Args[1]
-		if y3.Op != OpARM64MOVDnop {
-			break
-		}
-		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload {
-			break
-		}
-		if x3.AuxInt != i+6 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		y4 := v.Args[1]
-		if y4.Op != OpARM64MOVDnop {
-			break
-		}
-		x4 := y4.Args[0]
-		if x4.Op != OpARM64MOVBUload {
-			break
-		}
-		if x4.AuxInt != i+7 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.Aux = s
-		v1 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v1.AuxInt = i
-		v1.AddArg(p)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORshiftLL <t> [8] 	y0:(MOVDnop x0:(MOVBUload [i]   {s} p mem)) 	y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem)))
-	// cond: ((i-1)%2 == 0 || i-1<256 && i-1>-256 && !isArg(s) && !isAuto(s)) 	&& x0.Uses == 1 && x1.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 	&& mergePoint(b,x0,x1) != nil 	&& clobber(x0) && clobber(x1) 	&& clobber(y0) && clobber(y1)
-	// result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i-1] {s} p mem))
-	for {
-		t := v.Type
-		if v.AuxInt != 8 {
-			break
-		}
-		y0 := v.Args[0]
-		if y0.Op != OpARM64MOVDnop {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVBUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := v.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		if !(((i-1)%2 == 0 || i-1 < 256 && i-1 > -256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpARM64REV16W, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVHUload, t)
-		v1.AuxInt = i - 1
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] 	y0:(REV16W  x0:(MOVHUload [i]   {s} p mem)) 	y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 	&& o0.Uses == 1 	&& mergePoint(b,x0,x1,x2) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) 	&& clobber(y0) && clobber(y1) && clobber(y2) 	&& clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [i-2] p) mem))
-	for {
-		t := v.Type
-		if v.AuxInt != 24 {
-			break
-		}
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 16 {
-			break
-		}
-		y0 := o0.Args[0]
-		if y0.Op != OpARM64REV16W {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVHUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o0.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := v.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpARM64REVW, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVWUload, t)
-		v1.Aux = s
-		v2 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v2.AuxInt = i - 2
-		v2.AddArg(p)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] 	y0:(REVW    x0:(MOVWUload [i]   {s} p mem)) 	y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) 	y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) 	y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) 	y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem)))
-	// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 	&& y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 	&& mergePoint(b,x0,x1,x2,x3,x4) != nil 	&& clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) 	&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) 	&& clobber(o0) && clobber(o1) && clobber(o2)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [i-4] p) mem))
-	for {
-		t := v.Type
-		if v.AuxInt != 56 {
-			break
-		}
-		o0 := v.Args[0]
-		if o0.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o0.AuxInt != 48 {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o1.AuxInt != 40 {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpARM64ORshiftLL {
-			break
-		}
-		if o2.AuxInt != 32 {
-			break
-		}
-		y0 := o2.Args[0]
-		if y0.Op != OpARM64REVW {
-			break
-		}
-		x0 := y0.Args[0]
-		if x0.Op != OpARM64MOVWUload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		y1 := o2.Args[1]
-		if y1.Op != OpARM64MOVDnop {
-			break
-		}
-		x1 := y1.Args[0]
-		if x1.Op != OpARM64MOVBUload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		y2 := o1.Args[1]
-		if y2.Op != OpARM64MOVDnop {
-			break
-		}
-		x2 := y2.Args[0]
-		if x2.Op != OpARM64MOVBUload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		y3 := o0.Args[1]
-		if y3.Op != OpARM64MOVDnop {
-			break
-		}
-		x3 := y3.Args[0]
-		if x3.Op != OpARM64MOVBUload {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		y4 := v.Args[1]
-		if y4.Op != OpARM64MOVDnop {
-			break
-		}
-		x4 := y4.Args[0]
-		if x4.Op != OpARM64MOVBUload {
-			break
-		}
-		if x4.AuxInt != i-4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4)
-		v0 := b.NewValue0(v.Line, OpARM64REV, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDload, t)
-		v1.Aux = s
-		v2 := b.NewValue0(v.Line, OpOffPtr, p.Type)
-		v2.AuxInt = i - 4
-		v2.AddArg(p)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ORshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftRA  (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ORconst  [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftRA  x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ORconst  x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ORconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORshiftRA  x y:(SRAconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARM64SRAconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64ORshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORshiftRL  (MOVDconst [c]) x [d])
-	// cond:
-	// result: (ORconst  [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64ORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (ORshiftRL  x (MOVDconst [c]) [d])
-	// cond:
-	// result: (ORconst  x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64ORconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORshiftRL  x y:(SRLconst x [c]) [d])
-	// cond: c==d
-	// result: y
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		y := v.Args[1]
-		if y.Op != OpARM64SRLconst {
-			break
-		}
-		c := y.AuxInt
-		if x != y.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLL x (MOVDconst [c]))
-	// cond:
-	// result: (SLLconst x [c&63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SLLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLLconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(d)<<uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(d) << uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRA x (MOVDconst [c]))
-	// cond:
-	// result: (SRAconst x [c&63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SRAconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(d)>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(d) >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRL x (MOVDconst [c]))
-	// cond:
-	// result: (SRLconst x [c&63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SRLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRLconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(uint64(d)>>uint64(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint64(d) >> uint64(c))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SUB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUB x (MOVDconst [c]))
-	// cond:
-	// result: (SUBconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SUBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUB x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SUB x (SLLconst [c] y))
-	// cond:
-	// result: (SUBshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64SUBshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB x (SRLconst [c] y))
-	// cond:
-	// result: (SUBshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64SUBshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (SUB x (SRAconst [c] y))
-	// cond:
-	// result: (SUBshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64SUBshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SUBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [d-c])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = d - c
-		return true
-	}
-	// match: (SUBconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (ADDconst [-c-d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = -c - d
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (ADDconst [-c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64ADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = -c + d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SUBshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftLL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SUBconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SUBshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftRA x (MOVDconst [c]) [d])
-	// cond:
-	// result: (SUBconst x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SUBconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64SUBshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBshiftRL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (SUBconst x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64SUBconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64UDIV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (UDIV x (MOVDconst [1]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (UDIV x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (SRLconst [log2(c)] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (UDIV  (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(uint64(c)/uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint64(c) / uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64UDIVW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (UDIVW x (MOVDconst [c]))
-	// cond: uint32(c)==1
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) == 1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (UDIVW x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c) && is32Bit(c)
-	// result: (SRLconst [log2(c)] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(uint32(c)/uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint32(c) / uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64UMOD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (UMOD _ (MOVDconst [1]))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (UMOD x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (ANDconst [c-1] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c - 1
-		v.AddArg(x)
-		return true
-	}
-	// match: (UMOD  (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(uint64(c)%uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint64(c) % uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64UMODW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (UMODW _ (MOVDconst [c]))
-	// cond: uint32(c)==1
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) == 1) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (UMODW x (MOVDconst [c]))
-	// cond: isPowerOfTwo(c) && is32Bit(c)
-	// result: (ANDconst [c-1] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c) && is32Bit(c)) {
-			break
-		}
-		v.reset(OpARM64ANDconst)
-		v.AuxInt = c - 1
-		v.AddArg(x)
-		return true
-	}
-	// match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(uint32(c)%uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = int64(uint32(c) % uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64XOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XOR (MOVDconst [c]) x)
-	// cond:
-	// result: (XORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x (MOVDconst [c]))
-	// cond:
-	// result: (XORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (XOR x (SLLconst [c] y))
-	// cond:
-	// result: (XORshiftLL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64XORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SLLconst [c] y) x)
-	// cond:
-	// result: (XORshiftLL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64XORshiftLL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR x (SRLconst [c] y))
-	// cond:
-	// result: (XORshiftRL x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64XORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SRLconst [c] y) x)
-	// cond:
-	// result: (XORshiftRL x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64XORshiftRL)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR x (SRAconst [c] y))
-	// cond:
-	// result: (XORshiftRA x y [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		y := v_1.Args[0]
-		v.reset(OpARM64XORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (XOR (SRAconst [c] y) x)
-	// cond:
-	// result: (XORshiftRA x y [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpARM64XORshiftRA)
-		v.AuxInt = c
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64XORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [-1] x)
-	// cond:
-	// result: (MVN x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpARM64MVN)
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	// match: (XORconst [c] (XORconst [d] x))
-	// cond:
-	// result: (XORconst [c^d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64XORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpARM64XORconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64XORshiftLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftLL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SLLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64XORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SLLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftLL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(uint64(c)<<uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64XORconst)
-		v.AuxInt = int64(uint64(c) << uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORshiftLL x (SLLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SLLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64XORshiftRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRA (MOVDconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SRAconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64XORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRAconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRA x (MOVDconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(int64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64XORconst)
-		v.AuxInt = int64(int64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORshiftRA x (SRAconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRAconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpARM64XORshiftRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORshiftRL (MOVDconst [c]) x [d])
-	// cond:
-	// result: (XORconst [c] (SRLconst <x.Type> x [d]))
-	for {
-		d := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpARM64XORconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpARM64SRLconst, x.Type)
-		v0.AuxInt = d
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (XORshiftRL x (MOVDconst [c]) [d])
-	// cond:
-	// result: (XORconst x [int64(uint64(c)>>uint64(d))])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpARM64XORconst)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORshiftRL x (SRLconst x [c]) [d])
-	// cond: c==d
-	// result: (MOVDconst [0])
-	for {
-		d := v.AuxInt
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64SRLconst {
-			break
-		}
-		c := v_1.AuxInt
-		if x != v_1.Args[0] {
-			break
-		}
-		if !(c == d) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (FADDS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FADDS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (FADDD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FADDD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (MOVDaddr {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(OpARM64MOVDaddr)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValueARM64_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd32 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicAdd32 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64LoweredAtomicAdd32)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd64 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicAdd64 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64LoweredAtomicAdd64)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAnd8 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicAnd8 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64LoweredAtomicAnd8)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
-	// cond:
-	// result: (LoweredAtomicCas32 ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARM64LoweredAtomicCas32)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
-	// cond:
-	// result: (LoweredAtomicCas64 ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpARM64LoweredAtomicCas64)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicExchange32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange32 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicExchange32 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64LoweredAtomicExchange32)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicExchange64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange64 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicExchange64 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64LoweredAtomicExchange64)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicLoad32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad32  ptr mem)
-	// cond:
-	// result: (LDARW ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64LDARW)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicLoad64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad64  ptr mem)
-	// cond:
-	// result: (LDAR  ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64LDAR)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicLoadPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoadPtr ptr mem)
-	// cond:
-	// result: (LDAR  ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64LDAR)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicOr8  ptr val mem)
-	// cond:
-	// result: (LoweredAtomicOr8  ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64LoweredAtomicOr8)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicStore32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore32      ptr val mem)
-	// cond:
-	// result: (STLRW ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64STLRW)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicStore64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore64      ptr val mem)
-	// cond:
-	// result: (STLR  ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64STLR)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond:
-	// result: (STLR  ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64STLR)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpAvg64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Avg64u <t> x y)
-	// cond:
-	// result: (ADD (ADD <t> (SRLconst <t> x [1]) (SRLconst <t> y [1])) (AND <t> (AND <t> x y) (MOVDconst [1])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64ADD)
-		v0 := b.NewValue0(v.Line, OpARM64ADD, t)
-		v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-		v2.AuxInt = 1
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpARM64AND, t)
-		v4 := b.NewValue0(v.Line, OpARM64AND, t)
-		v4.AddArg(x)
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v5.AuxInt = 1
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpBswap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap32 x)
-	// cond:
-	// result: (REVW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64REVW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpBswap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap64 x)
-	// cond:
-	// result: (REV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64REV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64CALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8 x)
-	// cond:
-	// result: (MVN x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MVN)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16 [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM64_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32 [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM64_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (FMOVSconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARM64FMOVSconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM64_OpConst64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64 [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM64_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (FMOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARM64FMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM64_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8 [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueARM64_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVDconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValueARM64_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValueARM64_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert x mem)
-	// cond:
-	// result: (MOVDconvert x mem)
-	for {
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64MOVDconvert)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpCtz32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz32 <t> x)
-	// cond:
-	// result: (CLZW (RBITW <t> x))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpARM64CLZW)
-		v0 := b.NewValue0(v.Line, OpARM64RBITW, t)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpCtz64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz64 <t> x)
-	// cond:
-	// result: (CLZ (RBIT <t> x))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpARM64CLZ)
-		v0 := b.NewValue0(v.Line, OpARM64RBIT, t)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (FCVTZSSW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZSSW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Fto32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32U x)
-	// cond:
-	// result: (FCVTZUSW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZUSW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64 x)
-	// cond:
-	// result: (FCVTZSS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZSS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (FCVTSD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTSD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Fto64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64U x)
-	// cond:
-	// result: (FCVTZUS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZUS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Uto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Uto32F x)
-	// cond:
-	// result: (UCVTFWS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64UCVTFWS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32Uto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Uto64F x)
-	// cond:
-	// result: (UCVTFWD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64UCVTFWD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (SCVTFWS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64SCVTFWS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (SCVTFWD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64SCVTFWD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (FCVTZSDW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZSDW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (FCVTDS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTDS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Fto32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32U x)
-	// cond:
-	// result: (FCVTZUDW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZUDW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto64 x)
-	// cond:
-	// result: (FCVTZSD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZSD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Fto64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto64U x)
-	// cond:
-	// result: (FCVTZUD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FCVTZUD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Uto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Uto32F x)
-	// cond:
-	// result: (UCVTFS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64UCVTFS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64Uto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Uto64F x)
-	// cond:
-	// result: (UCVTFD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64UCVTFD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to32F x)
-	// cond:
-	// result: (SCVTFS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64SCVTFS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpCvt64to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to64F x)
-	// cond:
-	// result: (SCVTFD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64SCVTFD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpARM64CALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16 x y)
-	// cond:
-	// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64DIVW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UDIVW)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32 x y)
-	// cond:
-	// result: (DIVW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64DIVW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (FDIVS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FDIVS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (UDIVW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UDIVW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64 x y)
-	// cond:
-	// result: (DIV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64DIV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (FDIVD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FDIVD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64u x y)
-	// cond:
-	// result: (UDIV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UDIV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8 x y)
-	// cond:
-	// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64DIVW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u x y)
-	// cond:
-	// result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UDIVW)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16 x y)
-	// cond:
-	// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32 x y)
-	// cond:
-	// result: (Equal (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (Equal (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64 x y)
-	// cond:
-	// result: (Equal (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (Equal (FCMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8 x y)
-	// cond:
-	// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB x y)
-	// cond:
-	// result: (XOR (MOVDconst [1]) (XOR <config.fe.TypeBool()> x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64XOR)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64XOR, config.fe.TypeBool())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (Equal (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64Equal)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16 x y)
-	// cond:
-	// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32 x y)
-	// cond:
-	// result: (GreaterEqual (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (GreaterEqual (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (GreaterEqualU (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64 x y)
-	// cond:
-	// result: (GreaterEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (GreaterEqual (FCMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U x y)
-	// cond:
-	// result: (GreaterEqualU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8 x y)
-	// cond:
-	// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U x y)
-	// cond:
-	// result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpARM64LoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValueARM64_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpARM64CALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16 x y)
-	// cond:
-	// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32 x y)
-	// cond:
-	// result: (GreaterThan (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (GreaterThan (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (GreaterThanU (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64 x y)
-	// cond:
-	// result: (GreaterThan (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (GreaterThan (FCMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U x y)
-	// cond:
-	// result: (GreaterThanU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8 x y)
-	// cond:
-	// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U x y)
-	// cond:
-	// result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16 x y)
-	// cond:
-	// result: (SRAconst (MULW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32 x y)
-	// cond:
-	// result: (SRAconst (MULL <config.fe.TypeInt64()> x y) [32])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpARM64MULL, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (SRAconst (UMULL <config.fe.TypeUInt64()> x y) [32])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpARM64UMULL, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64 x y)
-	// cond:
-	// result: (MULH x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MULH)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64u x y)
-	// cond:
-	// result: (UMULH x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UMULH)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8 x y)
-	// cond:
-	// result: (SRAconst (MULW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpARM64MULW, config.fe.TypeInt16())
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u x y)
-	// cond:
-	// result: (SRLconst (MUL <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpARM64MUL, config.fe.TypeUInt16())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64CALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (LessThanU (CMP idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpARM64LessThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil ptr)
-	// cond:
-	// result: (NotEqual (CMPconst [0] ptr))
-	for {
-		ptr := v.Args[0]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v0.AuxInt = 0
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (LessEqualU (CMP idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpARM64LessEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16 x y)
-	// cond:
-	// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32 x y)
-	// cond:
-	// result: (LessEqual (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (GreaterEqual (FCMPS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (LessEqualU (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64 x y)
-	// cond:
-	// result: (LessEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (GreaterEqual (FCMPD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U x y)
-	// cond:
-	// result: (LessEqualU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8 x y)
-	// cond:
-	// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U x y)
-	// cond:
-	// result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessEqualU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16 x y)
-	// cond:
-	// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32 x y)
-	// cond:
-	// result: (LessThan (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (GreaterThan (FCMPS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (LessThanU (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64 x y)
-	// cond:
-	// result: (LessThan (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (GreaterThan (FCMPD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U x y)
-	// cond:
-	// result: (LessThanU (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8 x y)
-	// cond:
-	// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThan)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U x y)
-	// cond:
-	// result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64LessThanU)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: t.IsBoolean()
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean()) {
-			break
-		}
-		v.reset(OpARM64MOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && isSigned(t))
-	// result: (MOVBload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpARM64MOVBload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && !isSigned(t))
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpARM64MOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && isSigned(t))
-	// result: (MOVHload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpARM64MOVHload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && !isSigned(t))
-	// result: (MOVHUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpARM64MOVHUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) && isSigned(t))
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpARM64MOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) && !isSigned(t))
-	// result: (MOVWUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpARM64MOVWUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t))
-	// result: (MOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpARM64MOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (FMOVSload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpARM64FMOVSload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (FMOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpARM64FMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpLrot16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot16 <t> x [c])
-	// cond:
-	// result: (OR (SLLconst <t> x [c&15]) (SRLconst <t> (ZeroExt16to64 x) [16-c&15]))
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARM64OR)
-		v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
-		v0.AuxInt = c & 15
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-		v1.AuxInt = 16 - c&15
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpLrot32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot32 x [c])
-	// cond:
-	// result: (RORWconst x [32-c&31])
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARM64RORWconst)
-		v.AuxInt = 32 - c&31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpLrot64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot64 x [c])
-	// cond:
-	// result: (RORconst  x [64-c&63])
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARM64RORconst)
-		v.AuxInt = 64 - c&63
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpLrot8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot8  <t> x [c])
-	// cond:
-	// result: (OR (SLLconst <t> x [c&7])  (SRLconst <t> (ZeroExt8to64  x) [8-c&7]))
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpARM64OR)
-		v0 := b.NewValue0(v.Line, OpARM64SLLconst, t)
-		v0.AuxInt = c & 7
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64SRLconst, t)
-		v1.AuxInt = 8 - c&7
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 16
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64  _ (MOVDconst [c]))
-	// cond: uint64(c) >= 16
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh16x64 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8  <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 32
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64  _ (MOVDconst [c]))
-	// cond: uint64(c) >= 32
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh32x64 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8  <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 64
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh64x64  _ (MOVDconst [c]))
-	// cond: uint64(c) >= 64
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh64x64 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8  <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64   x (MOVDconst [c]))
-	// cond: uint64(c) < 8
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpARM64SLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64   _ (MOVDconst [c]))
-	// cond: uint64(c) >= 8
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh8x64 <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM64_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8  <t> x y)
-	// cond:
-	// result: (CSELULT (SLL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16 x y)
-	// cond:
-	// result: (MODW (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MODW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UMODW)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32 x y)
-	// cond:
-	// result: (MODW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MODW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (UMODW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UMODW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64 x y)
-	// cond:
-	// result: (MOD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MOD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64u x y)
-	// cond:
-	// result: (UMOD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UMOD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8 x y)
-	// cond:
-	// result: (MODW (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MODW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u x y)
-	// cond:
-	// result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64UMODW)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVHstore dst (MOVHUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpARM64MOVHstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVWstore dst (MOVWUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVDstore dst (MOVDload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBUload [2] src mem) 		(MOVHstore dst (MOVHUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstore [4] dst (MOVBUload [4] src mem) 		(MOVWstore dst (MOVWUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVHstore [4] dst (MOVHUload [4] src mem) 		(MOVWstore dst (MOVWUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVBstore [6] dst (MOVBUload [6] src mem) 		(MOVHstore [4] dst (MOVHUload [4] src mem) 			(MOVWstore dst (MOVWUload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = 6
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 6
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVHUload, config.fe.TypeUInt16())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 12
-	// result: (MOVWstore [8] dst (MOVWUload [8] src mem) 		(MOVDstore dst (MOVDload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 12) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AuxInt = 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVWUload, config.fe.TypeUInt32())
-		v0.AuxInt = 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 16
-	// result: (MOVDstore [8] dst (MOVDload [8] src mem) 		(MOVDstore dst (MOVDload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 16) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AuxInt = 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v0.AuxInt = 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 24
-	// result: (MOVDstore [16] dst (MOVDload [16] src mem) 		(MOVDstore [8] dst (MOVDload [8] src mem) 			(MOVDstore dst (MOVDload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 24) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AuxInt = 16
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v0.AuxInt = 16
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v2.AuxInt = 8
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpARM64MOVDload, config.fe.TypeUInt64())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
-	// result: (Move [MakeSizeAndAlign(SizeAndAlign(s).Size()%8, 1).Int64()] 		(OffPtr <dst.Type> dst [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8]) 		(OffPtr <src.Type> src [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8]) 		(Move [MakeSizeAndAlign(SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8, 1).Int64()] dst src mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) {
-			break
-		}
-		v.reset(OpMove)
-		v.AuxInt = MakeSizeAndAlign(SizeAndAlign(s).Size()%8, 1).Int64()
-		v0 := b.NewValue0(v.Line, OpOffPtr, dst.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOffPtr, src.Type)
-		v1.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
-		v1.AddArg(src)
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMove, TypeMem)
-		v2.AuxInt = MakeSizeAndAlign(SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8, 1).Int64()
-		v2.AddArg(dst)
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 	&& !config.noDuffDevice
-	// result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/8))] dst src mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpARM64DUFFCOPY)
-		v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/8))
-		v.AddArg(dst)
-		v.AddArg(src)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size()%8 == 0
-	// result: (LoweredMove 		dst 		src 		(ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size()%8 == 0) {
-			break
-		}
-		v.reset(OpARM64LoweredMove)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpARM64ADDconst, src.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(src)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16 x y)
-	// cond:
-	// result: (MULW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MULW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32 x y)
-	// cond:
-	// result: (MULW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MULW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (FMULS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FMULS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (FMULD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FMULD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8 x y)
-	// cond:
-	// result: (MULW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64MULW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (FNEGS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FNEGS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (FNEGD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FNEGD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16 x y)
-	// cond:
-	// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32 x y)
-	// cond:
-	// result: (NotEqual (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (NotEqual (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPS, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64 x y)
-	// cond:
-	// result: (NotEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (NotEqual (FCMPD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64FCMPD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8 x y)
-	// cond:
-	// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (NotEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64NotEqual)
-		v0 := b.NewValue0(v.Line, OpARM64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpARM64LoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XOR (MOVDconst [1]) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64XOR)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr:(SP))
-	// cond:
-	// result: (MOVDaddr [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if ptr.Op != OpSP {
-			break
-		}
-		v.reset(OpARM64MOVDaddr)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADDconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(OpARM64ADDconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValueARM64_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 x (MOVDconst [c]))
-	// cond: uint64(c) < 16
-	// result: (SRLconst (ZeroExt16to64 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux64 _ (MOVDconst [c]))
-	// cond: uint64(c) >= 16
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh16Ux64 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 x y)
-	// cond:
-	// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 x y)
-	// cond:
-	// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 16
-	// result: (SRAconst (SignExt16to64 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x (MOVDconst [c]))
-	// cond: uint64(c) >= 16
-	// result: (SRAconst (SignExt16to64 x) [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 63
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x y)
-	// cond:
-	// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v2.AuxInt = 63
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8  x y)
-	// cond:
-	// result: (SRA (SignExt16to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 x (MOVDconst [c]))
-	// cond: uint64(c) < 32
-	// result: (SRLconst (ZeroExt32to64 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32Ux64 _ (MOVDconst [c]))
-	// cond: uint64(c) >= 32
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh32Ux64 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 x y)
-	// cond:
-	// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 x y)
-	// cond:
-	// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 32
-	// result: (SRAconst (SignExt32to64 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32x64 x (MOVDconst [c]))
-	// cond: uint64(c) >= 32
-	// result: (SRAconst (SignExt32to64 x) [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 63
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32x64 x y)
-	// cond:
-	// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v2.AuxInt = 63
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8  x y)
-	// cond:
-	// result: (SRA (SignExt32to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 x (MOVDconst [c]))
-	// cond: uint64(c) < 64
-	// result: (SRLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64Ux64 _ (MOVDconst [c]))
-	// cond: uint64(c) >= 64
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh64Ux64 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8  <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> x (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16 x y)
-	// cond:
-	// result: (SRA x (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v2.AuxInt = 63
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32 x y)
-	// cond:
-	// result: (SRA x (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v2.AuxInt = 63
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 64
-	// result: (SRAconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x64 x (MOVDconst [c]))
-	// cond: uint64(c) >= 64
-	// result: (SRAconst x [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x64 x y)
-	// cond:
-	// result: (SRA x (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v1.AuxInt = 63
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v2.AuxInt = 64
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8  x y)
-	// cond:
-	// result: (SRA x (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v2.AuxInt = 63
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64  x (MOVDconst [c]))
-	// cond: uint64(c) < 8
-	// result: (SRLconst (ZeroExt8to64  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpARM64SRLconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux64  _ (MOVDconst [c]))
-	// cond: uint64(c) >= 8
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpARM64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh8Ux64 <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  <t> x y)
-	// cond:
-	// result: (CSELULT (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64  y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64CSELULT)
-		v0 := b.NewValue0(v.Line, OpARM64SRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 x y)
-	// cond:
-	// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 x y)
-	// cond:
-	// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64   x (MOVDconst [c]))
-	// cond: uint64(c) < 8
-	// result: (SRAconst (SignExt8to64  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64  x (MOVDconst [c]))
-	// cond: uint64(c) >= 8
-	// result: (SRAconst (SignExt8to64  x) [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpARM64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpARM64SRAconst)
-		v.AuxInt = 63
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64 x y)
-	// cond:
-	// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v2.AuxInt = 63
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v3.AuxInt = 64
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8  x y)
-	// cond:
-	// result: (SRA (SignExt8to64 x) (CSELULT <y.Type> (ZeroExt8to64  y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64  y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SRA)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64CSELULT, y.Type)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpConst64, y.Type)
-		v3.AuxInt = 63
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpARM64CMPconst, TypeFlags)
-		v4.AuxInt = 64
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueARM64_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 x)
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVWreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (MVN (SRAconst <t> (SUBconst <t> x [1]) [63]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpARM64MVN)
-		v0 := b.NewValue0(v.Line, OpARM64SRAconst, t)
-		v0.AuxInt = 63
-		v1 := b.NewValue0(v.Line, OpARM64SUBconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueARM64_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (FSQRTD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64FSQRTD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpARM64CALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueARM64_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVHstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpARM64MOVHstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: !is32BitFloat(val.Type)
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: !is64BitFloat(val.Type)
-	// result: (MOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (FMOVSstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARM64FMOVSstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (FMOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpARM64FMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (FSUBS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FSUBS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (FSUBD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64FSUBD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpARM64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueARM64_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore ptr (MOVDconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVHstore ptr (MOVDconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpARM64MOVHstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVWstore ptr (MOVDconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVDstore ptr (MOVDconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] ptr (MOVDconst [0]) 		(MOVHstore ptr (MOVDconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstore [4] ptr (MOVDconst [0]) 		(MOVWstore ptr (MOVDconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = 4
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVHstore [4] ptr (MOVDconst [0]) 		(MOVWstore ptr (MOVDconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(OpARM64MOVHstore)
-		v.AuxInt = 4
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVBstore [6] ptr (MOVDconst [0]) 		(MOVHstore [4] ptr (MOVDconst [0]) 			(MOVWstore ptr (MOVDconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(OpARM64MOVBstore)
-		v.AuxInt = 6
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64MOVWstore, TypeMem)
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 12
-	// result: (MOVWstore [8] ptr (MOVDconst [0]) 		(MOVDstore ptr (MOVDconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 12) {
-			break
-		}
-		v.reset(OpARM64MOVWstore)
-		v.AuxInt = 8
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 16
-	// result: (MOVDstore [8] ptr (MOVDconst [0]) 		(MOVDstore ptr (MOVDconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 16) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AuxInt = 8
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 24
-	// result: (MOVDstore [16] ptr (MOVDconst [0]) 		(MOVDstore [8] ptr (MOVDconst [0]) 			(MOVDstore ptr (MOVDconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 24) {
-			break
-		}
-		v.reset(OpARM64MOVDstore)
-		v.AuxInt = 16
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpARM64MOVDstore, TypeMem)
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpARM64MOVDconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8
-	// result: (Zero [MakeSizeAndAlign(SizeAndAlign(s).Size()%8, 1).Int64()] 		(OffPtr <ptr.Type> ptr [SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8]) 		(Zero [MakeSizeAndAlign(SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8, 1).Int64()] ptr mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%8 != 0 && SizeAndAlign(s).Size() > 8) {
-			break
-		}
-		v.reset(OpZero)
-		v.AuxInt = MakeSizeAndAlign(SizeAndAlign(s).Size()%8, 1).Int64()
-		v0 := b.NewValue0(v.Line, OpOffPtr, ptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - SizeAndAlign(s).Size()%8
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZero, TypeMem)
-		v1.AuxInt = MakeSizeAndAlign(SizeAndAlign(s).Size()-SizeAndAlign(s).Size()%8, 1).Int64()
-		v1.AddArg(ptr)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 	&& !config.noDuffDevice
-	// result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpARM64DUFFZERO)
-		v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/8))
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size()%8 == 0 && (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice)
-	// result: (LoweredZero 		ptr 		(ADDconst <ptr.Type> [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)] ptr) 		mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%8 == 0 && (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice)) {
-			break
-		}
-		v.reset(OpARM64LoweredZero)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpARM64ADDconst, ptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueARM64_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVHUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVHUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 x)
-	// cond:
-	// result: (MOVHUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVHUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 x)
-	// cond:
-	// result: (MOVWUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVWUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueARM64_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpARM64MOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteBlockARM64(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockARM64EQ:
-		// match: (EQ (CMPconst [0] x) yes no)
-		// cond:
-		// result: (Z x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64CMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64Z
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (CMPWconst [0] x) yes no)
-		// cond:
-		// result: (ZW x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ZW
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (InvertFlags cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64GE:
-		// match: (GE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64GT:
-		// match: (GT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockIf:
-		// match: (If (Equal cc) yes no)
-		// cond:
-		// result: (EQ cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64Equal {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64EQ
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (NotEqual cc) yes no)
-		// cond:
-		// result: (NE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64NotEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64NE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessThan cc) yes no)
-		// cond:
-		// result: (LT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64LT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessThanU cc) yes no)
-		// cond:
-		// result: (ULT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessThanU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ULT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessEqual cc) yes no)
-		// cond:
-		// result: (LE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64LE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessEqualU cc) yes no)
-		// cond:
-		// result: (ULE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessEqualU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ULE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterThan cc) yes no)
-		// cond:
-		// result: (GT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64GT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterThanU cc) yes no)
-		// cond:
-		// result: (UGT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterThanU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64UGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterEqual cc) yes no)
-		// cond:
-		// result: (GE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64GE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterEqualU cc) yes no)
-		// cond:
-		// result: (UGE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterEqualU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64UGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If cond yes no)
-		// cond:
-		// result: (NZ cond yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64NZ
-			b.SetControl(cond)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64LE:
-		// match: (LE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64LT:
-		// match: (LT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64NE:
-		// match: (NE (CMPconst [0] x) yes no)
-		// cond:
-		// result: (NZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64CMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64NZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] x) yes no)
-		// cond:
-		// result: (NZW x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64NZW
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64NZ:
-		// match: (NZ (Equal cc) yes no)
-		// cond:
-		// result: (EQ cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64Equal {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64EQ
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (NotEqual cc) yes no)
-		// cond:
-		// result: (NE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64NotEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64NE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (LessThan cc) yes no)
-		// cond:
-		// result: (LT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64LT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (LessThanU cc) yes no)
-		// cond:
-		// result: (ULT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessThanU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ULT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (LessEqual cc) yes no)
-		// cond:
-		// result: (LE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64LE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (LessEqualU cc) yes no)
-		// cond:
-		// result: (ULE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64LessEqualU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ULE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (GreaterThan cc) yes no)
-		// cond:
-		// result: (GT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64GT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (GreaterThanU cc) yes no)
-		// cond:
-		// result: (UGT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterThanU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64UGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (GreaterEqual cc) yes no)
-		// cond:
-		// result: (GE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64GE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (GreaterEqualU cc) yes no)
-		// cond:
-		// result: (UGE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64GreaterEqualU {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64UGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NZ (MOVDconst [0]) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NZ (MOVDconst [c]) yes no)
-		// cond: c != 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64NZW:
-		// match: (NZW (MOVDconst [c]) yes no)
-		// cond: int32(c) == 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) == 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NZW (MOVDconst [c]) yes no)
-		// cond: int32(c) != 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64UGE:
-		// match: (UGE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ULE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64UGT:
-		// match: (UGT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (UGT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (UGT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (ULT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64ULT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64ULE:
-		// match: (ULE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULE (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULE (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64UGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64ULT:
-		// match: (ULT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagLT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagLT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagLT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (FlagGT_ULT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_ULT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ULT (FlagGT_UGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64FlagGT_UGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (ULT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (UGT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockARM64UGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockARM64Z:
-		// match: (Z (MOVDconst [0]) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (Z (MOVDconst [c]) yes no)
-		// cond: c != 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockARM64ZW:
-		// match: (ZW (MOVDconst [c]) yes no)
-		// cond: int32(c) == 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) == 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (ZW (MOVDconst [c]) yes no)
-		// cond: int32(c) != 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpARM64MOVDconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	}
-	return false
-}
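
Note on the removed files above and below: every function in these autogenerated rewrite files follows the match/cond/result pattern spelled out in its comments — inspect a generic SSA value, check an optional condition, then reset the value in place to its machine-specific form (for example, the (ZeroExt16to32 x) -> (MOVHUreg x) rule above). As a rough illustration only, the following minimal sketch models that one rule; the Op and Value types here are simplified stand-ins, not the real cmd/compile/internal/ssa types, and the function name is hypothetical.

package main

import "fmt"

// Op and Value are illustrative stand-ins for the compiler's SSA types.
type Op string

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

// rewriteZeroExt16to32 models one generated rule:
//   match: (ZeroExt16to32 x)   cond: (none)   result: (MOVHUreg x)
// It rewrites v in place and reports whether the rule fired.
func rewriteZeroExt16to32(v *Value) bool {
	if v.Op != "ZeroExt16to32" {
		return false
	}
	x := v.Args[0]
	v.Op = "ARM64MOVHUreg" // lower the generic op to its ARM64 form
	v.Args = []*Value{x}
	return true
}

func main() {
	x := &Value{Op: "Arg"}
	v := &Value{Op: "ZeroExt16to32", Args: []*Value{x}}
	fmt.Println(rewriteZeroExt16to32(v), v.Op) // true ARM64MOVHUreg
}

In the real generated code the same in-place replacement is done with v.reset(OpARM64MOVHUreg) followed by v.AddArg(x), as seen in the deleted functions above.
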
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteMIPS.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteMIPS.go
deleted file mode 100644
index fe2bb6e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteMIPS.go
+++ /dev/null
@@ -1,9834 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteMIPS.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteMIPS.go:1
-// autogenerated from gen/MIPS.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueMIPS(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAdd16:
-		return rewriteValueMIPS_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValueMIPS_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValueMIPS_OpAdd32F(v, config)
-	case OpAdd32withcarry:
-		return rewriteValueMIPS_OpAdd32withcarry(v, config)
-	case OpAdd64F:
-		return rewriteValueMIPS_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValueMIPS_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValueMIPS_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValueMIPS_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValueMIPS_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValueMIPS_OpAnd32(v, config)
-	case OpAnd8:
-		return rewriteValueMIPS_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValueMIPS_OpAndB(v, config)
-	case OpAtomicAdd32:
-		return rewriteValueMIPS_OpAtomicAdd32(v, config)
-	case OpAtomicAnd8:
-		return rewriteValueMIPS_OpAtomicAnd8(v, config)
-	case OpAtomicCompareAndSwap32:
-		return rewriteValueMIPS_OpAtomicCompareAndSwap32(v, config)
-	case OpAtomicExchange32:
-		return rewriteValueMIPS_OpAtomicExchange32(v, config)
-	case OpAtomicLoad32:
-		return rewriteValueMIPS_OpAtomicLoad32(v, config)
-	case OpAtomicLoadPtr:
-		return rewriteValueMIPS_OpAtomicLoadPtr(v, config)
-	case OpAtomicOr8:
-		return rewriteValueMIPS_OpAtomicOr8(v, config)
-	case OpAtomicStore32:
-		return rewriteValueMIPS_OpAtomicStore32(v, config)
-	case OpAtomicStorePtrNoWB:
-		return rewriteValueMIPS_OpAtomicStorePtrNoWB(v, config)
-	case OpClosureCall:
-		return rewriteValueMIPS_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValueMIPS_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValueMIPS_OpCom32(v, config)
-	case OpCom8:
-		return rewriteValueMIPS_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValueMIPS_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValueMIPS_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValueMIPS_OpConst32F(v, config)
-	case OpConst64F:
-		return rewriteValueMIPS_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValueMIPS_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValueMIPS_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValueMIPS_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValueMIPS_OpConvert(v, config)
-	case OpCtz32:
-		return rewriteValueMIPS_OpCtz32(v, config)
-	case OpCvt32Fto32:
-		return rewriteValueMIPS_OpCvt32Fto32(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValueMIPS_OpCvt32Fto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValueMIPS_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValueMIPS_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValueMIPS_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValueMIPS_OpCvt64Fto32F(v, config)
-	case OpDeferCall:
-		return rewriteValueMIPS_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValueMIPS_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValueMIPS_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValueMIPS_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValueMIPS_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValueMIPS_OpDiv32u(v, config)
-	case OpDiv64F:
-		return rewriteValueMIPS_OpDiv64F(v, config)
-	case OpDiv8:
-		return rewriteValueMIPS_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValueMIPS_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValueMIPS_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValueMIPS_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValueMIPS_OpEq32F(v, config)
-	case OpEq64F:
-		return rewriteValueMIPS_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValueMIPS_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValueMIPS_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValueMIPS_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValueMIPS_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValueMIPS_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValueMIPS_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValueMIPS_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValueMIPS_OpGeq32U(v, config)
-	case OpGeq64F:
-		return rewriteValueMIPS_OpGeq64F(v, config)
-	case OpGeq8:
-		return rewriteValueMIPS_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValueMIPS_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValueMIPS_OpGetClosurePtr(v, config)
-	case OpGoCall:
-		return rewriteValueMIPS_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValueMIPS_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValueMIPS_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValueMIPS_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValueMIPS_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValueMIPS_OpGreater32U(v, config)
-	case OpGreater64F:
-		return rewriteValueMIPS_OpGreater64F(v, config)
-	case OpGreater8:
-		return rewriteValueMIPS_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValueMIPS_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValueMIPS_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValueMIPS_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValueMIPS_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValueMIPS_OpHmul32u(v, config)
-	case OpHmul8:
-		return rewriteValueMIPS_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValueMIPS_OpHmul8u(v, config)
-	case OpInterCall:
-		return rewriteValueMIPS_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValueMIPS_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValueMIPS_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValueMIPS_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValueMIPS_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValueMIPS_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValueMIPS_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValueMIPS_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValueMIPS_OpLeq32U(v, config)
-	case OpLeq64F:
-		return rewriteValueMIPS_OpLeq64F(v, config)
-	case OpLeq8:
-		return rewriteValueMIPS_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValueMIPS_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValueMIPS_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValueMIPS_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValueMIPS_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValueMIPS_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValueMIPS_OpLess32U(v, config)
-	case OpLess64F:
-		return rewriteValueMIPS_OpLess64F(v, config)
-	case OpLess8:
-		return rewriteValueMIPS_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValueMIPS_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValueMIPS_OpLoad(v, config)
-	case OpLsh16x16:
-		return rewriteValueMIPS_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValueMIPS_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValueMIPS_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValueMIPS_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValueMIPS_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValueMIPS_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValueMIPS_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValueMIPS_OpLsh32x8(v, config)
-	case OpLsh8x16:
-		return rewriteValueMIPS_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValueMIPS_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValueMIPS_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValueMIPS_OpLsh8x8(v, config)
-	case OpMIPSADD:
-		return rewriteValueMIPS_OpMIPSADD(v, config)
-	case OpMIPSADDconst:
-		return rewriteValueMIPS_OpMIPSADDconst(v, config)
-	case OpMIPSAND:
-		return rewriteValueMIPS_OpMIPSAND(v, config)
-	case OpMIPSANDconst:
-		return rewriteValueMIPS_OpMIPSANDconst(v, config)
-	case OpMIPSCMOVZ:
-		return rewriteValueMIPS_OpMIPSCMOVZ(v, config)
-	case OpMIPSCMOVZzero:
-		return rewriteValueMIPS_OpMIPSCMOVZzero(v, config)
-	case OpMIPSLoweredAtomicAdd:
-		return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v, config)
-	case OpMIPSLoweredAtomicStore:
-		return rewriteValueMIPS_OpMIPSLoweredAtomicStore(v, config)
-	case OpMIPSMOVBUload:
-		return rewriteValueMIPS_OpMIPSMOVBUload(v, config)
-	case OpMIPSMOVBUreg:
-		return rewriteValueMIPS_OpMIPSMOVBUreg(v, config)
-	case OpMIPSMOVBload:
-		return rewriteValueMIPS_OpMIPSMOVBload(v, config)
-	case OpMIPSMOVBreg:
-		return rewriteValueMIPS_OpMIPSMOVBreg(v, config)
-	case OpMIPSMOVBstore:
-		return rewriteValueMIPS_OpMIPSMOVBstore(v, config)
-	case OpMIPSMOVBstorezero:
-		return rewriteValueMIPS_OpMIPSMOVBstorezero(v, config)
-	case OpMIPSMOVDload:
-		return rewriteValueMIPS_OpMIPSMOVDload(v, config)
-	case OpMIPSMOVDstore:
-		return rewriteValueMIPS_OpMIPSMOVDstore(v, config)
-	case OpMIPSMOVFload:
-		return rewriteValueMIPS_OpMIPSMOVFload(v, config)
-	case OpMIPSMOVFstore:
-		return rewriteValueMIPS_OpMIPSMOVFstore(v, config)
-	case OpMIPSMOVHUload:
-		return rewriteValueMIPS_OpMIPSMOVHUload(v, config)
-	case OpMIPSMOVHUreg:
-		return rewriteValueMIPS_OpMIPSMOVHUreg(v, config)
-	case OpMIPSMOVHload:
-		return rewriteValueMIPS_OpMIPSMOVHload(v, config)
-	case OpMIPSMOVHreg:
-		return rewriteValueMIPS_OpMIPSMOVHreg(v, config)
-	case OpMIPSMOVHstore:
-		return rewriteValueMIPS_OpMIPSMOVHstore(v, config)
-	case OpMIPSMOVHstorezero:
-		return rewriteValueMIPS_OpMIPSMOVHstorezero(v, config)
-	case OpMIPSMOVWload:
-		return rewriteValueMIPS_OpMIPSMOVWload(v, config)
-	case OpMIPSMOVWreg:
-		return rewriteValueMIPS_OpMIPSMOVWreg(v, config)
-	case OpMIPSMOVWstore:
-		return rewriteValueMIPS_OpMIPSMOVWstore(v, config)
-	case OpMIPSMOVWstorezero:
-		return rewriteValueMIPS_OpMIPSMOVWstorezero(v, config)
-	case OpMIPSMUL:
-		return rewriteValueMIPS_OpMIPSMUL(v, config)
-	case OpMIPSNEG:
-		return rewriteValueMIPS_OpMIPSNEG(v, config)
-	case OpMIPSNOR:
-		return rewriteValueMIPS_OpMIPSNOR(v, config)
-	case OpMIPSNORconst:
-		return rewriteValueMIPS_OpMIPSNORconst(v, config)
-	case OpMIPSOR:
-		return rewriteValueMIPS_OpMIPSOR(v, config)
-	case OpMIPSORconst:
-		return rewriteValueMIPS_OpMIPSORconst(v, config)
-	case OpMIPSSGT:
-		return rewriteValueMIPS_OpMIPSSGT(v, config)
-	case OpMIPSSGTU:
-		return rewriteValueMIPS_OpMIPSSGTU(v, config)
-	case OpMIPSSGTUconst:
-		return rewriteValueMIPS_OpMIPSSGTUconst(v, config)
-	case OpMIPSSGTUzero:
-		return rewriteValueMIPS_OpMIPSSGTUzero(v, config)
-	case OpMIPSSGTconst:
-		return rewriteValueMIPS_OpMIPSSGTconst(v, config)
-	case OpMIPSSGTzero:
-		return rewriteValueMIPS_OpMIPSSGTzero(v, config)
-	case OpMIPSSLL:
-		return rewriteValueMIPS_OpMIPSSLL(v, config)
-	case OpMIPSSLLconst:
-		return rewriteValueMIPS_OpMIPSSLLconst(v, config)
-	case OpMIPSSRA:
-		return rewriteValueMIPS_OpMIPSSRA(v, config)
-	case OpMIPSSRAconst:
-		return rewriteValueMIPS_OpMIPSSRAconst(v, config)
-	case OpMIPSSRL:
-		return rewriteValueMIPS_OpMIPSSRL(v, config)
-	case OpMIPSSRLconst:
-		return rewriteValueMIPS_OpMIPSSRLconst(v, config)
-	case OpMIPSSUB:
-		return rewriteValueMIPS_OpMIPSSUB(v, config)
-	case OpMIPSSUBconst:
-		return rewriteValueMIPS_OpMIPSSUBconst(v, config)
-	case OpMIPSXOR:
-		return rewriteValueMIPS_OpMIPSXOR(v, config)
-	case OpMIPSXORconst:
-		return rewriteValueMIPS_OpMIPSXORconst(v, config)
-	case OpMod16:
-		return rewriteValueMIPS_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValueMIPS_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValueMIPS_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValueMIPS_OpMod32u(v, config)
-	case OpMod8:
-		return rewriteValueMIPS_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValueMIPS_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValueMIPS_OpMove(v, config)
-	case OpMul16:
-		return rewriteValueMIPS_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValueMIPS_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValueMIPS_OpMul32F(v, config)
-	case OpMul32uhilo:
-		return rewriteValueMIPS_OpMul32uhilo(v, config)
-	case OpMul64F:
-		return rewriteValueMIPS_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValueMIPS_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValueMIPS_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValueMIPS_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValueMIPS_OpNeg32F(v, config)
-	case OpNeg64F:
-		return rewriteValueMIPS_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValueMIPS_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValueMIPS_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValueMIPS_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValueMIPS_OpNeq32F(v, config)
-	case OpNeq64F:
-		return rewriteValueMIPS_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValueMIPS_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValueMIPS_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValueMIPS_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValueMIPS_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValueMIPS_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValueMIPS_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValueMIPS_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValueMIPS_OpOr32(v, config)
-	case OpOr8:
-		return rewriteValueMIPS_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValueMIPS_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValueMIPS_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValueMIPS_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValueMIPS_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValueMIPS_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValueMIPS_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValueMIPS_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValueMIPS_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValueMIPS_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValueMIPS_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValueMIPS_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValueMIPS_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValueMIPS_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValueMIPS_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValueMIPS_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValueMIPS_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValueMIPS_OpRsh32x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValueMIPS_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValueMIPS_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValueMIPS_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValueMIPS_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValueMIPS_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValueMIPS_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValueMIPS_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValueMIPS_OpRsh8x8(v, config)
-	case OpSelect0:
-		return rewriteValueMIPS_OpSelect0(v, config)
-	case OpSelect1:
-		return rewriteValueMIPS_OpSelect1(v, config)
-	case OpSignExt16to32:
-		return rewriteValueMIPS_OpSignExt16to32(v, config)
-	case OpSignExt8to16:
-		return rewriteValueMIPS_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValueMIPS_OpSignExt8to32(v, config)
-	case OpSignmask:
-		return rewriteValueMIPS_OpSignmask(v, config)
-	case OpSlicemask:
-		return rewriteValueMIPS_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValueMIPS_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValueMIPS_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValueMIPS_OpStore(v, config)
-	case OpSub16:
-		return rewriteValueMIPS_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValueMIPS_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValueMIPS_OpSub32F(v, config)
-	case OpSub32withcarry:
-		return rewriteValueMIPS_OpSub32withcarry(v, config)
-	case OpSub64F:
-		return rewriteValueMIPS_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValueMIPS_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValueMIPS_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValueMIPS_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValueMIPS_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValueMIPS_OpTrunc32to8(v, config)
-	case OpXor16:
-		return rewriteValueMIPS_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValueMIPS_OpXor32(v, config)
-	case OpXor8:
-		return rewriteValueMIPS_OpXor8(v, config)
-	case OpZero:
-		return rewriteValueMIPS_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValueMIPS_OpZeroExt16to32(v, config)
-	case OpZeroExt8to16:
-		return rewriteValueMIPS_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValueMIPS_OpZeroExt8to32(v, config)
-	case OpZeromask:
-		return rewriteValueMIPS_OpZeromask(v, config)
-	}
-	return false
-}
-func rewriteValueMIPS_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (ADDF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSADDF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAdd32withcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32withcarry <t> x y c)
-	// cond:
-	// result: (ADD c (ADD <t> x y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		c := v.Args[2]
-		v.reset(OpMIPSADD)
-		v.AddArg(c)
-		v0 := b.NewValue0(v.Line, OpMIPSADD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (ADDD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSADDD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8 x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (MOVWaddr {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(OpMIPSMOVWaddr)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd32 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicAdd ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSLoweredAtomicAdd)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAnd8  ptr val mem)
-	// cond: !config.BigEndian
-	// result: (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) 		(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) 			(SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst  <config.fe.TypeUInt32()> [3] ptr))) 		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> 			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst <config.fe.TypeUInt32()> [3] 					(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!config.BigEndian) {
-			break
-		}
-		v.reset(OpMIPSLoweredAtomicAnd)
-		v0 := b.NewValue0(v.Line, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = ^3
-		v0.AddArg(v1)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSOR, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpMIPSSLL, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(val)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v5.AuxInt = 3
-		v6 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
-		v6.AuxInt = 3
-		v6.AddArg(ptr)
-		v5.AddArg(v6)
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v7 := b.NewValue0(v.Line, OpMIPSNORconst, config.fe.TypeUInt32())
-		v7.AuxInt = 0
-		v8 := b.NewValue0(v.Line, OpMIPSSLL, config.fe.TypeUInt32())
-		v9 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v9.AuxInt = 0xff
-		v8.AddArg(v9)
-		v10 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v10.AuxInt = 3
-		v11 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
-		v11.AuxInt = 3
-		v12 := b.NewValue0(v.Line, OpMIPSXORconst, config.fe.TypeUInt32())
-		v12.AuxInt = 3
-		v12.AddArg(ptr)
-		v11.AddArg(v12)
-		v10.AddArg(v11)
-		v8.AddArg(v10)
-		v7.AddArg(v8)
-		v2.AddArg(v7)
-		v.AddArg(v2)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (AtomicAnd8  ptr val mem)
-	// cond: config.BigEndian
-	// result: (LoweredAtomicAnd (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) 		(OR <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) 			(SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst  <config.fe.TypeUInt32()> [3] 					(XORconst <config.fe.TypeUInt32()> [3] ptr)))) 		(NORconst [0] <config.fe.TypeUInt32()> (SLL <config.fe.TypeUInt32()> 			(MOVWconst [0xff]) (SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst <config.fe.TypeUInt32()> [3] 					(XORconst <config.fe.TypeUInt32()> [3] ptr)))))) mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(config.BigEndian) {
-			break
-		}
-		v.reset(OpMIPSLoweredAtomicAnd)
-		v0 := b.NewValue0(v.Line, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = ^3
-		v0.AddArg(v1)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSOR, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpMIPSSLL, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(val)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v5.AuxInt = 3
-		v6 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
-		v6.AuxInt = 3
-		v7 := b.NewValue0(v.Line, OpMIPSXORconst, config.fe.TypeUInt32())
-		v7.AuxInt = 3
-		v7.AddArg(ptr)
-		v6.AddArg(v7)
-		v5.AddArg(v6)
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v8 := b.NewValue0(v.Line, OpMIPSNORconst, config.fe.TypeUInt32())
-		v8.AuxInt = 0
-		v9 := b.NewValue0(v.Line, OpMIPSSLL, config.fe.TypeUInt32())
-		v10 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v10.AuxInt = 0xff
-		v9.AddArg(v10)
-		v11 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v11.AuxInt = 3
-		v12 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
-		v12.AuxInt = 3
-		v13 := b.NewValue0(v.Line, OpMIPSXORconst, config.fe.TypeUInt32())
-		v13.AuxInt = 3
-		v13.AddArg(ptr)
-		v12.AddArg(v13)
-		v11.AddArg(v12)
-		v9.AddArg(v11)
-		v8.AddArg(v9)
-		v2.AddArg(v8)
-		v.AddArg(v2)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
-	// cond:
-	// result: (LoweredAtomicCas ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpMIPSLoweredAtomicCas)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicExchange32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange32 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicExchange ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSLoweredAtomicExchange)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicLoad32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad32  ptr mem)
-	// cond:
-	// result: (LoweredAtomicLoad ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPSLoweredAtomicLoad)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicLoadPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoadPtr ptr mem)
-	// cond:
-	// result: (LoweredAtomicLoad  ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPSLoweredAtomicLoad)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicOr8 ptr val mem)
-	// cond: !config.BigEndian
-	// result: (LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) 		(SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) 			(SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst <config.fe.TypeUInt32()> [3] ptr))) mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!config.BigEndian) {
-			break
-		}
-		v.reset(OpMIPSLoweredAtomicOr)
-		v0 := b.NewValue0(v.Line, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = ^3
-		v0.AddArg(v1)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSSLL, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v3.AddArg(val)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v4.AuxInt = 3
-		v5 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
-		v5.AuxInt = 3
-		v5.AddArg(ptr)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (AtomicOr8 ptr val mem)
-	// cond: config.BigEndian
-	// result: (LoweredAtomicOr (AND <config.fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr) 		(SLL <config.fe.TypeUInt32()> (ZeroExt8to32 val) 			(SLLconst <config.fe.TypeUInt32()> [3] 				(ANDconst <config.fe.TypeUInt32()> [3] 					(XORconst <config.fe.TypeUInt32()> [3] ptr)))) mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(config.BigEndian) {
-			break
-		}
-		v.reset(OpMIPSLoweredAtomicOr)
-		v0 := b.NewValue0(v.Line, OpMIPSAND, config.fe.TypeUInt32().PtrTo())
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = ^3
-		v0.AddArg(v1)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSSLL, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v3.AddArg(val)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v4.AuxInt = 3
-		v5 := b.NewValue0(v.Line, OpMIPSANDconst, config.fe.TypeUInt32())
-		v5.AuxInt = 3
-		v6 := b.NewValue0(v.Line, OpMIPSXORconst, config.fe.TypeUInt32())
-		v6.AuxInt = 3
-		v6.AddArg(ptr)
-		v5.AddArg(v6)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpAtomicStore32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore32      ptr val mem)
-	// cond:
-	// result: (LoweredAtomicStore ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSLoweredAtomicStore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond:
-	// result: (LoweredAtomicStore  ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSLoweredAtomicStore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSCALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (NORconst [0] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (NORconst [0] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8 x)
-	// cond:
-	// result: (NORconst [0] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16 [val])
-	// cond:
-	// result: (MOVWconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32 [val])
-	// cond:
-	// result: (MOVWconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (MOVFconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPSMOVFconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPSMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8 [val])
-	// cond:
-	// result: (MOVWconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVWconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValueMIPS_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValueMIPS_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert x mem)
-	// cond:
-	// result: (MOVWconvert x mem)
-	for {
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPSMOVWconvert)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCtz32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz32 <t> x)
-	// cond:
-	// result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpMIPSSUB)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 32
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCLZ, t)
-		v2 := b.NewValue0(v.Line, OpMIPSSUBconst, t)
-		v2.AuxInt = 1
-		v3 := b.NewValue0(v.Line, OpMIPSAND, t)
-		v3.AddArg(x)
-		v4 := b.NewValue0(v.Line, OpMIPSNEG, t)
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (TRUNCFW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSTRUNCFW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (MOVFD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVFD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (MOVWF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVWF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (MOVWD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVWD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (TRUNCDW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSTRUNCDW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (MOVDF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVDF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpMIPSCALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16 x y)
-	// cond:
-	// result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32 x y)
-	// cond:
-	// result: (Select1 (DIV x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (DIVF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSDIVF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (Select1 (DIVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (DIVD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSDIVD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8 x y)
-	// cond:
-	// result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u x y)
-	// cond:
-	// result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16 x y)
-	// cond:
-	// result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32 x y)
-	// cond:
-	// result: (SGTUconst [1] (XOR x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPEQF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPEQF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPEQD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPEQD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8 x y)
-	// cond:
-	// result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB x y)
-	// cond:
-	// result: (XORconst [1] (XOR <config.fe.TypeBool()> x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeBool())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (SGTUconst [1] (XOR x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16 x y)
-	// cond:
-	// result: (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(x)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(x)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32 x y)
-	// cond:
-	// result: (XORconst [1] (SGT y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGEF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGEF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (XORconst [1] (SGTU y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGED x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGED, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8 x y)
-	// cond:
-	// result: (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(x)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U x y)
-	// cond:
-	// result: (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(x)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpMIPSLoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpMIPSCALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16 x y)
-	// cond:
-	// result: (SGT (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGT)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32 x y)
-	// cond:
-	// result: (SGT x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGT)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGTF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (SGTU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGTD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8 x y)
-	// cond:
-	// result: (SGT (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGT)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U x y)
-	// cond:
-	// result: (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16 x y)
-	// cond:
-	// result: (SRAconst (MUL <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpMIPSMUL, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpMIPSMUL, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32 x y)
-	// cond:
-	// result: (Select0 (MULT x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSMULT, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (Select0 (MULTU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSMULTU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8 x y)
-	// cond:
-	// result: (SRAconst  (MUL <config.fe.TypeInt32()> (SignExt8to32 x) (SignExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpMIPSMUL, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u x y)
-	// cond:
-	// result: (SRLconst (MUL <config.fe.TypeUInt32()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpMIPSMUL, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPSCALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (SGTU len idx)
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v.AddArg(len)
-		v.AddArg(idx)
-		return true
-	}
-}
-func rewriteValueMIPS_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil ptr)
-	// cond:
-	// result: (SGTU ptr (MOVWconst [0]))
-	for {
-		ptr := v.Args[0]
-		v.reset(OpMIPSSGTU)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (XORconst [1] (SGTU idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16 x y)
-	// cond:
-	// result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32 x y)
-	// cond:
-	// result: (XORconst [1] (SGT x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGEF y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGEF, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (XORconst [1] (SGTU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGED y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGED, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8 x y)
-	// cond:
-	// result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U x y)
-	// cond:
-	// result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16 x y)
-	// cond:
-	// result: (SGT (SignExt16to32 y) (SignExt16to32 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGT)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32 x y)
-	// cond:
-	// result: (SGT y x)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGT)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTF y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGTF, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (SGTU y x)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPGTD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8 x y)
-	// cond:
-	// result: (SGT (SignExt8to32 y) (SignExt8to32 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGT)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U x y)
-	// cond:
-	// result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: t.IsBoolean()
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean()) {
-			break
-		}
-		v.reset(OpMIPSMOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && isSigned(t))
-	// result: (MOVBload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVBload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && !isSigned(t))
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && isSigned(t))
-	// result: (MOVHload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVHload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && !isSigned(t))
-	// result: (MOVHUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVHUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) || isPtr(t))
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (MOVFload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVFload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (MOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpMIPSMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 x (Const64 [c]))
-	// cond: uint32(c) < 16
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64 _ (Const64 [c]))
-	// cond: uint32(c) >= 16
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 16) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 x (Const64 [c]))
-	// cond: uint32(c) < 32
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64 _ (Const64 [c]))
-	// cond: uint32(c) >= 32
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 32) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueMIPS_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 x (Const64 [c]))
-	// cond: uint32(c) < 8
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64 _ (Const64 [c]))
-	// cond: uint32(c) >= 8
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 8) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8 <t> x y)
-	// cond:
-	// result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSLL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMIPSADD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADD (MOVWconst [c]) x)
-	// cond:
-	// result: (ADDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (MOVWconst [c]))
-	// cond:
-	// result: (ADDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (NEG y))
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSNEG {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpMIPSSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (NEG y) x)
-	// cond:
-	// result: (SUB x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSNEG {
-			break
-		}
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpMIPSSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSADDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
-	// cond:
-	// result: (MOVWaddr [off1+off2] {sym} ptr)
-	for {
-		off1 := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym := v_0.Aux
-		ptr := v_0.Args[0]
-		v.reset(OpMIPSMOVWaddr)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (ADDconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(c+d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(c + d))
-		return true
-	}
-	// match: (ADDconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(c-d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = int64(int32(c - d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSAND(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AND (MOVWconst [c]) x)
-	// cond:
-	// result: (ANDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVWconst [c]))
-	// cond:
-	// result: (ANDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (SGTUconst [1] x) (SGTUconst [1] y))
-	// cond:
-	// result: (SGTUconst [1] (OR <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSSGTUconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSSGTUconst {
-			break
-		}
-		if v_1.AuxInt != 1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpMIPSOR, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSANDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDconst [0]  _)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDconst [-1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = c & d
-		return true
-	}
-	// match: (ANDconst [c] (ANDconst [d] x))
-	// cond:
-	// result: (ANDconst [c&d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSCMOVZ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMOVZ _ b (MOVWconst [0]))
-	// cond:
-	// result: b
-	for {
-		b := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_2.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = b.Type
-		v.AddArg(b)
-		return true
-	}
-	// match: (CMOVZ a _ (MOVWconst [c]))
-	// cond: c!=0
-	// result: a
-	for {
-		a := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_2.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = a.Type
-		v.AddArg(a)
-		return true
-	}
-	// match: (CMOVZ a (MOVWconst [0]) c)
-	// cond:
-	// result: (CMOVZzero a c)
-	for {
-		a := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		c := v.Args[2]
-		v.reset(OpMIPSCMOVZzero)
-		v.AddArg(a)
-		v.AddArg(c)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMOVZzero _ (MOVWconst [0]))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (CMOVZzero a (MOVWconst [c]))
-	// cond: c!=0
-	// result: a
-	for {
-		a := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = a.Type
-		v.AddArg(a)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem)
-	// cond: is16Bit(c)
-	// result: (LoweredAtomicAddconst [c] ptr mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(is16Bit(c)) {
-			break
-		}
-		v.reset(OpMIPSLoweredAtomicAddconst)
-		v.AuxInt = c
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSLoweredAtomicStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LoweredAtomicStore ptr (MOVWconst [0]) mem)
-	// cond:
-	// result: (LoweredAtomicStorezero ptr mem)
-	for {
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPSLoweredAtomicStorezero)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVBUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVBUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpMIPSMOVBUload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUreg (ANDconst [c] x))
-	// cond:
-	// result: (ANDconst [c&0xff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c & 0xff
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(uint8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(uint8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload  [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVBload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVBreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpMIPSMOVBload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBreg (ANDconst [c] x))
-	// cond: c & 0x80 == 0
-	// result: (ANDconst [c&0x7f] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x80 == 0) {
-			break
-		}
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c & 0x7f
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg  (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem)
-	// cond:
-	// result: (MOVBstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVBreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVBUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDload  [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVDload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVDstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVFload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVFload  [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVFload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVFload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVFload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVFstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVFstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVFstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVFstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVFstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVHUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVHUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVHstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVHUload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVHUreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVHload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpMIPSMOVHUload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUreg (ANDconst [c] x))
-	// cond:
-	// result: (ANDconst [c&0xffff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c & 0xffff
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(uint16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(uint16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVHload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHload  [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVHload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVHstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVHreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHload _ _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVHload {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVBUreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVHreg {
-			break
-		}
-		v.reset(OpMIPSMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		if x.Op != OpMIPSMOVHUload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpMIPSMOVHload, t)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVHreg (ANDconst [c] x))
-	// cond: c & 0x8000 == 0
-	// result: (ANDconst [c&0x7fff] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(c&0x8000 == 0) {
-			break
-		}
-		v.reset(OpMIPSANDconst)
-		v.AuxInt = c & 0x7fff
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg  (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVHstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem)
-	// cond:
-	// result: (MOVHstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVHstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload  [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVWload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVWreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWreg x)
-	// cond: x.Uses == 1
-	// result: (MOVWnop x)
-	for {
-		x := v.Args[0]
-		if !(x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVWnop)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg  (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
-	// cond:
-	// result: (MOVWstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVWstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
-	// cond: (is16Bit(off1+off2) || x.Uses == 1)
-	// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		x := v.Args[0]
-		if x.Op != OpMIPSADDconst {
-			break
-		}
-		off2 := x.AuxInt
-		ptr := x.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1+off2) || x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpMIPSMOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSMUL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MUL (MOVWconst [0]) _ )
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MUL (MOVWconst [1]) x )
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [-1]) x )
-	// cond:
-	// result: (NEG x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpMIPSNEG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) x )
-	// cond: isPowerOfTwo(int64(uint32(c)))
-	// result: (SLLconst [log2(int64(uint32(c)))] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isPowerOfTwo(int64(uint32(c)))) {
-			break
-		}
-		v.reset(OpMIPSSLLconst)
-		v.AuxInt = log2(int64(uint32(c)))
-		v.AddArg(x)
-		return true
-	}
-	// match: (MUL (MOVWconst [c]) (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(c)*int32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(c) * int32(d))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSNEG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEG (MOVWconst [c]))
-	// cond:
-	// result: (MOVWconst [int64(int32(-c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(-c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSNOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOR (MOVWconst [c]) x)
-	// cond:
-	// result: (NORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSNORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (NOR x (MOVWconst [c]))
-	// cond:
-	// result: (NORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSNORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSNORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NORconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [^(c|d)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = ^(c | d)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OR  (MOVWconst [c]) x)
-	// cond:
-	// result: (ORconst  [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x (MOVWconst [c]))
-	// cond:
-	// result: (ORconst  [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR (SGTUzero x) (SGTUzero y))
-	// cond:
-	// result: (SGTUzero (OR <x.Type> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSSGTUzero {
-			break
-		}
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSSGTUzero {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpMIPSSGTUzero)
-		v0 := b.NewValue0(v.Line, OpMIPSOR, x.Type)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORconst  [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORconst  [-1] _)
-	// cond:
-	// result: (MOVWconst [-1])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = c | d
-		return true
-	}
-	// match: (ORconst [c] (ORconst [d] x))
-	// cond:
-	// result: (ORconst [c|d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSORconst)
-		v.AuxInt = c | d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSGT(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGT  (MOVWconst [c]) x)
-	// cond:
-	// result: (SGTconst  [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSSGTconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SGT x (MOVWconst [0]))
-	// cond:
-	// result: (SGTzero x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSSGTzero)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSGTU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTU (MOVWconst [c]) x)
-	// cond:
-	// result: (SGTUconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSSGTUconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SGTU x (MOVWconst [0]))
-	// cond:
-	// result: (SGTUzero x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSSGTUzero)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSGTUconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTUconst [c] (MOVWconst [d]))
-	// cond: uint32(c)>uint32(d)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint32(c) > uint32(d)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (MOVWconst [d]))
-	// cond: uint32(c)<=uint32(d)
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint32(c) <= uint32(d)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTUconst [c] (MOVBUreg _))
-	// cond: 0xff < uint32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVBUreg {
-			break
-		}
-		if !(0xff < uint32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (MOVHUreg _))
-	// cond: 0xffff < uint32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVHUreg {
-			break
-		}
-		if !(0xffff < uint32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (ANDconst [m] _))
-	// cond: uint32(m) < uint32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(uint32(m) < uint32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (SRLconst _ [d]))
-	// cond: uint32(d) <= 31 && 1<<(32-uint32(d)) <= uint32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSSRLconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint32(d) <= 31 && 1<<(32-uint32(d)) <= uint32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSGTUzero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTUzero (MOVWconst [d]))
-	// cond: uint32(d) != 0
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint32(d) != 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUzero (MOVWconst [d]))
-	// cond: uint32(d) == 0
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint32(d) == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSGTconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTconst [c] (MOVWconst [d]))
-	// cond: int32(c) > int32(d)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(int32(c) > int32(d)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVWconst [d]))
-	// cond: int32(c) <= int32(d)
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(int32(c) <= int32(d)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVBreg _))
-	// cond: 0x7f < int32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVBreg {
-			break
-		}
-		if !(0x7f < int32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVBreg _))
-	// cond: int32(c) <= -0x80
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVBreg {
-			break
-		}
-		if !(int32(c) <= -0x80) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVBUreg _))
-	// cond: 0xff < int32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVBUreg {
-			break
-		}
-		if !(0xff < int32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVBUreg _))
-	// cond: int32(c) < 0
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVBUreg {
-			break
-		}
-		if !(int32(c) < 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVHreg _))
-	// cond: 0x7fff < int32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVHreg {
-			break
-		}
-		if !(0x7fff < int32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVHreg _))
-	// cond: int32(c) <= -0x8000
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVHreg {
-			break
-		}
-		if !(int32(c) <= -0x8000) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVHUreg _))
-	// cond: 0xffff < int32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVHUreg {
-			break
-		}
-		if !(0xffff < int32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVHUreg _))
-	// cond: int32(c) < 0
-	// result: (MOVWconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVHUreg {
-			break
-		}
-		if !(int32(c) < 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (ANDconst [m] _))
-	// cond: 0 <= int32(m) && int32(m) < int32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int32(m) && int32(m) < int32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (SRLconst _ [d]))
-	// cond: 0 <= int32(c) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= int32(c)
-	// result: (MOVWconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSSRLconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(0 <= int32(c) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= int32(c)) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSGTzero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTzero (MOVWconst [d]))
-	// cond: int32(d) > 0
-	// result: (MOVWconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(int32(d) > 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTzero (MOVWconst [d]))
-	// cond: int32(d) <= 0
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(int32(d) <= 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSLL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLL _ (MOVWconst [c]))
-	// cond: uint32(c)>=32
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 32) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SLL x (MOVWconst [c]))
-	// cond:
-	// result: (SLLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSSLLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSLLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLLconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(uint32(d) << uint32(c)))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSRA(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRA x (MOVWconst [c]))
-	// cond: uint32(c)>=32
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 32) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRA x (MOVWconst [c]))
-	// cond:
-	// result: (SRAconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSRAconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(d)>>uint32(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(d) >> uint32(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSRL(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRL _ (MOVWconst [c]))
-	// cond: uint32(c)>=32
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 32) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SRL x (MOVWconst [c]))
-	// cond:
-	// result: (SRLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSRLconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRLconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(uint32(d)>>uint32(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(uint32(d) >> uint32(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSUB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUB x (MOVWconst [c]))
-	// cond:
-	// result: (SUBconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSSUBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUB x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SUB (MOVWconst [0]) x)
-	// cond:
-	// result: (NEG x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpMIPSNEG)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSSUBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [int64(int32(d-c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(d - c))
-		return true
-	}
-	// match: (SUBconst [c] (SUBconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(-c-d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = int64(int32(-c - d))
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] (ADDconst [d] x))
-	// cond:
-	// result: (ADDconst [int64(int32(-c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = int64(int32(-c + d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSXOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XOR (MOVWconst [c]) x)
-	// cond:
-	// result: (XORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x (MOVWconst [c]))
-	// cond:
-	// result: (XORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x x)
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMIPSXORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [-1] x)
-	// cond:
-	// result: (NORconst [0] x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpMIPSNORconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [c] (MOVWconst [d]))
-	// cond:
-	// result: (MOVWconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	// match: (XORconst [c] (XORconst [d] x))
-	// cond:
-	// result: (XORconst [c^d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSXORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16 x y)
-	// cond:
-	// result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32 x y)
-	// cond:
-	// result: (Select0 (DIV x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (Select0 (DIVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8 x y)
-	// cond:
-	// result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSDIV, MakeTuple(config.fe.TypeInt32(), config.fe.TypeInt32()))
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u x y)
-	// cond:
-	// result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSDIVU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore dst (MOVHUload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVHUload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] dst (MOVBUload [1] src mem) 		(MOVBstore dst (MOVBUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 1
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 1
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore dst (MOVWload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] dst (MOVHUload [2] src mem) 		(MOVHstore dst (MOVHUload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVHUload, config.fe.TypeUInt16())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVHUload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] dst (MOVBUload [3] src mem) 		(MOVBstore [2] dst (MOVBUload [2] src mem) 			(MOVBstore [1] dst (MOVBUload [1] src mem) 				(MOVBstore dst (MOVBUload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 3
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 3
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v2.AuxInt = 2
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v4.AuxInt = 1
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBUload [2] src mem) 		(MOVBstore [1] dst (MOVBUload [1] src mem) 			(MOVBstore dst (MOVBUload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v2.AuxInt = 1
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVBUload, config.fe.TypeUInt8())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [4] dst (MOVWload [4] src mem) 		(MOVWstore dst (MOVWload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [6] dst (MOVHload [6] src mem) 		(MOVHstore [4] dst (MOVHload [4] src mem) 			(MOVHstore [2] dst (MOVHload [2] src mem) 				(MOVHstore dst (MOVHload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 6
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v0.AuxInt = 6
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v3.AuxInt = 2
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v4.AuxInt = 2
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [4] dst (MOVHload [4] src mem) 		(MOVHstore [2] dst (MOVHload [2] src mem) 			(MOVHstore dst (MOVHload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v2.AuxInt = 2
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVHload, config.fe.TypeInt16())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [8] dst (MOVWload [8] src mem) 		(MOVWstore [4] dst (MOVWload [4] src mem) 			(MOVWstore dst (MOVWload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v0.AuxInt = 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [12] dst (MOVWload [12] src mem) 		(MOVWstore [8] dst (MOVWload [8] src mem) 			(MOVWstore [4] dst (MOVWload [4] src mem) 				(MOVWstore dst (MOVWload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 12
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v0.AuxInt = 12
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v2.AuxInt = 8
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v3.AuxInt = 4
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v4.AuxInt = 4
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpMIPSMOVWload, config.fe.TypeUInt32())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: (SizeAndAlign(s).Size() > 16 || SizeAndAlign(s).Align()%4 != 0)
-	// result: (LoweredMove [SizeAndAlign(s).Align()] 		dst 		src 		(ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 16 || SizeAndAlign(s).Align()%4 != 0) {
-			break
-		}
-		v.reset(OpMIPSLoweredMove)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpMIPSADDconst, src.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(src)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (MULF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSMULF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMul32uhilo(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32uhilo x y)
-	// cond:
-	// result: (MULTU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSMULTU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (MULD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSMULD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8 x y)
-	// cond:
-	// result: (MUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (NEGF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEGF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (NEGD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEGD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8 x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16 x y)
-	// cond:
-	// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32 x y)
-	// cond:
-	// result: (SGTU (XOR x y) (MOVWconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (FPFlagFalse (CMPEQF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagFalse)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPEQF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (FPFlagFalse (CMPEQD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSFPFlagFalse)
-		v0 := b.NewValue0(v.Line, OpMIPSCMPEQD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8 x y)
-	// cond:
-	// result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (SGTU (XOR x y) (MOVWconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSGTU)
-		v0 := b.NewValue0(v.Line, OpMIPSXOR, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPSLoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSXORconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr:(SP))
-	// cond:
-	// result: (MOVWaddr [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if ptr.Op != OpSP {
-			break
-		}
-		v.reset(OpMIPSMOVWaddr)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADDconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(OpMIPSADDconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValueMIPS_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 x (Const64 [c]))
-	// cond: uint32(c) < 16
-	// result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = c + 16
-		v0 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 16
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux64 _ (Const64 [c]))
-	// cond: uint32(c) >= 16
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 16) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = -1
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = -1
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint32(c) < 16
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [c+16])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = c + 16
-		v0 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 16
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint32(c) >= 16
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [16]) [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 16) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
-		v0 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 16
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8 x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = -1
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 x (Const64 [c]))
-	// cond: uint32(c) < 32
-	// result: (SRLconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux64 _ (Const64 [c]))
-	// cond: uint32(c) >= 32
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 32) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 x y)
-	// cond:
-	// result: (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = -1
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 x y)
-	// cond:
-	// result: (SRA x ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = -1
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v2.AuxInt = 32
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint32(c) < 32
-	// result: (SRAconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint32(c) >= 32
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 32) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8 x y)
-	// cond:
-	// result: (SRA x ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = -1
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v4 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 x (Const64 [c]))
-	// cond: uint32(c) < 8
-	// result: (SRLconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = c + 24
-		v0 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 24
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux64 _ (Const64 [c]))
-	// cond: uint32(c) >= 8
-	// result: (MOVWconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 8) {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8 <t> x y)
-	// cond:
-	// result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSSRL, t)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = -1
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = -1
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v3.AuxInt = 32
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 x (Const64 [c]))
-	// cond: uint32(c) < 8
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [c+24])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = c + 24
-		v0 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 24
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64 x (Const64 [c]))
-	// cond: uint32(c) >= 8
-	// result: (SRAconst (SLLconst <config.fe.TypeUInt32()> x [24]) [31])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) >= 8) {
-			break
-		}
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
-		v0 := b.NewValue0(v.Line, OpMIPSSLLconst, config.fe.TypeUInt32())
-		v0.AuxInt = 24
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8 x y)
-	// cond:
-	// result: (SRA (SignExt16to32 x) ( CMOVZ <config.fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSRA)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSCMOVZ, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v3.AuxInt = -1
-		v1.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpMIPSSGTUconst, config.fe.TypeBool())
-		v4.AuxInt = 32
-		v5 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSelect0(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select0 (Add32carry <t> x y))
-	// cond:
-	// result: (ADD <t.FieldType(0)> x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32carry {
-			break
-		}
-		t := v_0.Type
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpMIPSADD)
-		v.Type = t.FieldType(0)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Select0 (Sub32carry <t> x y))
-	// cond:
-	// result: (SUB <t.FieldType(0)> x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub32carry {
-			break
-		}
-		t := v_0.Type
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpMIPSSUB)
-		v.Type = t.FieldType(0)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Select0 (MULTU x (MOVWconst [c])))
-	// cond: x.Op != OpMIPSMOVWconst
-	// result: (Select0 (MULTU (MOVWconst [c]) x ))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(x.Op != OpMIPSMOVWconst) {
-			break
-		}
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPSMULTU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = c
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Select0 (MULTU (MOVWconst [0]) _ ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select0 (MULTU (MOVWconst [1]) _ ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0_0.AuxInt != 1 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select0 (MULTU (MOVWconst [-1]) x ))
-	// cond:
-	// result: (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0_0.AuxInt != -1 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpMIPSCMOVZ)
-		v0 := b.NewValue0(v.Line, OpMIPSADDconst, x.Type)
-		v0.AuxInt = -1
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select0 (MULTU (MOVWconst [c]) x ))
-	// cond: isPowerOfTwo(int64(uint32(c)))
-	// result: (SRLconst [32-log2(int64(uint32(c)))] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(isPowerOfTwo(int64(uint32(c)))) {
-			break
-		}
-		v.reset(OpMIPSSRLconst)
-		v.AuxInt = 32 - log2(int64(uint32(c)))
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select0 (MULTU  (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [(c*d)>>32])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = (c * d) >> 32
-		return true
-	}
-	// match: (Select0 (DIV  (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(int32(c)%int32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSDIV {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(c) % int32(d))
-		return true
-	}
-	// match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(int32(uint32(c)%uint32(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSDIVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(uint32(c) % uint32(d)))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpSelect1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select1 (Add32carry <t> x y))
-	// cond:
-	// result: (SGTU <config.fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32carry {
-			break
-		}
-		t := v_0.Type
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpMIPSSGTU)
-		v.Type = config.fe.TypeBool()
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPSADD, t.FieldType(0))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Select1 (Sub32carry <t> x y))
-	// cond:
-	// result: (SGTU <config.fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub32carry {
-			break
-		}
-		t := v_0.Type
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpMIPSSGTU)
-		v.Type = config.fe.TypeBool()
-		v0 := b.NewValue0(v.Line, OpMIPSSUB, t.FieldType(0))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULTU x (MOVWconst [c])))
-	// cond: x.Op != OpMIPSMOVWconst
-	// result: (Select1 (MULTU (MOVWconst [c]) x ))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(x.Op != OpMIPSMOVWconst) {
-			break
-		}
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPSMULTU, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = c
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Select1 (MULTU (MOVWconst [0]) _ ))
-	// cond:
-	// result: (MOVWconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select1 (MULTU (MOVWconst [1]) x ))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0_0.AuxInt != 1 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULTU (MOVWconst [-1]) x ))
-	// cond:
-	// result: (NEG <x.Type> x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		if v_0_0.AuxInt != -1 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpMIPSNEG)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULTU (MOVWconst [c]) x ))
-	// cond: isPowerOfTwo(int64(uint32(c)))
-	// result: (SLLconst [log2(int64(uint32(c)))] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(isPowerOfTwo(int64(uint32(c)))) {
-			break
-		}
-		v.reset(OpMIPSSLLconst)
-		v.AuxInt = log2(int64(uint32(c)))
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULTU  (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSMULTU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(uint32(c) * uint32(d)))
-		return true
-	}
-	// match: (Select1 (DIV  (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(int32(c)/int32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSDIV {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(c) / int32(d))
-		return true
-	}
-	// match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d])))
-	// cond:
-	// result: (MOVWconst [int64(int32(uint32(c)/uint32(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPSDIVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPSMOVWconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPSMOVWconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPSMOVWconst)
-		v.AuxInt = int64(int32(uint32(c) / uint32(d)))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSignmask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Signmask x)
-	// cond:
-	// result: (SRAconst x [31])
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSSRAconst)
-		v.AuxInt = 31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask x)
-	// cond:
-	// result: (NEG (SGT x (MOVWconst [0])))
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEG)
-		v0 := b.NewValue0(v.Line, OpMIPSSGT, config.fe.TypeBool())
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (SQRTD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSSQRTD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpMIPSCALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVHstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPSMOVHstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: !is32BitFloat(val.Type)
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: !is64BitFloat(val.Type)
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (MOVFstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPSMOVFstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (MOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPSMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (SUBF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSUBF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSub32withcarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32withcarry <t> x y c)
-	// cond:
-	// result: (SUB (SUB <t> x y) c)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		c := v.Args[2]
-		v.reset(OpMIPSSUB)
-		v0 := b.NewValue0(v.Line, OpMIPSSUB, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v.AddArg(c)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (SUBD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSUBD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8 x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPSXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] ptr (MOVWconst [0]) 		(MOVBstore [0] ptr (MOVWconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 1
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore ptr (MOVWconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] ptr (MOVWconst [0]) 		(MOVHstore [0] ptr (MOVWconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] ptr (MOVWconst [0]) 		(MOVBstore [2] ptr (MOVWconst [0]) 			(MOVBstore [1] ptr (MOVWconst [0]) 				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 3
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v5.AuxInt = 0
-		v5.AddArg(ptr)
-		v6 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v6.AuxInt = 0
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] ptr (MOVWconst [0]) 		(MOVBstore [1] ptr (MOVWconst [0]) 			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpMIPSMOVBstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVBstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [4] ptr (MOVWconst [0]) 		(MOVHstore [2] ptr (MOVWconst [0]) 			(MOVHstore [0] ptr (MOVWconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVHstore)
-		v.AuxInt = 4
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVHstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [4] ptr (MOVWconst [0]) 			(MOVWstore [0] ptr (MOVWconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 4
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [8] ptr (MOVWconst [0]) 		(MOVWstore [4] ptr (MOVWconst [0]) 			(MOVWstore [0] ptr (MOVWconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 8
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [12] ptr (MOVWconst [0]) 		(MOVWstore [8] ptr (MOVWconst [0]) 			(MOVWstore [4] ptr (MOVWconst [0]) 				(MOVWstore [0] ptr (MOVWconst [0]) mem))))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPSMOVWstore)
-		v.AuxInt = 12
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v3.AuxInt = 4
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPSMOVWstore, TypeMem)
-		v5.AuxInt = 0
-		v5.AddArg(ptr)
-		v6 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v6.AuxInt = 0
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: (SizeAndAlign(s).Size() > 16  || SizeAndAlign(s).Align()%4 != 0)
-	// result: (LoweredZero [SizeAndAlign(s).Align()] 		ptr 		(ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() > 16 || SizeAndAlign(s).Align()%4 != 0) {
-			break
-		}
-		v.reset(OpMIPSLoweredZero)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPSADDconst, ptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVHUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVHUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSMOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS_OpZeromask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zeromask x)
-	// cond:
-	// result: (NEG (SGTU x (MOVWconst [0])))
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPSNEG)
-		v0 := b.NewValue0(v.Line, OpMIPSSGTU, config.fe.TypeBool())
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpMIPSMOVWconst, config.fe.TypeUInt32())
-		v1.AuxInt = 0
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteBlockMIPS(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockMIPSEQ:
-		// match: (EQ (FPFlagTrue cmp) yes no)
-		// cond:
-		// result: (FPF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSFPFlagTrue {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSFPF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FPFlagFalse cmp) yes no)
-		// cond:
-		// result: (FPT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSFPFlagFalse {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSFPT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTU {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTUconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTzero {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTUzero {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTUconst [1] x) yes no)
-		// cond:
-		// result: (NE x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTUconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTUzero x) yes no)
-		// cond:
-		// result: (EQ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTUzero {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTconst [0] x) yes no)
-		// cond:
-		// result: (GEZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSGEZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTzero x) yes no)
-		// cond:
-		// result: (LEZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTzero {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSLEZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ  (MOVWconst [0]) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ  (MOVWconst [c]) yes no)
-		// cond: c != 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPSGEZ:
-		// match: (GEZ (MOVWconst [c]) yes no)
-		// cond: int32(c) >= 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) >= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GEZ (MOVWconst [c]) yes no)
-		// cond: int32(c) <  0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) < 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPSGTZ:
-		// match: (GTZ (MOVWconst [c]) yes no)
-		// cond: int32(c) >  0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) > 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GTZ (MOVWconst [c]) yes no)
-		// cond: int32(c) <= 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) <= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockIf:
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE cond yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(cond)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockMIPSLEZ:
-		// match: (LEZ (MOVWconst [c]) yes no)
-		// cond: int32(c) <= 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) <= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LEZ (MOVWconst [c]) yes no)
-		// cond: int32(c) >  0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) > 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPSLTZ:
-		// match: (LTZ (MOVWconst [c]) yes no)
-		// cond: int32(c) <  0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) < 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LTZ (MOVWconst [c]) yes no)
-		// cond: int32(c) >= 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(int32(c) >= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPSNE:
-		// match: (NE (FPFlagTrue cmp) yes no)
-		// cond:
-		// result: (FPT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSFPFlagTrue {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSFPT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FPFlagFalse cmp) yes no)
-		// cond:
-		// result: (FPF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSFPFlagFalse {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSFPF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTU {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTUconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTzero _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTzero {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSXORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPSSGTUzero {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTUconst [1] x) yes no)
-		// cond:
-		// result: (EQ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTUconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSEQ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTUzero x) yes no)
-		// cond:
-		// result: (NE x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTUzero {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSNE
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTconst [0] x) yes no)
-		// cond:
-		// result: (LTZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSLTZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTzero x) yes no)
-		// cond:
-		// result: (GTZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSSGTzero {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPSGTZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE  (MOVWconst [0]) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE  (MOVWconst [c]) yes no)
-		// cond: c != 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPSMOVWconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteMIPS64.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteMIPS64.go
deleted file mode 100644
index 6bef465..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ /dev/null
@@ -1,10435 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteMIPS64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteMIPS64.go:1
-// autogenerated from gen/MIPS64.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueMIPS64(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAdd16:
-		return rewriteValueMIPS64_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValueMIPS64_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValueMIPS64_OpAdd32F(v, config)
-	case OpAdd64:
-		return rewriteValueMIPS64_OpAdd64(v, config)
-	case OpAdd64F:
-		return rewriteValueMIPS64_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValueMIPS64_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValueMIPS64_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValueMIPS64_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValueMIPS64_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValueMIPS64_OpAnd32(v, config)
-	case OpAnd64:
-		return rewriteValueMIPS64_OpAnd64(v, config)
-	case OpAnd8:
-		return rewriteValueMIPS64_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValueMIPS64_OpAndB(v, config)
-	case OpAvg64u:
-		return rewriteValueMIPS64_OpAvg64u(v, config)
-	case OpClosureCall:
-		return rewriteValueMIPS64_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValueMIPS64_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValueMIPS64_OpCom32(v, config)
-	case OpCom64:
-		return rewriteValueMIPS64_OpCom64(v, config)
-	case OpCom8:
-		return rewriteValueMIPS64_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValueMIPS64_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValueMIPS64_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValueMIPS64_OpConst32F(v, config)
-	case OpConst64:
-		return rewriteValueMIPS64_OpConst64(v, config)
-	case OpConst64F:
-		return rewriteValueMIPS64_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValueMIPS64_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValueMIPS64_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValueMIPS64_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValueMIPS64_OpConvert(v, config)
-	case OpCvt32Fto32:
-		return rewriteValueMIPS64_OpCvt32Fto32(v, config)
-	case OpCvt32Fto64:
-		return rewriteValueMIPS64_OpCvt32Fto64(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValueMIPS64_OpCvt32Fto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValueMIPS64_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValueMIPS64_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValueMIPS64_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValueMIPS64_OpCvt64Fto32F(v, config)
-	case OpCvt64Fto64:
-		return rewriteValueMIPS64_OpCvt64Fto64(v, config)
-	case OpCvt64to32F:
-		return rewriteValueMIPS64_OpCvt64to32F(v, config)
-	case OpCvt64to64F:
-		return rewriteValueMIPS64_OpCvt64to64F(v, config)
-	case OpDeferCall:
-		return rewriteValueMIPS64_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValueMIPS64_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValueMIPS64_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValueMIPS64_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValueMIPS64_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValueMIPS64_OpDiv32u(v, config)
-	case OpDiv64:
-		return rewriteValueMIPS64_OpDiv64(v, config)
-	case OpDiv64F:
-		return rewriteValueMIPS64_OpDiv64F(v, config)
-	case OpDiv64u:
-		return rewriteValueMIPS64_OpDiv64u(v, config)
-	case OpDiv8:
-		return rewriteValueMIPS64_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValueMIPS64_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValueMIPS64_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValueMIPS64_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValueMIPS64_OpEq32F(v, config)
-	case OpEq64:
-		return rewriteValueMIPS64_OpEq64(v, config)
-	case OpEq64F:
-		return rewriteValueMIPS64_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValueMIPS64_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValueMIPS64_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValueMIPS64_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValueMIPS64_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValueMIPS64_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValueMIPS64_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValueMIPS64_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValueMIPS64_OpGeq32U(v, config)
-	case OpGeq64:
-		return rewriteValueMIPS64_OpGeq64(v, config)
-	case OpGeq64F:
-		return rewriteValueMIPS64_OpGeq64F(v, config)
-	case OpGeq64U:
-		return rewriteValueMIPS64_OpGeq64U(v, config)
-	case OpGeq8:
-		return rewriteValueMIPS64_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValueMIPS64_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValueMIPS64_OpGetClosurePtr(v, config)
-	case OpGoCall:
-		return rewriteValueMIPS64_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValueMIPS64_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValueMIPS64_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValueMIPS64_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValueMIPS64_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValueMIPS64_OpGreater32U(v, config)
-	case OpGreater64:
-		return rewriteValueMIPS64_OpGreater64(v, config)
-	case OpGreater64F:
-		return rewriteValueMIPS64_OpGreater64F(v, config)
-	case OpGreater64U:
-		return rewriteValueMIPS64_OpGreater64U(v, config)
-	case OpGreater8:
-		return rewriteValueMIPS64_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValueMIPS64_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValueMIPS64_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValueMIPS64_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValueMIPS64_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValueMIPS64_OpHmul32u(v, config)
-	case OpHmul64:
-		return rewriteValueMIPS64_OpHmul64(v, config)
-	case OpHmul64u:
-		return rewriteValueMIPS64_OpHmul64u(v, config)
-	case OpHmul8:
-		return rewriteValueMIPS64_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValueMIPS64_OpHmul8u(v, config)
-	case OpInterCall:
-		return rewriteValueMIPS64_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValueMIPS64_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValueMIPS64_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValueMIPS64_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValueMIPS64_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValueMIPS64_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValueMIPS64_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValueMIPS64_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValueMIPS64_OpLeq32U(v, config)
-	case OpLeq64:
-		return rewriteValueMIPS64_OpLeq64(v, config)
-	case OpLeq64F:
-		return rewriteValueMIPS64_OpLeq64F(v, config)
-	case OpLeq64U:
-		return rewriteValueMIPS64_OpLeq64U(v, config)
-	case OpLeq8:
-		return rewriteValueMIPS64_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValueMIPS64_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValueMIPS64_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValueMIPS64_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValueMIPS64_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValueMIPS64_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValueMIPS64_OpLess32U(v, config)
-	case OpLess64:
-		return rewriteValueMIPS64_OpLess64(v, config)
-	case OpLess64F:
-		return rewriteValueMIPS64_OpLess64F(v, config)
-	case OpLess64U:
-		return rewriteValueMIPS64_OpLess64U(v, config)
-	case OpLess8:
-		return rewriteValueMIPS64_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValueMIPS64_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValueMIPS64_OpLoad(v, config)
-	case OpLsh16x16:
-		return rewriteValueMIPS64_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValueMIPS64_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValueMIPS64_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValueMIPS64_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValueMIPS64_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValueMIPS64_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValueMIPS64_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValueMIPS64_OpLsh32x8(v, config)
-	case OpLsh64x16:
-		return rewriteValueMIPS64_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValueMIPS64_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValueMIPS64_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValueMIPS64_OpLsh64x8(v, config)
-	case OpLsh8x16:
-		return rewriteValueMIPS64_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValueMIPS64_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValueMIPS64_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValueMIPS64_OpLsh8x8(v, config)
-	case OpMIPS64ADDV:
-		return rewriteValueMIPS64_OpMIPS64ADDV(v, config)
-	case OpMIPS64ADDVconst:
-		return rewriteValueMIPS64_OpMIPS64ADDVconst(v, config)
-	case OpMIPS64AND:
-		return rewriteValueMIPS64_OpMIPS64AND(v, config)
-	case OpMIPS64ANDconst:
-		return rewriteValueMIPS64_OpMIPS64ANDconst(v, config)
-	case OpMIPS64MOVBUload:
-		return rewriteValueMIPS64_OpMIPS64MOVBUload(v, config)
-	case OpMIPS64MOVBUreg:
-		return rewriteValueMIPS64_OpMIPS64MOVBUreg(v, config)
-	case OpMIPS64MOVBload:
-		return rewriteValueMIPS64_OpMIPS64MOVBload(v, config)
-	case OpMIPS64MOVBreg:
-		return rewriteValueMIPS64_OpMIPS64MOVBreg(v, config)
-	case OpMIPS64MOVBstore:
-		return rewriteValueMIPS64_OpMIPS64MOVBstore(v, config)
-	case OpMIPS64MOVBstorezero:
-		return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v, config)
-	case OpMIPS64MOVDload:
-		return rewriteValueMIPS64_OpMIPS64MOVDload(v, config)
-	case OpMIPS64MOVDstore:
-		return rewriteValueMIPS64_OpMIPS64MOVDstore(v, config)
-	case OpMIPS64MOVFload:
-		return rewriteValueMIPS64_OpMIPS64MOVFload(v, config)
-	case OpMIPS64MOVFstore:
-		return rewriteValueMIPS64_OpMIPS64MOVFstore(v, config)
-	case OpMIPS64MOVHUload:
-		return rewriteValueMIPS64_OpMIPS64MOVHUload(v, config)
-	case OpMIPS64MOVHUreg:
-		return rewriteValueMIPS64_OpMIPS64MOVHUreg(v, config)
-	case OpMIPS64MOVHload:
-		return rewriteValueMIPS64_OpMIPS64MOVHload(v, config)
-	case OpMIPS64MOVHreg:
-		return rewriteValueMIPS64_OpMIPS64MOVHreg(v, config)
-	case OpMIPS64MOVHstore:
-		return rewriteValueMIPS64_OpMIPS64MOVHstore(v, config)
-	case OpMIPS64MOVHstorezero:
-		return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v, config)
-	case OpMIPS64MOVVload:
-		return rewriteValueMIPS64_OpMIPS64MOVVload(v, config)
-	case OpMIPS64MOVVreg:
-		return rewriteValueMIPS64_OpMIPS64MOVVreg(v, config)
-	case OpMIPS64MOVVstore:
-		return rewriteValueMIPS64_OpMIPS64MOVVstore(v, config)
-	case OpMIPS64MOVVstorezero:
-		return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v, config)
-	case OpMIPS64MOVWUload:
-		return rewriteValueMIPS64_OpMIPS64MOVWUload(v, config)
-	case OpMIPS64MOVWUreg:
-		return rewriteValueMIPS64_OpMIPS64MOVWUreg(v, config)
-	case OpMIPS64MOVWload:
-		return rewriteValueMIPS64_OpMIPS64MOVWload(v, config)
-	case OpMIPS64MOVWreg:
-		return rewriteValueMIPS64_OpMIPS64MOVWreg(v, config)
-	case OpMIPS64MOVWstore:
-		return rewriteValueMIPS64_OpMIPS64MOVWstore(v, config)
-	case OpMIPS64MOVWstorezero:
-		return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v, config)
-	case OpMIPS64NEGV:
-		return rewriteValueMIPS64_OpMIPS64NEGV(v, config)
-	case OpMIPS64NOR:
-		return rewriteValueMIPS64_OpMIPS64NOR(v, config)
-	case OpMIPS64NORconst:
-		return rewriteValueMIPS64_OpMIPS64NORconst(v, config)
-	case OpMIPS64OR:
-		return rewriteValueMIPS64_OpMIPS64OR(v, config)
-	case OpMIPS64ORconst:
-		return rewriteValueMIPS64_OpMIPS64ORconst(v, config)
-	case OpMIPS64SGT:
-		return rewriteValueMIPS64_OpMIPS64SGT(v, config)
-	case OpMIPS64SGTU:
-		return rewriteValueMIPS64_OpMIPS64SGTU(v, config)
-	case OpMIPS64SGTUconst:
-		return rewriteValueMIPS64_OpMIPS64SGTUconst(v, config)
-	case OpMIPS64SGTconst:
-		return rewriteValueMIPS64_OpMIPS64SGTconst(v, config)
-	case OpMIPS64SLLV:
-		return rewriteValueMIPS64_OpMIPS64SLLV(v, config)
-	case OpMIPS64SLLVconst:
-		return rewriteValueMIPS64_OpMIPS64SLLVconst(v, config)
-	case OpMIPS64SRAV:
-		return rewriteValueMIPS64_OpMIPS64SRAV(v, config)
-	case OpMIPS64SRAVconst:
-		return rewriteValueMIPS64_OpMIPS64SRAVconst(v, config)
-	case OpMIPS64SRLV:
-		return rewriteValueMIPS64_OpMIPS64SRLV(v, config)
-	case OpMIPS64SRLVconst:
-		return rewriteValueMIPS64_OpMIPS64SRLVconst(v, config)
-	case OpMIPS64SUBV:
-		return rewriteValueMIPS64_OpMIPS64SUBV(v, config)
-	case OpMIPS64SUBVconst:
-		return rewriteValueMIPS64_OpMIPS64SUBVconst(v, config)
-	case OpMIPS64XOR:
-		return rewriteValueMIPS64_OpMIPS64XOR(v, config)
-	case OpMIPS64XORconst:
-		return rewriteValueMIPS64_OpMIPS64XORconst(v, config)
-	case OpMod16:
-		return rewriteValueMIPS64_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValueMIPS64_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValueMIPS64_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValueMIPS64_OpMod32u(v, config)
-	case OpMod64:
-		return rewriteValueMIPS64_OpMod64(v, config)
-	case OpMod64u:
-		return rewriteValueMIPS64_OpMod64u(v, config)
-	case OpMod8:
-		return rewriteValueMIPS64_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValueMIPS64_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValueMIPS64_OpMove(v, config)
-	case OpMul16:
-		return rewriteValueMIPS64_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValueMIPS64_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValueMIPS64_OpMul32F(v, config)
-	case OpMul64:
-		return rewriteValueMIPS64_OpMul64(v, config)
-	case OpMul64F:
-		return rewriteValueMIPS64_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValueMIPS64_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValueMIPS64_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValueMIPS64_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValueMIPS64_OpNeg32F(v, config)
-	case OpNeg64:
-		return rewriteValueMIPS64_OpNeg64(v, config)
-	case OpNeg64F:
-		return rewriteValueMIPS64_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValueMIPS64_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValueMIPS64_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValueMIPS64_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValueMIPS64_OpNeq32F(v, config)
-	case OpNeq64:
-		return rewriteValueMIPS64_OpNeq64(v, config)
-	case OpNeq64F:
-		return rewriteValueMIPS64_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValueMIPS64_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValueMIPS64_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValueMIPS64_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValueMIPS64_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValueMIPS64_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValueMIPS64_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValueMIPS64_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValueMIPS64_OpOr32(v, config)
-	case OpOr64:
-		return rewriteValueMIPS64_OpOr64(v, config)
-	case OpOr8:
-		return rewriteValueMIPS64_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValueMIPS64_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValueMIPS64_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValueMIPS64_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValueMIPS64_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValueMIPS64_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValueMIPS64_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValueMIPS64_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValueMIPS64_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValueMIPS64_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValueMIPS64_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValueMIPS64_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValueMIPS64_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValueMIPS64_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValueMIPS64_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValueMIPS64_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValueMIPS64_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValueMIPS64_OpRsh32x8(v, config)
-	case OpRsh64Ux16:
-		return rewriteValueMIPS64_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValueMIPS64_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValueMIPS64_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValueMIPS64_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValueMIPS64_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValueMIPS64_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValueMIPS64_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValueMIPS64_OpRsh64x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValueMIPS64_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValueMIPS64_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValueMIPS64_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValueMIPS64_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValueMIPS64_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValueMIPS64_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValueMIPS64_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValueMIPS64_OpRsh8x8(v, config)
-	case OpSelect0:
-		return rewriteValueMIPS64_OpSelect0(v, config)
-	case OpSelect1:
-		return rewriteValueMIPS64_OpSelect1(v, config)
-	case OpSignExt16to32:
-		return rewriteValueMIPS64_OpSignExt16to32(v, config)
-	case OpSignExt16to64:
-		return rewriteValueMIPS64_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValueMIPS64_OpSignExt32to64(v, config)
-	case OpSignExt8to16:
-		return rewriteValueMIPS64_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValueMIPS64_OpSignExt8to32(v, config)
-	case OpSignExt8to64:
-		return rewriteValueMIPS64_OpSignExt8to64(v, config)
-	case OpSlicemask:
-		return rewriteValueMIPS64_OpSlicemask(v, config)
-	case OpStaticCall:
-		return rewriteValueMIPS64_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValueMIPS64_OpStore(v, config)
-	case OpSub16:
-		return rewriteValueMIPS64_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValueMIPS64_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValueMIPS64_OpSub32F(v, config)
-	case OpSub64:
-		return rewriteValueMIPS64_OpSub64(v, config)
-	case OpSub64F:
-		return rewriteValueMIPS64_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValueMIPS64_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValueMIPS64_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValueMIPS64_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValueMIPS64_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValueMIPS64_OpTrunc32to8(v, config)
-	case OpTrunc64to16:
-		return rewriteValueMIPS64_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValueMIPS64_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValueMIPS64_OpTrunc64to8(v, config)
-	case OpXor16:
-		return rewriteValueMIPS64_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValueMIPS64_OpXor32(v, config)
-	case OpXor64:
-		return rewriteValueMIPS64_OpXor64(v, config)
-	case OpXor8:
-		return rewriteValueMIPS64_OpXor8(v, config)
-	case OpZero:
-		return rewriteValueMIPS64_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValueMIPS64_OpZeroExt16to32(v, config)
-	case OpZeroExt16to64:
-		return rewriteValueMIPS64_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValueMIPS64_OpZeroExt32to64(v, config)
-	case OpZeroExt8to16:
-		return rewriteValueMIPS64_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValueMIPS64_OpZeroExt8to32(v, config)
-	case OpZeroExt8to64:
-		return rewriteValueMIPS64_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValueMIPS64_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16 x y)
-	// cond:
-	// result: (ADDV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32 x y)
-	// cond:
-	// result: (ADDV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (ADDF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64 x y)
-	// cond:
-	// result: (ADDV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (ADDD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8 x y)
-	// cond:
-	// result: (ADDV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADDV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (MOVVaddr {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(OpMIPS64MOVVaddr)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpAvg64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Avg64u <t> x y)
-	// cond:
-	// result: (ADDV (ADDV <t> (SRLVconst <t> x [1]) (SRLVconst <t> y [1])) (AND <t> (AND <t> x y) (MOVVconst [1])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64ADDV)
-		v0 := b.NewValue0(v.Line, OpMIPS64ADDV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SRLVconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpMIPS64SRLVconst, t)
-		v2.AuxInt = 1
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64AND, t)
-		v4 := b.NewValue0(v.Line, OpMIPS64AND, t)
-		v4.AddArg(x)
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v5.AuxInt = 1
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPS64CALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (NOR (MOVVconst [0]) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (NOR (MOVVconst [0]) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 x)
-	// cond:
-	// result: (NOR (MOVVconst [0]) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8 x)
-	// cond:
-	// result: (NOR (MOVVconst [0]) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16 [val])
-	// cond:
-	// result: (MOVVconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32 [val])
-	// cond:
-	// result: (MOVVconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (MOVFconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPS64MOVFconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConst64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64 [val])
-	// cond:
-	// result: (MOVVconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPS64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8 [val])
-	// cond:
-	// result: (MOVVconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVVconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValueMIPS64_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert x mem)
-	// cond:
-	// result: (MOVVconvert x mem)
-	for {
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPS64MOVVconvert)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (TRUNCFW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64TRUNCFW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt32Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64 x)
-	// cond:
-	// result: (TRUNCFV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64TRUNCFV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (MOVFD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVFD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (MOVWF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVWF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (MOVWD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVWD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (TRUNCDW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64TRUNCDW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (MOVDF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVDF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt64Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto64 x)
-	// cond:
-	// result: (TRUNCDV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64TRUNCDV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt64to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to32F x)
-	// cond:
-	// result: (MOVVF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVVF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpCvt64to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to64F x)
-	// cond:
-	// result: (MOVVD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVVD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpMIPS64CALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16 x y)
-	// cond:
-	// result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32 x y)
-	// cond:
-	// result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (DIVF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64DIVF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64 x y)
-	// cond:
-	// result: (Select1 (DIVV x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (DIVD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64DIVD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64u x y)
-	// cond:
-	// result: (Select1 (DIVVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8 x y)
-	// cond:
-	// result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u x y)
-	// cond:
-	// result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16 x y)
-	// cond:
-	// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32 x y)
-	// cond:
-	// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPEQF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPEQF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64 x y)
-	// cond:
-	// result: (SGTU (MOVVconst [1]) (XOR x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPEQD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPEQD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8 x y)
-	// cond:
-	// result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (XOR <config.fe.TypeBool()> x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeBool())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (SGTU (MOVVconst [1]) (XOR x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 y) (SignExt16to64 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 y) (SignExt32to64 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGEF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGEF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v1.AddArg(y)
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGED x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGED, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v1.AddArg(y)
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 y) (SignExt8to64 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpMIPS64LoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpMIPS64CALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16 x y)
-	// cond:
-	// result: (SGT (SignExt16to64 x) (SignExt16to64 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32 x y)
-	// cond:
-	// result: (SGT (SignExt32to64 x) (SignExt32to64 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGTF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64 x y)
-	// cond:
-	// result: (SGT x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGTD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U x y)
-	// cond:
-	// result: (SGTU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8 x y)
-	// cond:
-	// result: (SGT (SignExt8to64 x) (SignExt8to64 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U x y)
-	// cond:
-	// result: (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16 x y)
-	// cond:
-	// result: (SRAVconst (Select1 <config.fe.TypeInt32()> (MULV (SignExt16to64 x) (SignExt16to64 y))) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAVconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (SRLVconst (Select1 <config.fe.TypeUInt32()> (MULVU (ZeroExt16to64 x) (ZeroExt16to64 y))) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRLVconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32 x y)
-	// cond:
-	// result: (SRAVconst (Select1 <config.fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAVconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeInt64())
-		v1 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (SRLVconst (Select1 <config.fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRLVconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt64())
-		v1 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64 x y)
-	// cond:
-	// result: (Select0 (MULV x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64u x y)
-	// cond:
-	// result: (Select0 (MULVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8 x y)
-	// cond:
-	// result: (SRAVconst (Select1 <config.fe.TypeInt16()> (MULV (SignExt8to64 x) (SignExt8to64 y))) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAVconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeInt16())
-		v1 := b.NewValue0(v.Line, OpMIPS64MULV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u x y)
-	// cond:
-	// result: (SRLVconst (Select1 <config.fe.TypeUInt16()> (MULVU (ZeroExt8to64 x) (ZeroExt8to64 y))) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRLVconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt16())
-		v1 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPS64CALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (SGTU len idx)
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v.AddArg(len)
-		v.AddArg(idx)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil ptr)
-	// cond:
-	// result: (SGTU ptr (MOVVconst [0]))
-	for {
-		ptr := v.Args[0]
-		v.reset(OpMIPS64SGTU)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v1.AddArg(idx)
-		v1.AddArg(len)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGEF y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGEF, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGED y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGED, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8 x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGT, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U x y)
-	// cond:
-	// result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16 x y)
-	// cond:
-	// result: (SGT (SignExt16to64 y) (SignExt16to64 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32 x y)
-	// cond:
-	// result: (SGT (SignExt32to64 y) (SignExt32to64 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTF y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGTF, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64 x y)
-	// cond:
-	// result: (SGT y x)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (FPFlagTrue (CMPGTD y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagTrue)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPGTD, TypeFlags)
-		v0.AddArg(y)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U x y)
-	// cond:
-	// result: (SGTU y x)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8 x y)
-	// cond:
-	// result: (SGT (SignExt8to64 y) (SignExt8to64 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGT)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U x y)
-	// cond:
-	// result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: t.IsBoolean()
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean()) {
-			break
-		}
-		v.reset(OpMIPS64MOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && isSigned(t))
-	// result: (MOVBload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is8BitInt(t) && !isSigned(t))
-	// result: (MOVBUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && isSigned(t))
-	// result: (MOVHload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is16BitInt(t) && !isSigned(t))
-	// result: (MOVHUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) && isSigned(t))
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is32BitInt(t) && !isSigned(t))
-	// result: (MOVWUload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWUload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t))
-	// result: (MOVVload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (MOVFload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVFload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (MOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpMIPS64MOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v3.AddArg(x)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v3.AddArg(x)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v3.AddArg(x)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v3.AddArg(x)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SLLV <t> x (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SLLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMIPS64ADDV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDV (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ADDVconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDV x (MOVVconst [c]))
-	// cond: is32Bit(c)
-	// result: (ADDVconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDV x (NEGV y))
-	// cond:
-	// result: (SUBV x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64NEGV {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDV (NEGV y) x)
-	// cond:
-	// result: (SUBV x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64NEGV {
-			break
-		}
-		y := v_0.Args[0]
-		x := v.Args[1]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
-	// cond:
-	// result: (MOVVaddr [off1+off2] {sym} ptr)
-	for {
-		off1 := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym := v_0.Aux
-		ptr := v_0.Args[0]
-		v.reset(OpMIPS64MOVVaddr)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (ADDVconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDVconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [c+d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = c + d
-		return true
-	}
-	// match: (ADDVconst [c] (ADDVconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (ADDVconst [c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = c + d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDVconst [c] (SUBVconst [d] x))
-	// cond: is32Bit(c-d)
-	// result: (ADDVconst [c-d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64SUBVconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c - d)) {
-			break
-		}
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = c - d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64AND(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AND (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ANDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64ANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVVconst [c]))
-	// cond: is32Bit(c)
-	// result: (ANDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64ANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDconst [0]  _)
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDconst [-1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = c & d
-		return true
-	}
-	// match: (ANDconst [c] (ANDconst [d] x))
-	// cond:
-	// result: (ANDconst [c&d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ANDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpMIPS64ANDconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBUreg (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [int64(uint8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(uint8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload  [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg  (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
-	// cond:
-	// result: (MOVBstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVBreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVWUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDload  [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVDload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVFload  [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVFload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVFload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVFload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVFstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVFstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVFstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVHUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg x:(MOVHUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHUreg (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [int64(uint16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(uint16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHload  [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVHload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg  (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
-	// cond:
-	// result: (MOVHstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVHstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVWUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVVload  [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVVload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVVreg x)
-	// cond: x.Uses == 1
-	// result: (MOVVnop x)
-	for {
-		x := v.Args[0]
-		if !(x.Uses == 1) {
-			break
-		}
-		v.reset(OpMIPS64MOVVnop)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVVreg  (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVVstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
-	// cond:
-	// result: (MOVVstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVVstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVVstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWUload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWUload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWUload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWUreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVWUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVWUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVHUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg x:(MOVWUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVWUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWUreg (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [int64(uint32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(uint32(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload  [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWreg x:(MOVBload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHUload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHUload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVWload _ _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVWload {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBUreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVHreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVWreg _))
-	// cond:
-	// result: (MOVVreg x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpMIPS64MOVWreg {
-			break
-		}
-		v.reset(OpMIPS64MOVVreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg  (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [int64(int32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(int32(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
-	// cond:
-	// result: (MOVWstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVWstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVWUreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
-	// cond: is32Bit(off1+off2)
-	// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
-	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64NEGV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEGV (MOVVconst [c]))
-	// cond:
-	// result: (MOVVconst [-c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = -c
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64NOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOR (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (NORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64NORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (NOR x (MOVVconst [c]))
-	// cond: is32Bit(c)
-	// result: (NORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64NORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64NORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NORconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [^(c|d)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = ^(c | d)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64OR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OR  (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ORconst  [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64ORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x (MOVVconst [c]))
-	// cond: is32Bit(c)
-	// result: (ORconst  [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64ORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR  x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64ORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORconst  [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORconst  [-1] _)
-	// cond:
-	// result: (MOVVconst [-1])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = c | d
-		return true
-	}
-	// match: (ORconst [c] (ORconst [d] x))
-	// cond: is32Bit(c|d)
-	// result: (ORconst [c|d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c | d)) {
-			break
-		}
-		v.reset(OpMIPS64ORconst)
-		v.AuxInt = c | d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SGT(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGT  (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (SGTconst  [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64SGTconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SGTU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTU (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (SGTUconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64SGTUconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTUconst [c] (MOVVconst [d]))
-	// cond: uint64(c)>uint64(d)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint64(c) > uint64(d)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (MOVVconst [d]))
-	// cond: uint64(c)<=uint64(d)
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(uint64(c) <= uint64(d)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTUconst [c] (MOVBUreg _))
-	// cond: 0xff < uint64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		if !(0xff < uint64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (MOVHUreg _))
-	// cond: 0xffff < uint64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		if !(0xffff < uint64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (ANDconst [m] _))
-	// cond: uint64(m) < uint64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(uint64(m) < uint64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTUconst [c] (SRLVconst _ [d]))
-	// cond: 0 < d && d <= 63 && 1<<uint64(64-d) <= uint64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64SRLVconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(0 < d && d <= 63 && 1<<uint64(64-d) <= uint64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SGTconst [c] (MOVVconst [d]))
-	// cond: int64(c)>int64(d)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(int64(c) > int64(d)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVVconst [d]))
-	// cond: int64(c)<=int64(d)
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(int64(c) <= int64(d)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVBreg _))
-	// cond: 0x7f < int64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVBreg {
-			break
-		}
-		if !(0x7f < int64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVBreg _))
-	// cond: int64(c) <= -0x80
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVBreg {
-			break
-		}
-		if !(int64(c) <= -0x80) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVBUreg _))
-	// cond: 0xff < int64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		if !(0xff < int64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVBUreg _))
-	// cond: int64(c) < 0
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVBUreg {
-			break
-		}
-		if !(int64(c) < 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVHreg _))
-	// cond: 0x7fff < int64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVHreg {
-			break
-		}
-		if !(0x7fff < int64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVHreg _))
-	// cond: int64(c) <= -0x8000
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVHreg {
-			break
-		}
-		if !(int64(c) <= -0x8000) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVHUreg _))
-	// cond: 0xffff < int64(c)
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		if !(0xffff < int64(c)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (MOVHUreg _))
-	// cond: int64(c) < 0
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVHUreg {
-			break
-		}
-		if !(int64(c) < 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (MOVWUreg _))
-	// cond: int64(c) < 0
-	// result: (MOVVconst [0])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVWUreg {
-			break
-		}
-		if !(int64(c) < 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SGTconst [c] (ANDconst [m] _))
-	// cond: 0 <= m && m < c
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= m && m < c) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (SGTconst [c] (SRLVconst _ [d]))
-	// cond: 0 <= c && 0 < d && d <= 63 && 1<<uint64(64-d) <= c
-	// result: (MOVVconst [1])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64SRLVconst {
-			break
-		}
-		d := v_0.AuxInt
-		if !(0 <= c && 0 < d && d <= 63 && 1<<uint64(64-d) <= c) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SLLV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLLV _ (MOVVconst [c]))
-	// cond: uint64(c)>=64
-	// result: (MOVVconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SLLV x (MOVVconst [c]))
-	// cond:
-	// result: (SLLVconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPS64SLLVconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLLVconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [int64(d)<<uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(d) << uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SRAV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAV x (MOVVconst [c]))
-	// cond: uint64(c)>=64
-	// result: (SRAVconst x [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpMIPS64SRAVconst)
-		v.AuxInt = 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRAV x (MOVVconst [c]))
-	// cond:
-	// result: (SRAVconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPS64SRAVconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAVconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [int64(d)>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(d) >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SRLV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRLV _ (MOVVconst [c]))
-	// cond: uint64(c)>=64
-	// result: (MOVVconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SRLV x (MOVVconst [c]))
-	// cond:
-	// result: (SRLVconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpMIPS64SRLVconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRLVconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [int64(uint64(d)>>uint64(c))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(uint64(d) >> uint64(c))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SUBV(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBV x (MOVVconst [c]))
-	// cond: is32Bit(c)
-	// result: (SUBVconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64SUBVconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBV x x)
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SUBV (MOVVconst [0]) x)
-	// cond:
-	// result: (NEGV x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBVconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBVconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [d-c])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = d - c
-		return true
-	}
-	// match: (SUBVconst [c] (SUBVconst [d] x))
-	// cond: is32Bit(-c-d)
-	// result: (ADDVconst [-c-d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64SUBVconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(-c - d)) {
-			break
-		}
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = -c - d
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBVconst [c] (ADDVconst [d] x))
-	// cond: is32Bit(-c+d)
-	// result: (ADDVconst [-c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64ADDVconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(-c + d)) {
-			break
-		}
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = -c + d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64XOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XOR (MOVVconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (XORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x (MOVVconst [c]))
-	// cond: is32Bit(c)
-	// result: (XORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpMIPS64XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR x x)
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMIPS64XORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORconst [0]  x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [-1] x)
-	// cond:
-	// result: (NORconst [0] x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpMIPS64NORconst)
-		v.AuxInt = 0
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [c] (MOVVconst [d]))
-	// cond:
-	// result: (MOVVconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	// match: (XORconst [c] (XORconst [d] x))
-	// cond: is32Bit(c^d)
-	// result: (XORconst [c^d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64XORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c ^ d)) {
-			break
-		}
-		v.reset(OpMIPS64XORconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16 x y)
-	// cond:
-	// result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v1 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v1 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32 x y)
-	// cond:
-	// result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64 x y)
-	// cond:
-	// result: (Select0 (DIVV x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64u x y)
-	// cond:
-	// result: (Select0 (DIVVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8 x y)
-	// cond:
-	// result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVV, MakeTuple(config.fe.TypeInt64(), config.fe.TypeInt64()))
-		v1 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u x y)
-	// cond:
-	// result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect0)
-		v0 := b.NewValue0(v.Line, OpMIPS64DIVVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore dst (MOVHload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] dst (MOVBload [1] src mem) 		(MOVBstore dst (MOVBload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = 1
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v0.AuxInt = 1
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore dst (MOVWload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] dst (MOVHload [2] src mem) 		(MOVHstore dst (MOVHload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] dst (MOVBload [3] src mem) 		(MOVBstore [2] dst (MOVBload [2] src mem) 			(MOVBstore [1] dst (MOVBload [1] src mem) 				(MOVBstore dst (MOVBload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = 3
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v0.AuxInt = 3
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v2.AuxInt = 2
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v4.AuxInt = 1
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVVstore dst (MOVVload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [4] dst (MOVWload [4] src mem) 		(MOVWstore dst (MOVWload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [6] dst (MOVHload [6] src mem) 		(MOVHstore [4] dst (MOVHload [4] src mem) 			(MOVHstore [2] dst (MOVHload [2] src mem) 				(MOVHstore dst (MOVHload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = 6
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v0.AuxInt = 6
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v3.AuxInt = 2
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v4.AuxInt = 2
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBload [2] src mem) 		(MOVBstore [1] dst (MOVBload [1] src mem) 			(MOVBstore dst (MOVBload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v2.AuxInt = 1
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVBload, config.fe.TypeInt8())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [4] dst (MOVHload [4] src mem) 		(MOVHstore [2] dst (MOVHload [2] src mem) 			(MOVHstore dst (MOVHload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v2.AuxInt = 2
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVHload, config.fe.TypeInt16())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [8] dst (MOVWload [8] src mem) 		(MOVWstore [4] dst (MOVWload [4] src mem) 			(MOVWstore dst (MOVWload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
-		v0.AuxInt = 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVWload, config.fe.TypeInt32())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVVstore [8] dst (MOVVload [8] src mem) 		(MOVVstore dst (MOVVload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AuxInt = 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
-		v0.AuxInt = 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVVstore [16] dst (MOVVload [16] src mem) 		(MOVVstore [8] dst (MOVVload [8] src mem) 			(MOVVstore dst (MOVVload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AuxInt = 16
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
-		v0.AuxInt = 16
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
-		v2.AuxInt = 8
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVload, config.fe.TypeUInt64())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0
-	// result: (LoweredMove [SizeAndAlign(s).Align()] 		dst 		src 		(ADDVconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 24 || SizeAndAlign(s).Align()%8 != 0) {
-			break
-		}
-		v.reset(OpMIPS64LoweredMove)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpMIPS64ADDVconst, src.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(src)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16 x y)
-	// cond:
-	// result: (Select1 (MULVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32 x y)
-	// cond:
-	// result: (Select1 (MULVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (MULF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64MULF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64 x y)
-	// cond:
-	// result: (Select1 (MULVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (MULD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64MULD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8 x y)
-	// cond:
-	// result: (Select1 (MULVU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Line, OpMIPS64MULVU, MakeTuple(config.fe.TypeUInt64(), config.fe.TypeUInt64()))
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16 x)
-	// cond:
-	// result: (NEGV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32 x)
-	// cond:
-	// result: (NEGV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (NEGF x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NEGF)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64 x)
-	// cond:
-	// result: (NEGV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (NEGD x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NEGD)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8 x)
-	// cond:
-	// result: (NEGV x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16 x y)
-	// cond:
-	// result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32 x y)
-	// cond:
-	// result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v1 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (FPFlagFalse (CMPEQF x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagFalse)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPEQF, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64 x y)
-	// cond:
-	// result: (SGTU (XOR x y) (MOVVconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (FPFlagFalse (CMPEQD x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64FPFlagFalse)
-		v0 := b.NewValue0(v.Line, OpMIPS64CMPEQD, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8 x y)
-	// cond:
-	// result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v3.AuxInt = 0
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (SGTU (XOR x y) (MOVVconst [0]))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SGTU)
-		v0 := b.NewValue0(v.Line, OpMIPS64XOR, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpMIPS64LoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64XORconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr:(SP))
-	// cond:
-	// result: (MOVVaddr [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if ptr.Op != OpSP {
-			break
-		}
-		v.reset(OpMIPS64MOVVaddr)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADDVconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(OpMIPS64ADDVconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v3.AddArg(y)
-		v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v4.AuxInt = 63
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8  <t> x y)
-	// cond:
-	// result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt16to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v3.AddArg(y)
-		v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v4.AuxInt = 63
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8  <t> x y)
-	// cond:
-	// result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v3.AddArg(x)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> x (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16 <t> x y)
-	// cond:
-	// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v4.AuxInt = 63
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v5 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v0.AddArg(v5)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32 <t> x y)
-	// cond:
-	// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v4.AuxInt = 63
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v5 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v0.AddArg(v5)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64 <t> x y)
-	// cond:
-	// result: (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2.AddArg(y)
-		v3 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v3.AuxInt = 63
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8  <t> x y)
-	// cond:
-	// result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v4.AuxInt = 63
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v0.AddArg(v5)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v3.AddArg(y)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  <t> x y)
-	// cond:
-	// result: (AND (NEGV <t> (SGTU (Const64 <config.fe.TypeUInt64()> [64]) (ZeroExt8to64  y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64AND)
-		v0 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v1 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v2.AuxInt = 64
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpMIPS64SRLV, t)
-		v5 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v.AddArg(v4)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 <t> x y)
-	// cond:
-	// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <config.fe.TypeUInt64()> [63]))) y))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v3.AddArg(y)
-		v4 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v4.AuxInt = 63
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8  <t> x y)
-	// cond:
-	// result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (Const64 <config.fe.TypeUInt64()> [63]))) (ZeroExt8to64  y)))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SRAV)
-		v0 := b.NewValue0(v.Line, OpSignExt8to64, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64OR, t)
-		v2 := b.NewValue0(v.Line, OpMIPS64NEGV, t)
-		v3 := b.NewValue0(v.Line, OpMIPS64SGTU, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v5.AuxInt = 63
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v6 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v6.AddArg(y)
-		v1.AddArg(v6)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSelect0(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select0 (DIVVU _ (MOVVconst [1])))
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVVU {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select0 (DIVVU x (MOVVconst [c])))
-	// cond: isPowerOfTwo(c)
-	// result: (ANDconst [c-1] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVVU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpMIPS64ANDconst)
-		v.AuxInt = c - 1
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select0 (DIVV  (MOVVconst [c]) (MOVVconst [d])))
-	// cond:
-	// result: (MOVVconst [int64(c)%int64(d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVV {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(c) % int64(d)
-		return true
-	}
-	// match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
-	// cond:
-	// result: (MOVVconst [int64(uint64(c)%uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(uint64(c) % uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpSelect1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select1 (MULVU x (MOVVconst [-1])))
-	// cond:
-	// result: (NEGV x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_1.AuxInt != -1 {
-			break
-		}
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULVU _ (MOVVconst [0])))
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select1 (MULVU x (MOVVconst [1])))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULVU x (MOVVconst [c])))
-	// cond: isPowerOfTwo(c)
-	// result: (SLLVconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpMIPS64SLLVconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULVU (MOVVconst [-1]) x))
-	// cond:
-	// result: (NEGV x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_0.AuxInt != -1 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpMIPS64NEGV)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULVU (MOVVconst [0]) _))
-	// cond:
-	// result: (MOVVconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Select1 (MULVU (MOVVconst [1]) x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_0.AuxInt != 1 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULVU (MOVVconst [c]) x))
-	// cond: isPowerOfTwo(c)
-	// result: (SLLVconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpMIPS64SLLVconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (DIVVU x (MOVVconst [1])))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVVU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		if v_0_1.AuxInt != 1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (DIVVU x (MOVVconst [c])))
-	// cond: isPowerOfTwo(c)
-	// result: (SRLVconst [log2(c)] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVVU {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpMIPS64SRLVconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d])))
-	// cond:
-	// result: (MOVVconst [c*d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64MULVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = c * d
-		return true
-	}
-	// match: (Select1 (DIVV  (MOVVconst [c]) (MOVVconst [d])))
-	// cond:
-	// result: (MOVVconst [int64(c)/int64(d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVV {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(c) / int64(d)
-		return true
-	}
-	// match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
-	// cond:
-	// result: (MOVVconst [int64(uint64(c)/uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMIPS64DIVVU {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpMIPS64MOVVconst {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpMIPS64MOVVconst {
-			break
-		}
-		d := v_0_1.AuxInt
-		v.reset(OpMIPS64MOVVconst)
-		v.AuxInt = int64(uint64(c) / uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 x)
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVWreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64 x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (NORconst [0] (SRAVconst <t> (SUBVconst <t> x [1]) [63]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpMIPS64NORconst)
-		v.AuxInt = 0
-		v0 := b.NewValue0(v.Line, OpMIPS64SRAVconst, t)
-		v0.AuxInt = 63
-		v1 := b.NewValue0(v.Line, OpMIPS64SUBVconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpMIPS64CALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVHstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpMIPS64MOVHstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: !is32BitFloat(val.Type)
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: !is64BitFloat(val.Type)
-	// result: (MOVVstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(!is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (MOVFstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPS64MOVFstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (MOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpMIPS64MOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16 x y)
-	// cond:
-	// result: (SUBV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32 x y)
-	// cond:
-	// result: (SUBV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (SUBF x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBF)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64 x y)
-	// cond:
-	// result: (SUBV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (SUBD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8 x y)
-	// cond:
-	// result: (SUBV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUBV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64SUBV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMIPS64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore ptr (MOVVconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore ptr (MOVVconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] ptr (MOVVconst [0]) 		(MOVBstore [0] ptr (MOVVconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = 1
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore ptr (MOVVconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] ptr (MOVVconst [0]) 		(MOVHstore [0] ptr (MOVVconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] ptr (MOVVconst [0]) 		(MOVBstore [2] ptr (MOVVconst [0]) 			(MOVBstore [1] ptr (MOVVconst [0]) 				(MOVBstore [0] ptr (MOVVconst [0]) mem))))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = 3
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v5.AuxInt = 0
-		v5.AddArg(ptr)
-		v6 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v6.AuxInt = 0
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVVstore ptr (MOVVconst [0]) mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [4] ptr (MOVVconst [0]) 		(MOVWstore [0] ptr (MOVVconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = 4
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVHstore [6] ptr (MOVVconst [0]) 		(MOVHstore [4] ptr (MOVVconst [0]) 			(MOVHstore [2] ptr (MOVVconst [0]) 				(MOVHstore [0] ptr (MOVVconst [0]) mem))))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = 6
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v3.AuxInt = 2
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v5.AuxInt = 0
-		v5.AddArg(ptr)
-		v6 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v6.AuxInt = 0
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] ptr (MOVVconst [0]) 		(MOVBstore [1] ptr (MOVVconst [0]) 			(MOVBstore [0] ptr (MOVVconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpMIPS64MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVBstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [4] ptr (MOVVconst [0]) 		(MOVHstore [2] ptr (MOVVconst [0]) 			(MOVHstore [0] ptr (MOVVconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 6 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVHstore)
-		v.AuxInt = 4
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVHstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [8] ptr (MOVVconst [0]) 		(MOVWstore [4] ptr (MOVVconst [0]) 			(MOVWstore [0] ptr (MOVVconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 12 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVWstore)
-		v.AuxInt = 8
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVWstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVVstore [8] ptr (MOVVconst [0]) 		(MOVVstore [0] ptr (MOVVconst [0]) mem))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AuxInt = 8
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVVstore [16] ptr (MOVVconst [0]) 		(MOVVstore [8] ptr (MOVVconst [0]) 			(MOVVstore [0] ptr (MOVVconst [0]) mem)))
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpMIPS64MOVVstore)
-		v.AuxInt = 16
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(ptr)
-		v2 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v2.AuxInt = 0
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpMIPS64MOVVstore, TypeMem)
-		v3.AuxInt = 0
-		v3.AddArg(ptr)
-		v4 := b.NewValue0(v.Line, OpMIPS64MOVVconst, config.fe.TypeUInt64())
-		v4.AuxInt = 0
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 	&& SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice
-	// result: (DUFFZERO [8 * (128 - int64(SizeAndAlign(s).Size()/8))] ptr mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size()%8 == 0 && SizeAndAlign(s).Size() > 24 && SizeAndAlign(s).Size() <= 8*128 && SizeAndAlign(s).Align()%8 == 0 && !config.noDuffDevice) {
-			break
-		}
-		v.reset(OpMIPS64DUFFZERO)
-		v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/8))
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: (SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
-	// result: (LoweredZero [SizeAndAlign(s).Align()] 		ptr 		(ADDVconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !((SizeAndAlign(s).Size() > 8*128 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
-			break
-		}
-		v.reset(OpMIPS64LoweredZero)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMIPS64ADDVconst, ptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueMIPS64_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVHUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVHUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 x)
-	// cond:
-	// result: (MOVHUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVHUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 x)
-	// cond:
-	// result: (MOVWUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVWUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueMIPS64_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64 x)
-	// cond:
-	// result: (MOVBUreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpMIPS64MOVBUreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteBlockMIPS64(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockMIPS64EQ:
-		// match: (EQ (FPFlagTrue cmp) yes no)
-		// cond:
-		// result: (FPF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64FPFlagTrue {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64FPF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FPFlagFalse cmp) yes no)
-		// cond:
-		// result: (FPT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64FPFlagFalse {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64FPT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGTU {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGTconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGTUconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTUconst [1] x) yes no)
-		// cond:
-		// result: (NE x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGTUconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTU x (MOVVconst [0])) yes no)
-		// cond:
-		// result: (EQ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGTU {
-				break
-			}
-			x := v.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpMIPS64MOVVconst {
-				break
-			}
-			if v_1.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64EQ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGTconst [0] x) yes no)
-		// cond:
-		// result: (GEZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGTconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64GEZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (SGT x (MOVVconst [0])) yes no)
-		// cond:
-		// result: (LEZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGT {
-				break
-			}
-			x := v.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpMIPS64MOVVconst {
-				break
-			}
-			if v_1.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64LEZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ  (MOVVconst [0]) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ  (MOVVconst [c]) yes no)
-		// cond: c != 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPS64GEZ:
-		// match: (GEZ (MOVVconst [c]) yes no)
-		// cond: c >= 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c >= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GEZ (MOVVconst [c]) yes no)
-		// cond: c <  0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c < 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPS64GTZ:
-		// match: (GTZ (MOVVconst [c]) yes no)
-		// cond: c >  0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c > 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GTZ (MOVVconst [c]) yes no)
-		// cond: c <= 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c <= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockIf:
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE cond yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(cond)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockMIPS64LEZ:
-		// match: (LEZ (MOVVconst [c]) yes no)
-		// cond: c <= 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c <= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LEZ (MOVVconst [c]) yes no)
-		// cond: c >  0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c > 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPS64LTZ:
-		// match: (LTZ (MOVVconst [c]) yes no)
-		// cond: c <  0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c < 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LTZ (MOVVconst [c]) yes no)
-		// cond: c >= 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c >= 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockMIPS64NE:
-		// match: (NE (FPFlagTrue cmp) yes no)
-		// cond:
-		// result: (FPT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64FPFlagTrue {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64FPT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FPFlagFalse cmp) yes no)
-		// cond:
-		// result: (FPF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64FPFlagFalse {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64FPF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGTU {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGTconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64XORconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[0]
-			if cmp.Op != OpMIPS64SGTUconst {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTUconst [1] x) yes no)
-		// cond:
-		// result: (EQ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGTUconst {
-				break
-			}
-			if v.AuxInt != 1 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64EQ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTU x (MOVVconst [0])) yes no)
-		// cond:
-		// result: (NE x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGTU {
-				break
-			}
-			x := v.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpMIPS64MOVVconst {
-				break
-			}
-			if v_1.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64NE
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGTconst [0] x) yes no)
-		// cond:
-		// result: (LTZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGTconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			x := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64LTZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (SGT x (MOVVconst [0])) yes no)
-		// cond:
-		// result: (GTZ x yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64SGT {
-				break
-			}
-			x := v.Args[0]
-			v_1 := v.Args[1]
-			if v_1.Op != OpMIPS64MOVVconst {
-				break
-			}
-			if v_1.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockMIPS64GTZ
-			b.SetControl(x)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE  (MOVVconst [0]) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE  (MOVVconst [c]) yes no)
-		// cond: c != 0
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpMIPS64MOVVconst {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c != 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritePPC64.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritePPC64.go
deleted file mode 100644
index 4ee7c9f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritePPC64.go
+++ /dev/null
@@ -1,10851 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritePPC64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritePPC64.go:1
-// autogenerated from gen/PPC64.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuePPC64(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAdd16:
-		return rewriteValuePPC64_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValuePPC64_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValuePPC64_OpAdd32F(v, config)
-	case OpAdd64:
-		return rewriteValuePPC64_OpAdd64(v, config)
-	case OpAdd64F:
-		return rewriteValuePPC64_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValuePPC64_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValuePPC64_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValuePPC64_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValuePPC64_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValuePPC64_OpAnd32(v, config)
-	case OpAnd64:
-		return rewriteValuePPC64_OpAnd64(v, config)
-	case OpAnd8:
-		return rewriteValuePPC64_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValuePPC64_OpAndB(v, config)
-	case OpAvg64u:
-		return rewriteValuePPC64_OpAvg64u(v, config)
-	case OpClosureCall:
-		return rewriteValuePPC64_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValuePPC64_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValuePPC64_OpCom32(v, config)
-	case OpCom64:
-		return rewriteValuePPC64_OpCom64(v, config)
-	case OpCom8:
-		return rewriteValuePPC64_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValuePPC64_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValuePPC64_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValuePPC64_OpConst32F(v, config)
-	case OpConst64:
-		return rewriteValuePPC64_OpConst64(v, config)
-	case OpConst64F:
-		return rewriteValuePPC64_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValuePPC64_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValuePPC64_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValuePPC64_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValuePPC64_OpConvert(v, config)
-	case OpCvt32Fto32:
-		return rewriteValuePPC64_OpCvt32Fto32(v, config)
-	case OpCvt32Fto64:
-		return rewriteValuePPC64_OpCvt32Fto64(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValuePPC64_OpCvt32Fto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValuePPC64_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValuePPC64_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValuePPC64_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValuePPC64_OpCvt64Fto32F(v, config)
-	case OpCvt64Fto64:
-		return rewriteValuePPC64_OpCvt64Fto64(v, config)
-	case OpCvt64to32F:
-		return rewriteValuePPC64_OpCvt64to32F(v, config)
-	case OpCvt64to64F:
-		return rewriteValuePPC64_OpCvt64to64F(v, config)
-	case OpDeferCall:
-		return rewriteValuePPC64_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValuePPC64_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValuePPC64_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValuePPC64_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValuePPC64_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValuePPC64_OpDiv32u(v, config)
-	case OpDiv64:
-		return rewriteValuePPC64_OpDiv64(v, config)
-	case OpDiv64F:
-		return rewriteValuePPC64_OpDiv64F(v, config)
-	case OpDiv64u:
-		return rewriteValuePPC64_OpDiv64u(v, config)
-	case OpDiv8:
-		return rewriteValuePPC64_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValuePPC64_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValuePPC64_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValuePPC64_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValuePPC64_OpEq32F(v, config)
-	case OpEq64:
-		return rewriteValuePPC64_OpEq64(v, config)
-	case OpEq64F:
-		return rewriteValuePPC64_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValuePPC64_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValuePPC64_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValuePPC64_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValuePPC64_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValuePPC64_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValuePPC64_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValuePPC64_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValuePPC64_OpGeq32U(v, config)
-	case OpGeq64:
-		return rewriteValuePPC64_OpGeq64(v, config)
-	case OpGeq64F:
-		return rewriteValuePPC64_OpGeq64F(v, config)
-	case OpGeq64U:
-		return rewriteValuePPC64_OpGeq64U(v, config)
-	case OpGeq8:
-		return rewriteValuePPC64_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValuePPC64_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValuePPC64_OpGetClosurePtr(v, config)
-	case OpGoCall:
-		return rewriteValuePPC64_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValuePPC64_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValuePPC64_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValuePPC64_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValuePPC64_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValuePPC64_OpGreater32U(v, config)
-	case OpGreater64:
-		return rewriteValuePPC64_OpGreater64(v, config)
-	case OpGreater64F:
-		return rewriteValuePPC64_OpGreater64F(v, config)
-	case OpGreater64U:
-		return rewriteValuePPC64_OpGreater64U(v, config)
-	case OpGreater8:
-		return rewriteValuePPC64_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValuePPC64_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValuePPC64_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValuePPC64_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValuePPC64_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValuePPC64_OpHmul32u(v, config)
-	case OpHmul64:
-		return rewriteValuePPC64_OpHmul64(v, config)
-	case OpHmul64u:
-		return rewriteValuePPC64_OpHmul64u(v, config)
-	case OpHmul8:
-		return rewriteValuePPC64_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValuePPC64_OpHmul8u(v, config)
-	case OpInterCall:
-		return rewriteValuePPC64_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValuePPC64_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValuePPC64_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValuePPC64_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValuePPC64_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValuePPC64_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValuePPC64_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValuePPC64_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValuePPC64_OpLeq32U(v, config)
-	case OpLeq64:
-		return rewriteValuePPC64_OpLeq64(v, config)
-	case OpLeq64F:
-		return rewriteValuePPC64_OpLeq64F(v, config)
-	case OpLeq64U:
-		return rewriteValuePPC64_OpLeq64U(v, config)
-	case OpLeq8:
-		return rewriteValuePPC64_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValuePPC64_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValuePPC64_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValuePPC64_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValuePPC64_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValuePPC64_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValuePPC64_OpLess32U(v, config)
-	case OpLess64:
-		return rewriteValuePPC64_OpLess64(v, config)
-	case OpLess64F:
-		return rewriteValuePPC64_OpLess64F(v, config)
-	case OpLess64U:
-		return rewriteValuePPC64_OpLess64U(v, config)
-	case OpLess8:
-		return rewriteValuePPC64_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValuePPC64_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValuePPC64_OpLoad(v, config)
-	case OpLsh16x16:
-		return rewriteValuePPC64_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValuePPC64_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValuePPC64_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValuePPC64_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValuePPC64_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValuePPC64_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValuePPC64_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValuePPC64_OpLsh32x8(v, config)
-	case OpLsh64x16:
-		return rewriteValuePPC64_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValuePPC64_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValuePPC64_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValuePPC64_OpLsh64x8(v, config)
-	case OpLsh8x16:
-		return rewriteValuePPC64_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValuePPC64_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValuePPC64_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValuePPC64_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValuePPC64_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValuePPC64_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValuePPC64_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValuePPC64_OpMod32u(v, config)
-	case OpMod64:
-		return rewriteValuePPC64_OpMod64(v, config)
-	case OpMod64u:
-		return rewriteValuePPC64_OpMod64u(v, config)
-	case OpMod8:
-		return rewriteValuePPC64_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValuePPC64_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValuePPC64_OpMove(v, config)
-	case OpMul16:
-		return rewriteValuePPC64_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValuePPC64_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValuePPC64_OpMul32F(v, config)
-	case OpMul64:
-		return rewriteValuePPC64_OpMul64(v, config)
-	case OpMul64F:
-		return rewriteValuePPC64_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValuePPC64_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValuePPC64_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValuePPC64_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValuePPC64_OpNeg32F(v, config)
-	case OpNeg64:
-		return rewriteValuePPC64_OpNeg64(v, config)
-	case OpNeg64F:
-		return rewriteValuePPC64_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValuePPC64_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValuePPC64_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValuePPC64_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValuePPC64_OpNeq32F(v, config)
-	case OpNeq64:
-		return rewriteValuePPC64_OpNeq64(v, config)
-	case OpNeq64F:
-		return rewriteValuePPC64_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValuePPC64_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValuePPC64_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValuePPC64_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValuePPC64_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValuePPC64_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValuePPC64_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValuePPC64_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValuePPC64_OpOr32(v, config)
-	case OpOr64:
-		return rewriteValuePPC64_OpOr64(v, config)
-	case OpOr8:
-		return rewriteValuePPC64_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValuePPC64_OpOrB(v, config)
-	case OpPPC64ADD:
-		return rewriteValuePPC64_OpPPC64ADD(v, config)
-	case OpPPC64ADDconst:
-		return rewriteValuePPC64_OpPPC64ADDconst(v, config)
-	case OpPPC64AND:
-		return rewriteValuePPC64_OpPPC64AND(v, config)
-	case OpPPC64ANDconst:
-		return rewriteValuePPC64_OpPPC64ANDconst(v, config)
-	case OpPPC64CMP:
-		return rewriteValuePPC64_OpPPC64CMP(v, config)
-	case OpPPC64CMPU:
-		return rewriteValuePPC64_OpPPC64CMPU(v, config)
-	case OpPPC64CMPUconst:
-		return rewriteValuePPC64_OpPPC64CMPUconst(v, config)
-	case OpPPC64CMPW:
-		return rewriteValuePPC64_OpPPC64CMPW(v, config)
-	case OpPPC64CMPWU:
-		return rewriteValuePPC64_OpPPC64CMPWU(v, config)
-	case OpPPC64CMPWUconst:
-		return rewriteValuePPC64_OpPPC64CMPWUconst(v, config)
-	case OpPPC64CMPWconst:
-		return rewriteValuePPC64_OpPPC64CMPWconst(v, config)
-	case OpPPC64CMPconst:
-		return rewriteValuePPC64_OpPPC64CMPconst(v, config)
-	case OpPPC64Equal:
-		return rewriteValuePPC64_OpPPC64Equal(v, config)
-	case OpPPC64FMOVDload:
-		return rewriteValuePPC64_OpPPC64FMOVDload(v, config)
-	case OpPPC64FMOVDstore:
-		return rewriteValuePPC64_OpPPC64FMOVDstore(v, config)
-	case OpPPC64FMOVSload:
-		return rewriteValuePPC64_OpPPC64FMOVSload(v, config)
-	case OpPPC64FMOVSstore:
-		return rewriteValuePPC64_OpPPC64FMOVSstore(v, config)
-	case OpPPC64GreaterEqual:
-		return rewriteValuePPC64_OpPPC64GreaterEqual(v, config)
-	case OpPPC64GreaterThan:
-		return rewriteValuePPC64_OpPPC64GreaterThan(v, config)
-	case OpPPC64LessEqual:
-		return rewriteValuePPC64_OpPPC64LessEqual(v, config)
-	case OpPPC64LessThan:
-		return rewriteValuePPC64_OpPPC64LessThan(v, config)
-	case OpPPC64MOVBZload:
-		return rewriteValuePPC64_OpPPC64MOVBZload(v, config)
-	case OpPPC64MOVBZreg:
-		return rewriteValuePPC64_OpPPC64MOVBZreg(v, config)
-	case OpPPC64MOVBreg:
-		return rewriteValuePPC64_OpPPC64MOVBreg(v, config)
-	case OpPPC64MOVBstore:
-		return rewriteValuePPC64_OpPPC64MOVBstore(v, config)
-	case OpPPC64MOVBstorezero:
-		return rewriteValuePPC64_OpPPC64MOVBstorezero(v, config)
-	case OpPPC64MOVDload:
-		return rewriteValuePPC64_OpPPC64MOVDload(v, config)
-	case OpPPC64MOVDstore:
-		return rewriteValuePPC64_OpPPC64MOVDstore(v, config)
-	case OpPPC64MOVDstorezero:
-		return rewriteValuePPC64_OpPPC64MOVDstorezero(v, config)
-	case OpPPC64MOVHZload:
-		return rewriteValuePPC64_OpPPC64MOVHZload(v, config)
-	case OpPPC64MOVHZreg:
-		return rewriteValuePPC64_OpPPC64MOVHZreg(v, config)
-	case OpPPC64MOVHload:
-		return rewriteValuePPC64_OpPPC64MOVHload(v, config)
-	case OpPPC64MOVHreg:
-		return rewriteValuePPC64_OpPPC64MOVHreg(v, config)
-	case OpPPC64MOVHstore:
-		return rewriteValuePPC64_OpPPC64MOVHstore(v, config)
-	case OpPPC64MOVHstorezero:
-		return rewriteValuePPC64_OpPPC64MOVHstorezero(v, config)
-	case OpPPC64MOVWZload:
-		return rewriteValuePPC64_OpPPC64MOVWZload(v, config)
-	case OpPPC64MOVWZreg:
-		return rewriteValuePPC64_OpPPC64MOVWZreg(v, config)
-	case OpPPC64MOVWload:
-		return rewriteValuePPC64_OpPPC64MOVWload(v, config)
-	case OpPPC64MOVWreg:
-		return rewriteValuePPC64_OpPPC64MOVWreg(v, config)
-	case OpPPC64MOVWstore:
-		return rewriteValuePPC64_OpPPC64MOVWstore(v, config)
-	case OpPPC64MOVWstorezero:
-		return rewriteValuePPC64_OpPPC64MOVWstorezero(v, config)
-	case OpPPC64MaskIfNotCarry:
-		return rewriteValuePPC64_OpPPC64MaskIfNotCarry(v, config)
-	case OpPPC64NotEqual:
-		return rewriteValuePPC64_OpPPC64NotEqual(v, config)
-	case OpPPC64OR:
-		return rewriteValuePPC64_OpPPC64OR(v, config)
-	case OpPPC64ORN:
-		return rewriteValuePPC64_OpPPC64ORN(v, config)
-	case OpPPC64ORconst:
-		return rewriteValuePPC64_OpPPC64ORconst(v, config)
-	case OpPPC64SUB:
-		return rewriteValuePPC64_OpPPC64SUB(v, config)
-	case OpPPC64XOR:
-		return rewriteValuePPC64_OpPPC64XOR(v, config)
-	case OpPPC64XORconst:
-		return rewriteValuePPC64_OpPPC64XORconst(v, config)
-	case OpRsh16Ux16:
-		return rewriteValuePPC64_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValuePPC64_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValuePPC64_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValuePPC64_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValuePPC64_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValuePPC64_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValuePPC64_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValuePPC64_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValuePPC64_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValuePPC64_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValuePPC64_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValuePPC64_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValuePPC64_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValuePPC64_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValuePPC64_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValuePPC64_OpRsh32x8(v, config)
-	case OpRsh64Ux16:
-		return rewriteValuePPC64_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValuePPC64_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValuePPC64_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValuePPC64_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValuePPC64_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValuePPC64_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValuePPC64_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValuePPC64_OpRsh64x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValuePPC64_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValuePPC64_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValuePPC64_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValuePPC64_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValuePPC64_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValuePPC64_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValuePPC64_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValuePPC64_OpRsh8x8(v, config)
-	case OpSignExt16to32:
-		return rewriteValuePPC64_OpSignExt16to32(v, config)
-	case OpSignExt16to64:
-		return rewriteValuePPC64_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValuePPC64_OpSignExt32to64(v, config)
-	case OpSignExt8to16:
-		return rewriteValuePPC64_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValuePPC64_OpSignExt8to32(v, config)
-	case OpSignExt8to64:
-		return rewriteValuePPC64_OpSignExt8to64(v, config)
-	case OpSlicemask:
-		return rewriteValuePPC64_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValuePPC64_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValuePPC64_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValuePPC64_OpStore(v, config)
-	case OpSub16:
-		return rewriteValuePPC64_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValuePPC64_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValuePPC64_OpSub32F(v, config)
-	case OpSub64:
-		return rewriteValuePPC64_OpSub64(v, config)
-	case OpSub64F:
-		return rewriteValuePPC64_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValuePPC64_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValuePPC64_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValuePPC64_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValuePPC64_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValuePPC64_OpTrunc32to8(v, config)
-	case OpTrunc64to16:
-		return rewriteValuePPC64_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValuePPC64_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValuePPC64_OpTrunc64to8(v, config)
-	case OpXor16:
-		return rewriteValuePPC64_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValuePPC64_OpXor32(v, config)
-	case OpXor64:
-		return rewriteValuePPC64_OpXor64(v, config)
-	case OpXor8:
-		return rewriteValuePPC64_OpXor8(v, config)
-	case OpZero:
-		return rewriteValuePPC64_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValuePPC64_OpZeroExt16to32(v, config)
-	case OpZeroExt16to64:
-		return rewriteValuePPC64_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValuePPC64_OpZeroExt32to64(v, config)
-	case OpZeroExt8to16:
-		return rewriteValuePPC64_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValuePPC64_OpZeroExt8to32(v, config)
-	case OpZeroExt8to64:
-		return rewriteValuePPC64_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValuePPC64_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16  x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32  x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (FADDS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FADDS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64  x y)
-	// cond:
-	// result: (ADD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (FADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8   x y)
-	// cond:
-	// result: (ADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (MOVDaddr {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(OpPPC64MOVDaddr)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8  x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64AND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpAvg64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Avg64u <t> x y)
-	// cond:
-	// result: (ADD (ADD <t> (SRD <t> x (MOVDconst <t> [1])) (SRD <t> y (MOVDconst <t> [1]))) (ANDconst <t> (AND <t> x y) [1]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ADD)
-		v0 := b.NewValue0(v.Line, OpPPC64ADD, t)
-		v1 := b.NewValue0(v.Line, OpPPC64SRD, t)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVDconst, t)
-		v2.AuxInt = 1
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpPPC64SRD, t)
-		v3.AddArg(y)
-		v4 := b.NewValue0(v.Line, OpPPC64MOVDconst, t)
-		v4.AuxInt = 1
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v5 := b.NewValue0(v.Line, OpPPC64ANDconst, t)
-		v5.AuxInt = 1
-		v6 := b.NewValue0(v.Line, OpPPC64AND, t)
-		v6.AddArg(x)
-		v6.AddArg(y)
-		v5.AddArg(v6)
-		v.AddArg(v5)
-		return true
-	}
-}
-func rewriteValuePPC64_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpPPC64CALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (XORconst [-1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = -1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (XORconst [-1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = -1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 x)
-	// cond:
-	// result: (XORconst [-1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = -1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8  x)
-	// cond:
-	// result: (XORconst [-1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = -1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16  [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValuePPC64_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32  [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValuePPC64_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (FMOVSconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpPPC64FMOVSconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValuePPC64_OpConst64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64  [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValuePPC64_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (FMOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpPPC64FMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValuePPC64_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8   [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValuePPC64_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVDconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValuePPC64_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValuePPC64_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert <t> x mem)
-	// cond:
-	// result: (MOVDconvert <t> x mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpPPC64MOVDconvert)
-		v.Type = t
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (Xf2i64 (FCTIWZ x))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64Xf2i64)
-		v0 := b.NewValue0(v.Line, OpPPC64FCTIWZ, config.fe.TypeFloat64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt32Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64 x)
-	// cond:
-	// result: (Xf2i64 (FCTIDZ x))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64Xf2i64)
-		v0 := b.NewValue0(v.Line, OpPPC64FCTIDZ, config.fe.TypeFloat64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (FRSP (FCFID (Xi2f64 (SignExt32to64 x))))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FRSP)
-		v0 := b.NewValue0(v.Line, OpPPC64FCFID, config.fe.TypeFloat64())
-		v1 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
-		v2 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (FCFID (Xi2f64 (SignExt32to64 x)))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FCFID)
-		v0 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
-		v1 := b.NewValue0(v.Line, OpSignExt32to64, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (Xf2i64 (FCTIWZ x))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64Xf2i64)
-		v0 := b.NewValue0(v.Line, OpPPC64FCTIWZ, config.fe.TypeFloat64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (FRSP x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FRSP)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt64Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto64 x)
-	// cond:
-	// result: (Xf2i64 (FCTIDZ x))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64Xf2i64)
-		v0 := b.NewValue0(v.Line, OpPPC64FCTIDZ, config.fe.TypeFloat64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt64to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to32F x)
-	// cond:
-	// result: (FRSP (FCFID (Xi2f64 x)))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FRSP)
-		v0 := b.NewValue0(v.Line, OpPPC64FCFID, config.fe.TypeFloat64())
-		v1 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpCvt64to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to64F x)
-	// cond:
-	// result: (FCFID (Xi2f64 x))
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FCFID)
-		v0 := b.NewValue0(v.Line, OpPPC64Xi2f64, config.fe.TypeFloat64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpPPC64CALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16  x y)
-	// cond:
-	// result: (DIVW  (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVWU)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32  x y)
-	// cond:
-	// result: (DIVW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (FDIVS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FDIVS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (DIVWU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64  x y)
-	// cond:
-	// result: (DIVD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (FDIV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FDIV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64u x y)
-	// cond:
-	// result: (DIVDU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVDU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8   x y)
-	// cond:
-	// result: (DIVW  (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u  x y)
-	// cond:
-	// result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64DIVWU)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16 x y)
-	// cond: isSigned(x.Type) && isSigned(y.Type)
-	// result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(isSigned(x.Type) && isSigned(y.Type)) {
-			break
-		}
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Eq16 x y)
-	// cond:
-	// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32 x y)
-	// cond:
-	// result: (Equal (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (Equal (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64 x y)
-	// cond:
-	// result: (Equal (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (Equal (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8 x y)
-	// cond: isSigned(x.Type) && isSigned(y.Type)
-	// result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(isSigned(x.Type) && isSigned(y.Type)) {
-			break
-		}
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Eq8 x y)
-	// cond:
-	// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB x y)
-	// cond:
-	// result: (ANDconst [1] (EQV x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = 1
-		v0 := b.NewValue0(v.Line, OpPPC64EQV, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (Equal (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64Equal)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16 x y)
-	// cond:
-	// result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32 x y)
-	// cond:
-	// result: (GreaterEqual (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (FGreaterEqual (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (GreaterEqual (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64 x y)
-	// cond:
-	// result: (GreaterEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (FGreaterEqual (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FGreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U x y)
-	// cond:
-	// result: (GreaterEqual (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8 x y)
-	// cond:
-	// result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U x y)
-	// cond:
-	// result: (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpPPC64LoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpPPC64CALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16 x y)
-	// cond:
-	// result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32 x y)
-	// cond:
-	// result: (GreaterThan (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (FGreaterThan (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FGreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (GreaterThan (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64 x y)
-	// cond:
-	// result: (GreaterThan (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (FGreaterThan (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FGreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U x y)
-	// cond:
-	// result: (GreaterThan (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8 x y)
-	// cond:
-	// result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U x y)
-	// cond:
-	// result: (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64GreaterThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16 x y)
-	// cond:
-	// result: (SRAWconst (MULLW <config.fe.TypeInt32()> (SignExt16to32 x) (SignExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (SRWconst (MULLW <config.fe.TypeUInt32()> (ZeroExt16to32 x) (ZeroExt16to32 y)) [16])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32  x y)
-	// cond:
-	// result: (MULHW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULHW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u  x y)
-	// cond:
-	// result: (MULHWU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULHWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64  x y)
-	// cond:
-	// result: (MULHD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULHD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64u  x y)
-	// cond:
-	// result: (MULHDU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULHDU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8 x y)
-	// cond:
-	// result: (SRAWconst (MULLW <config.fe.TypeInt16()> (SignExt8to32 x) (SignExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt16())
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u x y)
-	// cond:
-	// result: (SRWconst (MULLW <config.fe.TypeUInt16()> (ZeroExt8to32 x) (ZeroExt8to32 y)) [8])
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeUInt16())
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpPPC64CALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (LessThan (CMPU idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil ptr)
-	// cond:
-	// result: (NotEqual (CMPconst [0] ptr))
-	for {
-		ptr := v.Args[0]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPconst, TypeFlags)
-		v0.AuxInt = 0
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (LessEqual (CMPU idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(idx)
-		v0.AddArg(len)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16 x y)
-	// cond:
-	// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32 x y)
-	// cond:
-	// result: (LessEqual (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (FLessEqual (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FLessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (LessEqual (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64 x y)
-	// cond:
-	// result: (LessEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (FLessEqual (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FLessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U x y)
-	// cond:
-	// result: (LessEqual (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8 x y)
-	// cond:
-	// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U x y)
-	// cond:
-	// result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16 x y)
-	// cond:
-	// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32 x y)
-	// cond:
-	// result: (LessThan (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (FLessThan (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FLessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (LessThan (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64 x y)
-	// cond:
-	// result: (LessThan (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (FLessThan (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FLessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U x y)
-	// cond:
-	// result: (LessThan (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8 x y)
-	// cond:
-	// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U x y)
-	// cond:
-	// result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64LessThan)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t))
-	// result: (MOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitInt(t) && isSigned(t)
-	// result: (MOVWload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVWload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitInt(t) && !isSigned(t)
-	// result: (MOVWZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVWZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is16BitInt(t) && isSigned(t)
-	// result: (MOVHload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVHload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is16BitInt(t) && !isSigned(t)
-	// result: (MOVHZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVHZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsBoolean()
-	// result: (MOVBZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean()) {
-			break
-		}
-		v.reset(OpPPC64MOVBZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is8BitInt(t) && isSigned(t)
-	// result: (MOVBreg (MOVBZload ptr mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVBreg)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is8BitInt(t) && !isSigned(t)
-	// result: (MOVBZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is8BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpPPC64MOVBZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (FMOVSload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpPPC64FMOVSload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (FMOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 x y)
-	// cond:
-	// result: (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -16
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32  x (Const64 [c]))
-	// cond: uint32(c) < 16
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x32  x (MOVDconst [c]))
-	// cond: uint32(c) < 16
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x32 x y)
-	// cond:
-	// result: (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -16
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64  x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64  _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh16x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 16
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64 x y)
-	// cond:
-	// result: (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -16
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8 x y)
-	// cond:
-	// result: (SLW  x                 (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -16
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 x y)
-	// cond:
-	// result: (SLW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32  x (Const64 [c]))
-	// cond: uint32(c) < 32
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x32  x (MOVDconst [c]))
-	// cond: uint32(c) < 32
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x32 x y)
-	// cond:
-	// result: (SLW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64  x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64  _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh32x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 32
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64 x y)
-	// cond:
-	// result: (SLW  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8 x y)
-	// cond:
-	// result: (SLW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16 x y)
-	// cond:
-	// result: (SLD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32  x (Const64 [c]))
-	// cond: uint32(c) < 64
-	// result: (SLDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SLDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh64x32  x (MOVDconst [c]))
-	// cond: uint32(c) < 64
-	// result: (SLDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SLDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh64x32 x y)
-	// cond:
-	// result: (SLD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64  x (Const64 [c]))
-	// cond: uint64(c) < 64
-	// result: (SLDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SLDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh64x64  _ (Const64 [c]))
-	// cond: uint64(c) >= 64
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh64x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 64
-	// result: (SLDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SLDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh64x64 x y)
-	// cond:
-	// result: (SLD  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8 x y)
-	// cond:
-	// result: (SLD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 x y)
-	// cond:
-	// result: (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -8
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32   x (Const64 [c]))
-	// cond: uint32(c) < 8
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x32   x (MOVDconst [c]))
-	// cond: uint32(c) < 8
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x32 x y)
-	// cond:
-	// result: (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -8
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64   x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64   _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh8x64   x (MOVDconst [c]))
-	// cond: uint64(c) < 8
-	// result: (SLWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64 x y)
-	// cond:
-	// result: (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -8
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8 x y)
-	// cond:
-	// result: (SLW  x                (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SLW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -8
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16 x y)
-	// cond:
-	// result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32u)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32 x y)
-	// cond:
-	// result: (SUB x (MULLW y (DIVW x y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64DIVW, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (SUB x (MULLW y (DIVWU x y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64MULLW, config.fe.TypeInt32())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64DIVWU, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64 x y)
-	// cond:
-	// result: (SUB x (MULLD y (DIVD x y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64MULLD, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64DIVD, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64u x y)
-	// cond:
-	// result: (SUB x (MULLD y (DIVDU x y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64MULLD, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64DIVDU, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v1.AddArg(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8 x y)
-	// cond:
-	// result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u x y)
-	// cond:
-	// result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpMod32u)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBZload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpPPC64MOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore dst (MOVHZload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstore [1] dst (MOVBZload [1] src mem) 		(MOVBstore dst (MOVBZload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = 1
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v0.AuxInt = 1
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore dst (MOVWload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVWload, config.fe.TypeInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [2] dst (MOVHZload [2] src mem) 		(MOVHstore dst (MOVHZload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstore [3] dst (MOVBZload [3] src mem) 		(MOVBstore [2] dst (MOVBZload [2] src mem) 			(MOVBstore [1] dst (MOVBZload [1] src mem) 				(MOVBstore dst (MOVBZload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = 3
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v0.AuxInt = 3
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v2.AuxInt = 2
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-		v3.AuxInt = 1
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v4.AuxInt = 1
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVDstore dst (MOVDload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVDload, config.fe.TypeInt64())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstore [4] dst (MOVWZload [4] src mem) 		(MOVWstore dst (MOVWZload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVWstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVWZload, config.fe.TypeUInt32())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVWZload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstore [6] dst (MOVHZload [6] src mem) 		(MOVHstore [4] dst (MOVHZload [4] src mem) 			(MOVHstore [2] dst (MOVHZload [2] src mem) 				(MOVHstore dst (MOVHZload src mem) mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstore)
-		v.AuxInt = 6
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v0.AuxInt = 6
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
-		v3.AuxInt = 2
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v4.AuxInt = 2
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpPPC64MOVHstore, TypeMem)
-		v5.AddArg(dst)
-		v6 := b.NewValue0(v.Line, OpPPC64MOVHZload, config.fe.TypeUInt16())
-		v6.AddArg(src)
-		v6.AddArg(mem)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBZload [2] src mem) 		(MOVBstore [1] dst (MOVBZload [1] src mem) 			(MOVBstore dst (MOVBZload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v2.AuxInt = 1
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpPPC64MOVBstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpPPC64MOVBZload, config.fe.TypeUInt8())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
-	// result: (LoweredMove [SizeAndAlign(s).Align()] 		dst 		src 		(ADDconst <src.Type> src [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
-			break
-		}
-		v.reset(OpPPC64LoweredMove)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpPPC64ADDconst, src.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(src)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16  x y)
-	// cond:
-	// result: (MULLW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32  x y)
-	// cond:
-	// result: (MULLW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (FMULS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FMULS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64  x y)
-	// cond:
-	// result: (MULLD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULLD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (FMUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8   x y)
-	// cond:
-	// result: (MULLW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64MULLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16  x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32  x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (FNEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64  x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (FNEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8   x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64NEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16 x y)
-	// cond: isSigned(x.Type) && isSigned(y.Type)
-	// result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(isSigned(x.Type) && isSigned(y.Type)) {
-			break
-		}
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Neq16 x y)
-	// cond:
-	// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32 x y)
-	// cond:
-	// result: (NotEqual (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (NotEqual (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64 x y)
-	// cond:
-	// result: (NotEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (NotEqual (FCMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64FCMPU, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8 x y)
-	// cond: isSigned(x.Type) && isSigned(y.Type)
-	// result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		if !(isSigned(x.Type) && isSigned(y.Type)) {
-			break
-		}
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Neq8 x y)
-	// cond:
-	// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPW, TypeFlags)
-		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (NotEqual (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64NotEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMP, TypeFlags)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpPPC64LoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(OpPPC64ADD)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVDconst, config.Frontend().TypeInt64())
-		v0.AuxInt = off
-		v.AddArg(v0)
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValuePPC64_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8  x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64OR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpPPC64ADD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADD (MOVDconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ADDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64ADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (MOVDconst [c]))
-	// cond: is32Bit(c)
-	// result: (ADDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64ADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64ADDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDconst [c] (ADDconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (ADDconst [c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpPPC64ADDconst)
-		v.AuxInt = c + d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVDaddr [d] {sym} x))
-	// cond:
-	// result: (MOVDaddr [c+d] {sym} x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		d := v_0.AuxInt
-		sym := v_0.Aux
-		x := v_0.Args[0]
-		v.reset(OpPPC64MOVDaddr)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64AND(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AND x (XORconst [-1] y))
-	// cond:
-	// result: (ANDN x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64XORconst {
-			break
-		}
-		if v_1.AuxInt != -1 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpPPC64ANDN)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (AND (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c&d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = c & d
-		return true
-	}
-	// match: (AND x (MOVDconst [c]))
-	// cond: isU16Bit(c)
-	// result: (ANDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [c]) x)
-	// cond: isU16Bit(c)
-	// result: (ANDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isU16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
-	// cond:
-	// result: (ANDconst [c&0xFF] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if x.Op != OpPPC64MOVBZload {
-			break
-		}
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c & 0xFF
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x:(MOVBZload _ _) (MOVDconst [c]))
-	// cond:
-	// result: (ANDconst [c&0xFF] x)
-	for {
-		x := v.Args[0]
-		if x.Op != OpPPC64MOVBZload {
-			break
-		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c & 0xFF
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64ANDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDconst [c] (ANDconst [d] x))
-	// cond:
-	// result: (ANDconst [c&d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ANDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [-1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [0] _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDconst [c] y:(MOVBZreg _))
-	// cond: c&0xFF == 0xFF
-	// result: y
-	for {
-		c := v.AuxInt
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBZreg {
-			break
-		}
-		if !(c&0xFF == 0xFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (ANDconst [c] y:(MOVHZreg _))
-	// cond: c&0xFFFF == 0xFFFF
-	// result: y
-	for {
-		c := v.AuxInt
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHZreg {
-			break
-		}
-		if !(c&0xFFFF == 0xFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (ANDconst [c] y:(MOVWZreg _))
-	// cond: c&0xFFFFFFFF == 0xFFFFFFFF
-	// result: y
-	for {
-		c := v.AuxInt
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVWZreg {
-			break
-		}
-		if !(c&0xFFFFFFFF == 0xFFFFFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (ANDconst [c] (MOVBZreg x))
-	// cond:
-	// result: (ANDconst [c&0xFF] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVBZreg {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c & 0xFF
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVHZreg x))
-	// cond:
-	// result: (ANDconst [c&0xFFFF] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVHZreg {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c & 0xFFFF
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVWZreg x))
-	// cond:
-	// result: (ANDconst [c&0xFFFFFFFF] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVWZreg {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64ANDconst)
-		v.AuxInt = c & 0xFFFFFFFF
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMP(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMP x (MOVDconst [c]))
-	// cond: is16Bit(c)
-	// result: (CMPconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64CMPconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMP (MOVDconst [c]) y)
-	// cond: is16Bit(c)
-	// result: (InvertFlags (CMPconst y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v.Args[1]
-		if !(is16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPU x (MOVDconst [c]))
-	// cond: isU16Bit(c)
-	// result: (CMPUconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64CMPUconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPU (MOVDconst [c]) y)
-	// cond: isU16Bit(c)
-	// result: (InvertFlags (CMPUconst y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v.Args[1]
-		if !(isU16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPUconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPUconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPUconst (MOVDconst [x]) [y])
-	// cond: int64(x)==int64(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) == int64(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagEQ)
-		return true
-	}
-	// match: (CMPUconst (MOVDconst [x]) [y])
-	// cond: uint64(x)<uint64(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint64(x) < uint64(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagLT)
-		return true
-	}
-	// match: (CMPUconst (MOVDconst [x]) [y])
-	// cond: uint64(x)>uint64(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint64(x) > uint64(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagGT)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPW x (MOVWreg y))
-	// cond:
-	// result: (CMPW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVWreg {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpPPC64CMPW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPW (MOVWreg x) y)
-	// cond:
-	// result: (CMPW x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVWreg {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64CMPW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPW x (MOVDconst [c]))
-	// cond: is16Bit(c)
-	// result: (CMPWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64CMPWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPW (MOVDconst [c]) y)
-	// cond: is16Bit(c)
-	// result: (InvertFlags (CMPWconst y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v.Args[1]
-		if !(is16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPWU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWU x (MOVWZreg y))
-	// cond:
-	// result: (CMPWU x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVWZreg {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpPPC64CMPWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPWU (MOVWZreg x) y)
-	// cond:
-	// result: (CMPWU x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVWZreg {
-			break
-		}
-		x := v_0.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64CMPWU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (CMPWU x (MOVDconst [c]))
-	// cond: isU16Bit(c)
-	// result: (CMPWUconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64CMPWUconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPWU (MOVDconst [c]) y)
-	// cond: isU16Bit(c)
-	// result: (InvertFlags (CMPWUconst y [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		y := v.Args[1]
-		if !(isU16Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64InvertFlags)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPWUconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(y)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWUconst (MOVDconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagEQ)
-		return true
-	}
-	// match: (CMPWUconst (MOVDconst [x]) [y])
-	// cond: uint32(x)<uint32(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagLT)
-		return true
-	}
-	// match: (CMPWUconst (MOVDconst [x]) [y])
-	// cond: uint32(x)>uint32(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagGT)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagEQ)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)<int32(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagLT)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)>int32(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagGT)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64CMPconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPconst (MOVDconst [x]) [y])
-	// cond: int64(x)==int64(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) == int64(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagEQ)
-		return true
-	}
-	// match: (CMPconst (MOVDconst [x]) [y])
-	// cond: int64(x)<int64(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) < int64(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagLT)
-		return true
-	}
-	// match: (CMPconst (MOVDconst [x]) [y])
-	// cond: int64(x)>int64(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int64(x) > int64(y)) {
-			break
-		}
-		v.reset(OpPPC64FlagGT)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64Equal(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Equal (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagEQ {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Equal (FlagLT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagLT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (FlagGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagGT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Equal (InvertFlags x))
-	// cond:
-	// result: (Equal x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64Equal)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64FMOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (FMOVDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is16Bit(off1+off2)
-	// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64FMOVSload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (FMOVSload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVSload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is16Bit(off1+off2)
-	// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64FMOVSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterEqual (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagEQ {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (FlagLT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagLT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterEqual (FlagGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagGT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterEqual (InvertFlags x))
-	// cond:
-	// result: (LessEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64LessEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64GreaterThan(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GreaterThan (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagEQ {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagLT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagLT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (GreaterThan (FlagGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagGT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (GreaterThan (InvertFlags x))
-	// cond:
-	// result: (LessThan x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64LessThan)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64LessEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessEqual (FlagEQ))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagEQ {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagLT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagLT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessEqual (FlagGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagGT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessEqual (InvertFlags x))
-	// cond:
-	// result: (GreaterEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64GreaterEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64LessThan(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (LessThan (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagEQ {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (FlagLT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagLT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (LessThan (FlagGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagGT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (LessThan (InvertFlags x))
-	// cond:
-	// result: (GreaterThan x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64GreaterThan)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVBZload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVBZload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVBZload [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVBZload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBZreg y:(ANDconst [c] _))
-	// cond: uint64(c) <= 0xFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64ANDconst {
-			break
-		}
-		c := y.AuxInt
-		if !(uint64(c) <= 0xFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVBZreg y:(MOVBZreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVBZreg (MOVBreg x))
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVBreg {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64MOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVBZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpPPC64MOVBZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = int64(uint8(c))
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVBreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBreg y:(ANDconst [c] _))
-	// cond: uint64(c) <= 0x7F
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64ANDconst {
-			break
-		}
-		c := y.AuxInt
-		if !(uint64(c) <= 0x7F) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVBreg y:(MOVBreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVBreg (MOVBZreg x))
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVBZreg {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVBstore [off1+off2] {sym} x val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: c == 0
-	// result: (MOVBstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(c == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVBreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVBZreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVBstorezero [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVDload [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVDstore [off1+off2] {sym} x val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: c == 0
-	// result: (MOVDstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(c == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVDstorezero [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVHZload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHZload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVHZload [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHZload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHZreg y:(ANDconst [c] _))
-	// cond: uint64(c) <= 0xFFFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64ANDconst {
-			break
-		}
-		c := y.AuxInt
-		if !(uint64(c) <= 0xFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVHZreg y:(MOVHZreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVHZreg y:(MOVBZreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVHZreg y:(MOVHreg x))
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHreg {
-			break
-		}
-		x := y.Args[0]
-		v.reset(OpPPC64MOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg x:(MOVHZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpPPC64MOVHZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = int64(uint16(c))
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVHload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVHload [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVHreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHreg y:(ANDconst [c] _))
-	// cond: uint64(c) <= 0x7FFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64ANDconst {
-			break
-		}
-		c := y.AuxInt
-		if !(uint64(c) <= 0x7FFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVHreg y:(MOVHreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVHreg y:(MOVBreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVHreg y:(MOVHZreg x))
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHZreg {
-			break
-		}
-		x := y.Args[0]
-		v.reset(OpPPC64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpPPC64MOVHload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVHstore [off1+off2] {sym} x val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: c == 0
-	// result: (MOVHstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(c == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVHZreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVHstorezero [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVHstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVWZload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWZload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVWZload [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWZload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWZreg y:(ANDconst [c] _))
-	// cond: uint64(c) <= 0xFFFFFFFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64ANDconst {
-			break
-		}
-		c := y.AuxInt
-		if !(uint64(c) <= 0xFFFFFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWZreg y:(AND (MOVDconst [c]) _))
-	// cond: uint64(c) <= 0xFFFFFFFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64AND {
-			break
-		}
-		y_0 := y.Args[0]
-		if y_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := y_0.AuxInt
-		if !(uint64(c) <= 0xFFFFFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWZreg y:(MOVWZreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVWZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWZreg y:(MOVHZreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWZreg y:(MOVBZreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWZreg y:(MOVWreg x))
-	// cond:
-	// result: (MOVWZreg x)
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVWreg {
-			break
-		}
-		x := y.Args[0]
-		v.reset(OpPPC64MOVWZreg)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVWload [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVWreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWreg y:(ANDconst [c] _))
-	// cond: uint64(c) <= 0xFFFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64ANDconst {
-			break
-		}
-		c := y.AuxInt
-		if !(uint64(c) <= 0xFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWreg y:(AND (MOVDconst [c]) _))
-	// cond: uint64(c) <= 0x7FFFFFFF
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64AND {
-			break
-		}
-		y_0 := y.Args[0]
-		if y_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := y_0.AuxInt
-		if !(uint64(c) <= 0x7FFFFFFF) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWreg y:(MOVWreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVWreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWreg y:(MOVHreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVHreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWreg y:(MOVBreg _))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVBreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVWreg y:(MOVWZreg x))
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		y := v.Args[0]
-		if y.Op != OpPPC64MOVWZreg {
-			break
-		}
-		x := y.Args[0]
-		v.reset(OpPPC64MOVWreg)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVWstore [off1+off2] {sym} x val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: c == 0
-	// result: (MOVWstorezero [off] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(c == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVWstorezero)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVWZreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
-	// cond: is16Bit(off1+off2)
-	// result: (MOVWstorezero [off1+off2] {sym} x mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is16Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem)
-	// cond: canMergeSym(sym1,sym2)
-	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpPPC64MOVWstorezero)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64MaskIfNotCarry(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _)))
-	// cond: c < 0 && d > 0 && c + d < 0
-	// result: (MOVDconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ADDconstForCarry {
-			break
-		}
-		c := v_0.AuxInt
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpPPC64ANDconst {
-			break
-		}
-		d := v_0_0.AuxInt
-		if !(c < 0 && d > 0 && c+d < 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64NotEqual(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NotEqual (FlagEQ))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagEQ {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (NotEqual (FlagLT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagLT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (FlagGT))
-	// cond:
-	// result: (MOVDconst [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64FlagGT {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (NotEqual (InvertFlags x))
-	// cond:
-	// result: (NotEqual x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64InvertFlags {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpPPC64NotEqual)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64OR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OR (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c|d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = c | d
-		return true
-	}
-	// match: (OR x (MOVDconst [c]))
-	// cond: isU32Bit(c)
-	// result: (ORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64ORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR (MOVDconst [c]) x)
-	// cond: isU32Bit(c)
-	// result: (ORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64ORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64ORN(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORN x (MOVDconst [-1]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		if v_1.AuxInt != -1 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64ORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORconst [c] (ORconst [d] x))
-	// cond:
-	// result: (ORconst [c|d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64ORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpPPC64ORconst)
-		v.AuxInt = c | d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORconst [-1] _)
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64SUB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUB x (MOVDconst [c]))
-	// cond: is32Bit(-c)
-	// result: (ADDconst [-c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(-c)) {
-			break
-		}
-		v.reset(OpPPC64ADDconst)
-		v.AuxInt = -c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64XOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XOR (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c^d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	// match: (XOR x (MOVDconst [c]))
-	// cond: isU32Bit(c)
-	// result: (XORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR (MOVDconst [c]) x)
-	// cond: isU32Bit(c)
-	// result: (XORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpPPC64XORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORconst [c] (XORconst [d] x))
-	// cond:
-	// result: (XORconst [c^d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpPPC64XORconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = c ^ d
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 x y)
-	// cond:
-	// result: (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 x (Const64 [c]))
-	// cond: uint32(c) < 16
-	// result: (SRWconst (ZeroExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux32 x (MOVDconst [c]))
-	// cond: uint32(c) < 16
-	// result: (SRWconst (ZeroExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux32 x y)
-	// cond:
-	// result: (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SRWconst (ZeroExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh16Ux64 x (MOVDconst [c]))
-	// cond: uint64(c) < 16
-	// result: (SRWconst (ZeroExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux64 x y)
-	// cond:
-	// result: (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8 x y)
-	// cond:
-	// result: (SRW  (ZeroExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 x y)
-	// cond:
-	// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32  x (Const64 [c]))
-	// cond: uint32(c) < 16
-	// result: (SRAWconst (SignExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x32  x (MOVDconst [c]))
-	// cond: uint32(c) < 16
-	// result: (SRAWconst (SignExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x32 x y)
-	// cond:
-	// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64  x (Const64 [c]))
-	// cond: uint64(c) < 16
-	// result: (SRAWconst (SignExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (SRAWconst (SignExt16to32 x) [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = 63
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 16
-	// result: (SRAWconst (SignExt16to32 x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 16) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x y)
-	// cond:
-	// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8 x y)
-	// cond:
-	// result: (SRAW (SignExt16to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -16
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 x y)
-	// cond:
-	// result: (SRW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 x (Const64 [c]))
-	// cond: uint32(c) < 32
-	// result: (SRWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux32 x (MOVDconst [c]))
-	// cond: uint32(c) < 32
-	// result: (SRWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux32 x y)
-	// cond:
-	// result: (SRW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SRWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh32Ux64 x (MOVDconst [c]))
-	// cond: uint64(c) < 32
-	// result: (SRWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux64 x y)
-	// cond:
-	// result: (SRW  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8 x y)
-	// cond:
-	// result: (SRW x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 x y)
-	// cond:
-	// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32  x (Const64 [c]))
-	// cond: uint32(c) < 32
-	// result: (SRAWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x32  x (MOVDconst [c]))
-	// cond: uint32(c) < 32
-	// result: (SRAWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x32 x y)
-	// cond:
-	// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64  x (Const64 [c]))
-	// cond: uint64(c) < 32
-	// result: (SRAWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64 x (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (SRAWconst x [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 32
-	// result: (SRAWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 32) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64 x y)
-	// cond:
-	// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8 x y)
-	// cond:
-	// result: (SRAW x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -32
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 x y)
-	// cond:
-	// result: (SRD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 x (Const64 [c]))
-	// cond: uint32(c) < 64
-	// result: (SRDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64Ux32 x (MOVDconst [c]))
-	// cond: uint32(c) < 64
-	// result: (SRDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64Ux32 x y)
-	// cond:
-	// result: (SRD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 x (Const64 [c]))
-	// cond: uint64(c) < 64
-	// result: (SRDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 64
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh64Ux64 x (MOVDconst [c]))
-	// cond: uint64(c) < 64
-	// result: (SRDconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64Ux64 x y)
-	// cond:
-	// result: (SRD  x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8 x y)
-	// cond:
-	// result: (SRD x  (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16 x y)
-	// cond:
-	// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32  x (Const64 [c]))
-	// cond: uint32(c) < 64
-	// result: (SRADconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRADconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x32  x (MOVDconst [c]))
-	// cond: uint32(c) < 64
-	// result: (SRADconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRADconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x32 x y)
-	// cond:
-	// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64  x (Const64 [c]))
-	// cond: uint64(c) < 64
-	// result: (SRADconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRADconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x64 x (Const64 [c]))
-	// cond: uint64(c) >= 64
-	// result: (SRADconst x [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpPPC64SRADconst)
-		v.AuxInt = 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x64  x (MOVDconst [c]))
-	// cond: uint64(c) < 64
-	// result: (SRADconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 64) {
-			break
-		}
-		v.reset(OpPPC64SRADconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x64 x y)
-	// cond:
-	// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8 x y)
-	// cond:
-	// result: (SRAD x (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAD)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v2 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v2.AuxInt = -64
-		v3 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 x y)
-	// cond:
-	// result: (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32  x (Const64 [c]))
-	// cond: uint32(c) < 8
-	// result: (SRWconst (ZeroExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux32  x (MOVDconst [c]))
-	// cond: uint32(c) < 8
-	// result: (SRWconst (ZeroExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux32 x y)
-	// cond:
-	// result: (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64  x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SRWconst (ZeroExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux64  _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (MOVDconst [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpPPC64MOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh8Ux64  x (MOVDconst [c]))
-	// cond: uint64(c) < 8
-	// result: (SRWconst (ZeroExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux64 x y)
-	// cond:
-	// result: (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8 x y)
-	// cond:
-	// result: (SRW  (ZeroExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRW)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 x y)
-	// cond:
-	// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v4 := b.NewValue0(v.Line, OpZeroExt16to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32   x (Const64 [c]))
-	// cond: uint32(c) < 8
-	// result: (SRAWconst (SignExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x32   x (MOVDconst [c]))
-	// cond: uint32(c) < 8
-	// result: (SRAWconst (SignExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint32(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x32 x y)
-	// cond:
-	// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v4 := b.NewValue0(v.Line, OpZeroExt32to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64   x (Const64 [c]))
-	// cond: uint64(c) < 8
-	// result: (SRAWconst (SignExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64  x (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (SRAWconst (SignExt8to32  x) [63])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = 63
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64   x (MOVDconst [c]))
-	// cond: uint64(c) < 8
-	// result: (SRAWconst (SignExt8to32  x) [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpPPC64MOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) < 8) {
-			break
-		}
-		v.reset(OpPPC64SRAWconst)
-		v.AuxInt = c
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64 x y)
-	// cond:
-	// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8 x y)
-	// cond:
-	// result: (SRAW (SignExt8to32 x) (ORN y <config.fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SRAW)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpPPC64ORN, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpPPC64MaskIfNotCarry, config.fe.TypeInt64())
-		v3 := b.NewValue0(v.Line, OpPPC64ADDconstForCarry, TypeFlags)
-		v3.AuxInt = -8
-		v4 := b.NewValue0(v.Line, OpZeroExt8to64, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 x)
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVWreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (XORconst [-1] (SRADconst <t> (ADDconst <t> x [-1]) [63]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpPPC64XORconst)
-		v.AuxInt = -1
-		v0 := b.NewValue0(v.Line, OpPPC64SRADconst, t)
-		v0.AuxInt = 63
-		v1 := b.NewValue0(v.Line, OpPPC64ADDconst, t)
-		v1.AuxInt = -1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (FSQRT x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64FSQRT)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpPPC64CALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValuePPC64_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (FMOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (FMOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpPPC64FMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (FMOVSstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpPPC64FMOVSstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond: (is64BitInt(val.Type) || isPtr(val.Type))
-	// result: (MOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitInt(val.Type) || isPtr(val.Type)) {
-			break
-		}
-		v.reset(OpPPC64MOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitInt(val.Type)
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitInt(val.Type)) {
-			break
-		}
-		v.reset(OpPPC64MOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVHstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVHstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpPPC64MOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16  x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32  x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (FSUBS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FSUBS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64  x y)
-	// cond:
-	// result: (SUB  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (FSUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64FSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8   x y)
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUB  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64SUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 x)
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVWreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8  x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpPPC64XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValuePPC64_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstorezero destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstorezero destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstorezero)
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVBstorezero [1] destptr 		(MOVBstorezero [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AuxInt = 1
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstorezero destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVWstorezero)
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstorezero [2] destptr 		(MOVHstorezero [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstorezero)
-		v.AuxInt = 2
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVBstorezero [3] destptr 		(MOVBstorezero [2] destptr 			(MOVBstorezero [1] destptr 				(MOVBstorezero [0] destptr mem))))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AuxInt = 3
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
-		v0.AuxInt = 2
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
-		v1.AuxInt = 1
-		v1.AddArg(destptr)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
-		v2.AuxInt = 0
-		v2.AddArg(destptr)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVDstorezero [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0
-	// result: (MOVWstorezero [4] destptr 		(MOVWstorezero [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%4 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVWstorezero)
-		v.AuxInt = 4
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVWstorezero, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0
-	// result: (MOVHstorezero [6] destptr 		(MOVHstorezero [4] destptr 			(MOVHstorezero [2] destptr 				(MOVHstorezero [0] destptr mem))))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8 && SizeAndAlign(s).Align()%2 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVHstorezero)
-		v.AuxInt = 6
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
-		v0.AuxInt = 4
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
-		v1.AuxInt = 2
-		v1.AddArg(destptr)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVHstorezero, TypeMem)
-		v2.AuxInt = 0
-		v2.AddArg(destptr)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstorezero [2] destptr 		(MOVBstorezero [1] destptr 			(MOVBstorezero [0] destptr mem)))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpPPC64MOVBstorezero)
-		v.AuxInt = 2
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
-		v0.AuxInt = 1
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVBstorezero, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(destptr)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVDstorezero [8] destptr                 (MOVDstorezero [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 16 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = 8
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVDstorezero [16] destptr 		(MOVDstorezero [8] destptr 			(MOVDstorezero [0] destptr mem)))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 24 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = 16
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
-		v0.AuxInt = 8
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
-		v1.AuxInt = 0
-		v1.AddArg(destptr)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0
-	// result: (MOVDstorezero [24] destptr 		(MOVDstorezero [16] destptr 			(MOVDstorezero [8] destptr 				(MOVDstorezero [0] destptr mem))))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 32 && SizeAndAlign(s).Align()%8 == 0) {
-			break
-		}
-		v.reset(OpPPC64MOVDstorezero)
-		v.AuxInt = 24
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
-		v0.AuxInt = 16
-		v0.AddArg(destptr)
-		v1 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(destptr)
-		v2 := b.NewValue0(v.Line, OpPPC64MOVDstorezero, TypeMem)
-		v2.AuxInt = 0
-		v2.AddArg(destptr)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] ptr mem)
-	// cond: (SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0
-	// result: (LoweredZero [SizeAndAlign(s).Align()] 		ptr 		(ADDconst <ptr.Type> ptr [SizeAndAlign(s).Size()-moveSize(SizeAndAlign(s).Align(), config)]) 		mem)
-	for {
-		s := v.AuxInt
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !((SizeAndAlign(s).Size() > 512 || config.noDuffDevice) || SizeAndAlign(s).Align()%8 != 0) {
-			break
-		}
-		v.reset(OpPPC64LoweredZero)
-		v.AuxInt = SizeAndAlign(s).Align()
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpPPC64ADDconst, ptr.Type)
-		v0.AuxInt = SizeAndAlign(s).Size() - moveSize(SizeAndAlign(s).Align(), config)
-		v0.AddArg(ptr)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuePPC64_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 x)
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 x)
-	// cond:
-	// result: (MOVWZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVWZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16  x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32  x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuePPC64_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64  x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpPPC64MOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteBlockPPC64(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockPPC64EQ:
-		// match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
-		// cond:
-		// result: (EQ (ANDCCconst [c] x) yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64ANDconst {
-				break
-			}
-			c := v_0.AuxInt
-			x := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64EQ
-			v0 := b.NewValue0(v.Line, OpPPC64ANDCCconst, TypeFlags)
-			v0.AuxInt = c
-			v0.AddArg(x)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
-		// cond:
-		// result: (EQ (ANDCCconst [c] x) yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64ANDconst {
-				break
-			}
-			c := v_0.AuxInt
-			x := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64EQ
-			v0 := b.NewValue0(v.Line, OpPPC64ANDCCconst, TypeFlags)
-			v0.AuxInt = c
-			v0.AddArg(x)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagLT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (InvertFlags cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64EQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockPPC64GE:
-		// match: (GE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagLT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64LE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockPPC64GT:
-		// match: (GT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64LT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockIf:
-		// match: (If (Equal cc) yes no)
-		// cond:
-		// result: (EQ cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64Equal {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64EQ
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (NotEqual cc) yes no)
-		// cond:
-		// result: (NE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64NotEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64NE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessThan cc) yes no)
-		// cond:
-		// result: (LT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64LessThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64LT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (LessEqual cc) yes no)
-		// cond:
-		// result: (LE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64LessEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64LE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterThan cc) yes no)
-		// cond:
-		// result: (GT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64GreaterThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64GT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (GreaterEqual cc) yes no)
-		// cond:
-		// result: (GE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64GreaterEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64GE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (FLessThan cc) yes no)
-		// cond:
-		// result: (FLT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FLessThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FLT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (FLessEqual cc) yes no)
-		// cond:
-		// result: (FLE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FLessEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FLE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (FGreaterThan cc) yes no)
-		// cond:
-		// result: (FGT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FGreaterThan {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (FGreaterEqual cc) yes no)
-		// cond:
-		// result: (FGE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FGreaterEqual {
-				break
-			}
-			cc := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE (CMPWconst [0] cond) yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64NE
-			v0 := b.NewValue0(v.Line, OpPPC64CMPWconst, TypeFlags)
-			v0.AuxInt = 0
-			v0.AddArg(cond)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockPPC64LE:
-		// match: (LE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64GE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockPPC64LT:
-		// match: (LT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagLT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64GT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockPPC64NE:
-		// match: (NE (CMPWconst [0] (Equal cc)) yes no)
-		// cond:
-		// result: (EQ cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64Equal {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64EQ
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (NotEqual cc)) yes no)
-		// cond:
-		// result: (NE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64NotEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64NE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (LessThan cc)) yes no)
-		// cond:
-		// result: (LT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64LessThan {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64LT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (LessEqual cc)) yes no)
-		// cond:
-		// result: (LE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64LessEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64LE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (GreaterThan cc)) yes no)
-		// cond:
-		// result: (GT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64GreaterThan {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64GT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no)
-		// cond:
-		// result: (GE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64GreaterEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64GE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (FLessThan cc)) yes no)
-		// cond:
-		// result: (FLT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64FLessThan {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FLT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (FLessEqual cc)) yes no)
-		// cond:
-		// result: (FLE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64FLessEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FLE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (FGreaterThan cc)) yes no)
-		// cond:
-		// result: (FGT cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64FGreaterThan {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FGT
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (FGreaterEqual cc)) yes no)
-		// cond:
-		// result: (FGE cc yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64FGreaterEqual {
-				break
-			}
-			cc := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64FGE
-			b.SetControl(cc)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
-		// cond:
-		// result: (NE (ANDCCconst [c] x) yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64ANDconst {
-				break
-			}
-			c := v_0.AuxInt
-			x := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64NE
-			v0 := b.NewValue0(v.Line, OpPPC64ANDCCconst, TypeFlags)
-			v0.AuxInt = c
-			v0.AddArg(x)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
-		// cond:
-		// result: (NE (ANDCCconst [c] x) yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64CMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpPPC64ANDconst {
-				break
-			}
-			c := v_0.AuxInt
-			x := v_0.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64NE
-			v0 := b.NewValue0(v.Line, OpPPC64ANDCCconst, TypeFlags)
-			v0.AuxInt = c
-			v0.AddArg(x)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE (FlagLT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64FlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpPPC64InvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockPPC64NE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteS390X.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteS390X.go
deleted file mode 100644
index 2d43eaa..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewriteS390X.go
+++ /dev/null
@@ -1,18697 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteS390X.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewriteS390X.go:1
-// autogenerated from gen/S390X.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValueS390X(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAdd16:
-		return rewriteValueS390X_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValueS390X_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValueS390X_OpAdd32F(v, config)
-	case OpAdd64:
-		return rewriteValueS390X_OpAdd64(v, config)
-	case OpAdd64F:
-		return rewriteValueS390X_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValueS390X_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValueS390X_OpAddPtr(v, config)
-	case OpAddr:
-		return rewriteValueS390X_OpAddr(v, config)
-	case OpAnd16:
-		return rewriteValueS390X_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValueS390X_OpAnd32(v, config)
-	case OpAnd64:
-		return rewriteValueS390X_OpAnd64(v, config)
-	case OpAnd8:
-		return rewriteValueS390X_OpAnd8(v, config)
-	case OpAndB:
-		return rewriteValueS390X_OpAndB(v, config)
-	case OpAtomicAdd32:
-		return rewriteValueS390X_OpAtomicAdd32(v, config)
-	case OpAtomicAdd64:
-		return rewriteValueS390X_OpAtomicAdd64(v, config)
-	case OpAtomicCompareAndSwap32:
-		return rewriteValueS390X_OpAtomicCompareAndSwap32(v, config)
-	case OpAtomicCompareAndSwap64:
-		return rewriteValueS390X_OpAtomicCompareAndSwap64(v, config)
-	case OpAtomicExchange32:
-		return rewriteValueS390X_OpAtomicExchange32(v, config)
-	case OpAtomicExchange64:
-		return rewriteValueS390X_OpAtomicExchange64(v, config)
-	case OpAtomicLoad32:
-		return rewriteValueS390X_OpAtomicLoad32(v, config)
-	case OpAtomicLoad64:
-		return rewriteValueS390X_OpAtomicLoad64(v, config)
-	case OpAtomicLoadPtr:
-		return rewriteValueS390X_OpAtomicLoadPtr(v, config)
-	case OpAtomicStore32:
-		return rewriteValueS390X_OpAtomicStore32(v, config)
-	case OpAtomicStore64:
-		return rewriteValueS390X_OpAtomicStore64(v, config)
-	case OpAtomicStorePtrNoWB:
-		return rewriteValueS390X_OpAtomicStorePtrNoWB(v, config)
-	case OpAvg64u:
-		return rewriteValueS390X_OpAvg64u(v, config)
-	case OpBswap32:
-		return rewriteValueS390X_OpBswap32(v, config)
-	case OpBswap64:
-		return rewriteValueS390X_OpBswap64(v, config)
-	case OpClosureCall:
-		return rewriteValueS390X_OpClosureCall(v, config)
-	case OpCom16:
-		return rewriteValueS390X_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValueS390X_OpCom32(v, config)
-	case OpCom64:
-		return rewriteValueS390X_OpCom64(v, config)
-	case OpCom8:
-		return rewriteValueS390X_OpCom8(v, config)
-	case OpConst16:
-		return rewriteValueS390X_OpConst16(v, config)
-	case OpConst32:
-		return rewriteValueS390X_OpConst32(v, config)
-	case OpConst32F:
-		return rewriteValueS390X_OpConst32F(v, config)
-	case OpConst64:
-		return rewriteValueS390X_OpConst64(v, config)
-	case OpConst64F:
-		return rewriteValueS390X_OpConst64F(v, config)
-	case OpConst8:
-		return rewriteValueS390X_OpConst8(v, config)
-	case OpConstBool:
-		return rewriteValueS390X_OpConstBool(v, config)
-	case OpConstNil:
-		return rewriteValueS390X_OpConstNil(v, config)
-	case OpConvert:
-		return rewriteValueS390X_OpConvert(v, config)
-	case OpCtz32:
-		return rewriteValueS390X_OpCtz32(v, config)
-	case OpCtz64:
-		return rewriteValueS390X_OpCtz64(v, config)
-	case OpCvt32Fto32:
-		return rewriteValueS390X_OpCvt32Fto32(v, config)
-	case OpCvt32Fto64:
-		return rewriteValueS390X_OpCvt32Fto64(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValueS390X_OpCvt32Fto64F(v, config)
-	case OpCvt32to32F:
-		return rewriteValueS390X_OpCvt32to32F(v, config)
-	case OpCvt32to64F:
-		return rewriteValueS390X_OpCvt32to64F(v, config)
-	case OpCvt64Fto32:
-		return rewriteValueS390X_OpCvt64Fto32(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValueS390X_OpCvt64Fto32F(v, config)
-	case OpCvt64Fto64:
-		return rewriteValueS390X_OpCvt64Fto64(v, config)
-	case OpCvt64to32F:
-		return rewriteValueS390X_OpCvt64to32F(v, config)
-	case OpCvt64to64F:
-		return rewriteValueS390X_OpCvt64to64F(v, config)
-	case OpDeferCall:
-		return rewriteValueS390X_OpDeferCall(v, config)
-	case OpDiv16:
-		return rewriteValueS390X_OpDiv16(v, config)
-	case OpDiv16u:
-		return rewriteValueS390X_OpDiv16u(v, config)
-	case OpDiv32:
-		return rewriteValueS390X_OpDiv32(v, config)
-	case OpDiv32F:
-		return rewriteValueS390X_OpDiv32F(v, config)
-	case OpDiv32u:
-		return rewriteValueS390X_OpDiv32u(v, config)
-	case OpDiv64:
-		return rewriteValueS390X_OpDiv64(v, config)
-	case OpDiv64F:
-		return rewriteValueS390X_OpDiv64F(v, config)
-	case OpDiv64u:
-		return rewriteValueS390X_OpDiv64u(v, config)
-	case OpDiv8:
-		return rewriteValueS390X_OpDiv8(v, config)
-	case OpDiv8u:
-		return rewriteValueS390X_OpDiv8u(v, config)
-	case OpEq16:
-		return rewriteValueS390X_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValueS390X_OpEq32(v, config)
-	case OpEq32F:
-		return rewriteValueS390X_OpEq32F(v, config)
-	case OpEq64:
-		return rewriteValueS390X_OpEq64(v, config)
-	case OpEq64F:
-		return rewriteValueS390X_OpEq64F(v, config)
-	case OpEq8:
-		return rewriteValueS390X_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValueS390X_OpEqB(v, config)
-	case OpEqPtr:
-		return rewriteValueS390X_OpEqPtr(v, config)
-	case OpGeq16:
-		return rewriteValueS390X_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValueS390X_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValueS390X_OpGeq32(v, config)
-	case OpGeq32F:
-		return rewriteValueS390X_OpGeq32F(v, config)
-	case OpGeq32U:
-		return rewriteValueS390X_OpGeq32U(v, config)
-	case OpGeq64:
-		return rewriteValueS390X_OpGeq64(v, config)
-	case OpGeq64F:
-		return rewriteValueS390X_OpGeq64F(v, config)
-	case OpGeq64U:
-		return rewriteValueS390X_OpGeq64U(v, config)
-	case OpGeq8:
-		return rewriteValueS390X_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValueS390X_OpGeq8U(v, config)
-	case OpGetClosurePtr:
-		return rewriteValueS390X_OpGetClosurePtr(v, config)
-	case OpGetG:
-		return rewriteValueS390X_OpGetG(v, config)
-	case OpGoCall:
-		return rewriteValueS390X_OpGoCall(v, config)
-	case OpGreater16:
-		return rewriteValueS390X_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValueS390X_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValueS390X_OpGreater32(v, config)
-	case OpGreater32F:
-		return rewriteValueS390X_OpGreater32F(v, config)
-	case OpGreater32U:
-		return rewriteValueS390X_OpGreater32U(v, config)
-	case OpGreater64:
-		return rewriteValueS390X_OpGreater64(v, config)
-	case OpGreater64F:
-		return rewriteValueS390X_OpGreater64F(v, config)
-	case OpGreater64U:
-		return rewriteValueS390X_OpGreater64U(v, config)
-	case OpGreater8:
-		return rewriteValueS390X_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValueS390X_OpGreater8U(v, config)
-	case OpHmul16:
-		return rewriteValueS390X_OpHmul16(v, config)
-	case OpHmul16u:
-		return rewriteValueS390X_OpHmul16u(v, config)
-	case OpHmul32:
-		return rewriteValueS390X_OpHmul32(v, config)
-	case OpHmul32u:
-		return rewriteValueS390X_OpHmul32u(v, config)
-	case OpHmul64:
-		return rewriteValueS390X_OpHmul64(v, config)
-	case OpHmul64u:
-		return rewriteValueS390X_OpHmul64u(v, config)
-	case OpHmul8:
-		return rewriteValueS390X_OpHmul8(v, config)
-	case OpHmul8u:
-		return rewriteValueS390X_OpHmul8u(v, config)
-	case OpITab:
-		return rewriteValueS390X_OpITab(v, config)
-	case OpInterCall:
-		return rewriteValueS390X_OpInterCall(v, config)
-	case OpIsInBounds:
-		return rewriteValueS390X_OpIsInBounds(v, config)
-	case OpIsNonNil:
-		return rewriteValueS390X_OpIsNonNil(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValueS390X_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValueS390X_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValueS390X_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValueS390X_OpLeq32(v, config)
-	case OpLeq32F:
-		return rewriteValueS390X_OpLeq32F(v, config)
-	case OpLeq32U:
-		return rewriteValueS390X_OpLeq32U(v, config)
-	case OpLeq64:
-		return rewriteValueS390X_OpLeq64(v, config)
-	case OpLeq64F:
-		return rewriteValueS390X_OpLeq64F(v, config)
-	case OpLeq64U:
-		return rewriteValueS390X_OpLeq64U(v, config)
-	case OpLeq8:
-		return rewriteValueS390X_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValueS390X_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValueS390X_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValueS390X_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValueS390X_OpLess32(v, config)
-	case OpLess32F:
-		return rewriteValueS390X_OpLess32F(v, config)
-	case OpLess32U:
-		return rewriteValueS390X_OpLess32U(v, config)
-	case OpLess64:
-		return rewriteValueS390X_OpLess64(v, config)
-	case OpLess64F:
-		return rewriteValueS390X_OpLess64F(v, config)
-	case OpLess64U:
-		return rewriteValueS390X_OpLess64U(v, config)
-	case OpLess8:
-		return rewriteValueS390X_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValueS390X_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValueS390X_OpLoad(v, config)
-	case OpLrot32:
-		return rewriteValueS390X_OpLrot32(v, config)
-	case OpLrot64:
-		return rewriteValueS390X_OpLrot64(v, config)
-	case OpLsh16x16:
-		return rewriteValueS390X_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValueS390X_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValueS390X_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValueS390X_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValueS390X_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValueS390X_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValueS390X_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValueS390X_OpLsh32x8(v, config)
-	case OpLsh64x16:
-		return rewriteValueS390X_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValueS390X_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValueS390X_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValueS390X_OpLsh64x8(v, config)
-	case OpLsh8x16:
-		return rewriteValueS390X_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValueS390X_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValueS390X_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValueS390X_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValueS390X_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValueS390X_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValueS390X_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValueS390X_OpMod32u(v, config)
-	case OpMod64:
-		return rewriteValueS390X_OpMod64(v, config)
-	case OpMod64u:
-		return rewriteValueS390X_OpMod64u(v, config)
-	case OpMod8:
-		return rewriteValueS390X_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValueS390X_OpMod8u(v, config)
-	case OpMove:
-		return rewriteValueS390X_OpMove(v, config)
-	case OpMul16:
-		return rewriteValueS390X_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValueS390X_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValueS390X_OpMul32F(v, config)
-	case OpMul64:
-		return rewriteValueS390X_OpMul64(v, config)
-	case OpMul64F:
-		return rewriteValueS390X_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValueS390X_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValueS390X_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValueS390X_OpNeg32(v, config)
-	case OpNeg32F:
-		return rewriteValueS390X_OpNeg32F(v, config)
-	case OpNeg64:
-		return rewriteValueS390X_OpNeg64(v, config)
-	case OpNeg64F:
-		return rewriteValueS390X_OpNeg64F(v, config)
-	case OpNeg8:
-		return rewriteValueS390X_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValueS390X_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValueS390X_OpNeq32(v, config)
-	case OpNeq32F:
-		return rewriteValueS390X_OpNeq32F(v, config)
-	case OpNeq64:
-		return rewriteValueS390X_OpNeq64(v, config)
-	case OpNeq64F:
-		return rewriteValueS390X_OpNeq64F(v, config)
-	case OpNeq8:
-		return rewriteValueS390X_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValueS390X_OpNeqB(v, config)
-	case OpNeqPtr:
-		return rewriteValueS390X_OpNeqPtr(v, config)
-	case OpNilCheck:
-		return rewriteValueS390X_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValueS390X_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValueS390X_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValueS390X_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValueS390X_OpOr32(v, config)
-	case OpOr64:
-		return rewriteValueS390X_OpOr64(v, config)
-	case OpOr8:
-		return rewriteValueS390X_OpOr8(v, config)
-	case OpOrB:
-		return rewriteValueS390X_OpOrB(v, config)
-	case OpRsh16Ux16:
-		return rewriteValueS390X_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValueS390X_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValueS390X_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValueS390X_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValueS390X_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValueS390X_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValueS390X_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValueS390X_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValueS390X_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValueS390X_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValueS390X_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValueS390X_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValueS390X_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValueS390X_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValueS390X_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValueS390X_OpRsh32x8(v, config)
-	case OpRsh64Ux16:
-		return rewriteValueS390X_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValueS390X_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValueS390X_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValueS390X_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValueS390X_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValueS390X_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValueS390X_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValueS390X_OpRsh64x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValueS390X_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValueS390X_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValueS390X_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValueS390X_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValueS390X_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValueS390X_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValueS390X_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValueS390X_OpRsh8x8(v, config)
-	case OpS390XADD:
-		return rewriteValueS390X_OpS390XADD(v, config)
-	case OpS390XADDW:
-		return rewriteValueS390X_OpS390XADDW(v, config)
-	case OpS390XADDWconst:
-		return rewriteValueS390X_OpS390XADDWconst(v, config)
-	case OpS390XADDconst:
-		return rewriteValueS390X_OpS390XADDconst(v, config)
-	case OpS390XAND:
-		return rewriteValueS390X_OpS390XAND(v, config)
-	case OpS390XANDW:
-		return rewriteValueS390X_OpS390XANDW(v, config)
-	case OpS390XANDWconst:
-		return rewriteValueS390X_OpS390XANDWconst(v, config)
-	case OpS390XANDconst:
-		return rewriteValueS390X_OpS390XANDconst(v, config)
-	case OpS390XCMP:
-		return rewriteValueS390X_OpS390XCMP(v, config)
-	case OpS390XCMPU:
-		return rewriteValueS390X_OpS390XCMPU(v, config)
-	case OpS390XCMPUconst:
-		return rewriteValueS390X_OpS390XCMPUconst(v, config)
-	case OpS390XCMPW:
-		return rewriteValueS390X_OpS390XCMPW(v, config)
-	case OpS390XCMPWU:
-		return rewriteValueS390X_OpS390XCMPWU(v, config)
-	case OpS390XCMPWUconst:
-		return rewriteValueS390X_OpS390XCMPWUconst(v, config)
-	case OpS390XCMPWconst:
-		return rewriteValueS390X_OpS390XCMPWconst(v, config)
-	case OpS390XCMPconst:
-		return rewriteValueS390X_OpS390XCMPconst(v, config)
-	case OpS390XFMOVDload:
-		return rewriteValueS390X_OpS390XFMOVDload(v, config)
-	case OpS390XFMOVDloadidx:
-		return rewriteValueS390X_OpS390XFMOVDloadidx(v, config)
-	case OpS390XFMOVDstore:
-		return rewriteValueS390X_OpS390XFMOVDstore(v, config)
-	case OpS390XFMOVDstoreidx:
-		return rewriteValueS390X_OpS390XFMOVDstoreidx(v, config)
-	case OpS390XFMOVSload:
-		return rewriteValueS390X_OpS390XFMOVSload(v, config)
-	case OpS390XFMOVSloadidx:
-		return rewriteValueS390X_OpS390XFMOVSloadidx(v, config)
-	case OpS390XFMOVSstore:
-		return rewriteValueS390X_OpS390XFMOVSstore(v, config)
-	case OpS390XFMOVSstoreidx:
-		return rewriteValueS390X_OpS390XFMOVSstoreidx(v, config)
-	case OpS390XMOVBZload:
-		return rewriteValueS390X_OpS390XMOVBZload(v, config)
-	case OpS390XMOVBZloadidx:
-		return rewriteValueS390X_OpS390XMOVBZloadidx(v, config)
-	case OpS390XMOVBZreg:
-		return rewriteValueS390X_OpS390XMOVBZreg(v, config)
-	case OpS390XMOVBload:
-		return rewriteValueS390X_OpS390XMOVBload(v, config)
-	case OpS390XMOVBreg:
-		return rewriteValueS390X_OpS390XMOVBreg(v, config)
-	case OpS390XMOVBstore:
-		return rewriteValueS390X_OpS390XMOVBstore(v, config)
-	case OpS390XMOVBstoreconst:
-		return rewriteValueS390X_OpS390XMOVBstoreconst(v, config)
-	case OpS390XMOVBstoreidx:
-		return rewriteValueS390X_OpS390XMOVBstoreidx(v, config)
-	case OpS390XMOVDEQ:
-		return rewriteValueS390X_OpS390XMOVDEQ(v, config)
-	case OpS390XMOVDGE:
-		return rewriteValueS390X_OpS390XMOVDGE(v, config)
-	case OpS390XMOVDGT:
-		return rewriteValueS390X_OpS390XMOVDGT(v, config)
-	case OpS390XMOVDLE:
-		return rewriteValueS390X_OpS390XMOVDLE(v, config)
-	case OpS390XMOVDLT:
-		return rewriteValueS390X_OpS390XMOVDLT(v, config)
-	case OpS390XMOVDNE:
-		return rewriteValueS390X_OpS390XMOVDNE(v, config)
-	case OpS390XMOVDaddridx:
-		return rewriteValueS390X_OpS390XMOVDaddridx(v, config)
-	case OpS390XMOVDload:
-		return rewriteValueS390X_OpS390XMOVDload(v, config)
-	case OpS390XMOVDloadidx:
-		return rewriteValueS390X_OpS390XMOVDloadidx(v, config)
-	case OpS390XMOVDstore:
-		return rewriteValueS390X_OpS390XMOVDstore(v, config)
-	case OpS390XMOVDstoreconst:
-		return rewriteValueS390X_OpS390XMOVDstoreconst(v, config)
-	case OpS390XMOVDstoreidx:
-		return rewriteValueS390X_OpS390XMOVDstoreidx(v, config)
-	case OpS390XMOVHBRstore:
-		return rewriteValueS390X_OpS390XMOVHBRstore(v, config)
-	case OpS390XMOVHBRstoreidx:
-		return rewriteValueS390X_OpS390XMOVHBRstoreidx(v, config)
-	case OpS390XMOVHZload:
-		return rewriteValueS390X_OpS390XMOVHZload(v, config)
-	case OpS390XMOVHZloadidx:
-		return rewriteValueS390X_OpS390XMOVHZloadidx(v, config)
-	case OpS390XMOVHZreg:
-		return rewriteValueS390X_OpS390XMOVHZreg(v, config)
-	case OpS390XMOVHload:
-		return rewriteValueS390X_OpS390XMOVHload(v, config)
-	case OpS390XMOVHreg:
-		return rewriteValueS390X_OpS390XMOVHreg(v, config)
-	case OpS390XMOVHstore:
-		return rewriteValueS390X_OpS390XMOVHstore(v, config)
-	case OpS390XMOVHstoreconst:
-		return rewriteValueS390X_OpS390XMOVHstoreconst(v, config)
-	case OpS390XMOVHstoreidx:
-		return rewriteValueS390X_OpS390XMOVHstoreidx(v, config)
-	case OpS390XMOVWBRstore:
-		return rewriteValueS390X_OpS390XMOVWBRstore(v, config)
-	case OpS390XMOVWBRstoreidx:
-		return rewriteValueS390X_OpS390XMOVWBRstoreidx(v, config)
-	case OpS390XMOVWZload:
-		return rewriteValueS390X_OpS390XMOVWZload(v, config)
-	case OpS390XMOVWZloadidx:
-		return rewriteValueS390X_OpS390XMOVWZloadidx(v, config)
-	case OpS390XMOVWZreg:
-		return rewriteValueS390X_OpS390XMOVWZreg(v, config)
-	case OpS390XMOVWload:
-		return rewriteValueS390X_OpS390XMOVWload(v, config)
-	case OpS390XMOVWreg:
-		return rewriteValueS390X_OpS390XMOVWreg(v, config)
-	case OpS390XMOVWstore:
-		return rewriteValueS390X_OpS390XMOVWstore(v, config)
-	case OpS390XMOVWstoreconst:
-		return rewriteValueS390X_OpS390XMOVWstoreconst(v, config)
-	case OpS390XMOVWstoreidx:
-		return rewriteValueS390X_OpS390XMOVWstoreidx(v, config)
-	case OpS390XMULLD:
-		return rewriteValueS390X_OpS390XMULLD(v, config)
-	case OpS390XMULLDconst:
-		return rewriteValueS390X_OpS390XMULLDconst(v, config)
-	case OpS390XMULLW:
-		return rewriteValueS390X_OpS390XMULLW(v, config)
-	case OpS390XMULLWconst:
-		return rewriteValueS390X_OpS390XMULLWconst(v, config)
-	case OpS390XNEG:
-		return rewriteValueS390X_OpS390XNEG(v, config)
-	case OpS390XNEGW:
-		return rewriteValueS390X_OpS390XNEGW(v, config)
-	case OpS390XNOT:
-		return rewriteValueS390X_OpS390XNOT(v, config)
-	case OpS390XNOTW:
-		return rewriteValueS390X_OpS390XNOTW(v, config)
-	case OpS390XOR:
-		return rewriteValueS390X_OpS390XOR(v, config)
-	case OpS390XORW:
-		return rewriteValueS390X_OpS390XORW(v, config)
-	case OpS390XORWconst:
-		return rewriteValueS390X_OpS390XORWconst(v, config)
-	case OpS390XORconst:
-		return rewriteValueS390X_OpS390XORconst(v, config)
-	case OpS390XSLD:
-		return rewriteValueS390X_OpS390XSLD(v, config)
-	case OpS390XSLW:
-		return rewriteValueS390X_OpS390XSLW(v, config)
-	case OpS390XSRAD:
-		return rewriteValueS390X_OpS390XSRAD(v, config)
-	case OpS390XSRADconst:
-		return rewriteValueS390X_OpS390XSRADconst(v, config)
-	case OpS390XSRAW:
-		return rewriteValueS390X_OpS390XSRAW(v, config)
-	case OpS390XSRAWconst:
-		return rewriteValueS390X_OpS390XSRAWconst(v, config)
-	case OpS390XSRD:
-		return rewriteValueS390X_OpS390XSRD(v, config)
-	case OpS390XSRW:
-		return rewriteValueS390X_OpS390XSRW(v, config)
-	case OpS390XSTM2:
-		return rewriteValueS390X_OpS390XSTM2(v, config)
-	case OpS390XSTMG2:
-		return rewriteValueS390X_OpS390XSTMG2(v, config)
-	case OpS390XSUB:
-		return rewriteValueS390X_OpS390XSUB(v, config)
-	case OpS390XSUBEWcarrymask:
-		return rewriteValueS390X_OpS390XSUBEWcarrymask(v, config)
-	case OpS390XSUBEcarrymask:
-		return rewriteValueS390X_OpS390XSUBEcarrymask(v, config)
-	case OpS390XSUBW:
-		return rewriteValueS390X_OpS390XSUBW(v, config)
-	case OpS390XSUBWconst:
-		return rewriteValueS390X_OpS390XSUBWconst(v, config)
-	case OpS390XSUBconst:
-		return rewriteValueS390X_OpS390XSUBconst(v, config)
-	case OpS390XXOR:
-		return rewriteValueS390X_OpS390XXOR(v, config)
-	case OpS390XXORW:
-		return rewriteValueS390X_OpS390XXORW(v, config)
-	case OpS390XXORWconst:
-		return rewriteValueS390X_OpS390XXORWconst(v, config)
-	case OpS390XXORconst:
-		return rewriteValueS390X_OpS390XXORconst(v, config)
-	case OpSelect0:
-		return rewriteValueS390X_OpSelect0(v, config)
-	case OpSelect1:
-		return rewriteValueS390X_OpSelect1(v, config)
-	case OpSignExt16to32:
-		return rewriteValueS390X_OpSignExt16to32(v, config)
-	case OpSignExt16to64:
-		return rewriteValueS390X_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValueS390X_OpSignExt32to64(v, config)
-	case OpSignExt8to16:
-		return rewriteValueS390X_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValueS390X_OpSignExt8to32(v, config)
-	case OpSignExt8to64:
-		return rewriteValueS390X_OpSignExt8to64(v, config)
-	case OpSlicemask:
-		return rewriteValueS390X_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValueS390X_OpSqrt(v, config)
-	case OpStaticCall:
-		return rewriteValueS390X_OpStaticCall(v, config)
-	case OpStore:
-		return rewriteValueS390X_OpStore(v, config)
-	case OpSub16:
-		return rewriteValueS390X_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValueS390X_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValueS390X_OpSub32F(v, config)
-	case OpSub64:
-		return rewriteValueS390X_OpSub64(v, config)
-	case OpSub64F:
-		return rewriteValueS390X_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValueS390X_OpSub8(v, config)
-	case OpSubPtr:
-		return rewriteValueS390X_OpSubPtr(v, config)
-	case OpTrunc16to8:
-		return rewriteValueS390X_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValueS390X_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValueS390X_OpTrunc32to8(v, config)
-	case OpTrunc64to16:
-		return rewriteValueS390X_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValueS390X_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValueS390X_OpTrunc64to8(v, config)
-	case OpXor16:
-		return rewriteValueS390X_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValueS390X_OpXor32(v, config)
-	case OpXor64:
-		return rewriteValueS390X_OpXor64(v, config)
-	case OpXor8:
-		return rewriteValueS390X_OpXor8(v, config)
-	case OpZero:
-		return rewriteValueS390X_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValueS390X_OpZeroExt16to32(v, config)
-	case OpZeroExt16to64:
-		return rewriteValueS390X_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValueS390X_OpZeroExt32to64(v, config)
-	case OpZeroExt8to16:
-		return rewriteValueS390X_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValueS390X_OpZeroExt8to32(v, config)
-	case OpZeroExt8to64:
-		return rewriteValueS390X_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValueS390X_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16  x y)
-	// cond:
-	// result: (ADDW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XADDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32  x y)
-	// cond:
-	// result: (ADDW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XADDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F x y)
-	// cond:
-	// result: (FADDS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFADDS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64  x y)
-	// cond:
-	// result: (ADD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F x y)
-	// cond:
-	// result: (FADD x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8   x y)
-	// cond:
-	// result: (ADDW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XADDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr x y)
-	// cond:
-	// result: (ADD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XADD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAddr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Addr {sym} base)
-	// cond:
-	// result: (MOVDaddr {sym} base)
-	for {
-		sym := v.Aux
-		base := v.Args[0]
-		v.reset(OpS390XMOVDaddr)
-		v.Aux = sym
-		v.AddArg(base)
-		return true
-	}
-}
-func rewriteValueS390X_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x y)
-	// cond:
-	// result: (ANDW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x y)
-	// cond:
-	// result: (ANDW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x y)
-	// cond:
-	// result: (AND x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8  x y)
-	// cond:
-	// result: (ANDW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAndB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AndB x y)
-	// cond:
-	// result: (ANDW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd32 ptr val mem)
-	// cond:
-	// result: (AddTupleFirst32 (LAA ptr val mem) val)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XAddTupleFirst32)
-		v0 := b.NewValue0(v.Line, OpS390XLAA, MakeTuple(config.fe.TypeUInt32(), TypeMem))
-		v0.AddArg(ptr)
-		v0.AddArg(val)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(val)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicAdd64 ptr val mem)
-	// cond:
-	// result: (AddTupleFirst64 (LAAG ptr val mem) val)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XAddTupleFirst64)
-		v0 := b.NewValue0(v.Line, OpS390XLAAG, MakeTuple(config.fe.TypeUInt64(), TypeMem))
-		v0.AddArg(ptr)
-		v0.AddArg(val)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(val)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
-	// cond:
-	// result: (LoweredAtomicCas32 ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XLoweredAtomicCas32)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
-	// cond:
-	// result: (LoweredAtomicCas64 ptr old new_ mem)
-	for {
-		ptr := v.Args[0]
-		old := v.Args[1]
-		new_ := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XLoweredAtomicCas64)
-		v.AddArg(ptr)
-		v.AddArg(old)
-		v.AddArg(new_)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicExchange32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange32 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicExchange32 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XLoweredAtomicExchange32)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicExchange64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicExchange64 ptr val mem)
-	// cond:
-	// result: (LoweredAtomicExchange64 ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XLoweredAtomicExchange64)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicLoad32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad32 ptr mem)
-	// cond:
-	// result: (MOVWZatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpS390XMOVWZatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicLoad64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoad64 ptr mem)
-	// cond:
-	// result: (MOVDatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpS390XMOVDatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicLoadPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicLoadPtr ptr mem)
-	// cond:
-	// result: (MOVDatomicload ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpS390XMOVDatomicload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicStore32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore32 ptr val mem)
-	// cond:
-	// result: (MOVWatomicstore ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVWatomicstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicStore64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStore64 ptr val mem)
-	// cond:
-	// result: (MOVDatomicstore ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVDatomicstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AtomicStorePtrNoWB ptr val mem)
-	// cond:
-	// result: (MOVDatomicstore ptr val mem)
-	for {
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVDatomicstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpAvg64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Avg64u <t> x y)
-	// cond:
-	// result: (ADD (ADD <t> (SRDconst <t> x [1]) (SRDconst <t> y [1])) (ANDconst <t> (AND <t> x y) [1]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XADD)
-		v0 := b.NewValue0(v.Line, OpS390XADD, t)
-		v1 := b.NewValue0(v.Line, OpS390XSRDconst, t)
-		v1.AuxInt = 1
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XSRDconst, t)
-		v2.AuxInt = 1
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpS390XANDconst, t)
-		v3.AuxInt = 1
-		v4 := b.NewValue0(v.Line, OpS390XAND, t)
-		v4.AddArg(x)
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValueS390X_OpBswap32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap32 x)
-	// cond:
-	// result: (MOVWBR x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVWBR)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpBswap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap64 x)
-	// cond:
-	// result: (MOVDBR x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVDBR)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpClosureCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ClosureCall [argwid] entry closure mem)
-	// cond:
-	// result: (CALLclosure [argwid] entry closure mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		closure := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XCALLclosure)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(closure)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 x)
-	// cond:
-	// result: (NOTW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNOTW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 x)
-	// cond:
-	// result: (NOTW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNOTW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 x)
-	// cond:
-	// result: (NOT x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNOT)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8  x)
-	// cond:
-	// result: (NOTW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNOTW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpConst16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const16  [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueS390X_OpConst32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32  [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueS390X_OpConst32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const32F [val])
-	// cond:
-	// result: (FMOVSconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpS390XFMOVSconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueS390X_OpConst64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64  [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueS390X_OpConst64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64F [val])
-	// cond:
-	// result: (FMOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpS390XFMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueS390X_OpConst8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const8   [val])
-	// cond:
-	// result: (MOVDconst [val])
-	for {
-		val := v.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = val
-		return true
-	}
-}
-func rewriteValueS390X_OpConstBool(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstBool [b])
-	// cond:
-	// result: (MOVDconst [b])
-	for {
-		b := v.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = b
-		return true
-	}
-}
-func rewriteValueS390X_OpConstNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstNil)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-}
-func rewriteValueS390X_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert <t> x mem)
-	// cond:
-	// result: (MOVDconvert <t> x mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpS390XMOVDconvert)
-		v.Type = t
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpCtz32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz32 <t> x)
-	// cond:
-	// result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpS390XSUB)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 64
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XFLOGR, config.fe.TypeUInt64())
-		v2 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
-		v3 := b.NewValue0(v.Line, OpS390XANDW, t)
-		v4 := b.NewValue0(v.Line, OpS390XSUBWconst, t)
-		v4.AuxInt = 1
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpS390XNOTW, t)
-		v5.AddArg(x)
-		v3.AddArg(v5)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpCtz64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz64 <t> x)
-	// cond:
-	// result: (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpS390XSUB)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 64
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XFLOGR, config.fe.TypeUInt64())
-		v2 := b.NewValue0(v.Line, OpS390XAND, t)
-		v3 := b.NewValue0(v.Line, OpS390XSUBconst, t)
-		v3.AuxInt = 1
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XNOT, t)
-		v4.AddArg(x)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt32Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto32 x)
-	// cond:
-	// result: (CFEBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCFEBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt32Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64 x)
-	// cond:
-	// result: (CGEBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCGEBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F x)
-	// cond:
-	// result: (LDEBR x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XLDEBR)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt32to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to32F x)
-	// cond:
-	// result: (CEFBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCEFBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt32to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32to64F x)
-	// cond:
-	// result: (CDFBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCDFBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt64Fto32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32 x)
-	// cond:
-	// result: (CFDBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCFDBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F x)
-	// cond:
-	// result: (LEDBR x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XLEDBR)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt64Fto64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto64 x)
-	// cond:
-	// result: (CGDBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCGDBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt64to32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to32F x)
-	// cond:
-	// result: (CEGBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCEGBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpCvt64to64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64to64F x)
-	// cond:
-	// result: (CDGBRA x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XCDGBRA)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpDeferCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (DeferCall [argwid] mem)
-	// cond:
-	// result: (CALLdefer [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpS390XCALLdefer)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16  x y)
-	// cond:
-	// result: (DIVW  (MOVHreg x) (MOVHreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div16u x y)
-	// cond:
-	// result: (DIVWU (MOVHZreg x) (MOVHZreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVWU)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32  x y)
-	// cond:
-	// result: (DIVW  (MOVWreg x) y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x y)
-	// cond:
-	// result: (FDIVS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFDIVS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32u x y)
-	// cond:
-	// result: (DIVWU (MOVWZreg x) y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVWU)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64  x y)
-	// cond:
-	// result: (DIVD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x y)
-	// cond:
-	// result: (FDIV x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFDIV)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64u x y)
-	// cond:
-	// result: (DIVDU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVDU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8   x y)
-	// cond:
-	// result: (DIVW  (MOVBreg x) (MOVBreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpDiv8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div8u  x y)
-	// cond:
-	// result: (DIVWU (MOVBZreg x) (MOVBZreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XDIVWU)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16  x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32  x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32F x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64  x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64F x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8   x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB   x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr x y)
-	// cond:
-	// result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDEQ)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16  x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32  x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32F x y)
-	// cond:
-	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGEnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64  x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64F x y)
-	// cond:
-	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGEnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8   x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U  x y)
-	// cond:
-	// result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGetClosurePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetClosurePtr)
-	// cond:
-	// result: (LoweredGetClosurePtr)
-	for {
-		v.reset(OpS390XLoweredGetClosurePtr)
-		return true
-	}
-}
-func rewriteValueS390X_OpGetG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GetG mem)
-	// cond:
-	// result: (LoweredGetG mem)
-	for {
-		mem := v.Args[0]
-		v.reset(OpS390XLoweredGetG)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpGoCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (GoCall [argwid] mem)
-	// cond:
-	// result: (CALLgo [argwid] mem)
-	for {
-		argwid := v.AuxInt
-		mem := v.Args[0]
-		v.reset(OpS390XCALLgo)
-		v.AuxInt = argwid
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16  x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32  x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32F x y)
-	// cond:
-	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGTnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64  x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64F x y)
-	// cond:
-	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGTnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8   x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U  x y)
-	// cond:
-	// result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16  x y)
-	// cond:
-	// result: (SRDconst [16] (MULLW (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul16u x y)
-	// cond:
-	// result: (SRDconst [16] (MULLW (MOVHZreg x) (MOVHZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = 16
-		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32  x y)
-	// cond:
-	// result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpS390XMULLD, config.fe.TypeInt64())
-		v1 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul32u x y)
-	// cond:
-	// result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = 32
-		v0 := b.NewValue0(v.Line, OpS390XMULLD, config.fe.TypeInt64())
-		v1 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64  x y)
-	// cond:
-	// result: (MULHD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMULHD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul64u x y)
-	// cond:
-	// result: (MULHDU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMULHDU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8   x y)
-	// cond:
-	// result: (SRDconst [8] (MULLW (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpHmul8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Hmul8u  x y)
-	// cond:
-	// result: (SRDconst [8] (MULLW (MOVBZreg x) (MOVBZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpS390XMULLW, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpITab(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ITab (Load ptr mem))
-	// cond:
-	// result: (MOVDload ptr mem)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLoad {
-			break
-		}
-		ptr := v_0.Args[0]
-		mem := v_0.Args[1]
-		v.reset(OpS390XMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpInterCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (InterCall [argwid] entry mem)
-	// cond:
-	// result: (CALLinter [argwid] entry mem)
-	for {
-		argwid := v.AuxInt
-		entry := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpS390XCALLinter)
-		v.AuxInt = argwid
-		v.AddArg(entry)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds idx len)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v2.AddArg(idx)
-		v2.AddArg(len)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpIsNonNil(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsNonNil p)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
-	for {
-		p := v.Args[0]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPconst, TypeFlags)
-		v2.AuxInt = 0
-		v2.AddArg(p)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds idx len)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
-	for {
-		idx := v.Args[0]
-		len := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v2.AddArg(idx)
-		v2.AddArg(len)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16  x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32  x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32F x y)
-	// cond:
-	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGEnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
-		v2.AddArg(y)
-		v2.AddArg(x)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64  x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64F x y)
-	// cond:
-	// result: (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGEnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
-		v2.AddArg(y)
-		v2.AddArg(x)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8   x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U  x y)
-	// cond:
-	// result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16  x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVHZreg x) (MOVHZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32  x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32F x y)
-	// cond:
-	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMPS y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGTnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
-		v2.AddArg(y)
-		v2.AddArg(x)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64  x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64F x y)
-	// cond:
-	// result: (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) (FCMP y x))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDGTnoinv)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
-		v2.AddArg(y)
-		v2.AddArg(x)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8   x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U  x y)
-	// cond:
-	// result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDLT)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPU, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t))
-	// result: (MOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpS390XMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitInt(t)
-	// result: (MOVWZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitInt(t)) {
-			break
-		}
-		v.reset(OpS390XMOVWZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is16BitInt(t)
-	// result: (MOVHZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is16BitInt(t)) {
-			break
-		}
-		v.reset(OpS390XMOVHZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: (t.IsBoolean() || is8BitInt(t))
-	// result: (MOVBZload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsBoolean() || is8BitInt(t)) {
-			break
-		}
-		v.reset(OpS390XMOVBZload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (FMOVSload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpS390XFMOVSload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (FMOVDload ptr mem)
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpS390XFMOVDload)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpLrot32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot32 <t> x [c])
-	// cond:
-	// result: (RLLconst <t> [c&31] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpS390XRLLconst)
-		v.Type = t
-		v.AuxInt = c & 31
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpLrot64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot64 <t> x [c])
-	// cond:
-	// result: (RLLGconst <t> [c&63] x)
-	for {
-		t := v.Type
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpS390XRLLGconst)
-		v.Type = t
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8  <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8  <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16 <t> x y)
-	// cond:
-	// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 63
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32 <t> x y)
-	// cond:
-	// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 63
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64 <t> x y)
-	// cond:
-	// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v2.AuxInt = 63
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8  <t> x y)
-	// cond:
-	// result: (AND (SLD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSLD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 63
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8  <t> x y)
-	// cond:
-	// result: (ANDW (SLW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16  x y)
-	// cond:
-	// result: (MODW  (MOVHreg x) (MOVHreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u x y)
-	// cond:
-	// result: (MODWU (MOVHZreg x) (MOVHZreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODWU)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32  x y)
-	// cond:
-	// result: (MODW  (MOVWreg x) y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u x y)
-	// cond:
-	// result: (MODWU (MOVWZreg x) y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODWU)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64  x y)
-	// cond:
-	// result: (MODD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64u x y)
-	// cond:
-	// result: (MODDU x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODDU)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8   x y)
-	// cond:
-	// result: (MODW  (MOVBreg x) (MOVBreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u  x y)
-	// cond:
-	// result: (MODWU (MOVBZreg x) (MOVBZreg y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMODWU)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpMove(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Move [s] _ _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstore dst (MOVBZload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpS390XMOVBstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVHstore dst (MOVHZload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVWstore dst (MOVWZload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVDstore dst (MOVDload src mem) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 16
-	// result: (MOVDstore [8] dst (MOVDload [8] src mem) 		(MOVDstore dst (MOVDload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 16) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = 8
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v0.AuxInt = 8
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 24
-	// result: (MOVDstore [16] dst (MOVDload [16] src mem) 	        (MOVDstore [8] dst (MOVDload [8] src mem)                 (MOVDstore dst (MOVDload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 24) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = 16
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v0.AuxInt = 16
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v2.AuxInt = 8
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpS390XMOVDstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstore [2] dst (MOVBZload [2] src mem) 		(MOVHstore dst (MOVHZload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = 2
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
-		v0.AuxInt = 2
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstore [4] dst (MOVBZload [4] src mem) 		(MOVWstore dst (MOVWZload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVHstore [4] dst (MOVHZload [4] src mem) 		(MOVWstore dst (MOVWZload src mem) mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = 4
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
-		v0.AuxInt = 4
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVBstore [6] dst (MOVBZload [6] src mem) 		(MOVHstore [4] dst (MOVHZload [4] src mem) 			(MOVWstore dst (MOVWZload src mem) mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = 6
-		v.AddArg(dst)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, config.fe.TypeUInt8())
-		v0.AuxInt = 6
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHstore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v2 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
-		v2.AuxInt = 4
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpS390XMOVWstore, TypeMem)
-		v3.AddArg(dst)
-		v4 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
-		v4.AddArg(src)
-		v4.AddArg(mem)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256
-	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size(), 0)] dst src mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 256) {
-			break
-		}
-		v.reset(OpS390XMVC)
-		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size(), 0)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512
-	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 256 && SizeAndAlign(s).Size() <= 512) {
-			break
-		}
-		v.reset(OpS390XMVC)
-		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-256, 256)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
-		v0.AuxInt = makeValAndOff(256, 0)
-		v0.AddArg(dst)
-		v0.AddArg(src)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768
-	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Size() <= 768) {
-			break
-		}
-		v.reset(OpS390XMVC)
-		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-512, 512)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
-		v0.AuxInt = makeValAndOff(256, 256)
-		v0.AddArg(dst)
-		v0.AddArg(src)
-		v1 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
-		v1.AuxInt = makeValAndOff(256, 0)
-		v1.AddArg(dst)
-		v1.AddArg(src)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024
-	// result: (MVC [makeValAndOff(SizeAndAlign(s).Size()-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 768 && SizeAndAlign(s).Size() <= 1024) {
-			break
-		}
-		v.reset(OpS390XMVC)
-		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size()-768, 768)
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
-		v0.AuxInt = makeValAndOff(256, 512)
-		v0.AddArg(dst)
-		v0.AddArg(src)
-		v1 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
-		v1.AuxInt = makeValAndOff(256, 256)
-		v1.AddArg(dst)
-		v1.AddArg(src)
-		v2 := b.NewValue0(v.Line, OpS390XMVC, TypeMem)
-		v2.AuxInt = makeValAndOff(256, 0)
-		v2.AddArg(dst)
-		v2.AddArg(src)
-		v2.AddArg(mem)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Move [s] dst src mem)
-	// cond: SizeAndAlign(s).Size() > 1024
-	// result: (LoweredMove [SizeAndAlign(s).Size()%256] dst src (ADDconst <src.Type> src [(SizeAndAlign(s).Size()/256)*256]) mem)
-	for {
-		s := v.AuxInt
-		dst := v.Args[0]
-		src := v.Args[1]
-		mem := v.Args[2]
-		if !(SizeAndAlign(s).Size() > 1024) {
-			break
-		}
-		v.reset(OpS390XLoweredMove)
-		v.AuxInt = SizeAndAlign(s).Size() % 256
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpS390XADDconst, src.Type)
-		v0.AuxInt = (SizeAndAlign(s).Size() / 256) * 256
-		v0.AddArg(src)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16  x y)
-	// cond:
-	// result: (MULLW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMULLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32  x y)
-	// cond:
-	// result: (MULLW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMULLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F x y)
-	// cond:
-	// result: (FMULS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFMULS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64  x y)
-	// cond:
-	// result: (MULLD  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMULLD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F x y)
-	// cond:
-	// result: (FMUL x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFMUL)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8   x y)
-	// cond:
-	// result: (MULLW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMULLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16  x)
-	// cond:
-	// result: (NEGW (MOVHreg x))
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNEGW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32  x)
-	// cond:
-	// result: (NEGW x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNEGW)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeg32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32F x)
-	// cond:
-	// result: (FNEGS x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XFNEGS)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64  x)
-	// cond:
-	// result: (NEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeg64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64F x)
-	// cond:
-	// result: (FNEG x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XFNEG)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8   x)
-	// cond:
-	// result: (NEGW (MOVBreg x))
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XNEGW)
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16  x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32  x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMPW, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeq32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32F x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMPS, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64  x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeq64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64F x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XFCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8   x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB   x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v3 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v4.AddArg(y)
-		v2.AddArg(v4)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr x y)
-	// cond:
-	// result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XMOVDNE)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v1.AuxInt = 1
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpS390XCMP, TypeFlags)
-		v2.AddArg(x)
-		v2.AddArg(y)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck ptr mem)
-	// cond:
-	// result: (LoweredNilCheck ptr mem)
-	for {
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		v.reset(OpS390XLoweredNilCheck)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not x)
-	// cond:
-	// result: (XORWconst [1] x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XXORWconst)
-		v.AuxInt = 1
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr [off] ptr:(SP))
-	// cond:
-	// result: (MOVDaddr [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if ptr.Op != OpSP {
-			break
-		}
-		v.reset(OpS390XMOVDaddr)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond: is32Bit(off)
-	// result: (ADDconst [off] ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		if !(is32Bit(off)) {
-			break
-		}
-		v.reset(OpS390XADDconst)
-		v.AuxInt = off
-		v.AddArg(ptr)
-		return true
-	}
-	// match: (OffPtr [off] ptr)
-	// cond:
-	// result: (ADD (MOVDconst [off]) ptr)
-	for {
-		off := v.AuxInt
-		ptr := v.Args[0]
-		v.reset(OpS390XADD)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = off
-		v.AddArg(v0)
-		v.AddArg(ptr)
-		return true
-	}
-}
-func rewriteValueS390X_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x y)
-	// cond:
-	// result: (ORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x y)
-	// cond:
-	// result: (ORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x y)
-	// cond:
-	// result: (OR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8  x y)
-	// cond:
-	// result: (ORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpOrB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OrB x y)
-	// cond:
-	// result: (ORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [15])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 15
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [15])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 15
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [15])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v3.AuxInt = 15
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVHZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [15])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 15
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16 <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [15])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v4.AuxInt = 15
-		v5 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32 <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [15])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v4.AuxInt = 15
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVHreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [15])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XOR, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v4.AuxInt = 15
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8  <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVHreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [15])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVHreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v4.AuxInt = 15
-		v5 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPUconst y [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v2.AuxInt = 31
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> x y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [31])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 31
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16 <t> x y)
-	// cond:
-	// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [31])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 31
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32 <t> x y)
-	// cond:
-	// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [31])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 31
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 <t> x y)
-	// cond:
-	// result: (SRAW <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [31])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XOR, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v3.AuxInt = 31
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8  <t> x y)
-	// cond:
-	// result: (SRAW <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [31])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 31
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 <t> x y)
-	// cond:
-	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVHZreg y) [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 63
-		v3 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 <t> x y)
-	// cond:
-	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst y [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 63
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 <t> x y)
-	// cond:
-	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPUconst y [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v2.AuxInt = 63
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8  <t> x y)
-	// cond:
-	// result: (AND (SRD <t> x y) (SUBEcarrymask <t> (CMPWUconst (MOVBZreg y) [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XAND)
-		v0 := b.NewValue0(v.Line, OpS390XSRD, t)
-		v0.AddArg(x)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, t)
-		v2 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v2.AuxInt = 63
-		v3 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16 <t> x y)
-	// cond:
-	// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [63])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAD)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 63
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32 <t> x y)
-	// cond:
-	// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [63])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAD)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 63
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64 <t> x y)
-	// cond:
-	// result: (SRAD <t> x (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [63])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAD)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XOR, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v3.AuxInt = 63
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8  <t> x y)
-	// cond:
-	// result: (SRAD <t> x (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [63])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAD)
-		v.Type = t
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v0.AddArg(y)
-		v1 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 63
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVHZreg y) [7])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 7
-		v4 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst y [7])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 7
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPUconst y [7])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v3.AuxInt = 7
-		v3.AddArg(y)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  <t> x y)
-	// cond:
-	// result: (ANDW (SRW <t> (MOVBZreg x) y) (SUBEWcarrymask <t> (CMPWUconst (MOVBZreg y) [7])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XANDW)
-		v0 := b.NewValue0(v.Line, OpS390XSRW, t)
-		v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v0.AddArg(y)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, t)
-		v3 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v3.AuxInt = 7
-		v4 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16 <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVHZreg y) [7])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v4.AuxInt = 7
-		v5 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32 <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst y [7])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v4.AuxInt = 7
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVBreg x) (OR <y.Type> y (NOT <y.Type> (SUBEcarrymask <y.Type> (CMPUconst y [7])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XOR, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOT, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v4.AuxInt = 7
-		v4.AddArg(y)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8  <t> x y)
-	// cond:
-	// result: (SRAW <t> (MOVBreg x) (ORW <y.Type> y (NOTW <y.Type> (SUBEWcarrymask <y.Type> (CMPWUconst (MOVBZreg y) [7])))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSRAW)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpS390XMOVBreg, config.fe.TypeInt64())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XORW, y.Type)
-		v1.AddArg(y)
-		v2 := b.NewValue0(v.Line, OpS390XNOTW, y.Type)
-		v3 := b.NewValue0(v.Line, OpS390XSUBEWcarrymask, y.Type)
-		v4 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v4.AuxInt = 7
-		v5 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-		v5.AddArg(y)
-		v4.AddArg(v5)
-		v3.AddArg(v4)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpS390XADD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADD x (MOVDconst [c]))
-	// cond: is32Bit(c)
-	// result: (ADDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD (MOVDconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (ADDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XADDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADD x (MOVDaddr [c] {s} y))
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (MOVDaddridx [c] {s} x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDaddr {
-			break
-		}
-		c := v_1.AuxInt
-		s := v_1.Aux
-		y := v_1.Args[0]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD (MOVDaddr [c] {s} x) y)
-	// cond: x.Op != OpSB && y.Op != OpSB
-	// result: (MOVDaddridx [c] {s} x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		c := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(x.Op != OpSB && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = c
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD x (NEG y))
-	// cond:
-	// result: (SUB x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XNEG {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADD <t> x g:(MOVDload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ADDload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XADDload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ADD <t> g:(MOVDload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ADDload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XADDload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XADDW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDW x (MOVDconst [c]))
-	// cond:
-	// result: (ADDWconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XADDWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDW (MOVDconst [c]) x)
-	// cond:
-	// result: (ADDWconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XADDWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDW x (NEGW y))
-	// cond:
-	// result: (SUBW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XNEGW {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSUBW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDW <t> x g:(MOVWload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ADDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XADDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ADDW <t> g:(MOVWload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ADDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XADDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ADDW <t> x g:(MOVWZload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ADDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XADDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ADDW <t> g:(MOVWZload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ADDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XADDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XADDWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDWconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDWconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c+d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(int32(c + d))
-		return true
-	}
-	// match: (ADDWconst [c] (ADDWconst [d] x))
-	// cond:
-	// result: (ADDWconst [int64(int32(c+d))] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDWconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpS390XADDWconst)
-		v.AuxInt = int64(int32(c + d))
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XADDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB)))
-	// cond: ((c+d)&1 == 0) && is32Bit(c+d)
-	// result: (MOVDaddr [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		if x.Op != OpSB {
-			break
-		}
-		if !(((c+d)&1 == 0) && is32Bit(c+d)) {
-			break
-		}
-		v.reset(OpS390XMOVDaddr)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVDaddr [d] {s} x))
-	// cond: x.Op != OpSB && is20Bit(c+d)
-	// result: (MOVDaddr [c+d] {s} x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		if !(x.Op != OpSB && is20Bit(c+d)) {
-			break
-		}
-		v.reset(OpS390XMOVDaddr)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVDaddridx [d] {s} x y))
-	// cond: is20Bit(c+d)
-	// result: (MOVDaddridx [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		d := v_0.AuxInt
-		s := v_0.Aux
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if !(is20Bit(c + d)) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (ADDconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ADDconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c+d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c + d
-		return true
-	}
-	// match: (ADDconst [c] (ADDconst [d] x))
-	// cond: is32Bit(c+d)
-	// result: (ADDconst [c+d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(c + d)) {
-			break
-		}
-		v.reset(OpS390XADDconst)
-		v.AuxInt = c + d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XAND(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AND x (MOVDconst [c]))
-	// cond: is32Bit(c) && c < 0
-	// result: (ANDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c) && c < 0) {
-			break
-		}
-		v.reset(OpS390XANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [c]) x)
-	// cond: is32Bit(c) && c < 0
-	// result: (ANDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c) && c < 0) {
-			break
-		}
-		v.reset(OpS390XANDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [0xFF]) x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		if v_0.AuxInt != 0xFF {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpS390XMOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVDconst [0xFF]))
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0xFF {
-			break
-		}
-		v.reset(OpS390XMOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [0xFFFF]) x)
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		if v_0.AuxInt != 0xFFFF {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpS390XMOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVDconst [0xFFFF]))
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0xFFFF {
-			break
-		}
-		v.reset(OpS390XMOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [0xFFFFFFFF]) x)
-	// cond:
-	// result: (MOVWZreg x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		if v_0.AuxInt != 0xFFFFFFFF {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpS390XMOVWZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND x (MOVDconst [0xFFFFFFFF]))
-	// cond:
-	// result: (MOVWZreg x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		if v_1.AuxInt != 0xFFFFFFFF {
-			break
-		}
-		v.reset(OpS390XMOVWZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c&d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c & d
-		return true
-	}
-	// match: (AND x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (AND <t> x g:(MOVDload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ANDload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XANDload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (AND <t> g:(MOVDload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ANDload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XANDload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XANDW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDW x (MOVDconst [c]))
-	// cond:
-	// result: (ANDWconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XANDWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDW (MOVDconst [c]) x)
-	// cond:
-	// result: (ANDWconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XANDWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDW x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDW <t> x g:(MOVWload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ANDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XANDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ANDW <t> g:(MOVWload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ANDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XANDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ANDW <t> x g:(MOVWZload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ANDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XANDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ANDW <t> g:(MOVWZload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ANDWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XANDWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XANDWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDWconst [c] (ANDWconst [d] x))
-	// cond:
-	// result: (ANDWconst [c & d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XANDWconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpS390XANDWconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDWconst [0xFF] x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		if v.AuxInt != 0xFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpS390XMOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDWconst [0xFFFF] x)
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		if v.AuxInt != 0xFFFF {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpS390XMOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDWconst [c] _)
-	// cond: int32(c)==0
-	// result: (MOVDconst [0])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDWconst [c] x)
-	// cond: int32(c)==-1
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDWconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c & d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XANDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ANDconst [c] (ANDconst [d] x))
-	// cond:
-	// result: (ANDconst [c & d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XANDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		v.reset(OpS390XANDconst)
-		v.AuxInt = c & d
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [0] _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (ANDconst [-1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ANDconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c&d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c & d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMP(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMP x (MOVDconst [c]))
-	// cond: is32Bit(c)
-	// result: (CMPconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XCMPconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMP (MOVDconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (InvertFlags (CMPconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XInvertFlags)
-		v0 := b.NewValue0(v.Line, OpS390XCMPconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPU x (MOVDconst [c]))
-	// cond: is32Bit(c)
-	// result: (CMPUconst x [int64(uint32(c))])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XCMPUconst)
-		v.AuxInt = int64(uint32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPU (MOVDconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (InvertFlags (CMPUconst x [int64(uint32(c))]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XInvertFlags)
-		v0 := b.NewValue0(v.Line, OpS390XCMPUconst, TypeFlags)
-		v0.AuxInt = int64(uint32(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPUconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPUconst (MOVDconst [x]) [y])
-	// cond: uint64(x)==uint64(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint64(x) == uint64(y)) {
-			break
-		}
-		v.reset(OpS390XFlagEQ)
-		return true
-	}
-	// match: (CMPUconst (MOVDconst [x]) [y])
-	// cond: uint64(x)<uint64(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint64(x) < uint64(y)) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPUconst (MOVDconst [x]) [y])
-	// cond: uint64(x)>uint64(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint64(x) > uint64(y)) {
-			break
-		}
-		v.reset(OpS390XFlagGT)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPW x (MOVDconst [c]))
-	// cond:
-	// result: (CMPWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XCMPWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPW (MOVDconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPWconst x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XInvertFlags)
-		v0 := b.NewValue0(v.Line, OpS390XCMPWconst, TypeFlags)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPWU(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWU x (MOVDconst [c]))
-	// cond:
-	// result: (CMPWUconst x [int64(uint32(c))])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XCMPWUconst)
-		v.AuxInt = int64(uint32(c))
-		v.AddArg(x)
-		return true
-	}
-	// match: (CMPWU (MOVDconst [c]) x)
-	// cond:
-	// result: (InvertFlags (CMPWUconst x [int64(uint32(c))]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XInvertFlags)
-		v0 := b.NewValue0(v.Line, OpS390XCMPWUconst, TypeFlags)
-		v0.AuxInt = int64(uint32(c))
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPWUconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWUconst (MOVDconst [x]) [y])
-	// cond: uint32(x)==uint32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint32(x) == uint32(y)) {
-			break
-		}
-		v.reset(OpS390XFlagEQ)
-		return true
-	}
-	// match: (CMPWUconst (MOVDconst [x]) [y])
-	// cond: uint32(x)<uint32(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint32(x) < uint32(y)) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPWUconst (MOVDconst [x]) [y])
-	// cond: uint32(x)>uint32(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(uint32(x) > uint32(y)) {
-			break
-		}
-		v.reset(OpS390XFlagGT)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)==int32(y)
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) == int32(y)) {
-			break
-		}
-		v.reset(OpS390XFlagEQ)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)<int32(y)
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) < int32(y)) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPWconst (MOVDconst [x]) [y])
-	// cond: int32(x)>int32(y)
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(int32(x) > int32(y)) {
-			break
-		}
-		v.reset(OpS390XFlagGT)
-		return true
-	}
-	// match: (CMPWconst (SRWconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
-	// result: (FlagLT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XSRWconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPWconst (ANDWconst _ [m]) [n])
-	// cond: 0 <= int32(m) && int32(m) < int32(n)
-	// result: (FlagLT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XANDWconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= int32(m) && int32(m) < int32(n)) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XCMPconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (CMPconst (MOVDconst [x]) [y])
-	// cond: x==y
-	// result: (FlagEQ)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x == y) {
-			break
-		}
-		v.reset(OpS390XFlagEQ)
-		return true
-	}
-	// match: (CMPconst (MOVDconst [x]) [y])
-	// cond: x<y
-	// result: (FlagLT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x < y) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPconst (MOVDconst [x]) [y])
-	// cond: x>y
-	// result: (FlagGT)
-	for {
-		y := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x > y) {
-			break
-		}
-		v.reset(OpS390XFlagGT)
-		return true
-	}
-	// match: (CMPconst (MOVBZreg _) [c])
-	// cond: 0xFF < c
-	// result: (FlagLT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVBZreg {
-			break
-		}
-		if !(0xFF < c) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPconst (MOVHZreg _) [c])
-	// cond: 0xFFFF < c
-	// result: (FlagLT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVHZreg {
-			break
-		}
-		if !(0xFFFF < c) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPconst (MOVWZreg _) [c])
-	// cond: 0xFFFFFFFF < c
-	// result: (FlagLT)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVWZreg {
-			break
-		}
-		if !(0xFFFFFFFF < c) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPconst (SRDconst _ [c]) [n])
-	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
-	// result: (FlagLT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XSRDconst {
-			break
-		}
-		c := v_0.AuxInt
-		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	// match: (CMPconst (ANDconst _ [m]) [n])
-	// cond: 0 <= m && m < n
-	// result: (FlagLT)
-	for {
-		n := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XANDconst {
-			break
-		}
-		m := v_0.AuxInt
-		if !(0 <= m && m < n) {
-			break
-		}
-		v.reset(OpS390XFlagLT)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDload  [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (FMOVDload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XFMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVDloadidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDload [off] {sym} (ADD ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (FMOVDloadidx [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XFMOVDloadidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVDloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
-	// cond:
-	// result: (FMOVDloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XFMOVDloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
-	// cond:
-	// result: (FMOVDloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XFMOVDloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is20Bit(off1+off2)
-	// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XFMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVDstoreidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (FMOVDstoreidx [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XFMOVDstoreidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
-	// cond:
-	// result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XFMOVDstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
-	// cond:
-	// result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XFMOVDstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVSload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSload  [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (FMOVSload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XFMOVSload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVSload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVSloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVSloadidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSload [off] {sym} (ADD ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (FMOVSloadidx [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XFMOVSloadidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVSloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
-	// cond:
-	// result: (FMOVSloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XFMOVSloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
-	// cond:
-	// result: (FMOVSloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XFMOVSloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVSstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is20Bit(off1+off2)
-	// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XFMOVSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVSstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (FMOVSstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XFMOVSstoreidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (FMOVSstoreidx [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XFMOVSstoreidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XFMOVSstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
-	// cond:
-	// result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XFMOVSstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
-	// cond:
-	// result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XFMOVSstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBZload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVBstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZload  [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVBZload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVBZload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVBZload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVBZloadidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBZload [off] {sym} (ADD ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVBZloadidx [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVBZloadidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVBZloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVBZloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
-	// cond:
-	// result: (MOVBZloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVBZloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBZreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDLT {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDLE (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDLE {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDGT (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDGT {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDGE (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDGE {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDEQ (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDEQ {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDNE (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDNE {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDGTnoinv (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDGTnoinv {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVDGEnoinv (MOVDconst [c]) (MOVDconst [d]) _))
-	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVDGEnoinv {
-			break
-		}
-		x_0 := x.Args[0]
-		if x_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := x_0.AuxInt
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := x_1.AuxInt
-		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVBZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(Arg <t>))
-	// cond: is8BitInt(t) && !isSigned(t)
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpArg {
-			break
-		}
-		t := x.Type
-		if !(is8BitInt(t) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVBZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBZreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(uint8(c))
-		return true
-	}
-	// match: (MOVBZreg x:(MOVBZload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBZload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBZloadidx <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVBZloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBload   [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVBload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVBload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBreg x:(MOVBload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg x:(Arg <t>))
-	// cond: is8BitInt(t) && isSigned(t)
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpArg {
-			break
-		}
-		t := x.Type
-		if !(is8BitInt(t) && isSigned(t)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg x:(MOVBreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVBreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	// match: (MOVBreg x:(MOVBZload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVBload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVBreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
-	// cond:
-	// result: (MOVBstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVBZreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVBstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: validOff(off) && ptr.Op != OpSB
-	// result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off) && ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreconst)
-		v.AuxInt = makeValAndOff(int64(int8(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVBstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVBstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [off] {sym} (ADD ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVBstoreidx [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHstore [i-1] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRDconst {
-			break
-		}
-		if x_1.AuxInt != 8 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHstore [i-1] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w0 := v.Args[1]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRDconst {
-			break
-		}
-		if x_1.AuxInt != j+8 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHstore [i-1] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRWconst {
-			break
-		}
-		if x_1.AuxInt != 8 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHstore [i-1] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w0 := v.Args[1]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRWconst {
-			break
-		}
-		if x_1.AuxInt != j+8 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstore [i-1] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		if v_1.AuxInt != 8 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstore [i-1] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstore [i-1] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRWconst {
-			break
-		}
-		if v_1.AuxInt != 8 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstore [i-1] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRWconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVBstore {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstore)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && ValAndOff(a).Off() + 1 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVHstoreconst [makeValAndOff(ValAndOff(c).Val()&0xff | ValAndOff(a).Val()<<8, ValAndOff(a).Off())] {s} p mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != OpS390XMOVBstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreconst)
-		v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xff|ValAndOff(a).Val()<<8, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVBstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVBstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
-	// cond:
-	// result: (MOVBstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVBstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHstoreidx [i-1] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRDconst {
-			break
-		}
-		if x_2.AuxInt != 8 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHstoreidx [i-1] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w0 := v.Args[2]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRDconst {
-			break
-		}
-		if x_2.AuxInt != j+8 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHstoreidx [i-1] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRWconst {
-			break
-		}
-		if x_2.AuxInt != 8 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHstoreidx [i-1] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w0 := v.Args[2]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRWconst {
-			break
-		}
-		if x_2.AuxInt != j+8 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstoreidx [i-1] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRDconst {
-			break
-		}
-		if v_2.AuxInt != 8 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRDconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstoreidx [i-1] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRWconst {
-			break
-		}
-		if v_2.AuxInt != 8 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRWconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVBstoreidx {
-			break
-		}
-		if x.AuxInt != i-1 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		if w0.AuxInt != j-8 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVHBRstoreidx)
-		v.AuxInt = i - 1
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDEQ(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDEQ x y (InvertFlags cmp))
-	// cond:
-	// result: (MOVDEQ x y cmp)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XInvertFlags {
-			break
-		}
-		cmp := v_2.Args[0]
-		v.reset(OpS390XMOVDEQ)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(cmp)
-		return true
-	}
-	// match: (MOVDEQ _ x (FlagEQ))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDEQ y _ (FlagLT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDEQ y _ (FlagGT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDGE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDGE x y (InvertFlags cmp))
-	// cond:
-	// result: (MOVDLE x y cmp)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XInvertFlags {
-			break
-		}
-		cmp := v_2.Args[0]
-		v.reset(OpS390XMOVDLE)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(cmp)
-		return true
-	}
-	// match: (MOVDGE _ x (FlagEQ))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDGE y _ (FlagLT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDGE _ x (FlagGT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDGT(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDGT x y (InvertFlags cmp))
-	// cond:
-	// result: (MOVDLT x y cmp)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XInvertFlags {
-			break
-		}
-		cmp := v_2.Args[0]
-		v.reset(OpS390XMOVDLT)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(cmp)
-		return true
-	}
-	// match: (MOVDGT y _ (FlagEQ))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDGT y _ (FlagLT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDGT _ x (FlagGT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDLE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDLE x y (InvertFlags cmp))
-	// cond:
-	// result: (MOVDGE x y cmp)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XInvertFlags {
-			break
-		}
-		cmp := v_2.Args[0]
-		v.reset(OpS390XMOVDGE)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(cmp)
-		return true
-	}
-	// match: (MOVDLE _ x (FlagEQ))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDLE _ x (FlagLT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDLE y _ (FlagGT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDLT(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDLT x y (InvertFlags cmp))
-	// cond:
-	// result: (MOVDGT x y cmp)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XInvertFlags {
-			break
-		}
-		cmp := v_2.Args[0]
-		v.reset(OpS390XMOVDGT)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(cmp)
-		return true
-	}
-	// match: (MOVDLT y _ (FlagEQ))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDLT _ x (FlagLT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDLT y _ (FlagGT))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDNE(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDNE x y (InvertFlags cmp))
-	// cond:
-	// result: (MOVDNE x y cmp)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XInvertFlags {
-			break
-		}
-		cmp := v_2.Args[0]
-		v.reset(OpS390XMOVDNE)
-		v.AddArg(x)
-		v.AddArg(y)
-		v.AddArg(cmp)
-		return true
-	}
-	// match: (MOVDNE _ y (FlagEQ))
-	// cond:
-	// result: y
-	for {
-		y := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDNE x _ (FlagLT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDNE x _ (FlagGT))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDaddridx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDaddridx [c] {s} (ADDconst [d] x) y)
-	// cond: is20Bit(c+d) && x.Op != OpSB
-	// result: (MOVDaddridx [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is20Bit(c+d) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDaddridx [c] {s} x (ADDconst [d] y))
-	// cond: is20Bit(c+d) && y.Op != OpSB
-	// result: (MOVDaddridx [c+d] {s} x y)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		y := v_1.Args[0]
-		if !(is20Bit(c+d) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = c + d
-		v.Aux = s
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
-	// result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		x := v_0.Args[0]
-		y := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y))
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB
-	// result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		y := v_1.Args[0]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDaddridx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVDload   [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVDload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVDload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVDload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVDloadidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDload [off] {sym} (ADD ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVDloadidx [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDloadidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVDloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVDloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
-	// cond:
-	// result: (MOVDloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVDloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstore  [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVDstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB
-	// result: (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validValAndOff(c, off) && int64(int16(c)) == c && ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreconst)
-		v.AuxInt = makeValAndOff(c, off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVDstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [off] {sym} (ADD ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVDstoreidx [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && is20Bit(i-8)   && clobber(x)
-	// result: (STMG2 [i-8] {s} p w0 w1 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w1 := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVDstore {
-			break
-		}
-		if x.AuxInt != i-8 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTMG2)
-		v.AuxInt = i - 8
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
-	// cond: x.Uses == 1   && is20Bit(i-16)   && clobber(x)
-	// result: (STMG3 [i-16] {s} p w0 w1 w2 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w2 := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XSTMG2 {
-			break
-		}
-		if x.AuxInt != i-16 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		w1 := x.Args[2]
-		mem := x.Args[3]
-		if !(x.Uses == 1 && is20Bit(i-16) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTMG3)
-		v.AuxInt = i - 16
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(w2)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
-	// cond: x.Uses == 1   && is20Bit(i-24)   && clobber(x)
-	// result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w3 := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XSTMG3 {
-			break
-		}
-		if x.AuxInt != i-24 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		w1 := x.Args[2]
-		w2 := x.Args[3]
-		mem := x.Args[4]
-		if !(x.Uses == 1 && is20Bit(i-24) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTMG4)
-		v.AuxInt = i - 24
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(w2)
-		v.AddArg(w3)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVDstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVDstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
-	// cond:
-	// result: (MOVDstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVDstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHBRstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstore [i-2] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		if v_1.AuxInt != 16 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHBRstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstore [i-2] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHBRstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstore [i-2] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRWconst {
-			break
-		}
-		if v_1.AuxInt != 16 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHBRstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstore [i-2] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRWconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHBRstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstoreidx [i-2] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRDconst {
-			break
-		}
-		if v_2.AuxInt != 16 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHBRstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRDconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHBRstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstoreidx [i-2] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRWconst {
-			break
-		}
-		if v_2.AuxInt != 16 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHBRstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRWconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHBRstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		if w0.AuxInt != j-16 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWBRstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHZload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVHstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZload  [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVHZload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVHZload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVHZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVHZload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVHZloadidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHZload [off] {sym} (ADD ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVHZloadidx [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVHZloadidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVHZloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVHZloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
-	// cond:
-	// result: (MOVHZloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVHZloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHZreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHZreg x:(MOVBZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg x:(MOVHZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg x:(Arg <t>))
-	// cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t)
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpArg {
-			break
-		}
-		t := x.Type
-		if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg x:(MOVBZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg x:(MOVHZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHZreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(uint16(c))
-		return true
-	}
-	// match: (MOVHZreg x:(MOVHZload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVHZload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVHZloadidx <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZloadidx {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHload   [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVHload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVHload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHreg x:(MOVBload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(Arg <t>))
-	// cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t)
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpArg {
-			break
-		}
-		t := x.Type
-		if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVBZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg x:(MOVHreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVHreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	// match: (MOVHreg x:(MOVHZload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVHload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVHload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVHreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
-	// cond:
-	// result: (MOVHstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVHZreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore  [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVHstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: validOff(off) && ptr.Op != OpSB
-	// result: (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off) && ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreconst)
-		v.AuxInt = makeValAndOff(int64(int16(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVHstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVHstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [off] {sym} (ADD ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVHstoreidx [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-2] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRDconst {
-			break
-		}
-		if x_1.AuxInt != 16 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-2] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w0 := v.Args[1]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRDconst {
-			break
-		}
-		if x_1.AuxInt != j+16 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-2] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRWconst {
-			break
-		}
-		if x_1.AuxInt != 16 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVWstore [i-2] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w0 := v.Args[1]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVHstore {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRWconst {
-			break
-		}
-		if x_1.AuxInt != j+16 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != OpS390XMOVHstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreconst)
-		v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xffff|ValAndOff(a).Val()<<16, ValAndOff(a).Off())
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVHstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
-	// cond:
-	// result: (MOVHstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVHstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx [i-2] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRDconst {
-			break
-		}
-		if x_2.AuxInt != 16 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx [i-2] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w0 := v.Args[2]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRDconst {
-			break
-		}
-		if x_2.AuxInt != j+16 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx [i-2] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRWconst {
-			break
-		}
-		if x_2.AuxInt != 16 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVWstoreidx [i-2] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w0 := v.Args[2]
-		if w0.Op != OpS390XSRWconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVHstoreidx {
-			break
-		}
-		if x.AuxInt != i-2 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRWconst {
-			break
-		}
-		if x_2.AuxInt != j+16 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = i - 2
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWBRstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVDBRstore [i-4] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		if v_1.AuxInt != 32 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVWBRstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDBRstore)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVDBRstore [i-4] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		j := v_1.AuxInt
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVWBRstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		if w0.AuxInt != j-32 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDBRstore)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVDBRstoreidx [i-4] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRDconst {
-			break
-		}
-		if v_2.AuxInt != 32 {
-			break
-		}
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVWBRstoreidx {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		if w != x.Args[2] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDBRstoreidx)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpS390XSRDconst {
-			break
-		}
-		j := v_2.AuxInt
-		w := v_2.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVWBRstoreidx {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		w0 := x.Args[2]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		if w0.AuxInt != j-32 {
-			break
-		}
-		if w != w0.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDBRstoreidx)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWZload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
-	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVWstore {
-			break
-		}
-		off2 := v_1.AuxInt
-		sym2 := v_1.Aux
-		ptr2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZload  [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVWZload [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVWZload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVWZload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVWZloadidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWZload [off] {sym} (ADD ptr idx) mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVWZloadidx [off] {sym} ptr idx mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		mem := v.Args[1]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVWZloadidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem)
-	// cond:
-	// result: (MOVWZloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVWZloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem)
-	// cond:
-	// result: (MOVWZloadidx [c+d] {sym} ptr idx mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVWZloadidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWZreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWZreg x:(MOVBZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg x:(MOVHZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg x:(MOVWZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg x:(Arg <t>))
-	// cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpArg {
-			break
-		}
-		t := x.Type
-		if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg x:(MOVBZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg x:(MOVHZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg x:(MOVWZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWZreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(uint32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(uint32(c))
-		return true
-	}
-	// match: (MOVWZreg x:(MOVWZload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWZload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWZload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWZloadidx <v.Type> [off] {sym} ptr idx mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWZloadidx {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		idx := x.Args[1]
-		mem := x.Args[2]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWload(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWload   [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVWload  [off1+off2] {sym} ptr mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		mem := v.Args[1]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVWload)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWreg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWreg x:(MOVBload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHZload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHZload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVWload _ _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWload {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(Arg <t>))
-	// cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpArg {
-			break
-		}
-		t := x.Type
-		if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVBZreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVBZreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVHreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVHreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg x:(MOVWreg _))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWreg {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MOVWreg (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(int32(c))
-		return true
-	}
-	// match: (MOVWreg x:(MOVWZload [off] {sym} ptr mem))
-	// cond: x.Uses == 1 && clobber(x)
-	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
-	for {
-		x := v.Args[0]
-		if x.Op != OpS390XMOVWZload {
-			break
-		}
-		off := x.AuxInt
-		sym := x.Aux
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpS390XMOVWload, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = off
-		v0.Aux = sym
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWstore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVWreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
-	// cond:
-	// result: (MOVWstore [off] {sym} ptr x mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVWZreg {
-			break
-		}
-		x := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: is20Bit(off1+off2)
-	// result: (MOVWstore  [off1+off2] {sym} ptr val mem)
-	for {
-		off1 := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off2 := v_0.AuxInt
-		ptr := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is20Bit(off1 + off2)) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
-	// cond: validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB
-	// result: (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		mem := v.Args[2]
-		if !(validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreconst)
-		v.AuxInt = makeValAndOff(int64(int32(c)), off)
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		base := v_0.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVWstore)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(base)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem)
-	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
-	// result: (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
-	for {
-		off1 := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddridx {
-			break
-		}
-		off2 := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = off1 + off2
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [off] {sym} (ADD ptr idx) val mem)
-	// cond: ptr.Op != OpSB
-	// result: (MOVWstoreidx [off] {sym} ptr idx val mem)
-	for {
-		off := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADD {
-			break
-		}
-		ptr := v_0.Args[0]
-		idx := v_0.Args[1]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(ptr.Op != OpSB) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVDstore [i-4] {s} p w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		if v_1.AuxInt != 32 {
-			break
-		}
-		w := v_1.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVWstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if w != x.Args[1] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && clobber(x)
-	// result: (MOVDstore [i-4] {s} p w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w0 := v.Args[1]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVWstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpS390XSRDconst {
-			break
-		}
-		if x_1.AuxInt != j+32 {
-			break
-		}
-		if w != x_1.Args[0] {
-			break
-		}
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && is20Bit(i-4)   && clobber(x)
-	// result: (STM2 [i-4] {s} p w0 w1 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w1 := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XMOVWstore {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		mem := x.Args[2]
-		if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTM2)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
-	// cond: x.Uses == 1   && is20Bit(i-8)   && clobber(x)
-	// result: (STM3 [i-8] {s} p w0 w1 w2 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w2 := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XSTM2 {
-			break
-		}
-		if x.AuxInt != i-8 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		w1 := x.Args[2]
-		mem := x.Args[3]
-		if !(x.Uses == 1 && is20Bit(i-8) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTM3)
-		v.AuxInt = i - 8
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(w2)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
-	// cond: x.Uses == 1   && is20Bit(i-12)   && clobber(x)
-	// result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w3 := v.Args[1]
-		x := v.Args[2]
-		if x.Op != OpS390XSTM3 {
-			break
-		}
-		if x.AuxInt != i-12 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		w1 := x.Args[2]
-		w2 := x.Args[3]
-		mem := x.Args[4]
-		if !(x.Uses == 1 && is20Bit(i-12) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTM4)
-		v.AuxInt = i - 12
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(w2)
-		v.AddArg(w3)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
-	// cond: ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
-	for {
-		sc := v.AuxInt
-		s := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		off := v_0.AuxInt
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = s
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
-	// result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
-	for {
-		sc := v.AuxInt
-		sym1 := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDaddr {
-			break
-		}
-		off := v_0.AuxInt
-		sym2 := v_0.Aux
-		ptr := v_0.Args[0]
-		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreconst)
-		v.AuxInt = ValAndOff(sc).add(off)
-		v.Aux = mergeSym(sym1, sym2)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
-	// cond: p.Op != OpSB   && x.Uses == 1   && ValAndOff(a).Off() + 4 == ValAndOff(c).Off()   && clobber(x)
-	// result: (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem)
-	for {
-		c := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		x := v.Args[1]
-		if x.Op != OpS390XMOVWstoreconst {
-			break
-		}
-		a := x.AuxInt
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		mem := x.Args[1]
-		if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = ValAndOff(a).Off()
-		v.Aux = s
-		v.AddArg(p)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem)
-	// cond:
-	// result: (MOVWstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XADDconst {
-			break
-		}
-		d := v_0.AuxInt
-		ptr := v_0.Args[0]
-		idx := v.Args[1]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem)
-	// cond:
-	// result: (MOVWstoreidx [c+d] {sym} ptr idx val mem)
-	for {
-		c := v.AuxInt
-		sym := v.Aux
-		ptr := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XADDconst {
-			break
-		}
-		d := v_1.AuxInt
-		idx := v_1.Args[0]
-		val := v.Args[2]
-		mem := v.Args[3]
-		v.reset(OpS390XMOVWstoreidx)
-		v.AuxInt = c + d
-		v.Aux = sym
-		v.AddArg(ptr)
-		v.AddArg(idx)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVDstoreidx [i-4] {s} p idx w mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVWstoreidx {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRDconst {
-			break
-		}
-		if x_2.AuxInt != 32 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreidx)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem))
-	// cond: x.Uses == 1   && clobber(x)
-	// result: (MOVDstoreidx [i-4] {s} p idx w0 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		idx := v.Args[1]
-		w0 := v.Args[2]
-		if w0.Op != OpS390XSRDconst {
-			break
-		}
-		j := w0.AuxInt
-		w := w0.Args[0]
-		x := v.Args[3]
-		if x.Op != OpS390XMOVWstoreidx {
-			break
-		}
-		if x.AuxInt != i-4 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		if idx != x.Args[1] {
-			break
-		}
-		x_2 := x.Args[2]
-		if x_2.Op != OpS390XSRDconst {
-			break
-		}
-		if x_2.AuxInt != j+32 {
-			break
-		}
-		if w != x_2.Args[0] {
-			break
-		}
-		mem := x.Args[3]
-		if !(x.Uses == 1 && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreidx)
-		v.AuxInt = i - 4
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(idx)
-		v.AddArg(w0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMULLD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULLD x (MOVDconst [c]))
-	// cond: is32Bit(c)
-	// result: (MULLDconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XMULLDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLD (MOVDconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (MULLDconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XMULLDconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLD <t> x g:(MOVDload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (MULLDload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XMULLDload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MULLD <t> g:(MOVDload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (MULLDload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XMULLDload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMULLDconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULLDconst [-1] x)
-	// cond:
-	// result: (NEG x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpS390XNEG)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLDconst [0] _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MULLDconst [1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLDconst [c] x)
-	// cond: isPowerOfTwo(c)
-	// result: (SLDconst [log2(c)] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpS390XSLDconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLDconst [c] x)
-	// cond: isPowerOfTwo(c+1) && c >= 15
-	// result: (SUB (SLDconst <v.Type> [log2(c+1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c+1) && c >= 15) {
-			break
-		}
-		v.reset(OpS390XSUB)
-		v0 := b.NewValue0(v.Line, OpS390XSLDconst, v.Type)
-		v0.AuxInt = log2(c + 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLDconst [c] x)
-	// cond: isPowerOfTwo(c-1) && c >= 17
-	// result: (ADD (SLDconst <v.Type> [log2(c-1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-1) && c >= 17) {
-			break
-		}
-		v.reset(OpS390XADD)
-		v0 := b.NewValue0(v.Line, OpS390XSLDconst, v.Type)
-		v0.AuxInt = log2(c - 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLDconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c*d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c * d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMULLW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULLW x (MOVDconst [c]))
-	// cond:
-	// result: (MULLWconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XMULLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLW (MOVDconst [c]) x)
-	// cond:
-	// result: (MULLWconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XMULLWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLW <t> x g:(MOVWload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (MULLWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XMULLWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MULLW <t> g:(MOVWload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (MULLWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XMULLWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MULLW <t> x g:(MOVWZload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (MULLWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XMULLWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (MULLW <t> g:(MOVWZload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (MULLWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XMULLWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XMULLWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (MULLWconst [-1] x)
-	// cond:
-	// result: (NEGW x)
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpS390XNEGW)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLWconst [0] _)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (MULLWconst [1] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLWconst [c] x)
-	// cond: isPowerOfTwo(c)
-	// result: (SLWconst [log2(c)] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpS390XSLWconst)
-		v.AuxInt = log2(c)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLWconst [c] x)
-	// cond: isPowerOfTwo(c+1) && c >= 15
-	// result: (SUBW (SLWconst <v.Type> [log2(c+1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c+1) && c >= 15) {
-			break
-		}
-		v.reset(OpS390XSUBW)
-		v0 := b.NewValue0(v.Line, OpS390XSLWconst, v.Type)
-		v0.AuxInt = log2(c + 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLWconst [c] x)
-	// cond: isPowerOfTwo(c-1) && c >= 17
-	// result: (ADDW (SLWconst <v.Type> [log2(c-1)] x) x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(isPowerOfTwo(c-1) && c >= 17) {
-			break
-		}
-		v.reset(OpS390XADDW)
-		v0 := b.NewValue0(v.Line, OpS390XSLWconst, v.Type)
-		v0.AuxInt = log2(c - 1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (MULLWconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [int64(int32(c*d))])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(int32(c * d))
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XNEG(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEG (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [-c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -c
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XNEGW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NEGW (MOVDconst [c]))
-	// cond:
-	// result: (MOVDconst [int64(int32(-c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = int64(int32(-c))
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XNOT(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOT x)
-	// cond: true
-	// result: (XOR (MOVDconst [-1]) x)
-	for {
-		x := v.Args[0]
-		if !(true) {
-			break
-		}
-		v.reset(OpS390XXOR)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = -1
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XNOTW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NOTW x)
-	// cond: true
-	// result: (XORWconst [-1] x)
-	for {
-		x := v.Args[0]
-		if !(true) {
-			break
-		}
-		v.reset(OpS390XXORWconst)
-		v.AuxInt = -1
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OR x (MOVDconst [c]))
-	// cond: isU32Bit(c)
-	// result: (ORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR (MOVDconst [c]) x)
-	// cond: isU32Bit(c)
-	// result: (ORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c|d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c | d
-		return true
-	}
-	// match: (OR x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (OR <t> x g:(MOVDload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ORload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XORload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (OR <t> g:(MOVDload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ORload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XORload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR                       x0:(MOVBZload [i]   {s} p mem)     s0:(SLDconst [8]  x1:(MOVBZload [i+1] {s} p mem)))     s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem)))     s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem)))     s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem)))     s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem)))     s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem)))     s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem)))
-	// cond: p.Op != OpSB   && x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XOR {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpS390XOR {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpS390XOR {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpS390XOR {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpS390XOR {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpS390XOR {
-			break
-		}
-		x0 := o5.Args[0]
-		if x0.Op != OpS390XMOVBZload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o5.Args[1]
-		if s0.Op != OpS390XSLDconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := o4.Args[1]
-		if s1.Op != OpS390XSLDconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZload {
-			break
-		}
-		if x2.AuxInt != i+2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		s2 := o3.Args[1]
-		if s2.Op != OpS390XSLDconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpS390XMOVBZload {
-			break
-		}
-		if x3.AuxInt != i+3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		s3 := o2.Args[1]
-		if s3.Op != OpS390XSLDconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpS390XMOVBZload {
-			break
-		}
-		if x4.AuxInt != i+4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		s4 := o1.Args[1]
-		if s4.Op != OpS390XSLDconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpS390XMOVBZload {
-			break
-		}
-		if x5.AuxInt != i+5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if mem != x5.Args[1] {
-			break
-		}
-		s5 := o0.Args[1]
-		if s5.Op != OpS390XSLDconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpS390XMOVBZload {
-			break
-		}
-		if x6.AuxInt != i+6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if mem != x6.Args[1] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpS390XSLDconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpS390XMOVBZload {
-			break
-		}
-		if x7.AuxInt != i+7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if mem != x7.Args[1] {
-			break
-		}
-		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDBRload, config.fe.TypeUInt64())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR                       x0:(MOVBZloadidx [i]   {s} p idx mem)     s0:(SLDconst [8]  x1:(MOVBZloadidx [i+1] {s} p idx mem)))     s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem)))     s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem)))     s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem)))     s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem)))     s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem)))     s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx <v.Type> [i] {s} p idx mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XOR {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpS390XOR {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpS390XOR {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpS390XOR {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpS390XOR {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpS390XOR {
-			break
-		}
-		x0 := o5.Args[0]
-		if x0.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o5.Args[1]
-		if s0.Op != OpS390XSLDconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := o4.Args[1]
-		if s1.Op != OpS390XSLDconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x2.AuxInt != i+2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		s2 := o3.Args[1]
-		if s2.Op != OpS390XSLDconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x3.AuxInt != i+3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if idx != x3.Args[1] {
-			break
-		}
-		if mem != x3.Args[2] {
-			break
-		}
-		s3 := o2.Args[1]
-		if s3.Op != OpS390XSLDconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x4.AuxInt != i+4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if idx != x4.Args[1] {
-			break
-		}
-		if mem != x4.Args[2] {
-			break
-		}
-		s4 := o1.Args[1]
-		if s4.Op != OpS390XSLDconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x5.AuxInt != i+5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if idx != x5.Args[1] {
-			break
-		}
-		if mem != x5.Args[2] {
-			break
-		}
-		s5 := o0.Args[1]
-		if s5.Op != OpS390XSLDconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x6.AuxInt != i+6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if idx != x6.Args[1] {
-			break
-		}
-		if mem != x6.Args[2] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpS390XSLDconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x7.AuxInt != i+7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if idx != x7.Args[1] {
-			break
-		}
-		if mem != x7.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDBRloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR                       x0:(MOVBZload [i]   {s} p mem)     s0:(SLDconst [8]  x1:(MOVBZload [i-1] {s} p mem)))     s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem)))     s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem)))     s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem)))     s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem)))     s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem)))     s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem)))
-	// cond: p.Op != OpSB   && x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XOR {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpS390XOR {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpS390XOR {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpS390XOR {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpS390XOR {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpS390XOR {
-			break
-		}
-		x0 := o5.Args[0]
-		if x0.Op != OpS390XMOVBZload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o5.Args[1]
-		if s0.Op != OpS390XSLDconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := o4.Args[1]
-		if s1.Op != OpS390XSLDconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		s2 := o3.Args[1]
-		if s2.Op != OpS390XSLDconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpS390XMOVBZload {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if mem != x3.Args[1] {
-			break
-		}
-		s3 := o2.Args[1]
-		if s3.Op != OpS390XSLDconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpS390XMOVBZload {
-			break
-		}
-		if x4.AuxInt != i-4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if mem != x4.Args[1] {
-			break
-		}
-		s4 := o1.Args[1]
-		if s4.Op != OpS390XSLDconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpS390XMOVBZload {
-			break
-		}
-		if x5.AuxInt != i-5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if mem != x5.Args[1] {
-			break
-		}
-		s5 := o0.Args[1]
-		if s5.Op != OpS390XSLDconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpS390XMOVBZload {
-			break
-		}
-		if x6.AuxInt != i-6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if mem != x6.Args[1] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpS390XSLDconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpS390XMOVBZload {
-			break
-		}
-		if x7.AuxInt != i-7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if mem != x7.Args[1] {
-			break
-		}
-		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDload, config.fe.TypeUInt64())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i - 7
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR                       x0:(MOVBZloadidx [i]   {s} p idx mem)     s0:(SLDconst [8]  x1:(MOVBZloadidx [i-1] {s} p idx mem)))     s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem)))     s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem)))     s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem)))     s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem)))     s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem)))     s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && x3.Uses == 1   && x4.Uses == 1   && x5.Uses == 1   && x6.Uses == 1   && x7.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && s2.Uses == 1   && s3.Uses == 1   && s4.Uses == 1   && s5.Uses == 1   && s6.Uses == 1   && o0.Uses == 1   && o1.Uses == 1   && o2.Uses == 1   && o3.Uses == 1   && o4.Uses == 1   && o5.Uses == 1   && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(x3)   && clobber(x4)   && clobber(x5)   && clobber(x6)   && clobber(x7)   && clobber(s0)   && clobber(s1)   && clobber(s2)   && clobber(s3)   && clobber(s4)   && clobber(s5)   && clobber(s6)   && clobber(o0)   && clobber(o1)   && clobber(o2)   && clobber(o3)   && clobber(o4)   && clobber(o5)
-	// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <v.Type> [i-7] {s} p idx mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XOR {
-			break
-		}
-		o1 := o0.Args[0]
-		if o1.Op != OpS390XOR {
-			break
-		}
-		o2 := o1.Args[0]
-		if o2.Op != OpS390XOR {
-			break
-		}
-		o3 := o2.Args[0]
-		if o3.Op != OpS390XOR {
-			break
-		}
-		o4 := o3.Args[0]
-		if o4.Op != OpS390XOR {
-			break
-		}
-		o5 := o4.Args[0]
-		if o5.Op != OpS390XOR {
-			break
-		}
-		x0 := o5.Args[0]
-		if x0.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o5.Args[1]
-		if s0.Op != OpS390XSLDconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := o4.Args[1]
-		if s1.Op != OpS390XSLDconst {
-			break
-		}
-		if s1.AuxInt != 16 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		s2 := o3.Args[1]
-		if s2.Op != OpS390XSLDconst {
-			break
-		}
-		if s2.AuxInt != 24 {
-			break
-		}
-		x3 := s2.Args[0]
-		if x3.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x3.AuxInt != i-3 {
-			break
-		}
-		if x3.Aux != s {
-			break
-		}
-		if p != x3.Args[0] {
-			break
-		}
-		if idx != x3.Args[1] {
-			break
-		}
-		if mem != x3.Args[2] {
-			break
-		}
-		s3 := o2.Args[1]
-		if s3.Op != OpS390XSLDconst {
-			break
-		}
-		if s3.AuxInt != 32 {
-			break
-		}
-		x4 := s3.Args[0]
-		if x4.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x4.AuxInt != i-4 {
-			break
-		}
-		if x4.Aux != s {
-			break
-		}
-		if p != x4.Args[0] {
-			break
-		}
-		if idx != x4.Args[1] {
-			break
-		}
-		if mem != x4.Args[2] {
-			break
-		}
-		s4 := o1.Args[1]
-		if s4.Op != OpS390XSLDconst {
-			break
-		}
-		if s4.AuxInt != 40 {
-			break
-		}
-		x5 := s4.Args[0]
-		if x5.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x5.AuxInt != i-5 {
-			break
-		}
-		if x5.Aux != s {
-			break
-		}
-		if p != x5.Args[0] {
-			break
-		}
-		if idx != x5.Args[1] {
-			break
-		}
-		if mem != x5.Args[2] {
-			break
-		}
-		s5 := o0.Args[1]
-		if s5.Op != OpS390XSLDconst {
-			break
-		}
-		if s5.AuxInt != 48 {
-			break
-		}
-		x6 := s5.Args[0]
-		if x6.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x6.AuxInt != i-6 {
-			break
-		}
-		if x6.Aux != s {
-			break
-		}
-		if p != x6.Args[0] {
-			break
-		}
-		if idx != x6.Args[1] {
-			break
-		}
-		if mem != x6.Args[2] {
-			break
-		}
-		s6 := v.Args[1]
-		if s6.Op != OpS390XSLDconst {
-			break
-		}
-		if s6.AuxInt != 56 {
-			break
-		}
-		x7 := s6.Args[0]
-		if x7.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x7.AuxInt != i-7 {
-			break
-		}
-		if x7.Aux != s {
-			break
-		}
-		if p != x7.Args[0] {
-			break
-		}
-		if idx != x7.Args[1] {
-			break
-		}
-		if mem != x7.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i - 7
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XORW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORW x (MOVDconst [c]))
-	// cond:
-	// result: (ORWconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XORWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORW (MOVDconst [c]) x)
-	// cond:
-	// result: (ORWconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XORWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORW x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORW <t> x g:(MOVWload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ORW <t> g:(MOVWload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ORW <t> x g:(MOVWZload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ORW <t> g:(MOVWZload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (ORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ORW                 x0:(MOVBZload [i]   {s} p mem)     s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem)))
-	// cond: p.Op != OpSB   && x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem))
-	for {
-		x0 := v.Args[0]
-		if x0.Op != OpS390XMOVBZload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := v.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZload {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHBRload, config.fe.TypeUInt16())
-		v1.AuxInt = i
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRload [i] {s} p mem))     s0:(SLWconst [16] x1:(MOVBZload [i+2] {s} p mem)))     s1:(SLWconst [24] x2:(MOVBZload [i+3] {s} p mem)))
-	// cond: p.Op != OpSB   && z0.Uses == 1   && x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(z0)   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVWBRload [i] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XORW {
-			break
-		}
-		z0 := o0.Args[0]
-		if z0.Op != OpS390XMOVHZreg {
-			break
-		}
-		x0 := z0.Args[0]
-		if x0.Op != OpS390XMOVHBRload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o0.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZload {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != OpS390XSLWconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZload {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		if !(p.Op != OpSB && z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(z0) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWBRload, config.fe.TypeUInt32())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORW                 x0:(MOVBZloadidx [i]   {s} p idx mem)     s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx <v.Type> [i] {s} p idx mem))
-	for {
-		x0 := v.Args[0]
-		if x0.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := v.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x1.AuxInt != i+1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZreg, config.fe.TypeUInt64())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVHBRloadidx, v.Type)
-		v1.AuxInt = i
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(idx)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRloadidx [i] {s} p idx mem))     s0:(SLWconst [16] x1:(MOVBZloadidx [i+2] {s} p idx mem)))     s1:(SLWconst [24] x2:(MOVBZloadidx [i+3] {s} p idx mem)))
-	// cond: z0.Uses == 1   && x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(z0)   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVWZreg (MOVWBRloadidx <v.Type> [i] {s} p idx mem))
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XORW {
-			break
-		}
-		z0 := o0.Args[0]
-		if z0.Op != OpS390XMOVHZreg {
-			break
-		}
-		x0 := z0.Args[0]
-		if x0.Op != OpS390XMOVHBRloadidx {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o0.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x1.AuxInt != i+2 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != OpS390XSLWconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x2.AuxInt != i+3 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		if !(z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(z0) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZreg, config.fe.TypeUInt64())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XMOVWBRloadidx, v.Type)
-		v1.AuxInt = i
-		v1.Aux = s
-		v1.AddArg(p)
-		v1.AddArg(idx)
-		v1.AddArg(mem)
-		v0.AddArg(v1)
-		return true
-	}
-	// match: (ORW                  x0:(MOVBZload [i]   {s} p mem)     s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem)))
-	// cond: p.Op != OpSB   && x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem)
-	for {
-		x0 := v.Args[0]
-		if x0.Op != OpS390XMOVBZload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := v.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZload, config.fe.TypeUInt16())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i - 1
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORW o0:(ORW x0:(MOVHZload [i] {s} p mem)     s0:(SLWconst [16] x1:(MOVBZload [i-1] {s} p mem)))     s1:(SLWconst [24] x2:(MOVBZload [i-2] {s} p mem)))
-	// cond: p.Op != OpSB   && x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVWZload [i-2] {s} p mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XORW {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpS390XMOVHZload {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		mem := x0.Args[1]
-		s0 := o0.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZload {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if mem != x1.Args[1] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != OpS390XSLWconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZload {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if mem != x2.Args[1] {
-			break
-		}
-		if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZload, config.fe.TypeUInt32())
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i - 2
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORW                 x0:(MOVBZloadidx [i]   {s} p idx mem)     s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && s0.Uses == 1   && mergePoint(b,x0,x1) != nil   && clobber(x0)   && clobber(x1)   && clobber(s0)
-	// result: @mergePoint(b,x0,x1) (MOVHZloadidx <v.Type> [i-1] {s} p idx mem)
-	for {
-		x0 := v.Args[0]
-		if x0.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := v.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 8 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHZloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i - 1
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (ORW o0:(ORW x0:(MOVHZloadidx [i] {s} p idx mem)     s0:(SLWconst [16] x1:(MOVBZloadidx [i-1] {s} p idx mem)))     s1:(SLWconst [24] x2:(MOVBZloadidx [i-2] {s} p idx mem)))
-	// cond: x0.Uses == 1   && x1.Uses == 1   && x2.Uses == 1   && s0.Uses == 1   && s1.Uses == 1   && o0.Uses == 1   && mergePoint(b,x0,x1,x2) != nil   && clobber(x0)   && clobber(x1)   && clobber(x2)   && clobber(s0)   && clobber(s1)   && clobber(o0)
-	// result: @mergePoint(b,x0,x1,x2) (MOVWZloadidx <v.Type> [i-2] {s} p idx mem)
-	for {
-		o0 := v.Args[0]
-		if o0.Op != OpS390XORW {
-			break
-		}
-		x0 := o0.Args[0]
-		if x0.Op != OpS390XMOVHZloadidx {
-			break
-		}
-		i := x0.AuxInt
-		s := x0.Aux
-		p := x0.Args[0]
-		idx := x0.Args[1]
-		mem := x0.Args[2]
-		s0 := o0.Args[1]
-		if s0.Op != OpS390XSLWconst {
-			break
-		}
-		if s0.AuxInt != 16 {
-			break
-		}
-		x1 := s0.Args[0]
-		if x1.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x1.AuxInt != i-1 {
-			break
-		}
-		if x1.Aux != s {
-			break
-		}
-		if p != x1.Args[0] {
-			break
-		}
-		if idx != x1.Args[1] {
-			break
-		}
-		if mem != x1.Args[2] {
-			break
-		}
-		s1 := v.Args[1]
-		if s1.Op != OpS390XSLWconst {
-			break
-		}
-		if s1.AuxInt != 24 {
-			break
-		}
-		x2 := s1.Args[0]
-		if x2.Op != OpS390XMOVBZloadidx {
-			break
-		}
-		if x2.AuxInt != i-2 {
-			break
-		}
-		if x2.Aux != s {
-			break
-		}
-		if p != x2.Args[0] {
-			break
-		}
-		if idx != x2.Args[1] {
-			break
-		}
-		if mem != x2.Args[2] {
-			break
-		}
-		if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) {
-			break
-		}
-		b = mergePoint(b, x0, x1, x2)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWZloadidx, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v0.AuxInt = i - 2
-		v0.Aux = s
-		v0.AddArg(p)
-		v0.AddArg(idx)
-		v0.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XORWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORWconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORWconst [c] _)
-	// cond: int32(c)==-1
-	// result: (MOVDconst [-1])
-	for {
-		c := v.AuxInt
-		if !(int32(c) == -1) {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORWconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c | d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ORconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ORconst [-1] _)
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		if v.AuxInt != -1 {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (ORconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c|d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c | d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSLD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLD x (MOVDconst [c]))
-	// cond:
-	// result: (SLDconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSLDconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SLD x (ANDconst [63] y))
-	// cond:
-	// result: (SLD x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XANDconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSLD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSLW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SLW x (MOVDconst [c]))
-	// cond:
-	// result: (SLWconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSLWconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SLW x (ANDWconst [63] y))
-	// cond:
-	// result: (SLW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XANDWconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSLW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSRAD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAD x (MOVDconst [c]))
-	// cond:
-	// result: (SRADconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSRADconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRAD x (ANDconst [63] y))
-	// cond:
-	// result: (SRAD x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XANDconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSRAD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSRADconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRADconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSRAW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAW x (MOVDconst [c]))
-	// cond:
-	// result: (SRAWconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSRAWconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRAW x (ANDWconst [63] y))
-	// cond:
-	// result: (SRAW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XANDWconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSRAW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSRAWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRAWconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [d>>uint64(c)])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = d >> uint64(c)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSRD(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRD x (MOVDconst [c]))
-	// cond:
-	// result: (SRDconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSRDconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRD x (ANDconst [63] y))
-	// cond:
-	// result: (SRD x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XANDconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSRD)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSRW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SRW x (MOVDconst [c]))
-	// cond:
-	// result: (SRWconst [c&63] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSRWconst)
-		v.AuxInt = c & 63
-		v.AddArg(x)
-		return true
-	}
-	// match: (SRW x (ANDWconst [63] y))
-	// cond:
-	// result: (SRW x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XANDWconst {
-			break
-		}
-		if v_1.AuxInt != 63 {
-			break
-		}
-		y := v_1.Args[0]
-		v.reset(OpS390XSRW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSTM2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
-	// cond: x.Uses == 1   && is20Bit(i-8)   && clobber(x)
-	// result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w2 := v.Args[1]
-		w3 := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XSTM2 {
-			break
-		}
-		if x.AuxInt != i-8 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		w1 := x.Args[2]
-		mem := x.Args[3]
-		if !(x.Uses == 1 && is20Bit(i-8) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTM4)
-		v.AuxInt = i - 8
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(w2)
-		v.AddArg(w3)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (STM2 [i] {s} p (SRDconst [32] x) x mem)
-	// cond:
-	// result: (MOVDstore [i] {s} p x mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XSRDconst {
-			break
-		}
-		if v_1.AuxInt != 32 {
-			break
-		}
-		x := v_1.Args[0]
-		if x != v.Args[2] {
-			break
-		}
-		mem := v.Args[3]
-		v.reset(OpS390XMOVDstore)
-		v.AuxInt = i
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(x)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSTMG2(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
-	// cond: x.Uses == 1   && is20Bit(i-16)   && clobber(x)
-	// result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
-	for {
-		i := v.AuxInt
-		s := v.Aux
-		p := v.Args[0]
-		w2 := v.Args[1]
-		w3 := v.Args[2]
-		x := v.Args[3]
-		if x.Op != OpS390XSTMG2 {
-			break
-		}
-		if x.AuxInt != i-16 {
-			break
-		}
-		if x.Aux != s {
-			break
-		}
-		if p != x.Args[0] {
-			break
-		}
-		w0 := x.Args[1]
-		w1 := x.Args[2]
-		mem := x.Args[3]
-		if !(x.Uses == 1 && is20Bit(i-16) && clobber(x)) {
-			break
-		}
-		v.reset(OpS390XSTMG4)
-		v.AuxInt = i - 16
-		v.Aux = s
-		v.AddArg(p)
-		v.AddArg(w0)
-		v.AddArg(w1)
-		v.AddArg(w2)
-		v.AddArg(w3)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSUB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUB x (MOVDconst [c]))
-	// cond: is32Bit(c)
-	// result: (SUBconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XSUBconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUB (MOVDconst [c]) x)
-	// cond: is32Bit(c)
-	// result: (NEG (SUBconst <v.Type> x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(is32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XNEG)
-		v0 := b.NewValue0(v.Line, OpS390XSUBconst, v.Type)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUB x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SUB <t> x g:(MOVDload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (SUBload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XSUBload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSUBEWcarrymask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBEWcarrymask (FlagEQ))
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SUBEWcarrymask (FlagLT))
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SUBEWcarrymask (FlagGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSUBEcarrymask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBEcarrymask (FlagEQ))
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XFlagEQ {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SUBEcarrymask (FlagLT))
-	// cond:
-	// result: (MOVDconst [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XFlagLT {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (SUBEcarrymask (FlagGT))
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XFlagGT {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSUBW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBW x (MOVDconst [c]))
-	// cond:
-	// result: (SUBWconst x [c])
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XSUBWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBW (MOVDconst [c]) x)
-	// cond:
-	// result: (NEGW (SUBWconst <v.Type> x [c]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XNEGW)
-		v0 := b.NewValue0(v.Line, OpS390XSUBWconst, v.Type)
-		v0.AuxInt = c
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (SUBW x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (SUBW <t> x g:(MOVWload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (SUBWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XSUBWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (SUBW <t> x g:(MOVWZload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (SUBWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XSUBWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XSUBWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBWconst [c] x)
-	// cond: int32(c) == 0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBWconst [c] x)
-	// cond:
-	// result: (ADDWconst [int64(int32(-c))] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		v.reset(OpS390XADDWconst)
-		v.AuxInt = int64(int32(-c))
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpS390XSUBconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SUBconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst [c] x)
-	// cond: c != -(1<<31)
-	// result: (ADDconst [-c] x)
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(c != -(1 << 31)) {
-			break
-		}
-		v.reset(OpS390XADDconst)
-		v.AuxInt = -c
-		v.AddArg(x)
-		return true
-	}
-	// match: (SUBconst (MOVDconst [d]) [c])
-	// cond:
-	// result: (MOVDconst [d-c])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = d - c
-		return true
-	}
-	// match: (SUBconst (SUBconst x [d]) [c])
-	// cond: is32Bit(-c-d)
-	// result: (ADDconst [-c-d] x)
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XSUBconst {
-			break
-		}
-		d := v_0.AuxInt
-		x := v_0.Args[0]
-		if !(is32Bit(-c - d)) {
-			break
-		}
-		v.reset(OpS390XADDconst)
-		v.AuxInt = -c - d
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XXOR(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XOR x (MOVDconst [c]))
-	// cond: isU32Bit(c)
-	// result: (XORconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XXORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR (MOVDconst [c]) x)
-	// cond: isU32Bit(c)
-	// result: (XORconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		if !(isU32Bit(c)) {
-			break
-		}
-		v.reset(OpS390XXORconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XOR (MOVDconst [c]) (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c^d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	// match: (XOR x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (XOR <t> x g:(MOVDload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (XORload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XXORload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (XOR <t> g:(MOVDload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (XORload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVDload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XXORload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XXORW(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORW x (MOVDconst [c]))
-	// cond:
-	// result: (XORWconst [c] x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpS390XXORWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORW (MOVDconst [c]) x)
-	// cond:
-	// result: (XORWconst [c] x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		c := v_0.AuxInt
-		x := v.Args[1]
-		v.reset(OpS390XXORWconst)
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORW x x)
-	// cond:
-	// result: (MOVDconst [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (XORW <t> x g:(MOVWload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (XORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XXORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (XORW <t> g:(MOVWload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (XORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XXORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (XORW <t> x g:(MOVWZload [off] {sym} ptr mem))
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (XORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		g := v.Args[1]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XXORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (XORW <t> g:(MOVWZload [off] {sym} ptr mem) x)
-	// cond: g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)
-	// result: (XORWload <t> [off] {sym} x ptr mem)
-	for {
-		t := v.Type
-		g := v.Args[0]
-		if g.Op != OpS390XMOVWZload {
-			break
-		}
-		off := g.AuxInt
-		sym := g.Aux
-		ptr := g.Args[0]
-		mem := g.Args[1]
-		x := v.Args[1]
-		if !(g.Uses == 1 && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g) && clobber(g)) {
-			break
-		}
-		v.reset(OpS390XXORWload)
-		v.Type = t
-		v.AuxInt = off
-		v.Aux = sym
-		v.AddArg(x)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XXORWconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORWconst [c] x)
-	// cond: int32(c)==0
-	// result: x
-	for {
-		c := v.AuxInt
-		x := v.Args[0]
-		if !(int32(c) == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORWconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpS390XXORconst(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (XORconst [0] x)
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (XORconst [c] (MOVDconst [d]))
-	// cond:
-	// result: (MOVDconst [c^d])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XMOVDconst {
-			break
-		}
-		d := v_0.AuxInt
-		v.reset(OpS390XMOVDconst)
-		v.AuxInt = c ^ d
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpSelect0(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select0 <t> (AddTupleFirst32 tuple val))
-	// cond:
-	// result: (ADDW val (Select0 <t> tuple))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XAddTupleFirst32 {
-			break
-		}
-		tuple := v_0.Args[0]
-		val := v_0.Args[1]
-		v.reset(OpS390XADDW)
-		v.AddArg(val)
-		v0 := b.NewValue0(v.Line, OpSelect0, t)
-		v0.AddArg(tuple)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Select0 <t> (AddTupleFirst64 tuple val))
-	// cond:
-	// result: (ADD val (Select0 <t> tuple))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XAddTupleFirst64 {
-			break
-		}
-		tuple := v_0.Args[0]
-		val := v_0.Args[1]
-		v.reset(OpS390XADD)
-		v.AddArg(val)
-		v0 := b.NewValue0(v.Line, OpSelect0, t)
-		v0.AddArg(tuple)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpSelect1(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Select1     (AddTupleFirst32 tuple _  ))
-	// cond:
-	// result: (Select1 tuple)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XAddTupleFirst32 {
-			break
-		}
-		tuple := v_0.Args[0]
-		v.reset(OpSelect1)
-		v.AddArg(tuple)
-		return true
-	}
-	// match: (Select1     (AddTupleFirst64 tuple _  ))
-	// cond:
-	// result: (Select1 tuple)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpS390XAddTupleFirst64 {
-			break
-		}
-		tuple := v_0.Args[0]
-		v.reset(OpSelect1)
-		v.AddArg(tuple)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 x)
-	// cond:
-	// result: (MOVHreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVHreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 x)
-	// cond:
-	// result: (MOVWreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVWreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64  x)
-	// cond:
-	// result: (MOVBreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVBreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask <t> x)
-	// cond:
-	// result: (XOR (MOVDconst [-1]) (SRADconst <t> (SUBconst <t> x [1]) [63]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpS390XXOR)
-		v0 := b.NewValue0(v.Line, OpS390XMOVDconst, config.fe.TypeUInt64())
-		v0.AuxInt = -1
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpS390XSRADconst, t)
-		v1.AuxInt = 63
-		v2 := b.NewValue0(v.Line, OpS390XSUBconst, t)
-		v2.AuxInt = 1
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValueS390X_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt x)
-	// cond:
-	// result: (FSQRT x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XFSQRT)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpStaticCall(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StaticCall [argwid] {target} mem)
-	// cond:
-	// result: (CALLstatic [argwid] {target} mem)
-	for {
-		argwid := v.AuxInt
-		target := v.Aux
-		mem := v.Args[0]
-		v.reset(OpS390XCALLstatic)
-		v.AuxInt = argwid
-		v.Aux = target
-		v.AddArg(mem)
-		return true
-	}
-}
-func rewriteValueS390X_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [8] ptr val mem)
-	// cond: is64BitFloat(val.Type)
-	// result: (FMOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is64BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpS390XFMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond: is32BitFloat(val.Type)
-	// result: (FMOVSstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		if !(is32BitFloat(val.Type)) {
-			break
-		}
-		v.reset(OpS390XFMOVSstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [8] ptr val mem)
-	// cond:
-	// result: (MOVDstore ptr val mem)
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVDstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [4] ptr val mem)
-	// cond:
-	// result: (MOVWstore ptr val mem)
-	for {
-		if v.AuxInt != 4 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVWstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [2] ptr val mem)
-	// cond:
-	// result: (MOVHstore ptr val mem)
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVHstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [1] ptr val mem)
-	// cond:
-	// result: (MOVBstore ptr val mem)
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		ptr := v.Args[0]
-		val := v.Args[1]
-		mem := v.Args[2]
-		v.reset(OpS390XMOVBstore)
-		v.AddArg(ptr)
-		v.AddArg(val)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16  x y)
-	// cond:
-	// result: (SUBW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSUBW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32  x y)
-	// cond:
-	// result: (SUBW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSUBW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F x y)
-	// cond:
-	// result: (FSUBS x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFSUBS)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64  x y)
-	// cond:
-	// result: (SUB  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F x y)
-	// cond:
-	// result: (FSUB x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XFSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8   x y)
-	// cond:
-	// result: (SUBW  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSUBW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpSubPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SubPtr x y)
-	// cond:
-	// result: (SUB  x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XSUB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8  x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x y)
-	// cond:
-	// result: (XORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XXORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x y)
-	// cond:
-	// result: (XORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XXORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x y)
-	// cond:
-	// result: (XOR x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XXOR)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8  x y)
-	// cond:
-	// result: (XORW x y)
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpS390XXORW)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-}
-func rewriteValueS390X_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero [s] _ mem)
-	// cond: SizeAndAlign(s).Size() == 0
-	// result: mem
-	for {
-		s := v.AuxInt
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 0) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 1
-	// result: (MOVBstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 1) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 2
-	// result: (MOVHstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 2) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 4
-	// result: (MOVWstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 4) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 8
-	// result: (MOVDstoreconst [0] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 8) {
-			break
-		}
-		v.reset(OpS390XMOVDstoreconst)
-		v.AuxInt = 0
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 3
-	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr 		(MOVHstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 3) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreconst)
-		v.AuxInt = makeValAndOff(0, 2)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpS390XMOVHstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 5
-	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr 		(MOVWstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 5) {
-			break
-		}
-		v.reset(OpS390XMOVBstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 6
-	// result: (MOVHstoreconst [makeValAndOff(0,4)] destptr 		(MOVWstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 6) {
-			break
-		}
-		v.reset(OpS390XMOVHstoreconst)
-		v.AuxInt = makeValAndOff(0, 4)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() == 7
-	// result: (MOVWstoreconst [makeValAndOff(0,3)] destptr 		(MOVWstoreconst [0] destptr mem))
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() == 7) {
-			break
-		}
-		v.reset(OpS390XMOVWstoreconst)
-		v.AuxInt = makeValAndOff(0, 3)
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpS390XMOVWstoreconst, TypeMem)
-		v0.AuxInt = 0
-		v0.AddArg(destptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024
-	// result: (CLEAR [makeValAndOff(SizeAndAlign(s).Size(), 0)] destptr mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() > 0 && SizeAndAlign(s).Size() <= 1024) {
-			break
-		}
-		v.reset(OpS390XCLEAR)
-		v.AuxInt = makeValAndOff(SizeAndAlign(s).Size(), 0)
-		v.AddArg(destptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Zero [s] destptr mem)
-	// cond: SizeAndAlign(s).Size() > 1024
-	// result: (LoweredZero [SizeAndAlign(s).Size()%256] destptr (ADDconst <destptr.Type> destptr [(SizeAndAlign(s).Size()/256)*256]) mem)
-	for {
-		s := v.AuxInt
-		destptr := v.Args[0]
-		mem := v.Args[1]
-		if !(SizeAndAlign(s).Size() > 1024) {
-			break
-		}
-		v.reset(OpS390XLoweredZero)
-		v.AuxInt = SizeAndAlign(s).Size() % 256
-		v.AddArg(destptr)
-		v0 := b.NewValue0(v.Line, OpS390XADDconst, destptr.Type)
-		v0.AuxInt = (SizeAndAlign(s).Size() / 256) * 256
-		v0.AddArg(destptr)
-		v.AddArg(v0)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValueS390X_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 x)
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 x)
-	// cond:
-	// result: (MOVHZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVHZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 x)
-	// cond:
-	// result: (MOVWZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVWZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16  x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32  x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValueS390X_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64  x)
-	// cond:
-	// result: (MOVBZreg x)
-	for {
-		x := v.Args[0]
-		v.reset(OpS390XMOVBZreg)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteBlockS390X(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockS390XEQ:
-		// match: (EQ (InvertFlags cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (EQ (FlagLT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (EQ (FlagGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockS390XGE:
-		// match: (GE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XLE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GE (FlagLT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GE (FlagGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockS390XGT:
-		// match: (GT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XLT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (GT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagLT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (GT (FlagGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockIf:
-		// match: (If (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDLT {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XLT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDLE {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XLE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDGT {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDGE {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDEQ {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDNE {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (GTF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDGTnoinv {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGTF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no)
-		// cond:
-		// result: (GEF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XMOVDGEnoinv {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0.AuxInt != 0 {
-				break
-			}
-			v_1 := v.Args[1]
-			if v_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_1.AuxInt != 1 {
-				break
-			}
-			cmp := v.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGEF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If cond yes no)
-		// cond:
-		// result: (NE (CMPWconst [0] (MOVBZreg cond)) yes no)
-		for {
-			v := b.Control
-			_ = v
-			cond := b.Control
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XNE
-			v0 := b.NewValue0(v.Line, OpS390XCMPWconst, TypeFlags)
-			v0.AuxInt = 0
-			v1 := b.NewValue0(v.Line, OpS390XMOVBZreg, config.fe.TypeUInt64())
-			v1.AddArg(cond)
-			v0.AddArg(v1)
-			b.SetControl(v0)
-			_ = yes
-			_ = no
-			return true
-		}
-	case BlockS390XLE:
-		// match: (LE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagLT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LE (FlagGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockS390XLT:
-		// match: (LT (InvertFlags cmp) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (LT (FlagLT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (LT (FlagGT) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	case BlockS390XNE:
-		// match: (NE (CMPWconst [0] (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (LT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDLT {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XLT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (LE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDLE {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XLE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (GT cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDGT {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGT
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (GE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDGE {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (EQ cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDEQ {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XEQ
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDNE {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (GTF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDGTnoinv {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGTF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (CMPWconst [0] (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no)
-		// cond:
-		// result: (GEF cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XCMPWconst {
-				break
-			}
-			if v.AuxInt != 0 {
-				break
-			}
-			v_0 := v.Args[0]
-			if v_0.Op != OpS390XMOVDGEnoinv {
-				break
-			}
-			v_0_0 := v_0.Args[0]
-			if v_0_0.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_0.AuxInt != 0 {
-				break
-			}
-			v_0_1 := v_0.Args[1]
-			if v_0_1.Op != OpS390XMOVDconst {
-				break
-			}
-			if v_0_1.AuxInt != 1 {
-				break
-			}
-			cmp := v_0.Args[2]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XGEF
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (InvertFlags cmp) yes no)
-		// cond:
-		// result: (NE cmp yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XInvertFlags {
-				break
-			}
-			cmp := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockS390XNE
-			b.SetControl(cmp)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagEQ) yes no)
-		// cond:
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagEQ {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (NE (FlagLT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagLT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (NE (FlagGT) yes no)
-		// cond:
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpS390XFlagGT {
-				break
-			}
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite_test.go
deleted file mode 100644
index 9a766a3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewrite_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewrite_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewrite_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-// TestNlzNto tests nlz/nto of the same number which is used in some of
-// the rewrite rules.
-func TestNlzNto(t *testing.T) {
-	// construct the bit pattern 000...111, nlz(x) + nto(x) = 64
-	var x int64
-	for i := int64(0); i < 64; i++ {
-		if got := nto(x); got != i {
-			t.Errorf("expected nto(0x%X) = %d, got %d", x, i, got)
-		}
-		if got := nlz(x); got != 64-i {
-			t.Errorf("expected nlz(0x%X) = %d, got %d", x, 64-i, got)
-		}
-		x = (x << 1) | 1
-	}
-
-	x = 0
-	// construct the bit pattern 000...111, with bit 33 set as well.
-	for i := int64(0); i < 64; i++ {
-		tx := x | (1 << 32)
-		// nto should be the number of bits we've shifted on, with an extra bit
-		// at iter 32
-		ntoExp := i
-		if ntoExp == 32 {
-			ntoExp = 33
-		}
-		if got := nto(tx); got != ntoExp {
-			t.Errorf("expected nto(0x%X) = %d, got %d", tx, ntoExp, got)
-		}
-
-		// since bit 33 is set, nlz can be no greater than 31
-		nlzExp := 64 - i
-		if nlzExp > 31 {
-			nlzExp = 31
-		}
-		if got := nlz(tx); got != nlzExp {
-			t.Errorf("expected nlz(0x%X) = %d, got %d", tx, nlzExp, got)
-		}
-		x = (x << 1) | 1
-	}
-
-}
-
-func TestNlz(t *testing.T) {
-	var nlzTests = []struct {
-		v   int64
-		exp int64
-	}{{0x00, 64},
-		{0x01, 63},
-		{0x0F, 60},
-		{0xFF, 56},
-		{0xffffFFFF, 32},
-		{-0x01, 0}}
-
-	for _, tc := range nlzTests {
-		if got := nlz(tc.v); got != tc.exp {
-			t.Errorf("expected nlz(0x%X) = %d, got %d", tc.v, tc.exp, got)
-		}
-	}
-}
-
-func TestNto(t *testing.T) {
-	var ntoTests = []struct {
-		v   int64
-		exp int64
-	}{{0x00, 0},
-		{0x01, 1},
-		{0x0F, 4},
-		{0xFF, 8},
-		{0xffffFFFF, 32},
-		{-0x01, 64}}
-
-	for _, tc := range ntoTests {
-		if got := nto(tc.v); got != tc.exp {
-			t.Errorf("expected nto(0x%X) = %d, got %d", tc.v, tc.exp, got)
-		}
-	}
-}
-
-func TestLog2(t *testing.T) {
-	var log2Tests = []struct {
-		v   int64
-		exp int64
-	}{{0, -1}, // nlz expects log2(0) == -1
-		{1, 0},
-		{2, 1},
-		{4, 2},
-		{7, 2},
-		{8, 3},
-		{9, 3},
-		{1024, 10}}
-
-	for _, tc := range log2Tests {
-		if got := log2(tc.v); got != tc.exp {
-			t.Errorf("expected log2(%d) = %d, got %d", tc.v, tc.exp, got)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritedec.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritedec.go
deleted file mode 100644
index 17ed90d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritedec.go
+++ /dev/null
@@ -1,510 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritedec.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritedec.go:1
-// autogenerated from gen/dec.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuedec(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpComplexImag:
-		return rewriteValuedec_OpComplexImag(v, config)
-	case OpComplexReal:
-		return rewriteValuedec_OpComplexReal(v, config)
-	case OpIData:
-		return rewriteValuedec_OpIData(v, config)
-	case OpITab:
-		return rewriteValuedec_OpITab(v, config)
-	case OpLoad:
-		return rewriteValuedec_OpLoad(v, config)
-	case OpSliceCap:
-		return rewriteValuedec_OpSliceCap(v, config)
-	case OpSliceLen:
-		return rewriteValuedec_OpSliceLen(v, config)
-	case OpSlicePtr:
-		return rewriteValuedec_OpSlicePtr(v, config)
-	case OpStore:
-		return rewriteValuedec_OpStore(v, config)
-	case OpStringLen:
-		return rewriteValuedec_OpStringLen(v, config)
-	case OpStringPtr:
-		return rewriteValuedec_OpStringPtr(v, config)
-	}
-	return false
-}
-func rewriteValuedec_OpComplexImag(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ComplexImag (ComplexMake _ imag ))
-	// cond:
-	// result: imag
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpComplexMake {
-			break
-		}
-		imag := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = imag.Type
-		v.AddArg(imag)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpComplexReal(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ComplexReal (ComplexMake real _  ))
-	// cond:
-	// result: real
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpComplexMake {
-			break
-		}
-		real := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = real.Type
-		v.AddArg(real)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpIData(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IData (IMake _ data))
-	// cond:
-	// result: data
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpIMake {
-			break
-		}
-		data := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = data.Type
-		v.AddArg(data)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpITab(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ITab (IMake itab _))
-	// cond:
-	// result: itab
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpIMake {
-			break
-		}
-		itab := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = itab.Type
-		v.AddArg(itab)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: t.IsComplex() && t.Size() == 8
-	// result: (ComplexMake     (Load <config.fe.TypeFloat32()> ptr mem)     (Load <config.fe.TypeFloat32()>       (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr)       mem)     )
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsComplex() && t.Size() == 8) {
-			break
-		}
-		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat32())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat32())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat32().PtrTo())
-		v2.AuxInt = 4
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsComplex() && t.Size() == 16
-	// result: (ComplexMake     (Load <config.fe.TypeFloat64()> ptr mem)     (Load <config.fe.TypeFloat64()>       (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr)       mem)     )
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsComplex() && t.Size() == 16) {
-			break
-		}
-		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat64())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat64())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat64().PtrTo())
-		v2.AuxInt = 8
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsString()
-	// result: (StringMake     (Load <config.fe.TypeBytePtr()> ptr mem)     (Load <config.fe.TypeInt()>       (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)       mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsString()) {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
-		v2.AuxInt = config.PtrSize
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsSlice()
-	// result: (SliceMake     (Load <t.ElemType().PtrTo()> ptr mem)     (Load <config.fe.TypeInt()>       (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)       mem)     (Load <config.fe.TypeInt()>       (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)       mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsSlice()) {
-			break
-		}
-		v.reset(OpSliceMake)
-		v0 := b.NewValue0(v.Line, OpLoad, t.ElemType().PtrTo())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
-		v2.AuxInt = config.PtrSize
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
-		v4 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
-		v4.AuxInt = 2 * config.PtrSize
-		v4.AddArg(ptr)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v.AddArg(v3)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsInterface()
-	// result: (IMake     (Load <config.fe.TypeBytePtr()> ptr mem)     (Load <config.fe.TypeBytePtr()>       (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)       mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsInterface()) {
-			break
-		}
-		v.reset(OpIMake)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
-		v2.AuxInt = config.PtrSize
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpSliceCap(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SliceCap (SliceMake _ _ cap))
-	// cond:
-	// result: cap
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		cap := v_0.Args[2]
-		v.reset(OpCopy)
-		v.Type = cap.Type
-		v.AddArg(cap)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpSliceLen(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SliceLen (SliceMake _ len _))
-	// cond:
-	// result: len
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		len := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = len.Type
-		v.AddArg(len)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpSlicePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SlicePtr (SliceMake ptr _ _ ))
-	// cond:
-	// result: ptr
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		ptr := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = ptr.Type
-		v.AddArg(ptr)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [8] dst (ComplexMake real imag) mem)
-	// cond:
-	// result: (Store [4]     (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst)     imag     (Store [4] dst real mem))
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpComplexMake {
-			break
-		}
-		real := v_1.Args[0]
-		imag := v_1.Args[1]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = 4
-		v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat32().PtrTo())
-		v0.AuxInt = 4
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(imag)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v1.AddArg(real)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store [16] dst (ComplexMake real imag) mem)
-	// cond:
-	// result: (Store [8]     (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst)     imag     (Store [8] dst real mem))
-	for {
-		if v.AuxInt != 16 {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpComplexMake {
-			break
-		}
-		real := v_1.Args[0]
-		imag := v_1.Args[1]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = 8
-		v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat64().PtrTo())
-		v0.AuxInt = 8
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(imag)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = 8
-		v1.AddArg(dst)
-		v1.AddArg(real)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store [2*config.PtrSize] dst (StringMake ptr len) mem)
-	// cond:
-	// result: (Store [config.PtrSize]     (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)     len     (Store [config.PtrSize] dst ptr mem))
-	for {
-		if v.AuxInt != 2*config.PtrSize {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStringMake {
-			break
-		}
-		ptr := v_1.Args[0]
-		len := v_1.Args[1]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = config.PtrSize
-		v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
-		v0.AuxInt = config.PtrSize
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(len)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = config.PtrSize
-		v1.AddArg(dst)
-		v1.AddArg(ptr)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store [3*config.PtrSize] dst (SliceMake ptr len cap) mem)
-	// cond:
-	// result: (Store [config.PtrSize]     (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)     cap     (Store [config.PtrSize]       (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)       len       (Store [config.PtrSize] dst ptr mem)))
-	for {
-		if v.AuxInt != 3*config.PtrSize {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpSliceMake {
-			break
-		}
-		ptr := v_1.Args[0]
-		len := v_1.Args[1]
-		cap := v_1.Args[2]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = config.PtrSize
-		v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
-		v0.AuxInt = 2 * config.PtrSize
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(cap)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = config.PtrSize
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
-		v2.AuxInt = config.PtrSize
-		v2.AddArg(dst)
-		v1.AddArg(v2)
-		v1.AddArg(len)
-		v3 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v3.AuxInt = config.PtrSize
-		v3.AddArg(dst)
-		v3.AddArg(ptr)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store [2*config.PtrSize] dst (IMake itab data) mem)
-	// cond:
-	// result: (Store [config.PtrSize]     (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)     data     (Store [config.PtrSize] dst itab mem))
-	for {
-		if v.AuxInt != 2*config.PtrSize {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpIMake {
-			break
-		}
-		itab := v_1.Args[0]
-		data := v_1.Args[1]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = config.PtrSize
-		v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
-		v0.AuxInt = config.PtrSize
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(data)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = config.PtrSize
-		v1.AddArg(dst)
-		v1.AddArg(itab)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpStringLen(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StringLen (StringMake _ len))
-	// cond:
-	// result: len
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpStringMake {
-			break
-		}
-		len := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = len.Type
-		v.AddArg(len)
-		return true
-	}
-	return false
-}
-func rewriteValuedec_OpStringPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StringPtr (StringMake ptr _))
-	// cond:
-	// result: ptr
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpStringMake {
-			break
-		}
-		ptr := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = ptr.Type
-		v.AddArg(ptr)
-		return true
-	}
-	return false
-}
-func rewriteBlockdec(b *Block, config *Config) bool {
-	switch b.Kind {
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritedec64.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritedec64.go
deleted file mode 100644
index 819d0dd..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritedec64.go
+++ /dev/null
@@ -1,2723 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritedec64.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritedec64.go:1
-// autogenerated from gen/dec64.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuedec64(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAdd64:
-		return rewriteValuedec64_OpAdd64(v, config)
-	case OpAnd64:
-		return rewriteValuedec64_OpAnd64(v, config)
-	case OpArg:
-		return rewriteValuedec64_OpArg(v, config)
-	case OpBswap64:
-		return rewriteValuedec64_OpBswap64(v, config)
-	case OpCom64:
-		return rewriteValuedec64_OpCom64(v, config)
-	case OpConst64:
-		return rewriteValuedec64_OpConst64(v, config)
-	case OpCtz64:
-		return rewriteValuedec64_OpCtz64(v, config)
-	case OpEq64:
-		return rewriteValuedec64_OpEq64(v, config)
-	case OpGeq64:
-		return rewriteValuedec64_OpGeq64(v, config)
-	case OpGeq64U:
-		return rewriteValuedec64_OpGeq64U(v, config)
-	case OpGreater64:
-		return rewriteValuedec64_OpGreater64(v, config)
-	case OpGreater64U:
-		return rewriteValuedec64_OpGreater64U(v, config)
-	case OpInt64Hi:
-		return rewriteValuedec64_OpInt64Hi(v, config)
-	case OpInt64Lo:
-		return rewriteValuedec64_OpInt64Lo(v, config)
-	case OpLeq64:
-		return rewriteValuedec64_OpLeq64(v, config)
-	case OpLeq64U:
-		return rewriteValuedec64_OpLeq64U(v, config)
-	case OpLess64:
-		return rewriteValuedec64_OpLess64(v, config)
-	case OpLess64U:
-		return rewriteValuedec64_OpLess64U(v, config)
-	case OpLoad:
-		return rewriteValuedec64_OpLoad(v, config)
-	case OpLrot64:
-		return rewriteValuedec64_OpLrot64(v, config)
-	case OpLsh16x64:
-		return rewriteValuedec64_OpLsh16x64(v, config)
-	case OpLsh32x64:
-		return rewriteValuedec64_OpLsh32x64(v, config)
-	case OpLsh64x16:
-		return rewriteValuedec64_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValuedec64_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValuedec64_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValuedec64_OpLsh64x8(v, config)
-	case OpLsh8x64:
-		return rewriteValuedec64_OpLsh8x64(v, config)
-	case OpMul64:
-		return rewriteValuedec64_OpMul64(v, config)
-	case OpNeg64:
-		return rewriteValuedec64_OpNeg64(v, config)
-	case OpNeq64:
-		return rewriteValuedec64_OpNeq64(v, config)
-	case OpOr64:
-		return rewriteValuedec64_OpOr64(v, config)
-	case OpRsh16Ux64:
-		return rewriteValuedec64_OpRsh16Ux64(v, config)
-	case OpRsh16x64:
-		return rewriteValuedec64_OpRsh16x64(v, config)
-	case OpRsh32Ux64:
-		return rewriteValuedec64_OpRsh32Ux64(v, config)
-	case OpRsh32x64:
-		return rewriteValuedec64_OpRsh32x64(v, config)
-	case OpRsh64Ux16:
-		return rewriteValuedec64_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValuedec64_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValuedec64_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValuedec64_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValuedec64_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValuedec64_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValuedec64_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValuedec64_OpRsh64x8(v, config)
-	case OpRsh8Ux64:
-		return rewriteValuedec64_OpRsh8Ux64(v, config)
-	case OpRsh8x64:
-		return rewriteValuedec64_OpRsh8x64(v, config)
-	case OpSignExt16to64:
-		return rewriteValuedec64_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValuedec64_OpSignExt32to64(v, config)
-	case OpSignExt8to64:
-		return rewriteValuedec64_OpSignExt8to64(v, config)
-	case OpStore:
-		return rewriteValuedec64_OpStore(v, config)
-	case OpSub64:
-		return rewriteValuedec64_OpSub64(v, config)
-	case OpTrunc64to16:
-		return rewriteValuedec64_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValuedec64_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValuedec64_OpTrunc64to8(v, config)
-	case OpXor64:
-		return rewriteValuedec64_OpXor64(v, config)
-	case OpZeroExt16to64:
-		return rewriteValuedec64_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValuedec64_OpZeroExt32to64(v, config)
-	case OpZeroExt8to64:
-		return rewriteValuedec64_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValuedec64_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64 x y)
-	// cond:
-	// result: (Int64Make 		(Add32withcarry <config.fe.TypeInt32()> 			(Int64Hi x) 			(Int64Hi y) 			(Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) 		(Select0 <config.fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpAdd32withcarry, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
-		v4 := b.NewValue0(v.Line, OpAdd32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v7 := b.NewValue0(v.Line, OpSelect0, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpAdd32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(x)
-		v8.AddArg(v9)
-		v10 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v10.AddArg(y)
-		v8.AddArg(v10)
-		v7.AddArg(v8)
-		v.AddArg(v7)
-		return true
-	}
-}
-func rewriteValuedec64_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x y)
-	// cond:
-	// result: (Int64Make 		(And32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) 		(And32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned()
-	// result: (Int64Make     (Arg <config.fe.TypeInt32()> {n} [off+4])     (Arg <config.fe.TypeUInt32()> {n} [off]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
-		v0.AuxInt = off + 4
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-		v1.AuxInt = off
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned()
-	// result: (Int64Make     (Arg <config.fe.TypeUInt32()> {n} [off+4])     (Arg <config.fe.TypeUInt32()> {n} [off]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-		v0.AuxInt = off + 4
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-		v1.AuxInt = off
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned()
-	// result: (Int64Make     (Arg <config.fe.TypeInt32()> {n} [off])     (Arg <config.fe.TypeUInt32()> {n} [off+4]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-		v1.AuxInt = off + 4
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned()
-	// result: (Int64Make     (Arg <config.fe.TypeUInt32()> {n} [off])     (Arg <config.fe.TypeUInt32()> {n} [off+4]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
-		v1.AuxInt = off + 4
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpBswap64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Bswap64 x)
-	// cond:
-	// result: (Int64Make 		(Bswap32 <config.fe.TypeUInt32()> (Int64Lo x)) 		(Bswap32 <config.fe.TypeUInt32()> (Int64Hi x)))
-	for {
-		x := v.Args[0]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpBswap32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpBswap32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValuedec64_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 x)
-	// cond:
-	// result: (Int64Make 		(Com32 <config.fe.TypeUInt32()> (Int64Hi x)) 		(Com32 <config.fe.TypeUInt32()> (Int64Lo x)))
-	for {
-		x := v.Args[0]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpCom32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpCom32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-}
-func rewriteValuedec64_OpConst64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Const64 <t> [c])
-	// cond: t.IsSigned()
-	// result: (Int64Make (Const32 <config.fe.TypeInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
-	for {
-		t := v.Type
-		c := v.AuxInt
-		if !(t.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt32())
-		v0.AuxInt = c >> 32
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v1.AuxInt = int64(int32(c))
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Const64 <t> [c])
-	// cond: !t.IsSigned()
-	// result: (Int64Make (Const32 <config.fe.TypeUInt32()> [c>>32]) (Const32 <config.fe.TypeUInt32()> [int64(int32(c))]))
-	for {
-		t := v.Type
-		c := v.AuxInt
-		if !(!t.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v0.AuxInt = c >> 32
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v1.AuxInt = int64(int32(c))
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpCtz64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Ctz64 x)
-	// cond:
-	// result: (Int64Make 		(Const32 <config.fe.TypeUInt32()> [0]) 		(Add32 <config.fe.TypeUInt32()> 			(Ctz32 <config.fe.TypeUInt32()> (Int64Lo x)) 			(And32 <config.fe.TypeUInt32()> 				(Com32 <config.fe.TypeUInt32()> (Zeromask (Int64Lo x))) 				(Ctz32 <config.fe.TypeUInt32()> (Int64Hi x)))))
-	for {
-		x := v.Args[0]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpAdd32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpCtz32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v1.AddArg(v2)
-		v4 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
-		v5 := b.NewValue0(v.Line, OpCom32, config.fe.TypeUInt32())
-		v6 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v7 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v7.AddArg(x)
-		v6.AddArg(v7)
-		v5.AddArg(v6)
-		v4.AddArg(v5)
-		v8 := b.NewValue0(v.Line, OpCtz32, config.fe.TypeUInt32())
-		v9 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v9.AddArg(x)
-		v8.AddArg(v9)
-		v4.AddArg(v8)
-		v1.AddArg(v4)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuedec64_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64 x y)
-	// cond:
-	// result: (AndB 		(Eq32 (Int64Hi x) (Int64Hi y)) 		(Eq32 (Int64Lo x) (Int64Lo y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpAndB)
-		v0 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64 x y)
-	// cond:
-	// result: (OrB 		(Greater32 (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Geq32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpGreater32, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpGeq32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U x y)
-	// cond:
-	// result: (OrB 		(Greater32U (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Geq32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpGeq32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64 x y)
-	// cond:
-	// result: (OrB 		(Greater32 (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Greater32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpGreater32, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U x y)
-	// cond:
-	// result: (OrB 		(Greater32U (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Greater32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpGreater32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpInt64Hi(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Int64Hi (Int64Make hi _))
-	// cond:
-	// result: hi
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = hi.Type
-		v.AddArg(hi)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpInt64Lo(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Int64Lo (Int64Make _ lo))
-	// cond:
-	// result: lo
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		lo := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = lo.Type
-		v.AddArg(lo)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64 x y)
-	// cond:
-	// result: (OrB 		(Less32 (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Leq32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpLess32, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpLeq32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U x y)
-	// cond:
-	// result: (OrB 		(Less32U (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Leq32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpLeq32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64 x y)
-	// cond:
-	// result: (OrB 		(Less32 (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Less32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpLess32, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U x y)
-	// cond:
-	// result: (OrB 		(Less32U (Int64Hi x) (Int64Hi y)) 		(AndB 			(Eq32 (Int64Hi x) (Int64Hi y)) 			(Less32U (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpAndB, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpEq32, config.fe.TypeBool())
-		v5 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v7 := b.NewValue0(v.Line, OpLess32U, config.fe.TypeBool())
-		v8 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v8.AddArg(x)
-		v7.AddArg(v8)
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(y)
-		v7.AddArg(v9)
-		v3.AddArg(v7)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t> ptr mem)
-	// cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
-	// result: (Int64Make 		(Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem) 		(Load <config.fe.TypeUInt32()> ptr mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt32().PtrTo())
-		v1.AuxInt = 4
-		v1.AddArg(ptr)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
-		v2.AddArg(ptr)
-		v2.AddArg(mem)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
-	// result: (Int64Make 		(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem) 		(Load <config.fe.TypeUInt32()> ptr mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeUInt32().PtrTo())
-		v1.AuxInt = 4
-		v1.AddArg(ptr)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
-		v2.AddArg(ptr)
-		v2.AddArg(mem)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
-	// result: (Int64Make 		(Load <config.fe.TypeInt32()> ptr mem) 		(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt32())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeUInt32().PtrTo())
-		v2.AuxInt = 4
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
-	// result: (Int64Make 		(Load <config.fe.TypeUInt32()> ptr mem) 		(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeUInt32().PtrTo())
-		v2.AuxInt = 4
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lrot64 (Int64Make hi lo) [c])
-	// cond: c <= 32
-	// result: (Int64Make 		(Or32 <config.fe.TypeUInt32()> 			(Lsh32x32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [c])) 			(Rsh32Ux32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [32-c]))) 		(Or32 <config.fe.TypeUInt32()> 			(Lsh32x32 <config.fe.TypeUInt32()> lo (Const32 <config.fe.TypeUInt32()> [c])) 			(Rsh32Ux32 <config.fe.TypeUInt32()> hi (Const32 <config.fe.TypeUInt32()> [32-c]))))
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		if !(c <= 32) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v2 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v2.AuxInt = c
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v4 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v4.AuxInt = 32 - c
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v5 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v6 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v6.AddArg(lo)
-		v7 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v7.AuxInt = c
-		v6.AddArg(v7)
-		v5.AddArg(v6)
-		v8 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v8.AddArg(hi)
-		v9 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v9.AuxInt = 32 - c
-		v8.AddArg(v9)
-		v5.AddArg(v8)
-		v.AddArg(v5)
-		return true
-	}
-	// match: (Lrot64 (Int64Make hi lo) [c])
-	// cond: c > 32
-	// result: (Lrot64 (Int64Make lo hi) [c-32])
-	for {
-		c := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		if !(c > 32) {
-			break
-		}
-		v.reset(OpLrot64)
-		v.AuxInt = c - 32
-		v0 := b.NewValue0(v.Line, OpInt64Make, config.fe.TypeUInt64())
-		v0.AddArg(lo)
-		v0.AddArg(hi)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh16x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Lsh16x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpLsh16x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Lsh16x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Lsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpLsh16x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh32x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Lsh32x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpLsh32x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Lsh32x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Lsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpLsh32x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Lsh32x16 <config.fe.TypeUInt32()> hi s) 				(Rsh32Ux16 <config.fe.TypeUInt32()> 					lo 					(Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) 			(Lsh32x16 <config.fe.TypeUInt32()> 				lo 				(Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))) 		(Lsh32x16 <config.fe.TypeUInt32()> lo s))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
-		v2.AddArg(hi)
-		v2.AddArg(s)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v4 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
-		v5 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
-		v5.AuxInt = 32
-		v4.AddArg(v5)
-		v4.AddArg(s)
-		v3.AddArg(v4)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v6 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
-		v6.AddArg(lo)
-		v7 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
-		v7.AddArg(s)
-		v8 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
-		v8.AuxInt = 32
-		v7.AddArg(v8)
-		v6.AddArg(v7)
-		v0.AddArg(v6)
-		v.AddArg(v0)
-		v9 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
-		v9.AddArg(lo)
-		v9.AddArg(s)
-		v.AddArg(v9)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Lsh32x32 <config.fe.TypeUInt32()> hi s) 				(Rsh32Ux32 <config.fe.TypeUInt32()> 					lo 					(Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) 			(Lsh32x32 <config.fe.TypeUInt32()> 				lo 				(Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))) 		(Lsh32x32 <config.fe.TypeUInt32()> lo s))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v2.AddArg(hi)
-		v2.AddArg(s)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v4 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
-		v5 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v5.AuxInt = 32
-		v4.AddArg(v5)
-		v4.AddArg(s)
-		v3.AddArg(v4)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v6 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v6.AddArg(lo)
-		v7 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
-		v7.AddArg(s)
-		v8 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v8.AuxInt = 32
-		v7.AddArg(v8)
-		v6.AddArg(v7)
-		v0.AddArg(v6)
-		v.AddArg(v0)
-		v9 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v9.AddArg(lo)
-		v9.AddArg(s)
-		v.AddArg(v9)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const64 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh64x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Lsh64x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpLsh64x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Lsh64x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Lsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpLsh64x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Lsh32x8 <config.fe.TypeUInt32()> hi s) 				(Rsh32Ux8 <config.fe.TypeUInt32()> 					lo 					(Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) 			(Lsh32x8 <config.fe.TypeUInt32()> 				lo 				(Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))) 		(Lsh32x8 <config.fe.TypeUInt32()> lo s))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
-		v2.AddArg(hi)
-		v2.AddArg(s)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v4 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
-		v5 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
-		v5.AuxInt = 32
-		v4.AddArg(v5)
-		v4.AddArg(s)
-		v3.AddArg(v4)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v6 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
-		v6.AddArg(lo)
-		v7 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
-		v7.AddArg(s)
-		v8 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
-		v8.AuxInt = 32
-		v7.AddArg(v8)
-		v6.AddArg(v7)
-		v0.AddArg(v6)
-		v.AddArg(v0)
-		v9 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
-		v9.AddArg(lo)
-		v9.AddArg(s)
-		v.AddArg(v9)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh8x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Lsh8x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpLsh8x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Lsh8x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Lsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpLsh8x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64 x y)
-	// cond:
-	// result: (Int64Make 		(Add32 <config.fe.TypeUInt32()> 			(Mul32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Hi y)) 			(Add32 <config.fe.TypeUInt32()> 				(Mul32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Lo y)) 				(Select0 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) 		(Select1 <config.fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpAdd32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpMul32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v3.AddArg(y)
-		v1.AddArg(v3)
-		v0.AddArg(v1)
-		v4 := b.NewValue0(v.Line, OpAdd32, config.fe.TypeUInt32())
-		v5 := b.NewValue0(v.Line, OpMul32, config.fe.TypeUInt32())
-		v6 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v6.AddArg(x)
-		v5.AddArg(v6)
-		v7 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v7.AddArg(y)
-		v5.AddArg(v7)
-		v4.AddArg(v5)
-		v8 := b.NewValue0(v.Line, OpSelect0, config.fe.TypeUInt32())
-		v9 := b.NewValue0(v.Line, OpMul32uhilo, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v10 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v10.AddArg(x)
-		v9.AddArg(v10)
-		v11 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v11.AddArg(y)
-		v9.AddArg(v11)
-		v8.AddArg(v9)
-		v4.AddArg(v8)
-		v0.AddArg(v4)
-		v.AddArg(v0)
-		v12 := b.NewValue0(v.Line, OpSelect1, config.fe.TypeUInt32())
-		v13 := b.NewValue0(v.Line, OpMul32uhilo, MakeTuple(config.fe.TypeUInt32(), config.fe.TypeUInt32()))
-		v14 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v14.AddArg(x)
-		v13.AddArg(v14)
-		v15 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v15.AddArg(y)
-		v13.AddArg(v15)
-		v12.AddArg(v13)
-		v.AddArg(v12)
-		return true
-	}
-}
-func rewriteValuedec64_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64 <t> x)
-	// cond:
-	// result: (Sub64 (Const64 <t> [0]) x)
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v.reset(OpSub64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuedec64_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64 x y)
-	// cond:
-	// result: (OrB 		(Neq32 (Int64Hi x) (Int64Hi y)) 		(Neq32 (Int64Lo x) (Int64Lo y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpOrB)
-		v0 := b.NewValue0(v.Line, OpNeq32, config.fe.TypeBool())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpNeq32, config.fe.TypeBool())
-		v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x y)
-	// cond:
-	// result: (Int64Make 		(Or32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) 		(Or32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh16Ux64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh16Ux32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh16Ux32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh16Ux64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh16Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh16Ux32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Signmask (SignExt16to32 x))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpSignmask)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh16x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh16x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh16x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh16x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh16x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh32Ux64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh32Ux32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh32Ux32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh32Ux64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh32Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh32Ux32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Signmask x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpSignmask)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh32x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh32x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh32x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh32x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh32x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Rsh32Ux16 <config.fe.TypeUInt32()> hi s) 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Rsh32Ux16 <config.fe.TypeUInt32()> lo s) 				(Lsh32x16 <config.fe.TypeUInt32()> 					hi 					(Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) 			(Rsh32Ux16 <config.fe.TypeUInt32()> 				hi 				(Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32])))))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
-		v0.AddArg(hi)
-		v0.AddArg(s)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v3.AddArg(s)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
-		v4.AddArg(hi)
-		v5 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
-		v6 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
-		v6.AuxInt = 32
-		v5.AddArg(v6)
-		v5.AddArg(s)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v7 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
-		v7.AddArg(hi)
-		v8 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
-		v8.AddArg(s)
-		v9 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
-		v9.AuxInt = 32
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v1.AddArg(v7)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Rsh32Ux32 <config.fe.TypeUInt32()> hi s) 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Rsh32Ux32 <config.fe.TypeUInt32()> lo s) 				(Lsh32x32 <config.fe.TypeUInt32()> 					hi 					(Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) 			(Rsh32Ux32 <config.fe.TypeUInt32()> 				hi 				(Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32])))))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v0.AddArg(hi)
-		v0.AddArg(s)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v3.AddArg(s)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v4.AddArg(hi)
-		v5 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
-		v6 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v6.AuxInt = 32
-		v5.AddArg(v6)
-		v5.AddArg(s)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v7 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v7.AddArg(hi)
-		v8 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
-		v8.AddArg(s)
-		v9 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v9.AuxInt = 32
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v1.AddArg(v7)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const64 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh64Ux64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh64Ux32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh64Ux32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh64Ux64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh64Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh64Ux32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Rsh32Ux8 <config.fe.TypeUInt32()> hi s) 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Rsh32Ux8 <config.fe.TypeUInt32()> lo s) 				(Lsh32x8 <config.fe.TypeUInt32()> 					hi 					(Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) 			(Rsh32Ux8 <config.fe.TypeUInt32()> 				hi 				(Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32])))))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
-		v0.AddArg(hi)
-		v0.AddArg(s)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v3.AddArg(s)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
-		v4.AddArg(hi)
-		v5 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
-		v6 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
-		v6.AuxInt = 32
-		v5.AddArg(v6)
-		v5.AddArg(s)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v7 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
-		v7.AddArg(hi)
-		v8 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
-		v8.AddArg(s)
-		v9 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
-		v9.AuxInt = 32
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v1.AddArg(v7)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Rsh32x16 <config.fe.TypeUInt32()> hi s) 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Rsh32Ux16 <config.fe.TypeUInt32()> lo s) 				(Lsh32x16 <config.fe.TypeUInt32()> 					hi 					(Sub16 <config.fe.TypeUInt16()> (Const16 <config.fe.TypeUInt16()> [32]) s))) 			(And32 <config.fe.TypeUInt32()> 				(Rsh32x16 <config.fe.TypeUInt32()> 					hi 					(Sub16 <config.fe.TypeUInt16()> s (Const16 <config.fe.TypeUInt16()> [32]))) 				(Zeromask 					(ZeroExt16to32 						(Rsh16Ux32 <config.fe.TypeUInt16()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpRsh32x16, config.fe.TypeUInt32())
-		v0.AddArg(hi)
-		v0.AddArg(s)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpRsh32Ux16, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v3.AddArg(s)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpLsh32x16, config.fe.TypeUInt32())
-		v4.AddArg(hi)
-		v5 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
-		v6 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
-		v6.AuxInt = 32
-		v5.AddArg(v6)
-		v5.AddArg(s)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v7 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpRsh32x16, config.fe.TypeUInt32())
-		v8.AddArg(hi)
-		v9 := b.NewValue0(v.Line, OpSub16, config.fe.TypeUInt16())
-		v9.AddArg(s)
-		v10 := b.NewValue0(v.Line, OpConst16, config.fe.TypeUInt16())
-		v10.AuxInt = 32
-		v9.AddArg(v10)
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v11 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v12 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v13 := b.NewValue0(v.Line, OpRsh16Ux32, config.fe.TypeUInt16())
-		v13.AddArg(s)
-		v14 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v14.AuxInt = 5
-		v13.AddArg(v14)
-		v12.AddArg(v13)
-		v11.AddArg(v12)
-		v7.AddArg(v11)
-		v1.AddArg(v7)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Rsh32x32 <config.fe.TypeUInt32()> hi s) 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Rsh32Ux32 <config.fe.TypeUInt32()> lo s) 				(Lsh32x32 <config.fe.TypeUInt32()> 					hi 					(Sub32 <config.fe.TypeUInt32()> (Const32 <config.fe.TypeUInt32()> [32]) s))) 			(And32 <config.fe.TypeUInt32()> 				(Rsh32x32 <config.fe.TypeUInt32()> 					hi 					(Sub32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [32]))) 				(Zeromask 					(Rsh32Ux32 <config.fe.TypeUInt32()> s (Const32 <config.fe.TypeUInt32()> [5]))))))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpRsh32x32, config.fe.TypeUInt32())
-		v0.AddArg(hi)
-		v0.AddArg(s)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v3.AddArg(s)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpLsh32x32, config.fe.TypeUInt32())
-		v4.AddArg(hi)
-		v5 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
-		v6 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v6.AuxInt = 32
-		v5.AddArg(v6)
-		v5.AddArg(s)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v7 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpRsh32x32, config.fe.TypeUInt32())
-		v8.AddArg(hi)
-		v9 := b.NewValue0(v.Line, OpSub32, config.fe.TypeUInt32())
-		v9.AddArg(s)
-		v10 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v10.AuxInt = 32
-		v9.AddArg(v10)
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v11 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v12 := b.NewValue0(v.Line, OpRsh32Ux32, config.fe.TypeUInt32())
-		v12.AddArg(s)
-		v13 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v13.AuxInt = 5
-		v12.AddArg(v13)
-		v11.AddArg(v12)
-		v7.AddArg(v11)
-		v1.AddArg(v7)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v3 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Rsh64x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh64x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh64x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh64x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh64x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh64x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8 (Int64Make hi lo) s)
-	// cond:
-	// result: (Int64Make 		(Rsh32x8 <config.fe.TypeUInt32()> hi s) 		(Or32 <config.fe.TypeUInt32()> 			(Or32 <config.fe.TypeUInt32()> 				(Rsh32Ux8 <config.fe.TypeUInt32()> lo s) 				(Lsh32x8 <config.fe.TypeUInt32()> 					hi 					(Sub8 <config.fe.TypeUInt8()> (Const8 <config.fe.TypeUInt8()> [32]) s))) 			(And32 <config.fe.TypeUInt32()> 				(Rsh32x8 <config.fe.TypeUInt32()> 					hi 					(Sub8 <config.fe.TypeUInt8()> s (Const8 <config.fe.TypeUInt8()> [32]))) 				(Zeromask 					(ZeroExt8to32 						(Rsh8Ux32 <config.fe.TypeUInt8()> s (Const32 <config.fe.TypeUInt32()> [5])))))))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		hi := v_0.Args[0]
-		lo := v_0.Args[1]
-		s := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpRsh32x8, config.fe.TypeUInt32())
-		v0.AddArg(hi)
-		v0.AddArg(s)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v2 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v3 := b.NewValue0(v.Line, OpRsh32Ux8, config.fe.TypeUInt32())
-		v3.AddArg(lo)
-		v3.AddArg(s)
-		v2.AddArg(v3)
-		v4 := b.NewValue0(v.Line, OpLsh32x8, config.fe.TypeUInt32())
-		v4.AddArg(hi)
-		v5 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
-		v6 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
-		v6.AuxInt = 32
-		v5.AddArg(v6)
-		v5.AddArg(s)
-		v4.AddArg(v5)
-		v2.AddArg(v4)
-		v1.AddArg(v2)
-		v7 := b.NewValue0(v.Line, OpAnd32, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpRsh32x8, config.fe.TypeUInt32())
-		v8.AddArg(hi)
-		v9 := b.NewValue0(v.Line, OpSub8, config.fe.TypeUInt8())
-		v9.AddArg(s)
-		v10 := b.NewValue0(v.Line, OpConst8, config.fe.TypeUInt8())
-		v10.AuxInt = 32
-		v9.AddArg(v10)
-		v8.AddArg(v9)
-		v7.AddArg(v8)
-		v11 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v12 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v13 := b.NewValue0(v.Line, OpRsh8Ux32, config.fe.TypeUInt8())
-		v13.AddArg(s)
-		v14 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v14.AuxInt = 5
-		v13.AddArg(v14)
-		v12.AddArg(v13)
-		v11.AddArg(v12)
-		v7.AddArg(v11)
-		v1.AddArg(v7)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh8Ux64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh8Ux32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh8Ux32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh8Ux64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh8Ux32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh8Ux32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
-	// cond: c != 0
-	// result: (Signmask (SignExt8to32 x))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		c := v_1_0.AuxInt
-		if !(c != 0) {
-			break
-		}
-		v.reset(OpSignmask)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x64 x (Int64Make (Const32 [0]) lo))
-	// cond:
-	// result: (Rsh8x32 x lo)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.AuxInt != 0 {
-			break
-		}
-		lo := v_1.Args[1]
-		v.reset(OpRsh8x32)
-		v.AddArg(x)
-		v.AddArg(lo)
-		return true
-	}
-	// match: (Rsh8x64 x (Int64Make hi lo))
-	// cond: hi.Op != OpConst32
-	// result: (Rsh8x32 x (Or32 <config.fe.TypeUInt32()> (Zeromask hi) lo))
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		if !(hi.Op != OpConst32) {
-			break
-		}
-		v.reset(OpRsh8x32)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpOr32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpZeromask, config.fe.TypeUInt32())
-		v1.AddArg(hi)
-		v0.AddArg(v1)
-		v0.AddArg(lo)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 x)
-	// cond:
-	// result: (SignExt32to64 (SignExt16to32 x))
-	for {
-		x := v.Args[0]
-		v.reset(OpSignExt32to64)
-		v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuedec64_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 x)
-	// cond:
-	// result: (Int64Make (Signmask x) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpSignmask, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuedec64_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64 x)
-	// cond:
-	// result: (SignExt32to64 (SignExt8to32 x))
-	for {
-		x := v.Args[0]
-		v.reset(OpSignExt32to64)
-		v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuedec64_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store [8] dst (Int64Make hi lo) mem)
-	// cond: !config.BigEndian
-	// result: (Store [4] 		(OffPtr <hi.Type.PtrTo()> [4] dst) 		hi 		(Store [4] dst lo mem))
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		mem := v.Args[2]
-		if !(!config.BigEndian) {
-			break
-		}
-		v.reset(OpStore)
-		v.AuxInt = 4
-		v0 := b.NewValue0(v.Line, OpOffPtr, hi.Type.PtrTo())
-		v0.AuxInt = 4
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(hi)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v1.AddArg(lo)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store [8] dst (Int64Make hi lo) mem)
-	// cond: config.BigEndian
-	// result: (Store [4] 		(OffPtr <lo.Type.PtrTo()> [4] dst) 		lo 		(Store [4] dst hi mem))
-	for {
-		if v.AuxInt != 8 {
-			break
-		}
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpInt64Make {
-			break
-		}
-		hi := v_1.Args[0]
-		lo := v_1.Args[1]
-		mem := v.Args[2]
-		if !(config.BigEndian) {
-			break
-		}
-		v.reset(OpStore)
-		v.AuxInt = 4
-		v0 := b.NewValue0(v.Line, OpOffPtr, lo.Type.PtrTo())
-		v0.AuxInt = 4
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(lo)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = 4
-		v1.AddArg(dst)
-		v1.AddArg(hi)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64 x y)
-	// cond:
-	// result: (Int64Make 		(Sub32withcarry <config.fe.TypeInt32()> 			(Int64Hi x) 			(Int64Hi y) 			(Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) 		(Select0 <config.fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpSub32withcarry, config.fe.TypeInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpSelect1, TypeFlags)
-		v4 := b.NewValue0(v.Line, OpSub32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(x)
-		v4.AddArg(v5)
-		v6 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v6.AddArg(y)
-		v4.AddArg(v6)
-		v3.AddArg(v4)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v7 := b.NewValue0(v.Line, OpSelect0, config.fe.TypeUInt32())
-		v8 := b.NewValue0(v.Line, OpSub32carry, MakeTuple(config.fe.TypeUInt32(), TypeFlags))
-		v9 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v9.AddArg(x)
-		v8.AddArg(v9)
-		v10 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v10.AddArg(y)
-		v8.AddArg(v10)
-		v7.AddArg(v8)
-		v.AddArg(v7)
-		return true
-	}
-}
-func rewriteValuedec64_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 (Int64Make _ lo))
-	// cond:
-	// result: (Trunc32to16 lo)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		lo := v_0.Args[1]
-		v.reset(OpTrunc32to16)
-		v.AddArg(lo)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 (Int64Make _ lo))
-	// cond:
-	// result: lo
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		lo := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = lo.Type
-		v.AddArg(lo)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8 (Int64Make _ lo))
-	// cond:
-	// result: (Trunc32to8 lo)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpInt64Make {
-			break
-		}
-		lo := v_0.Args[1]
-		v.reset(OpTrunc32to8)
-		v.AddArg(lo)
-		return true
-	}
-	return false
-}
-func rewriteValuedec64_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x y)
-	// cond:
-	// result: (Int64Make 		(Xor32 <config.fe.TypeUInt32()> (Int64Hi x) (Int64Hi y)) 		(Xor32 <config.fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpXor32, config.fe.TypeUInt32())
-		v1 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpInt64Hi, config.fe.TypeUInt32())
-		v2.AddArg(y)
-		v0.AddArg(v2)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpXor32, config.fe.TypeUInt32())
-		v4 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v4.AddArg(x)
-		v3.AddArg(v4)
-		v5 := b.NewValue0(v.Line, OpInt64Lo, config.fe.TypeUInt32())
-		v5.AddArg(y)
-		v3.AddArg(v5)
-		v.AddArg(v3)
-		return true
-	}
-}
-func rewriteValuedec64_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 x)
-	// cond:
-	// result: (ZeroExt32to64 (ZeroExt16to32 x))
-	for {
-		x := v.Args[0]
-		v.reset(OpZeroExt32to64)
-		v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValuedec64_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 x)
-	// cond:
-	// result: (Int64Make (Const32 <config.fe.TypeUInt32()> [0]) x)
-	for {
-		x := v.Args[0]
-		v.reset(OpInt64Make)
-		v0 := b.NewValue0(v.Line, OpConst32, config.fe.TypeUInt32())
-		v0.AuxInt = 0
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-}
-func rewriteValuedec64_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64 x)
-	// cond:
-	// result: (ZeroExt32to64 (ZeroExt8to32 x))
-	for {
-		x := v.Args[0]
-		v.reset(OpZeroExt32to64)
-		v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteBlockdec64(b *Block, config *Config) bool {
-	switch b.Kind {
-	}
-	return false
-}
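
Note (illustrative, not part of the patch): the rewritedec64.go rules deleted above decompose 64-bit integer operations into pairs of 32-bit operations for targets without native 64-bit support; the Sub64 rule, for instance, builds the low half with Sub32carry and feeds the resulting borrow into Sub32withcarry for the high half. The standalone sketch below shows the same borrow propagation in plain Go; the (hi, lo) representation and the sub64 helper name are assumptions made for illustration only, not compiler API.

package main

import "fmt"

// sub64 subtracts two 64-bit values held as (hi, lo) uint32 halves,
// mirroring the shape of the Sub32carry / Sub32withcarry decomposition:
// compute the low half first, then subtract the borrow from the high half.
func sub64(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
	lo = xlo - ylo
	var borrow uint32
	if xlo < ylo { // the flag a Sub32carry-style step would produce
		borrow = 1
	}
	hi = xhi - yhi - borrow // the Sub32withcarry-style step consumes that flag
	return hi, lo
}

func main() {
	x, y := uint64(0x100000000), uint64(1)
	hi, lo := sub64(uint32(x>>32), uint32(x), uint32(y>>32), uint32(y))
	fmt.Printf("want %#x, got %#x\n", x-y, uint64(hi)<<32|uint64(lo))
}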
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritegeneric.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritegeneric.go
deleted file mode 100644
index 636d9db..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/rewritegeneric.go
+++ /dev/null
@@ -1,12453 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritegeneric.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/rewritegeneric.go:1
-// autogenerated from gen/generic.rules: do not edit!
-// generated with: cd gen; go run *.go
-
-package ssa
-
-import "math"
-
-var _ = math.MinInt8 // in case not otherwise used
-func rewriteValuegeneric(v *Value, config *Config) bool {
-	switch v.Op {
-	case OpAdd16:
-		return rewriteValuegeneric_OpAdd16(v, config)
-	case OpAdd32:
-		return rewriteValuegeneric_OpAdd32(v, config)
-	case OpAdd32F:
-		return rewriteValuegeneric_OpAdd32F(v, config)
-	case OpAdd64:
-		return rewriteValuegeneric_OpAdd64(v, config)
-	case OpAdd64F:
-		return rewriteValuegeneric_OpAdd64F(v, config)
-	case OpAdd8:
-		return rewriteValuegeneric_OpAdd8(v, config)
-	case OpAddPtr:
-		return rewriteValuegeneric_OpAddPtr(v, config)
-	case OpAnd16:
-		return rewriteValuegeneric_OpAnd16(v, config)
-	case OpAnd32:
-		return rewriteValuegeneric_OpAnd32(v, config)
-	case OpAnd64:
-		return rewriteValuegeneric_OpAnd64(v, config)
-	case OpAnd8:
-		return rewriteValuegeneric_OpAnd8(v, config)
-	case OpArg:
-		return rewriteValuegeneric_OpArg(v, config)
-	case OpArraySelect:
-		return rewriteValuegeneric_OpArraySelect(v, config)
-	case OpCom16:
-		return rewriteValuegeneric_OpCom16(v, config)
-	case OpCom32:
-		return rewriteValuegeneric_OpCom32(v, config)
-	case OpCom64:
-		return rewriteValuegeneric_OpCom64(v, config)
-	case OpCom8:
-		return rewriteValuegeneric_OpCom8(v, config)
-	case OpConstInterface:
-		return rewriteValuegeneric_OpConstInterface(v, config)
-	case OpConstSlice:
-		return rewriteValuegeneric_OpConstSlice(v, config)
-	case OpConstString:
-		return rewriteValuegeneric_OpConstString(v, config)
-	case OpConvert:
-		return rewriteValuegeneric_OpConvert(v, config)
-	case OpCvt32Fto64F:
-		return rewriteValuegeneric_OpCvt32Fto64F(v, config)
-	case OpCvt64Fto32F:
-		return rewriteValuegeneric_OpCvt64Fto32F(v, config)
-	case OpDiv32F:
-		return rewriteValuegeneric_OpDiv32F(v, config)
-	case OpDiv64:
-		return rewriteValuegeneric_OpDiv64(v, config)
-	case OpDiv64F:
-		return rewriteValuegeneric_OpDiv64F(v, config)
-	case OpDiv64u:
-		return rewriteValuegeneric_OpDiv64u(v, config)
-	case OpEq16:
-		return rewriteValuegeneric_OpEq16(v, config)
-	case OpEq32:
-		return rewriteValuegeneric_OpEq32(v, config)
-	case OpEq64:
-		return rewriteValuegeneric_OpEq64(v, config)
-	case OpEq8:
-		return rewriteValuegeneric_OpEq8(v, config)
-	case OpEqB:
-		return rewriteValuegeneric_OpEqB(v, config)
-	case OpEqInter:
-		return rewriteValuegeneric_OpEqInter(v, config)
-	case OpEqPtr:
-		return rewriteValuegeneric_OpEqPtr(v, config)
-	case OpEqSlice:
-		return rewriteValuegeneric_OpEqSlice(v, config)
-	case OpGeq16:
-		return rewriteValuegeneric_OpGeq16(v, config)
-	case OpGeq16U:
-		return rewriteValuegeneric_OpGeq16U(v, config)
-	case OpGeq32:
-		return rewriteValuegeneric_OpGeq32(v, config)
-	case OpGeq32U:
-		return rewriteValuegeneric_OpGeq32U(v, config)
-	case OpGeq64:
-		return rewriteValuegeneric_OpGeq64(v, config)
-	case OpGeq64U:
-		return rewriteValuegeneric_OpGeq64U(v, config)
-	case OpGeq8:
-		return rewriteValuegeneric_OpGeq8(v, config)
-	case OpGeq8U:
-		return rewriteValuegeneric_OpGeq8U(v, config)
-	case OpGreater16:
-		return rewriteValuegeneric_OpGreater16(v, config)
-	case OpGreater16U:
-		return rewriteValuegeneric_OpGreater16U(v, config)
-	case OpGreater32:
-		return rewriteValuegeneric_OpGreater32(v, config)
-	case OpGreater32U:
-		return rewriteValuegeneric_OpGreater32U(v, config)
-	case OpGreater64:
-		return rewriteValuegeneric_OpGreater64(v, config)
-	case OpGreater64U:
-		return rewriteValuegeneric_OpGreater64U(v, config)
-	case OpGreater8:
-		return rewriteValuegeneric_OpGreater8(v, config)
-	case OpGreater8U:
-		return rewriteValuegeneric_OpGreater8U(v, config)
-	case OpIMake:
-		return rewriteValuegeneric_OpIMake(v, config)
-	case OpIsInBounds:
-		return rewriteValuegeneric_OpIsInBounds(v, config)
-	case OpIsSliceInBounds:
-		return rewriteValuegeneric_OpIsSliceInBounds(v, config)
-	case OpLeq16:
-		return rewriteValuegeneric_OpLeq16(v, config)
-	case OpLeq16U:
-		return rewriteValuegeneric_OpLeq16U(v, config)
-	case OpLeq32:
-		return rewriteValuegeneric_OpLeq32(v, config)
-	case OpLeq32U:
-		return rewriteValuegeneric_OpLeq32U(v, config)
-	case OpLeq64:
-		return rewriteValuegeneric_OpLeq64(v, config)
-	case OpLeq64U:
-		return rewriteValuegeneric_OpLeq64U(v, config)
-	case OpLeq8:
-		return rewriteValuegeneric_OpLeq8(v, config)
-	case OpLeq8U:
-		return rewriteValuegeneric_OpLeq8U(v, config)
-	case OpLess16:
-		return rewriteValuegeneric_OpLess16(v, config)
-	case OpLess16U:
-		return rewriteValuegeneric_OpLess16U(v, config)
-	case OpLess32:
-		return rewriteValuegeneric_OpLess32(v, config)
-	case OpLess32U:
-		return rewriteValuegeneric_OpLess32U(v, config)
-	case OpLess64:
-		return rewriteValuegeneric_OpLess64(v, config)
-	case OpLess64U:
-		return rewriteValuegeneric_OpLess64U(v, config)
-	case OpLess8:
-		return rewriteValuegeneric_OpLess8(v, config)
-	case OpLess8U:
-		return rewriteValuegeneric_OpLess8U(v, config)
-	case OpLoad:
-		return rewriteValuegeneric_OpLoad(v, config)
-	case OpLsh16x16:
-		return rewriteValuegeneric_OpLsh16x16(v, config)
-	case OpLsh16x32:
-		return rewriteValuegeneric_OpLsh16x32(v, config)
-	case OpLsh16x64:
-		return rewriteValuegeneric_OpLsh16x64(v, config)
-	case OpLsh16x8:
-		return rewriteValuegeneric_OpLsh16x8(v, config)
-	case OpLsh32x16:
-		return rewriteValuegeneric_OpLsh32x16(v, config)
-	case OpLsh32x32:
-		return rewriteValuegeneric_OpLsh32x32(v, config)
-	case OpLsh32x64:
-		return rewriteValuegeneric_OpLsh32x64(v, config)
-	case OpLsh32x8:
-		return rewriteValuegeneric_OpLsh32x8(v, config)
-	case OpLsh64x16:
-		return rewriteValuegeneric_OpLsh64x16(v, config)
-	case OpLsh64x32:
-		return rewriteValuegeneric_OpLsh64x32(v, config)
-	case OpLsh64x64:
-		return rewriteValuegeneric_OpLsh64x64(v, config)
-	case OpLsh64x8:
-		return rewriteValuegeneric_OpLsh64x8(v, config)
-	case OpLsh8x16:
-		return rewriteValuegeneric_OpLsh8x16(v, config)
-	case OpLsh8x32:
-		return rewriteValuegeneric_OpLsh8x32(v, config)
-	case OpLsh8x64:
-		return rewriteValuegeneric_OpLsh8x64(v, config)
-	case OpLsh8x8:
-		return rewriteValuegeneric_OpLsh8x8(v, config)
-	case OpMod16:
-		return rewriteValuegeneric_OpMod16(v, config)
-	case OpMod16u:
-		return rewriteValuegeneric_OpMod16u(v, config)
-	case OpMod32:
-		return rewriteValuegeneric_OpMod32(v, config)
-	case OpMod32u:
-		return rewriteValuegeneric_OpMod32u(v, config)
-	case OpMod64:
-		return rewriteValuegeneric_OpMod64(v, config)
-	case OpMod64u:
-		return rewriteValuegeneric_OpMod64u(v, config)
-	case OpMod8:
-		return rewriteValuegeneric_OpMod8(v, config)
-	case OpMod8u:
-		return rewriteValuegeneric_OpMod8u(v, config)
-	case OpMul16:
-		return rewriteValuegeneric_OpMul16(v, config)
-	case OpMul32:
-		return rewriteValuegeneric_OpMul32(v, config)
-	case OpMul32F:
-		return rewriteValuegeneric_OpMul32F(v, config)
-	case OpMul64:
-		return rewriteValuegeneric_OpMul64(v, config)
-	case OpMul64F:
-		return rewriteValuegeneric_OpMul64F(v, config)
-	case OpMul8:
-		return rewriteValuegeneric_OpMul8(v, config)
-	case OpNeg16:
-		return rewriteValuegeneric_OpNeg16(v, config)
-	case OpNeg32:
-		return rewriteValuegeneric_OpNeg32(v, config)
-	case OpNeg64:
-		return rewriteValuegeneric_OpNeg64(v, config)
-	case OpNeg8:
-		return rewriteValuegeneric_OpNeg8(v, config)
-	case OpNeq16:
-		return rewriteValuegeneric_OpNeq16(v, config)
-	case OpNeq32:
-		return rewriteValuegeneric_OpNeq32(v, config)
-	case OpNeq64:
-		return rewriteValuegeneric_OpNeq64(v, config)
-	case OpNeq8:
-		return rewriteValuegeneric_OpNeq8(v, config)
-	case OpNeqB:
-		return rewriteValuegeneric_OpNeqB(v, config)
-	case OpNeqInter:
-		return rewriteValuegeneric_OpNeqInter(v, config)
-	case OpNeqPtr:
-		return rewriteValuegeneric_OpNeqPtr(v, config)
-	case OpNeqSlice:
-		return rewriteValuegeneric_OpNeqSlice(v, config)
-	case OpNilCheck:
-		return rewriteValuegeneric_OpNilCheck(v, config)
-	case OpNot:
-		return rewriteValuegeneric_OpNot(v, config)
-	case OpOffPtr:
-		return rewriteValuegeneric_OpOffPtr(v, config)
-	case OpOr16:
-		return rewriteValuegeneric_OpOr16(v, config)
-	case OpOr32:
-		return rewriteValuegeneric_OpOr32(v, config)
-	case OpOr64:
-		return rewriteValuegeneric_OpOr64(v, config)
-	case OpOr8:
-		return rewriteValuegeneric_OpOr8(v, config)
-	case OpPhi:
-		return rewriteValuegeneric_OpPhi(v, config)
-	case OpPtrIndex:
-		return rewriteValuegeneric_OpPtrIndex(v, config)
-	case OpRsh16Ux16:
-		return rewriteValuegeneric_OpRsh16Ux16(v, config)
-	case OpRsh16Ux32:
-		return rewriteValuegeneric_OpRsh16Ux32(v, config)
-	case OpRsh16Ux64:
-		return rewriteValuegeneric_OpRsh16Ux64(v, config)
-	case OpRsh16Ux8:
-		return rewriteValuegeneric_OpRsh16Ux8(v, config)
-	case OpRsh16x16:
-		return rewriteValuegeneric_OpRsh16x16(v, config)
-	case OpRsh16x32:
-		return rewriteValuegeneric_OpRsh16x32(v, config)
-	case OpRsh16x64:
-		return rewriteValuegeneric_OpRsh16x64(v, config)
-	case OpRsh16x8:
-		return rewriteValuegeneric_OpRsh16x8(v, config)
-	case OpRsh32Ux16:
-		return rewriteValuegeneric_OpRsh32Ux16(v, config)
-	case OpRsh32Ux32:
-		return rewriteValuegeneric_OpRsh32Ux32(v, config)
-	case OpRsh32Ux64:
-		return rewriteValuegeneric_OpRsh32Ux64(v, config)
-	case OpRsh32Ux8:
-		return rewriteValuegeneric_OpRsh32Ux8(v, config)
-	case OpRsh32x16:
-		return rewriteValuegeneric_OpRsh32x16(v, config)
-	case OpRsh32x32:
-		return rewriteValuegeneric_OpRsh32x32(v, config)
-	case OpRsh32x64:
-		return rewriteValuegeneric_OpRsh32x64(v, config)
-	case OpRsh32x8:
-		return rewriteValuegeneric_OpRsh32x8(v, config)
-	case OpRsh64Ux16:
-		return rewriteValuegeneric_OpRsh64Ux16(v, config)
-	case OpRsh64Ux32:
-		return rewriteValuegeneric_OpRsh64Ux32(v, config)
-	case OpRsh64Ux64:
-		return rewriteValuegeneric_OpRsh64Ux64(v, config)
-	case OpRsh64Ux8:
-		return rewriteValuegeneric_OpRsh64Ux8(v, config)
-	case OpRsh64x16:
-		return rewriteValuegeneric_OpRsh64x16(v, config)
-	case OpRsh64x32:
-		return rewriteValuegeneric_OpRsh64x32(v, config)
-	case OpRsh64x64:
-		return rewriteValuegeneric_OpRsh64x64(v, config)
-	case OpRsh64x8:
-		return rewriteValuegeneric_OpRsh64x8(v, config)
-	case OpRsh8Ux16:
-		return rewriteValuegeneric_OpRsh8Ux16(v, config)
-	case OpRsh8Ux32:
-		return rewriteValuegeneric_OpRsh8Ux32(v, config)
-	case OpRsh8Ux64:
-		return rewriteValuegeneric_OpRsh8Ux64(v, config)
-	case OpRsh8Ux8:
-		return rewriteValuegeneric_OpRsh8Ux8(v, config)
-	case OpRsh8x16:
-		return rewriteValuegeneric_OpRsh8x16(v, config)
-	case OpRsh8x32:
-		return rewriteValuegeneric_OpRsh8x32(v, config)
-	case OpRsh8x64:
-		return rewriteValuegeneric_OpRsh8x64(v, config)
-	case OpRsh8x8:
-		return rewriteValuegeneric_OpRsh8x8(v, config)
-	case OpSignExt16to32:
-		return rewriteValuegeneric_OpSignExt16to32(v, config)
-	case OpSignExt16to64:
-		return rewriteValuegeneric_OpSignExt16to64(v, config)
-	case OpSignExt32to64:
-		return rewriteValuegeneric_OpSignExt32to64(v, config)
-	case OpSignExt8to16:
-		return rewriteValuegeneric_OpSignExt8to16(v, config)
-	case OpSignExt8to32:
-		return rewriteValuegeneric_OpSignExt8to32(v, config)
-	case OpSignExt8to64:
-		return rewriteValuegeneric_OpSignExt8to64(v, config)
-	case OpSliceCap:
-		return rewriteValuegeneric_OpSliceCap(v, config)
-	case OpSliceLen:
-		return rewriteValuegeneric_OpSliceLen(v, config)
-	case OpSlicePtr:
-		return rewriteValuegeneric_OpSlicePtr(v, config)
-	case OpSlicemask:
-		return rewriteValuegeneric_OpSlicemask(v, config)
-	case OpSqrt:
-		return rewriteValuegeneric_OpSqrt(v, config)
-	case OpStore:
-		return rewriteValuegeneric_OpStore(v, config)
-	case OpStringLen:
-		return rewriteValuegeneric_OpStringLen(v, config)
-	case OpStringPtr:
-		return rewriteValuegeneric_OpStringPtr(v, config)
-	case OpStructSelect:
-		return rewriteValuegeneric_OpStructSelect(v, config)
-	case OpSub16:
-		return rewriteValuegeneric_OpSub16(v, config)
-	case OpSub32:
-		return rewriteValuegeneric_OpSub32(v, config)
-	case OpSub32F:
-		return rewriteValuegeneric_OpSub32F(v, config)
-	case OpSub64:
-		return rewriteValuegeneric_OpSub64(v, config)
-	case OpSub64F:
-		return rewriteValuegeneric_OpSub64F(v, config)
-	case OpSub8:
-		return rewriteValuegeneric_OpSub8(v, config)
-	case OpTrunc16to8:
-		return rewriteValuegeneric_OpTrunc16to8(v, config)
-	case OpTrunc32to16:
-		return rewriteValuegeneric_OpTrunc32to16(v, config)
-	case OpTrunc32to8:
-		return rewriteValuegeneric_OpTrunc32to8(v, config)
-	case OpTrunc64to16:
-		return rewriteValuegeneric_OpTrunc64to16(v, config)
-	case OpTrunc64to32:
-		return rewriteValuegeneric_OpTrunc64to32(v, config)
-	case OpTrunc64to8:
-		return rewriteValuegeneric_OpTrunc64to8(v, config)
-	case OpXor16:
-		return rewriteValuegeneric_OpXor16(v, config)
-	case OpXor32:
-		return rewriteValuegeneric_OpXor32(v, config)
-	case OpXor64:
-		return rewriteValuegeneric_OpXor64(v, config)
-	case OpXor8:
-		return rewriteValuegeneric_OpXor8(v, config)
-	case OpZero:
-		return rewriteValuegeneric_OpZero(v, config)
-	case OpZeroExt16to32:
-		return rewriteValuegeneric_OpZeroExt16to32(v, config)
-	case OpZeroExt16to64:
-		return rewriteValuegeneric_OpZeroExt16to64(v, config)
-	case OpZeroExt32to64:
-		return rewriteValuegeneric_OpZeroExt32to64(v, config)
-	case OpZeroExt8to16:
-		return rewriteValuegeneric_OpZeroExt8to16(v, config)
-	case OpZeroExt8to32:
-		return rewriteValuegeneric_OpZeroExt8to32(v, config)
-	case OpZeroExt8to64:
-		return rewriteValuegeneric_OpZeroExt8to64(v, config)
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add16  (Const16 [c])  (Const16 [d]))
-	// cond:
-	// result: (Const16 [int64(int16(c+d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c + d))
-		return true
-	}
-	// match: (Add16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Add16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpAdd16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add16 (Const16 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32  (Const32 [c])  (Const32 [d]))
-	// cond:
-	// result: (Const32 [int64(int32(c+d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c + d))
-		return true
-	}
-	// match: (Add32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Add32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32 (Const32 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add32F (Const32F [c]) (Const32F [d]))
-	// cond:
-	// result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32F)
-		v.AuxInt = f2i(float64(i2f32(c) + i2f32(d)))
-		return true
-	}
-	// match: (Add32F x (Const32F [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add32F (Const32F [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64  (Const64 [c])  (Const64 [d]))
-	// cond:
-	// result: (Const64 [c+d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64)
-		v.AuxInt = c + d
-		return true
-	}
-	// match: (Add64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Add64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpAdd64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add64 (Const64 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add64F (Const64F [c]) (Const64F [d]))
-	// cond:
-	// result: (Const64F [f2i(i2f(c) + i2f(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64F)
-		v.AuxInt = f2i(i2f(c) + i2f(d))
-		return true
-	}
-	// match: (Add64F x (Const64F [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add64F (Const64F [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAdd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Add8   (Const8 [c])   (Const8 [d]))
-	// cond:
-	// result: (Const8  [int64(int8(c+d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c + d))
-		return true
-	}
-	// match: (Add8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (Add8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpAdd8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Add8  (Const8  [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAddPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (AddPtr <t> x (Const64 [c]))
-	// cond:
-	// result: (OffPtr <t> x [c])
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpOffPtr)
-		v.Type = t
-		v.AuxInt = c
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAnd16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (And16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpAnd16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (And16 x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And16 (Const16 [-1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And16 (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (And16 x (And16 x y))
-	// cond:
-	// result: (And16 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd16 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpAnd16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And16 x (And16 y x))
-	// cond:
-	// result: (And16 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd16 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpAnd16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And16 (And16 x y) x)
-	// cond:
-	// result: (And16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And16 (And16 x y) y)
-	// cond:
-	// result: (And16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAnd32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (And32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAnd32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (And32 x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And32 (Const32 [-1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And32 (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (And32 x (And32 x y))
-	// cond:
-	// result: (And32 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd32 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpAnd32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And32 x (And32 y x))
-	// cond:
-	// result: (And32 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd32 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpAnd32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And32 (And32 x y) x)
-	// cond:
-	// result: (And32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And32 (And32 x y) y)
-	// cond:
-	// result: (And32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (And64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpAnd64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (And64 x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And64 (Const64 [-1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And64 (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (And64 x (And64 x y))
-	// cond:
-	// result: (And64 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd64 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpAnd64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And64 x (And64 y x))
-	// cond:
-	// result: (And64 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd64 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpAnd64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And64 (And64 x y) x)
-	// cond:
-	// result: (And64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And64 (And64 x y) y)
-	// cond:
-	// result: (And64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And64 <t> (Const64 [y]) x)
-	// cond: nlz(y) + nto(y) == 64 && nto(y) >= 32
-	// result: (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		y := v_0.AuxInt
-		x := v.Args[1]
-		if !(nlz(y)+nto(y) == 64 && nto(y) >= 32) {
-			break
-		}
-		v.reset(OpRsh64Ux64)
-		v0 := b.NewValue0(v.Line, OpLsh64x64, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = nlz(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = nlz(y)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (And64 <t> (Const64 [y]) x)
-	// cond: nlo(y) + ntz(y) == 64 && ntz(y) >= 32
-	// result: (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		y := v_0.AuxInt
-		x := v.Args[1]
-		if !(nlo(y)+ntz(y) == 64 && ntz(y) >= 32) {
-			break
-		}
-		v.reset(OpLsh64x64)
-		v0 := b.NewValue0(v.Line, OpRsh64Ux64, t)
-		v0.AddArg(x)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = ntz(y)
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = ntz(y)
-		v.AddArg(v2)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (And8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (And8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpAnd8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (And8  x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And8  (Const8  [-1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (And8  (Const8  [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (And8  x (And8  x y))
-	// cond:
-	// result: (And8  x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd8 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpAnd8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And8  x (And8  y x))
-	// cond:
-	// result: (And8  x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpAnd8 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpAnd8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And8  (And8  x y) x)
-	// cond:
-	// result: (And8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (And8  (And8  x y) y)
-	// cond:
-	// result: (And8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpAnd8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsString()
-	// result: (StringMake     (Arg <config.fe.TypeBytePtr()> {n} [off])     (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(v.Type.IsString()) {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
-		v1.AuxInt = off + config.PtrSize
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsSlice()
-	// result: (SliceMake     (Arg <v.Type.ElemType().PtrTo()> {n} [off])     (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize])     (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(v.Type.IsSlice()) {
-			break
-		}
-		v.reset(OpSliceMake)
-		v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
-		v1.AuxInt = off + config.PtrSize
-		v1.Aux = n
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt())
-		v2.AuxInt = off + 2*config.PtrSize
-		v2.Aux = n
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsInterface()
-	// result: (IMake     (Arg <config.fe.TypeBytePtr()> {n} [off])     (Arg <config.fe.TypeBytePtr()> {n} [off+config.PtrSize]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(v.Type.IsInterface()) {
-			break
-		}
-		v.reset(OpIMake)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
-		v1.AuxInt = off + config.PtrSize
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsComplex() && v.Type.Size() == 16
-	// result: (ComplexMake     (Arg <config.fe.TypeFloat64()> {n} [off])     (Arg <config.fe.TypeFloat64()> {n} [off+8]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(v.Type.IsComplex() && v.Type.Size() == 16) {
-			break
-		}
-		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat64())
-		v1.AuxInt = off + 8
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg {n} [off])
-	// cond: v.Type.IsComplex() && v.Type.Size() == 8
-	// result: (ComplexMake     (Arg <config.fe.TypeFloat32()> {n} [off])     (Arg <config.fe.TypeFloat32()> {n} [off+4]))
-	for {
-		off := v.AuxInt
-		n := v.Aux
-		if !(v.Type.IsComplex() && v.Type.Size() == 8) {
-			break
-		}
-		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeFloat32())
-		v1.AuxInt = off + 4
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg <t>)
-	// cond: t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)
-	// result: (StructMake0)
-	for {
-		t := v.Type
-		if !(t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake0)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)
-	// result: (StructMake1     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)]))
-	for {
-		t := v.Type
-		off := v.AuxInt
-		n := v.Aux
-		if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake1)
-		v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-		v0.AuxInt = off + t.FieldOff(0)
-		v0.Aux = n
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)
-	// result: (StructMake2     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])     (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)]))
-	for {
-		t := v.Type
-		off := v.AuxInt
-		n := v.Aux
-		if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake2)
-		v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-		v0.AuxInt = off + t.FieldOff(0)
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
-		v1.AuxInt = off + t.FieldOff(1)
-		v1.Aux = n
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)
-	// result: (StructMake3     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])     (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])     (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)]))
-	for {
-		t := v.Type
-		off := v.AuxInt
-		n := v.Aux
-		if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake3)
-		v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-		v0.AuxInt = off + t.FieldOff(0)
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
-		v1.AuxInt = off + t.FieldOff(1)
-		v1.Aux = n
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
-		v2.AuxInt = off + t.FieldOff(2)
-		v2.Aux = n
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)
-	// result: (StructMake4     (Arg <t.FieldType(0)> {n} [off+t.FieldOff(0)])     (Arg <t.FieldType(1)> {n} [off+t.FieldOff(1)])     (Arg <t.FieldType(2)> {n} [off+t.FieldOff(2)])     (Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
-	for {
-		t := v.Type
-		off := v.AuxInt
-		n := v.Aux
-		if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake4)
-		v0 := b.NewValue0(v.Line, OpArg, t.FieldType(0))
-		v0.AuxInt = off + t.FieldOff(0)
-		v0.Aux = n
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpArg, t.FieldType(1))
-		v1.AuxInt = off + t.FieldOff(1)
-		v1.Aux = n
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpArg, t.FieldType(2))
-		v2.AuxInt = off + t.FieldOff(2)
-		v2.Aux = n
-		v.AddArg(v2)
-		v3 := b.NewValue0(v.Line, OpArg, t.FieldType(3))
-		v3.AuxInt = off + t.FieldOff(3)
-		v3.Aux = n
-		v.AddArg(v3)
-		return true
-	}
-	// match: (Arg <t>)
-	// cond: t.IsArray() && t.NumElem() == 0
-	// result: (ArrayMake0)
-	for {
-		t := v.Type
-		if !(t.IsArray() && t.NumElem() == 0) {
-			break
-		}
-		v.reset(OpArrayMake0)
-		return true
-	}
-	// match: (Arg <t> {n} [off])
-	// cond: t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)
-	// result: (ArrayMake1 (Arg <t.ElemType()> {n} [off]))
-	for {
-		t := v.Type
-		off := v.AuxInt
-		n := v.Aux
-		if !(t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpArrayMake1)
-		v0 := b.NewValue0(v.Line, OpArg, t.ElemType())
-		v0.AuxInt = off
-		v0.Aux = n
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpArraySelect(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ArraySelect (ArrayMake1 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpArrayMake1 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (ArraySelect [0] (Load ptr mem))
-	// cond:
-	// result: (Load ptr mem)
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpLoad {
-			break
-		}
-		ptr := v_0.Args[0]
-		mem := v_0.Args[1]
-		v.reset(OpLoad)
-		v.AddArg(ptr)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (ArraySelect [0] x:(IData _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		if x.Op != OpIData {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpCom16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com16 (Com16 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpCom16 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpCom32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com32 (Com32 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpCom32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpCom64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com64 (Com64 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpCom64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpCom8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Com8  (Com8  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpCom8 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpConstInterface(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstInterface)
-	// cond:
-	// result: (IMake     (ConstNil <config.fe.TypeBytePtr()>)     (ConstNil <config.fe.TypeBytePtr()>))
-	for {
-		v.reset(OpIMake)
-		v0 := b.NewValue0(v.Line, OpConstNil, config.fe.TypeBytePtr())
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConstNil, config.fe.TypeBytePtr())
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuegeneric_OpConstSlice(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstSlice)
-	// cond: config.PtrSize == 4
-	// result: (SliceMake     (ConstNil <v.Type.ElemType().PtrTo()>)     (Const32 <config.fe.TypeInt()> [0])     (Const32 <config.fe.TypeInt()> [0]))
-	for {
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpSliceMake)
-		v0 := b.NewValue0(v.Line, OpConstNil, v.Type.ElemType().PtrTo())
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		return true
-	}
-	// match: (ConstSlice)
-	// cond: config.PtrSize == 8
-	// result: (SliceMake     (ConstNil <v.Type.ElemType().PtrTo()>)     (Const64 <config.fe.TypeInt()> [0])     (Const64 <config.fe.TypeInt()> [0]))
-	for {
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpSliceMake)
-		v0 := b.NewValue0(v.Line, OpConstNil, v.Type.ElemType().PtrTo())
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
-		v2.AuxInt = 0
-		v.AddArg(v2)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpConstString(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ConstString {s})
-	// cond: config.PtrSize == 4 && s.(string) == ""
-	// result: (StringMake (ConstNil) (Const32 <config.fe.TypeInt()> [0]))
-	for {
-		s := v.Aux
-		if !(config.PtrSize == 4 && s.(string) == "") {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Line, OpConstNil, config.fe.TypeBytePtr())
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		return true
-	}
-	// match: (ConstString {s})
-	// cond: config.PtrSize == 8 && s.(string) == ""
-	// result: (StringMake (ConstNil) (Const64 <config.fe.TypeInt()> [0]))
-	for {
-		s := v.Aux
-		if !(config.PtrSize == 8 && s.(string) == "") {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Line, OpConstNil, config.fe.TypeBytePtr())
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
-		v1.AuxInt = 0
-		v.AddArg(v1)
-		return true
-	}
-	// match: (ConstString {s})
-	// cond: config.PtrSize == 4 && s.(string) != ""
-	// result: (StringMake     (Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}       (SB))     (Const32 <config.fe.TypeInt()> [int64(len(s.(string)))]))
-	for {
-		s := v.Aux
-		if !(config.PtrSize == 4 && s.(string) != "") {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Line, OpAddr, config.fe.TypeBytePtr())
-		v0.Aux = config.fe.StringData(s.(string))
-		v1 := b.NewValue0(v.Line, OpSB, config.fe.TypeUintptr())
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt())
-		v2.AuxInt = int64(len(s.(string)))
-		v.AddArg(v2)
-		return true
-	}
-	// match: (ConstString {s})
-	// cond: config.PtrSize == 8 && s.(string) != ""
-	// result: (StringMake     (Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}       (SB))     (Const64 <config.fe.TypeInt()> [int64(len(s.(string)))]))
-	for {
-		s := v.Aux
-		if !(config.PtrSize == 8 && s.(string) != "") {
-			break
-		}
-		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Line, OpAddr, config.fe.TypeBytePtr())
-		v0.Aux = config.fe.StringData(s.(string))
-		v1 := b.NewValue0(v.Line, OpSB, config.fe.TypeUintptr())
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
-		v2.AuxInt = int64(len(s.(string)))
-		v.AddArg(v2)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpConvert(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Convert (Add64 (Convert ptr mem) off) mem)
-	// cond:
-	// result: (Add64 ptr off)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConvert {
-			break
-		}
-		ptr := v_0_0.Args[0]
-		mem := v_0_0.Args[1]
-		off := v_0.Args[1]
-		if mem != v.Args[1] {
-			break
-		}
-		v.reset(OpAdd64)
-		v.AddArg(ptr)
-		v.AddArg(off)
-		return true
-	}
-	// match: (Convert (Add64 off (Convert ptr mem)) mem)
-	// cond:
-	// result: (Add64 ptr off)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd64 {
-			break
-		}
-		off := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConvert {
-			break
-		}
-		ptr := v_0_1.Args[0]
-		mem := v_0_1.Args[1]
-		if mem != v.Args[1] {
-			break
-		}
-		v.reset(OpAdd64)
-		v.AddArg(ptr)
-		v.AddArg(off)
-		return true
-	}
-	// match: (Convert (Convert ptr mem) mem)
-	// cond:
-	// result: ptr
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConvert {
-			break
-		}
-		ptr := v_0.Args[0]
-		mem := v_0.Args[1]
-		if mem != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = ptr.Type
-		v.AddArg(ptr)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpCvt32Fto64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt32Fto64F (Const32F [c]))
-	// cond:
-	// result: (Const64F [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst64F)
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpCvt64Fto32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Cvt64Fto32F (Const64F [c]))
-	// cond:
-	// result: (Const32F [f2i(float64(i2f32(c)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst32F)
-		v.AuxInt = f2i(float64(i2f32(c)))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpDiv32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div32F x (Const32F [f2i(1)]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		if v_1.AuxInt != f2i(1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Div32F x (Const32F [f2i(-1)]))
-	// cond:
-	// result: (Neg32F x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		if v_1.AuxInt != f2i(-1) {
-			break
-		}
-		v.reset(OpNeg32F)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpDiv64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64 <t> x (Const64 [c]))
-	// cond: c > 0 && smagic64ok(c) && smagic64m(c) > 0
-	// result: (Sub64 <t>     (Rsh64x64 <t>       (Hmul64 <t>         (Const64 <t> [smagic64m(c)])         x)       (Const64 <t> [smagic64s(c)]))     (Rsh64x64 <t>       x       (Const64 <t> [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c > 0 && smagic64ok(c) && smagic64m(c) > 0) {
-			break
-		}
-		v.reset(OpSub64)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v1 := b.NewValue0(v.Line, OpHmul64, t)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = smagic64m(c)
-		v1.AddArg(v2)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = smagic64s(c)
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		v4 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v4.AddArg(x)
-		v5 := b.NewValue0(v.Line, OpConst64, t)
-		v5.AuxInt = 63
-		v4.AddArg(v5)
-		v.AddArg(v4)
-		return true
-	}
-	// match: (Div64 <t> x (Const64 [c]))
-	// cond: c > 0 && smagic64ok(c) && smagic64m(c) < 0
-	// result: (Sub64 <t>     (Rsh64x64 <t>       (Add64 <t>         (Hmul64 <t>           (Const64 <t> [smagic64m(c)])           x)         x)       (Const64 <t> [smagic64s(c)]))     (Rsh64x64 <t>       x       (Const64 <t> [63])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c > 0 && smagic64ok(c) && smagic64m(c) < 0) {
-			break
-		}
-		v.reset(OpSub64)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v1 := b.NewValue0(v.Line, OpAdd64, t)
-		v2 := b.NewValue0(v.Line, OpHmul64, t)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = smagic64m(c)
-		v2.AddArg(v3)
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v1.AddArg(x)
-		v0.AddArg(v1)
-		v4 := b.NewValue0(v.Line, OpConst64, t)
-		v4.AuxInt = smagic64s(c)
-		v0.AddArg(v4)
-		v.AddArg(v0)
-		v5 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v5.AddArg(x)
-		v6 := b.NewValue0(v.Line, OpConst64, t)
-		v6.AuxInt = 63
-		v5.AddArg(v6)
-		v.AddArg(v5)
-		return true
-	}
-	// match: (Div64 <t> x (Const64 [c]))
-	// cond: c < 0 && smagic64ok(c) && smagic64m(c) > 0
-	// result: (Neg64 <t>     (Sub64 <t>       (Rsh64x64 <t>         (Hmul64 <t>           (Const64 <t> [smagic64m(c)])           x)         (Const64 <t> [smagic64s(c)]))       (Rsh64x64 <t>         x         (Const64 <t> [63]))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c < 0 && smagic64ok(c) && smagic64m(c) > 0) {
-			break
-		}
-		v.reset(OpNeg64)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpSub64, t)
-		v1 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v2 := b.NewValue0(v.Line, OpHmul64, t)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = smagic64m(c)
-		v2.AddArg(v3)
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v4 := b.NewValue0(v.Line, OpConst64, t)
-		v4.AuxInt = smagic64s(c)
-		v1.AddArg(v4)
-		v0.AddArg(v1)
-		v5 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v5.AddArg(x)
-		v6 := b.NewValue0(v.Line, OpConst64, t)
-		v6.AuxInt = 63
-		v5.AddArg(v6)
-		v0.AddArg(v5)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Div64 <t> x (Const64 [c]))
-	// cond: c < 0 && smagic64ok(c) && smagic64m(c) < 0
-	// result: (Neg64 <t>     (Sub64 <t>       (Rsh64x64 <t>         (Add64 <t>           (Hmul64 <t>             (Const64 <t> [smagic64m(c)])             x)           x)         (Const64 <t> [smagic64s(c)]))       (Rsh64x64 <t>         x         (Const64 <t> [63]))))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(c < 0 && smagic64ok(c) && smagic64m(c) < 0) {
-			break
-		}
-		v.reset(OpNeg64)
-		v.Type = t
-		v0 := b.NewValue0(v.Line, OpSub64, t)
-		v1 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v2 := b.NewValue0(v.Line, OpAdd64, t)
-		v3 := b.NewValue0(v.Line, OpHmul64, t)
-		v4 := b.NewValue0(v.Line, OpConst64, t)
-		v4.AuxInt = smagic64m(c)
-		v3.AddArg(v4)
-		v3.AddArg(x)
-		v2.AddArg(v3)
-		v2.AddArg(x)
-		v1.AddArg(v2)
-		v5 := b.NewValue0(v.Line, OpConst64, t)
-		v5.AuxInt = smagic64s(c)
-		v1.AddArg(v5)
-		v0.AddArg(v1)
-		v6 := b.NewValue0(v.Line, OpRsh64x64, t)
-		v6.AddArg(x)
-		v7 := b.NewValue0(v.Line, OpConst64, t)
-		v7.AuxInt = 63
-		v6.AddArg(v7)
-		v0.AddArg(v6)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpDiv64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64F x (Const64F [f2i(1)]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		if v_1.AuxInt != f2i(1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Div64F x (Const64F [f2i(-1)]))
-	// cond:
-	// result: (Neg32F x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		if v_1.AuxInt != f2i(-1) {
-			break
-		}
-		v.reset(OpNeg32F)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpDiv64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Div64u <t> n (Const64 [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (Rsh64Ux64 n (Const64 <t> [log2(c)]))
-	for {
-		t := v.Type
-		n := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpRsh64Ux64)
-		v.AddArg(n)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = log2(c)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Div64u <t> x (Const64 [c]))
-	// cond: umagic64ok(c) && !umagic64a(c)
-	// result: (Rsh64Ux64     (Hmul64u <t>       (Const64 <t> [umagic64m(c)])       x)     (Const64 <t> [umagic64s(c)]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(umagic64ok(c) && !umagic64a(c)) {
-			break
-		}
-		v.reset(OpRsh64Ux64)
-		v0 := b.NewValue0(v.Line, OpHmul64u, t)
-		v1 := b.NewValue0(v.Line, OpConst64, t)
-		v1.AuxInt = umagic64m(c)
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = umagic64s(c)
-		v.AddArg(v2)
-		return true
-	}
-	// match: (Div64u <t> x (Const64 [c]))
-	// cond: umagic64ok(c) && umagic64a(c)
-	// result: (Rsh64Ux64     (Avg64u <t>       (Hmul64u <t>         x         (Const64 <t> [umagic64m(c)]))       x)     (Const64 <t> [umagic64s(c)-1]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(umagic64ok(c) && umagic64a(c)) {
-			break
-		}
-		v.reset(OpRsh64Ux64)
-		v0 := b.NewValue0(v.Line, OpAvg64u, t)
-		v1 := b.NewValue0(v.Line, OpHmul64u, t)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = umagic64m(c)
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = umagic64s(c) - 1
-		v.AddArg(v3)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq16 x x)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
-	// cond:
-	// result: (Eq16 (Const16 <t> [int64(int16(c-d))]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd16 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst16 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpEq16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = int64(int16(c - d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Eq16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpEq16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq16 (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c == d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c == d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq32 x x)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
-	// cond:
-	// result: (Eq32 (Const32 <t> [int64(int32(c-d))]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpEq32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = int64(int32(c - d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Eq32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpEq32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq32 (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c == d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c == d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq64 x x)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
-	// cond:
-	// result: (Eq64 (Const64 <t> [c-d]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd64 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst64 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpEq64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c - d
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Eq64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpEq64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c == d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c == d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Eq8  x x)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (Eq8  (Const8  <t> [c]) (Add8  (Const8  <t> [d]) x))
-	// cond:
-	// result: (Eq8  (Const8 <t> [int64(int8(c-d))]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd8 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst8 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpEq8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = int64(int8(c - d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (Eq8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpEq8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Eq8  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(c == d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c == d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqB (ConstBool [c]) (ConstBool [d]))
-	// cond:
-	// result: (ConstBool [b2i(c == d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstBool {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConstBool {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c == d)
-		return true
-	}
-	// match: (EqB (ConstBool [0]) x)
-	// cond:
-	// result: (Not x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstBool {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNot)
-		v.AddArg(x)
-		return true
-	}
-	// match: (EqB (ConstBool [1]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstBool {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEqInter(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqInter x y)
-	// cond:
-	// result: (EqPtr  (ITab x) (ITab y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpEqPtr)
-		v0 := b.NewValue0(v.Line, OpITab, config.fe.TypeBytePtr())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpITab, config.fe.TypeBytePtr())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuegeneric_OpEqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqPtr p (ConstNil))
-	// cond:
-	// result: (Not (IsNonNil p))
-	for {
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConstNil {
-			break
-		}
-		v.reset(OpNot)
-		v0 := b.NewValue0(v.Line, OpIsNonNil, config.fe.TypeBool())
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (EqPtr (ConstNil) p)
-	// cond:
-	// result: (Not (IsNonNil p))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstNil {
-			break
-		}
-		p := v.Args[1]
-		v.reset(OpNot)
-		v0 := b.NewValue0(v.Line, OpIsNonNil, config.fe.TypeBool())
-		v0.AddArg(p)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpEqSlice(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (EqSlice x y)
-	// cond:
-	// result: (EqPtr  (SlicePtr x) (SlicePtr y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpEqPtr)
-		v0 := b.NewValue0(v.Line, OpSlicePtr, config.fe.TypeBytePtr())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSlicePtr, config.fe.TypeBytePtr())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuegeneric_OpGeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16 (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c >= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c >= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq16U (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint16(c) >= uint16(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint16(c) >= uint16(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32 (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c >= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c >= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq32U (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint32(c) >= uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint32(c) >= uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c >= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c >= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq64U (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint64(c) >= uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint64(c) >= uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(c >= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c >= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Geq8U  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint8(c)  >= uint8(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint8(c) >= uint8(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16 (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c > d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c > d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater16U (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint16(c) > uint16(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint16(c) > uint16(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32 (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c > d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c > d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater32U (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint32(c) > uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint32(c) > uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c > d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c > d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater64U (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint64(c) > uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint64(c) > uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(c > d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c > d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpGreater8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Greater8U  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint8(c)  > uint8(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint8(c) > uint8(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpIMake(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IMake typ (StructMake1 val))
-	// cond:
-	// result: (IMake typ val)
-	for {
-		typ := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStructMake1 {
-			break
-		}
-		val := v_1.Args[0]
-		v.reset(OpIMake)
-		v.AddArg(typ)
-		v.AddArg(val)
-		return true
-	}
-	// match: (IMake typ (ArrayMake1 val))
-	// cond:
-	// result: (IMake typ val)
-	for {
-		typ := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpArrayMake1 {
-			break
-		}
-		val := v_1.Args[0]
-		v.reset(OpIMake)
-		v.AddArg(typ)
-		v.AddArg(val)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpIsInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsInBounds (ZeroExt8to32  _) (Const32 [c]))
-	// cond: (1 << 8)  <= c
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to32 {
-			break
-		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		if !((1 << 8) <= c) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds (ZeroExt8to64  _) (Const64 [c]))
-	// cond: (1 << 8)  <= c
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to64 {
-			break
-		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !((1 << 8) <= c) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds (ZeroExt16to32 _) (Const32 [c]))
-	// cond: (1 << 16) <= c
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt16to32 {
-			break
-		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		if !((1 << 16) <= c) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds (ZeroExt16to64 _) (Const64 [c]))
-	// cond: (1 << 16) <= c
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt16to64 {
-			break
-		}
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !((1 << 16) <= c) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds x x)
-	// cond:
-	// result: (ConstBool [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d]))
-	// cond: 0 <= c && c < d
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd32 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst32 {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(0 <= c && c < d) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d]))
-	// cond: 0 <= c && c < d
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst64 {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(0 <= c && c < d) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(0 <= c && c < d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(0 <= c && c < d)
-		return true
-	}
-	// match: (IsInBounds (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(0 <= c && c < d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(0 <= c && c < d)
-		return true
-	}
-	// match: (IsInBounds (Mod32u _ y) y)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMod32u {
-			break
-		}
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsInBounds (Mod64u _ y) y)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpMod64u {
-			break
-		}
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpIsSliceInBounds(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (IsSliceInBounds x x)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d]))
-	// cond: 0 <= c && c <= d
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd32 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst32 {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(0 <= c && c <= d) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d]))
-	// cond: 0 <= c && c <= d
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst64 {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(0 <= c && c <= d) {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsSliceInBounds (Const32 [0]) _)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsSliceInBounds (Const64 [0]) _)
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	// match: (IsSliceInBounds (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(0 <= c && c <= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(0 <= c && c <= d)
-		return true
-	}
-	// match: (IsSliceInBounds (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(0 <= c && c <= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(0 <= c && c <= d)
-		return true
-	}
-	// match: (IsSliceInBounds (SliceLen x) (SliceCap x))
-	// cond:
-	// result: (ConstBool [1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceLen {
-			break
-		}
-		x := v_0.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpSliceCap {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 1
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16 (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c <= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c <= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq16U (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint16(c) <= uint16(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint16(c) <= uint16(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32 (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c <= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c <= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq32U (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint32(c) <= uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint32(c) <= uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c <= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c <= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq64U (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint64(c) <= uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint64(c) <= uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(c <= d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c <= d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLeq8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Leq8U  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint8(c)  <= uint8(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint8(c) <= uint8(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16 (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c < d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c < d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess16U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less16U (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint16(c) < uint16(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint16(c) < uint16(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32 (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c < d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c < d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess32U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less32U (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint32(c) < uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint32(c) < uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c < d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c < d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess64U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less64U (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint64(c) < uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint64(c) < uint64(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(c < d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c < d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLess8U(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Less8U  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(uint8(c)  < uint8(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(uint8(c) < uint8(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLoad(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Load <t1> p1 (Store [w] p2 x _))
-	// cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && w == t1.Size()
-	// result: x
-	for {
-		t1 := v.Type
-		p1 := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStore {
-			break
-		}
-		w := v_1.AuxInt
-		p2 := v_1.Args[0]
-		x := v_1.Args[1]
-		if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == CMPeq && w == t1.Size()) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Load <t> _ _)
-	// cond: t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)
-	// result: (StructMake0)
-	for {
-		t := v.Type
-		if !(t.IsStruct() && t.NumFields() == 0 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake0)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)
-	// result: (StructMake1     (Load <t.FieldType(0)> ptr mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsStruct() && t.NumFields() == 1 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake1)
-		v0 := b.NewValue0(v.Line, OpLoad, t.FieldType(0))
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)
-	// result: (StructMake2     (Load <t.FieldType(0)> ptr mem)     (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsStruct() && t.NumFields() == 2 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake2)
-		v0 := b.NewValue0(v.Line, OpLoad, t.FieldType(0))
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, t.FieldType(1))
-		v2 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(1).PtrTo())
-		v2.AuxInt = t.FieldOff(1)
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)
-	// result: (StructMake3     (Load <t.FieldType(0)> ptr mem)     (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)     (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsStruct() && t.NumFields() == 3 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake3)
-		v0 := b.NewValue0(v.Line, OpLoad, t.FieldType(0))
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, t.FieldType(1))
-		v2 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(1).PtrTo())
-		v2.AuxInt = t.FieldOff(1)
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpLoad, t.FieldType(2))
-		v4 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(2).PtrTo())
-		v4.AuxInt = t.FieldOff(2)
-		v4.AddArg(ptr)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v.AddArg(v3)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)
-	// result: (StructMake4     (Load <t.FieldType(0)> ptr mem)     (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)     (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)     (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsStruct() && t.NumFields() == 4 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpStructMake4)
-		v0 := b.NewValue0(v.Line, OpLoad, t.FieldType(0))
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpLoad, t.FieldType(1))
-		v2 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(1).PtrTo())
-		v2.AuxInt = t.FieldOff(1)
-		v2.AddArg(ptr)
-		v1.AddArg(v2)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpLoad, t.FieldType(2))
-		v4 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(2).PtrTo())
-		v4.AuxInt = t.FieldOff(2)
-		v4.AddArg(ptr)
-		v3.AddArg(v4)
-		v3.AddArg(mem)
-		v.AddArg(v3)
-		v5 := b.NewValue0(v.Line, OpLoad, t.FieldType(3))
-		v6 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(3).PtrTo())
-		v6.AuxInt = t.FieldOff(3)
-		v6.AddArg(ptr)
-		v5.AddArg(v6)
-		v5.AddArg(mem)
-		v.AddArg(v5)
-		return true
-	}
-	// match: (Load <t> _ _)
-	// cond: t.IsArray() && t.NumElem() == 0
-	// result: (ArrayMake0)
-	for {
-		t := v.Type
-		if !(t.IsArray() && t.NumElem() == 0) {
-			break
-		}
-		v.reset(OpArrayMake0)
-		return true
-	}
-	// match: (Load <t> ptr mem)
-	// cond: t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)
-	// result: (ArrayMake1 (Load <t.ElemType()> ptr mem))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		mem := v.Args[1]
-		if !(t.IsArray() && t.NumElem() == 1 && config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpArrayMake1)
-		v0 := b.NewValue0(v.Line, OpLoad, t.ElemType())
-		v0.AddArg(ptr)
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Lsh16x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh16x16  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Lsh16x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh16x32  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x64  (Const16 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const16 [int64(int16(c) << uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c) << uint64(d))
-		return true
-	}
-	// match: (Lsh16x64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh16x64  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh16x64  _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (Const16 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Lsh16x64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh16x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpLsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Lsh16x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh16Ux64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpLsh16x64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpLsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh16x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Lsh16x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh16x8  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Lsh32x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh32x16  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Lsh32x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh32x32  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x64  (Const32 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const32 [int64(int32(c) << uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c) << uint64(d))
-		return true
-	}
-	// match: (Lsh32x64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh32x64  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh32x64  _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Lsh32x64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh32x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpLsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Lsh32x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh32Ux64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpLsh32x64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpLsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh32x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Lsh32x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh32x8  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Lsh64x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh64x16  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Lsh64x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh64x32  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x64  (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const64 [c << uint64(d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64)
-		v.AuxInt = c << uint64(d)
-		return true
-	}
-	// match: (Lsh64x64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh64x64  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh64x64  _ (Const64 [c]))
-	// cond: uint64(c) >= 64
-	// result: (Const64 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Lsh64x64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh64x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpLsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Lsh64x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh64Ux64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpLsh64x64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpLsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh64x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Lsh64x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh64x8  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Lsh8x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh8x16   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Lsh8x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh8x32   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x64   (Const8  [c]) (Const64 [d]))
-	// cond:
-	// result: (Const8  [int64(int8(c) << uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c) << uint64(d))
-		return true
-	}
-	// match: (Lsh8x64   x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Lsh8x64   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh8x64   _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (Const8  [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Lsh8x64  <t> (Lsh8x64  x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Lsh8x64  x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh8x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpLsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Lsh8x64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh8Ux64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpLsh8x64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpLsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpLsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Lsh8x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Lsh8x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpLsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Lsh8x8   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16 (Const16 [c]) (Const16 [d]))
-	// cond: d != 0
-	// result: (Const16 [int64(int16(c % d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c % d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod16u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod16u (Const16 [c]) (Const16 [d]))
-	// cond: d != 0
-	// result: (Const16 [int64(uint16(c) % uint16(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = int64(uint16(c) % uint16(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32 (Const32 [c]) (Const32 [d]))
-	// cond: d != 0
-	// result: (Const32 [int64(int32(c % d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c % d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod32u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod32u (Const32 [c]) (Const32 [d]))
-	// cond: d != 0
-	// result: (Const32 [int64(uint32(c) % uint32(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = int64(uint32(c) % uint32(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64 (Const64 [c]) (Const64 [d]))
-	// cond: d != 0
-	// result: (Const64 [c % d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = c % d
-		return true
-	}
-	// match: (Mod64  <t> x (Const64 [c]))
-	// cond: x.Op != OpConst64 && smagic64ok(c)
-	// result: (Sub64 x (Mul64 <t> (Div64  <t> x (Const64 <t> [c])) (Const64 <t> [c])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64 && smagic64ok(c)) {
-			break
-		}
-		v.reset(OpSub64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMul64, t)
-		v1 := b.NewValue0(v.Line, OpDiv64, t)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = c
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = c
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod64u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod64u (Const64 [c]) (Const64 [d]))
-	// cond: d != 0
-	// result: (Const64 [int64(uint64(c) % uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = int64(uint64(c) % uint64(d))
-		return true
-	}
-	// match: (Mod64u <t> n (Const64 [c]))
-	// cond: isPowerOfTwo(c)
-	// result: (And64 n (Const64 <t> [c-1]))
-	for {
-		t := v.Type
-		n := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(isPowerOfTwo(c)) {
-			break
-		}
-		v.reset(OpAnd64)
-		v.AddArg(n)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c - 1
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Mod64u <t> x (Const64 [c]))
-	// cond: x.Op != OpConst64 && umagic64ok(c)
-	// result: (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64 && umagic64ok(c)) {
-			break
-		}
-		v.reset(OpSub64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpMul64, t)
-		v1 := b.NewValue0(v.Line, OpDiv64u, t)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = c
-		v1.AddArg(v2)
-		v0.AddArg(v1)
-		v3 := b.NewValue0(v.Line, OpConst64, t)
-		v3.AuxInt = c
-		v0.AddArg(v3)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8  (Const8  [c]) (Const8  [d]))
-	// cond: d != 0
-	// result: (Const8  [int64(int8(c % d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c % d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMod8u(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mod8u  (Const8 [c])  (Const8  [d]))
-	// cond: d != 0
-	// result: (Const8  [int64(uint8(c) % uint8(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(d != 0) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = int64(uint8(c) % uint8(d))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMul16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul16  (Const16 [c])  (Const16 [d]))
-	// cond:
-	// result: (Const16 [int64(int16(c*d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c * d))
-		return true
-	}
-	// match: (Mul16 (Const16 [-1]) x)
-	// cond:
-	// result: (Neg16 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNeg16)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Mul16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpMul16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul16 (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMul32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32  (Const32 [c])  (Const32 [d]))
-	// cond:
-	// result: (Const32 [int64(int32(c*d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c * d))
-		return true
-	}
-	// match: (Mul32 (Const32 [-1]) x)
-	// cond:
-	// result: (Neg32 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNeg32)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Mul32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpMul32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x))
-	// cond:
-	// result: (Add32 (Const32 <t> [int64(int32(c*d))]) (Mul32 <t> (Const32 <t> [c]) x))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
-		}
-		if v_1.Type != t {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = int64(int32(c * d))
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMul32, t)
-		v2 := b.NewValue0(v.Line, OpConst32, t)
-		v2.AuxInt = c
-		v1.AddArg(v2)
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Mul32 (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMul32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul32F (Const32F [c]) (Const32F [d]))
-	// cond:
-	// result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32F)
-		v.AuxInt = f2i(float64(i2f32(c) * i2f32(d)))
-		return true
-	}
-	// match: (Mul32F x (Const32F [f2i(1)]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		if v_1.AuxInt != f2i(1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul32F (Const32F [f2i(1)]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		if v_0.AuxInt != f2i(1) {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul32F x (Const32F [f2i(-1)]))
-	// cond:
-	// result: (Neg32F x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		if v_1.AuxInt != f2i(-1) {
-			break
-		}
-		v.reset(OpNeg32F)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul32F (Const32F [f2i(-1)]) x)
-	// cond:
-	// result: (Neg32F x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		if v_0.AuxInt != f2i(-1) {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNeg32F)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMul64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64  (Const64 [c])  (Const64 [d]))
-	// cond:
-	// result: (Const64 [c*d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64)
-		v.AuxInt = c * d
-		return true
-	}
-	// match: (Mul64 (Const64 [-1]) x)
-	// cond:
-	// result: (Neg64 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNeg64)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Mul64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpMul64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x))
-	// cond:
-	// result: (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd64 {
-			break
-		}
-		if v_1.Type != t {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst64 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpAdd64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c * d
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpMul64, t)
-		v2 := b.NewValue0(v.Line, OpConst64, t)
-		v2.AuxInt = c
-		v1.AddArg(v2)
-		v1.AddArg(x)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Mul64 (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMul64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul64F (Const64F [c]) (Const64F [d]))
-	// cond:
-	// result: (Const64F [f2i(i2f(c) * i2f(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64F)
-		v.AuxInt = f2i(i2f(c) * i2f(d))
-		return true
-	}
-	// match: (Mul64F x (Const64F [f2i(1)]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		if v_1.AuxInt != f2i(1) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul64F (Const64F [f2i(1)]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		if v_0.AuxInt != f2i(1) {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul64F x (Const64F [f2i(-1)]))
-	// cond:
-	// result: (Neg64F x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		if v_1.AuxInt != f2i(-1) {
-			break
-		}
-		v.reset(OpNeg64F)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul64F (Const64F [f2i(-1)]) x)
-	// cond:
-	// result: (Neg64F x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		if v_0.AuxInt != f2i(-1) {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNeg64F)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpMul8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Mul8   (Const8 [c])   (Const8 [d]))
-	// cond:
-	// result: (Const8  [int64(int8(c*d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c * d))
-		return true
-	}
-	// match: (Mul8  (Const8  [-1]) x)
-	// cond:
-	// result: (Neg8  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNeg8)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (Mul8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpMul8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Mul8  (Const8  [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeg16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg16 (Sub16 x y))
-	// cond:
-	// result: (Sub16 y x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpSub16)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeg32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg32 (Sub32 x y))
-	// cond:
-	// result: (Sub32 y x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpSub32)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeg64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg64 (Sub64 x y))
-	// cond:
-	// result: (Sub64 y x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpSub64)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeg8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neg8  (Sub8  x y))
-	// cond:
-	// result: (Sub8  y x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSub8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpSub8)
-		v.AddArg(y)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeq16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq16 x x)
-	// cond:
-	// result: (ConstBool [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
-	// cond:
-	// result: (Neq16 (Const16 <t> [int64(int16(c-d))]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd16 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst16 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpNeq16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = int64(int16(c - d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Neq16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpNeq16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq16 (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c != d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c != d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeq32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq32 x x)
-	// cond:
-	// result: (ConstBool [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
-	// cond:
-	// result: (Neq32 (Const32 <t> [int64(int32(c-d))]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd32 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst32 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpNeq32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = int64(int32(c - d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Neq32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpNeq32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq32 (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c != d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c != d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeq64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq64 x x)
-	// cond:
-	// result: (ConstBool [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
-	// cond:
-	// result: (Neq64 (Const64 <t> [c-d]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd64 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst64 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpNeq64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c - d
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Neq64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpNeq64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (ConstBool [b2i(c != d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c != d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Neq8  x x)
-	// cond:
-	// result: (ConstBool [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConstBool)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Neq8  (Const8  <t> [c]) (Add8  (Const8  <t> [d]) x))
-	// cond:
-	// result: (Neq8 (Const8 <t> [int64(int8(c-d))]) x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		t := v_0.Type
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpAdd8 {
-			break
-		}
-		v_1_0 := v_1.Args[0]
-		if v_1_0.Op != OpConst8 {
-			break
-		}
-		if v_1_0.Type != t {
-			break
-		}
-		d := v_1_0.AuxInt
-		x := v_1.Args[1]
-		v.reset(OpNeq8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = int64(int8(c - d))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq8  x (Const8 <t>  [c]))
-	// cond: x.Op != OpConst8
-	// result: (Neq8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpNeq8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Neq8  (Const8  [c]) (Const8  [d]))
-	// cond:
-	// result: (ConstBool [b2i(c != d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c != d)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeqB(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqB (ConstBool [c]) (ConstBool [d]))
-	// cond:
-	// result: (ConstBool [b2i(c != d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstBool {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConstBool {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConstBool)
-		v.AuxInt = b2i(c != d)
-		return true
-	}
-	// match: (NeqB (ConstBool [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstBool {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (NeqB (ConstBool [1]) x)
-	// cond:
-	// result: (Not x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstBool {
-			break
-		}
-		if v_0.AuxInt != 1 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpNot)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeqInter(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqInter x y)
-	// cond:
-	// result: (NeqPtr (ITab x) (ITab y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpNeqPtr)
-		v0 := b.NewValue0(v.Line, OpITab, config.fe.TypeBytePtr())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpITab, config.fe.TypeBytePtr())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuegeneric_OpNeqPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqPtr p (ConstNil))
-	// cond:
-	// result: (IsNonNil p)
-	for {
-		p := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConstNil {
-			break
-		}
-		v.reset(OpIsNonNil)
-		v.AddArg(p)
-		return true
-	}
-	// match: (NeqPtr (ConstNil) p)
-	// cond:
-	// result: (IsNonNil p)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConstNil {
-			break
-		}
-		p := v.Args[1]
-		v.reset(OpIsNonNil)
-		v.AddArg(p)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNeqSlice(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NeqSlice x y)
-	// cond:
-	// result: (NeqPtr (SlicePtr x) (SlicePtr y))
-	for {
-		x := v.Args[0]
-		y := v.Args[1]
-		v.reset(OpNeqPtr)
-		v0 := b.NewValue0(v.Line, OpSlicePtr, config.fe.TypeBytePtr())
-		v0.AddArg(x)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpSlicePtr, config.fe.TypeBytePtr())
-		v1.AddArg(y)
-		v.AddArg(v1)
-		return true
-	}
-}
-func rewriteValuegeneric_OpNilCheck(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (NilCheck (GetG mem) mem)
-	// cond:
-	// result: mem
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGetG {
-			break
-		}
-		mem := v_0.Args[0]
-		if mem != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (NilCheck (Load (OffPtr [c] (SP)) mem) mem)
-	// cond: mem.Op == OpStaticCall 	&& isSameSym(mem.Aux, "runtime.newobject") 	&& c == config.ctxt.FixedFrameSize() + config.RegSize 	&& warnRule(config.Debug_checknil() && int(v.Line) > 1, v, "removed nil check")
-	// result: (Invalid)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLoad {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpOffPtr {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_0_0 := v_0_0.Args[0]
-		if v_0_0_0.Op != OpSP {
-			break
-		}
-		mem := v_0.Args[1]
-		if mem != v.Args[1] {
-			break
-		}
-		if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(config.Debug_checknil() && int(v.Line) > 1, v, "removed nil check")) {
-			break
-		}
-		v.reset(OpInvalid)
-		return true
-	}
-	// match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem)
-	// cond: mem.Op == OpStaticCall 	&& isSameSym(mem.Aux, "runtime.newobject") 	&& c == config.ctxt.FixedFrameSize() + config.RegSize 	&& warnRule(config.Debug_checknil() && int(v.Line) > 1, v, "removed nil check")
-	// result: (Invalid)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOffPtr {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpLoad {
-			break
-		}
-		v_0_0_0 := v_0_0.Args[0]
-		if v_0_0_0.Op != OpOffPtr {
-			break
-		}
-		c := v_0_0_0.AuxInt
-		v_0_0_0_0 := v_0_0_0.Args[0]
-		if v_0_0_0_0.Op != OpSP {
-			break
-		}
-		mem := v_0_0.Args[1]
-		if mem != v.Args[1] {
-			break
-		}
-		if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(config.Debug_checknil() && int(v.Line) > 1, v, "removed nil check")) {
-			break
-		}
-		v.reset(OpInvalid)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpNot(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Not (Eq64 x y))
-	// cond:
-	// result: (Neq64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpEq64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpNeq64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Eq32 x y))
-	// cond:
-	// result: (Neq32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpEq32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpNeq32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Eq16 x y))
-	// cond:
-	// result: (Neq16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpEq16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpNeq16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Eq8  x y))
-	// cond:
-	// result: (Neq8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpEq8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpNeq8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (EqB  x y))
-	// cond:
-	// result: (NeqB  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpEqB {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpNeqB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Neq64 x y))
-	// cond:
-	// result: (Eq64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpNeq64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpEq64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Neq32 x y))
-	// cond:
-	// result: (Eq32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpNeq32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpEq32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Neq16 x y))
-	// cond:
-	// result: (Eq16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpNeq16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpEq16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Neq8  x y))
-	// cond:
-	// result: (Eq8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpNeq8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpEq8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (NeqB  x y))
-	// cond:
-	// result: (EqB  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpNeqB {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpEqB)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater64 x y))
-	// cond:
-	// result: (Leq64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater32 x y))
-	// cond:
-	// result: (Leq32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater16 x y))
-	// cond:
-	// result: (Leq16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater8  x y))
-	// cond:
-	// result: (Leq8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater64U x y))
-	// cond:
-	// result: (Leq64U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater64U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq64U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater32U x y))
-	// cond:
-	// result: (Leq32U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater32U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq32U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater16U x y))
-	// cond:
-	// result: (Leq16U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater16U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq16U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Greater8U  x y))
-	// cond:
-	// result: (Leq8U  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGreater8U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLeq8U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq64 x y))
-	// cond:
-	// result: (Less64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq32 x y))
-	// cond:
-	// result: (Less32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq16 x y))
-	// cond:
-	// result: (Less16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq8  x y))
-	// cond:
-	// result: (Less8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq64U x y))
-	// cond:
-	// result: (Less64U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq64U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess64U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq32U x y))
-	// cond:
-	// result: (Less32U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq32U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess32U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq16U x y))
-	// cond:
-	// result: (Less16U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq16U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess16U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Geq8U  x y))
-	// cond:
-	// result: (Less8U  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpGeq8U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpLess8U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less64 x y))
-	// cond:
-	// result: (Geq64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less32 x y))
-	// cond:
-	// result: (Geq32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less16 x y))
-	// cond:
-	// result: (Geq16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less8  x y))
-	// cond:
-	// result: (Geq8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less64U x y))
-	// cond:
-	// result: (Geq64U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess64U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq64U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less32U x y))
-	// cond:
-	// result: (Geq32U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess32U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq32U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less16U x y))
-	// cond:
-	// result: (Geq16U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess16U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq16U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Less8U  x y))
-	// cond:
-	// result: (Geq8U  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLess8U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGeq8U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq64 x y))
-	// cond:
-	// result: (Greater64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq32 x y))
-	// cond:
-	// result: (Greater32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq16 x y))
-	// cond:
-	// result: (Greater16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq8  x y))
-	// cond:
-	// result: (Greater8 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq64U x y))
-	// cond:
-	// result: (Greater64U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq64U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater64U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq32U x y))
-	// cond:
-	// result: (Greater32U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq32U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater32U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq16U x y))
-	// cond:
-	// result: (Greater16U x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq16U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater16U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Not (Leq8U  x y))
-	// cond:
-	// result: (Greater8U  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLeq8U {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		v.reset(OpGreater8U)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (OffPtr (OffPtr p [b]) [a])
-	// cond:
-	// result: (OffPtr p [a+b])
-	for {
-		a := v.AuxInt
-		v_0 := v.Args[0]
-		if v_0.Op != OpOffPtr {
-			break
-		}
-		b := v_0.AuxInt
-		p := v_0.Args[0]
-		v.reset(OpOffPtr)
-		v.AuxInt = a + b
-		v.AddArg(p)
-		return true
-	}
-	// match: (OffPtr p [0])
-	// cond: v.Type.Compare(p.Type) == CMPeq
-	// result: p
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		p := v.Args[0]
-		if !(v.Type.Compare(p.Type) == CMPeq) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = p.Type
-		v.AddArg(p)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpOr16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Or16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpOr16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or16 x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or16 (Const16 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or16 (Const16 [-1]) _)
-	// cond:
-	// result: (Const16 [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (Or16 x (Or16 x y))
-	// cond:
-	// result: (Or16 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr16 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpOr16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or16 x (Or16 y x))
-	// cond:
-	// result: (Or16 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr16 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpOr16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or16 (Or16 x y) x)
-	// cond:
-	// result: (Or16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpOr16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or16 (Or16 x y) y)
-	// cond:
-	// result: (Or16 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpOr16)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpOr32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Or32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpOr32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or32 x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or32 (Const32 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or32 (Const32 [-1]) _)
-	// cond:
-	// result: (Const32 [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (Or32 x (Or32 x y))
-	// cond:
-	// result: (Or32 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr32 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpOr32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or32 x (Or32 y x))
-	// cond:
-	// result: (Or32 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr32 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpOr32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or32 (Or32 x y) x)
-	// cond:
-	// result: (Or32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpOr32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or32 (Or32 x y) y)
-	// cond:
-	// result: (Or32 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpOr32)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpOr64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Or64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpOr64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or64 x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or64 (Const64 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or64 (Const64 [-1]) _)
-	// cond:
-	// result: (Const64 [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (Or64 x (Or64 x y))
-	// cond:
-	// result: (Or64 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr64 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpOr64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or64 x (Or64 y x))
-	// cond:
-	// result: (Or64 x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr64 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpOr64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or64 (Or64 x y) x)
-	// cond:
-	// result: (Or64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpOr64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or64 (Or64 x y) y)
-	// cond:
-	// result: (Or64 x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpOr64)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Or8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (Or8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpOr8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or8  x x)
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or8  (Const8  [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Or8  (Const8  [-1]) _)
-	// cond:
-	// result: (Const8  [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != -1 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (Or8  x (Or8  x y))
-	// cond:
-	// result: (Or8  x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr8 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpOr8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or8  x (Or8  y x))
-	// cond:
-	// result: (Or8  x y)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpOr8 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpOr8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or8  (Or8  x y) x)
-	// cond:
-	// result: (Or8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpOr8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	// match: (Or8  (Or8  x y) y)
-	// cond:
-	// result: (Or8  x y)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpOr8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpOr8)
-		v.AddArg(x)
-		v.AddArg(y)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpPhi(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Phi (Const8  [c]) (Const8  [c]))
-	// cond:
-	// result: (Const8  [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		if v_1.AuxInt != c {
-			break
-		}
-		if len(v.Args) != 2 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = c
-		return true
-	}
-	// match: (Phi (Const16 [c]) (Const16 [c]))
-	// cond:
-	// result: (Const16 [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		if v_1.AuxInt != c {
-			break
-		}
-		if len(v.Args) != 2 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = c
-		return true
-	}
-	// match: (Phi (Const32 [c]) (Const32 [c]))
-	// cond:
-	// result: (Const32 [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		if v_1.AuxInt != c {
-			break
-		}
-		if len(v.Args) != 2 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = c
-		return true
-	}
-	// match: (Phi (Const64 [c]) (Const64 [c]))
-	// cond:
-	// result: (Const64 [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != c {
-			break
-		}
-		if len(v.Args) != 2 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpPtrIndex(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (PtrIndex <t> ptr idx)
-	// cond: config.PtrSize == 4
-	// result: (AddPtr ptr (Mul32 <config.fe.TypeInt()> idx (Const32 <config.fe.TypeInt()> [t.ElemType().Size()])))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		idx := v.Args[1]
-		if !(config.PtrSize == 4) {
-			break
-		}
-		v.reset(OpAddPtr)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMul32, config.fe.TypeInt())
-		v0.AddArg(idx)
-		v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt())
-		v1.AuxInt = t.ElemType().Size()
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (PtrIndex <t> ptr idx)
-	// cond: config.PtrSize == 8
-	// result: (AddPtr ptr (Mul64 <config.fe.TypeInt()> idx (Const64 <config.fe.TypeInt()> [t.ElemType().Size()])))
-	for {
-		t := v.Type
-		ptr := v.Args[0]
-		idx := v.Args[1]
-		if !(config.PtrSize == 8) {
-			break
-		}
-		v.reset(OpAddPtr)
-		v.AddArg(ptr)
-		v0 := b.NewValue0(v.Line, OpMul64, config.fe.TypeInt())
-		v0.AddArg(idx)
-		v1 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
-		v1.AuxInt = t.ElemType().Size()
-		v0.AddArg(v1)
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux16 <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh16Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux16 (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux32 <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh16Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux32 (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux64 (Const16 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const16 [int64(int16(uint16(c) >> uint64(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(uint16(c) >> uint64(d)))
-		return true
-	}
-	// match: (Rsh16Ux64 x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh16Ux64 (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh16Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 16
-	// result: (Const16 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 16) {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh16Ux64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh16Ux64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh16Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Rsh16Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh16x64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpRsh16Ux64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpRsh16Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16Ux8  <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh16Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16Ux8 (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh16x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x16  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh16x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x32  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x64  (Const16 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const16 [int64(int16(c) >> uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c) >> uint64(d))
-		return true
-	}
-	// match: (Rsh16x64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh16x64  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh16x64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh16x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh16x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh16x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh16x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh16x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh16x8  (Const16 [0]) _)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux16 <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh32Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32Ux16 (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux32 <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh32Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32Ux32 (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux64 (Const32 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const32 [int64(int32(uint32(c) >> uint64(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(uint32(c) >> uint64(d)))
-		return true
-	}
-	// match: (Rsh32Ux64 x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32Ux64 (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh32Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 32
-	// result: (Const32 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 32) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh32Ux64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh32Ux64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh32Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Rsh32Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh32x64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpRsh32Ux64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpRsh32Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32Ux8  <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh32Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32Ux8 (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh32x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32x16  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh32x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32x32  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x64  (Const32 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const32 [int64(int32(c) >> uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c) >> uint64(d))
-		return true
-	}
-	// match: (Rsh32x64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh32x64  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh32x64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh32x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh32x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh32x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh32x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh32x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh32x8  (Const32 [0]) _)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux16 <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh64Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64Ux16 (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux32 <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh64Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64Ux32 (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux64 (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const64 [int64(uint64(c) >> uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64)
-		v.AuxInt = int64(uint64(c) >> uint64(d))
-		return true
-	}
-	// match: (Rsh64Ux64 x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64Ux64 (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh64Ux64 _ (Const64 [c]))
-	// cond: uint64(c) >= 64
-	// result: (Const64 [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 64) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh64Ux64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh64Ux64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh64Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Rsh64Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh64x64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpRsh64Ux64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpRsh64Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64Ux8  <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh64Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64Ux8 (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh64x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64x16  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh64x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64x32  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x64  (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const64 [c >> uint64(d)])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64)
-		v.AuxInt = c >> uint64(d)
-		return true
-	}
-	// match: (Rsh64x64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh64x64  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh64x64 x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh64x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh64x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh64x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh64x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh64x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh64x8  (Const64 [0]) _)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8Ux16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux16 <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh8Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux16  (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8Ux32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux32 <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh8Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux32  (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux64  (Const8  [c]) (Const64 [d]))
-	// cond:
-	// result: (Const8  [int64(int8(uint8(c) >> uint64(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(uint8(c) >> uint64(d)))
-		return true
-	}
-	// match: (Rsh8Ux64  x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh8Ux64  (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh8Ux64  _ (Const64 [c]))
-	// cond: uint64(c) >= 8
-	// result: (Const8  [0])
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c := v_1.AuxInt
-		if !(uint64(c) >= 8) {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh8Ux64  <t> (Rsh8Ux64  x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh8Ux64  x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh8Ux64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh8Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
-	// cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-	// result: (Rsh8Ux64 x (Const64 <config.fe.TypeUInt64()> [c1-c2+c3]))
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLsh8x64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpRsh8Ux64 {
-			break
-		}
-		x := v_0_0.Args[0]
-		v_0_0_1 := v_0_0.Args[1]
-		if v_0_0_1.Op != OpConst64 {
-			break
-		}
-		c1 := v_0_0_1.AuxInt
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c2 := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		c3 := v_1.AuxInt
-		if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
-			break
-		}
-		v.reset(OpRsh8Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, config.fe.TypeUInt64())
-		v0.AuxInt = c1 - c2 + c3
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8Ux8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8Ux8  <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh8Ux64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8Ux8  (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8x16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x16  <t> x (Const16 [c]))
-	// cond:
-	// result: (Rsh8x64  x (Const64 <t> [int64(uint16(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint16(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x16   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8x32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x32  <t> x (Const32 [c]))
-	// cond:
-	// result: (Rsh8x64  x (Const64 <t> [int64(uint32(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint32(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x32   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8x64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x64   (Const8  [c]) (Const64 [d]))
-	// cond:
-	// result: (Const8  [int64(int8(c) >> uint64(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c) >> uint64(d))
-		return true
-	}
-	// match: (Rsh8x64   x (Const64 [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Rsh8x64   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Rsh8x64  <t> (Rsh8x64  x (Const64 [c])) (Const64 [d]))
-	// cond: !uaddOvf(c,d)
-	// result: (Rsh8x64  x (Const64 <t> [c+d]))
-	for {
-		t := v.Type
-		v_0 := v.Args[0]
-		if v_0.Op != OpRsh8x64 {
-			break
-		}
-		x := v_0.Args[0]
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		c := v_0_1.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		if !(!uaddOvf(c, d)) {
-			break
-		}
-		v.reset(OpRsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c + d
-		v.AddArg(v0)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpRsh8x8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Rsh8x8   <t> x (Const8  [c]))
-	// cond:
-	// result: (Rsh8x64  x (Const64 <t> [int64(uint8(c))]))
-	for {
-		t := v.Type
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		c := v_1.AuxInt
-		v.reset(OpRsh8x64)
-		v.AddArg(x)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = int64(uint8(c))
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Rsh8x8   (Const8 [0]) _)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSignExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s]))))
-	// cond: s >= 16
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc32to16 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh32x64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 16) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSignExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s]))))
-	// cond: s >= 48
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc64to16 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh64x64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 48) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSignExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s]))))
-	// cond: s >= 32
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc64to32 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh64x64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 32) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSignExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to16  (Trunc16to8  x:(Rsh16x64 _ (Const64 [s]))))
-	// cond: s >= 8
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc16to8 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh16x64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 8) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSignExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to32  (Trunc32to8  x:(Rsh32x64 _ (Const64 [s]))))
-	// cond: s >= 24
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc32to8 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh32x64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 24) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSignExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SignExt8to64  (Trunc64to8  x:(Rsh64x64 _ (Const64 [s]))))
-	// cond: s >= 56
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc64to8 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh64x64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 56) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSliceCap(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SliceCap (SliceMake _ _ (Const64 <t> [c])))
-	// cond:
-	// result: (Const64 <t> [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_2 := v_0.Args[2]
-		if v_0_2.Op != OpConst64 {
-			break
-		}
-		t := v_0_2.Type
-		c := v_0_2.AuxInt
-		v.reset(OpConst64)
-		v.Type = t
-		v.AuxInt = c
-		return true
-	}
-	// match: (SliceCap (SliceMake _ _ (Const32 <t> [c])))
-	// cond:
-	// result: (Const32 <t> [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_2 := v_0.Args[2]
-		if v_0_2.Op != OpConst32 {
-			break
-		}
-		t := v_0_2.Type
-		c := v_0_2.AuxInt
-		v.reset(OpConst32)
-		v.Type = t
-		v.AuxInt = c
-		return true
-	}
-	// match: (SliceCap (SliceMake _ _ (SliceCap x)))
-	// cond:
-	// result: (SliceCap x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_2 := v_0.Args[2]
-		if v_0_2.Op != OpSliceCap {
-			break
-		}
-		x := v_0_2.Args[0]
-		v.reset(OpSliceCap)
-		v.AddArg(x)
-		return true
-	}
-	// match: (SliceCap (SliceMake _ _ (SliceLen x)))
-	// cond:
-	// result: (SliceLen x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_2 := v_0.Args[2]
-		if v_0_2.Op != OpSliceLen {
-			break
-		}
-		x := v_0_2.Args[0]
-		v.reset(OpSliceLen)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSliceLen(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SliceLen (SliceMake _ (Const64 <t> [c]) _))
-	// cond:
-	// result: (Const64 <t> [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		t := v_0_1.Type
-		c := v_0_1.AuxInt
-		v.reset(OpConst64)
-		v.Type = t
-		v.AuxInt = c
-		return true
-	}
-	// match: (SliceLen (SliceMake _ (Const32 <t> [c]) _))
-	// cond:
-	// result: (Const32 <t> [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst32 {
-			break
-		}
-		t := v_0_1.Type
-		c := v_0_1.AuxInt
-		v.reset(OpConst32)
-		v.Type = t
-		v.AuxInt = c
-		return true
-	}
-	// match: (SliceLen (SliceMake _ (SliceLen x) _))
-	// cond:
-	// result: (SliceLen x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpSliceLen {
-			break
-		}
-		x := v_0_1.Args[0]
-		v.reset(OpSliceLen)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSlicePtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (SlicePtr (SliceMake (SlicePtr x) _ _))
-	// cond:
-	// result: (SlicePtr x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSliceMake {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpSlicePtr {
-			break
-		}
-		x := v_0_0.Args[0]
-		v.reset(OpSlicePtr)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSlicemask(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Slicemask (Const32 [x]))
-	// cond: x > 0
-	// result: (Const32 [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x > 0) {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (Slicemask (Const32 [0]))
-	// cond:
-	// result: (Const32 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Slicemask (Const64 [x]))
-	// cond: x > 0
-	// result: (Const64 [-1])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		x := v_0.AuxInt
-		if !(x > 0) {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = -1
-		return true
-	}
-	// match: (Slicemask (Const64 [0]))
-	// cond:
-	// result: (Const64 [0])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSqrt(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sqrt (Const64F [c]))
-	// cond:
-	// result: (Const64F [f2i(math.Sqrt(i2f(c)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst64F)
-		v.AuxInt = f2i(math.Sqrt(i2f(c)))
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Store _ (StructMake0) mem)
-	// cond:
-	// result: mem
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpStructMake0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store dst (StructMake1 <t> f0) mem)
-	// cond:
-	// result: (Store [t.FieldType(0).Size()] dst f0 mem)
-	for {
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStructMake1 {
-			break
-		}
-		t := v_1.Type
-		f0 := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = t.FieldType(0).Size()
-		v.AddArg(dst)
-		v.AddArg(f0)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store dst (StructMake2 <t> f0 f1) mem)
-	// cond:
-	// result: (Store [t.FieldType(1).Size()]     (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)     f1     (Store [t.FieldType(0).Size()] dst f0 mem))
-	for {
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStructMake2 {
-			break
-		}
-		t := v_1.Type
-		f0 := v_1.Args[0]
-		f1 := v_1.Args[1]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = t.FieldType(1).Size()
-		v0 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(1).PtrTo())
-		v0.AuxInt = t.FieldOff(1)
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(f1)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = t.FieldType(0).Size()
-		v1.AddArg(dst)
-		v1.AddArg(f0)
-		v1.AddArg(mem)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
-	// cond:
-	// result: (Store [t.FieldType(2).Size()]     (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)     f2     (Store [t.FieldType(1).Size()]       (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)       f1       (Store [t.FieldType(0).Size()] dst f0 mem)))
-	for {
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStructMake3 {
-			break
-		}
-		t := v_1.Type
-		f0 := v_1.Args[0]
-		f1 := v_1.Args[1]
-		f2 := v_1.Args[2]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = t.FieldType(2).Size()
-		v0 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(2).PtrTo())
-		v0.AuxInt = t.FieldOff(2)
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(f2)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = t.FieldType(1).Size()
-		v2 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(1).PtrTo())
-		v2.AuxInt = t.FieldOff(1)
-		v2.AddArg(dst)
-		v1.AddArg(v2)
-		v1.AddArg(f1)
-		v3 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v3.AuxInt = t.FieldType(0).Size()
-		v3.AddArg(dst)
-		v3.AddArg(f0)
-		v3.AddArg(mem)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
-	// cond:
-	// result: (Store [t.FieldType(3).Size()]     (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)     f3     (Store [t.FieldType(2).Size()]       (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)       f2       (Store [t.FieldType(1).Size()]         (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)         f1         (Store [t.FieldType(0).Size()] dst f0 mem))))
-	for {
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpStructMake4 {
-			break
-		}
-		t := v_1.Type
-		f0 := v_1.Args[0]
-		f1 := v_1.Args[1]
-		f2 := v_1.Args[2]
-		f3 := v_1.Args[3]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = t.FieldType(3).Size()
-		v0 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(3).PtrTo())
-		v0.AuxInt = t.FieldOff(3)
-		v0.AddArg(dst)
-		v.AddArg(v0)
-		v.AddArg(f3)
-		v1 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v1.AuxInt = t.FieldType(2).Size()
-		v2 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(2).PtrTo())
-		v2.AuxInt = t.FieldOff(2)
-		v2.AddArg(dst)
-		v1.AddArg(v2)
-		v1.AddArg(f2)
-		v3 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v3.AuxInt = t.FieldType(1).Size()
-		v4 := b.NewValue0(v.Line, OpOffPtr, t.FieldType(1).PtrTo())
-		v4.AuxInt = t.FieldOff(1)
-		v4.AddArg(dst)
-		v3.AddArg(v4)
-		v3.AddArg(f1)
-		v5 := b.NewValue0(v.Line, OpStore, TypeMem)
-		v5.AuxInt = t.FieldType(0).Size()
-		v5.AddArg(dst)
-		v5.AddArg(f0)
-		v5.AddArg(mem)
-		v3.AddArg(v5)
-		v1.AddArg(v3)
-		v.AddArg(v1)
-		return true
-	}
-	// match: (Store [size] dst (Load <t> src mem) mem)
-	// cond: !config.fe.CanSSA(t)
-	// result: (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src mem)
-	for {
-		size := v.AuxInt
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpLoad {
-			break
-		}
-		t := v_1.Type
-		src := v_1.Args[0]
-		mem := v_1.Args[1]
-		if mem != v.Args[2] {
-			break
-		}
-		if !(!config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpMove)
-		v.AuxInt = MakeSizeAndAlign(size, t.Alignment()).Int64()
-		v.AddArg(dst)
-		v.AddArg(src)
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [size] dst (Load <t> src mem) (VarDef {x} mem))
-	// cond: !config.fe.CanSSA(t)
-	// result: (Move [MakeSizeAndAlign(size, t.Alignment()).Int64()] dst src (VarDef {x} mem))
-	for {
-		size := v.AuxInt
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpLoad {
-			break
-		}
-		t := v_1.Type
-		src := v_1.Args[0]
-		mem := v_1.Args[1]
-		v_2 := v.Args[2]
-		if v_2.Op != OpVarDef {
-			break
-		}
-		x := v_2.Aux
-		if mem != v_2.Args[0] {
-			break
-		}
-		if !(!config.fe.CanSSA(t)) {
-			break
-		}
-		v.reset(OpMove)
-		v.AuxInt = MakeSizeAndAlign(size, t.Alignment()).Int64()
-		v.AddArg(dst)
-		v.AddArg(src)
-		v0 := b.NewValue0(v.Line, OpVarDef, TypeMem)
-		v0.Aux = x
-		v0.AddArg(mem)
-		v.AddArg(v0)
-		return true
-	}
-	// match: (Store _ (ArrayMake0) mem)
-	// cond:
-	// result: mem
-	for {
-		v_1 := v.Args[1]
-		if v_1.Op != OpArrayMake0 {
-			break
-		}
-		mem := v.Args[2]
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	// match: (Store [size] dst (ArrayMake1 e) mem)
-	// cond:
-	// result: (Store [size] dst e mem)
-	for {
-		size := v.AuxInt
-		dst := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpArrayMake1 {
-			break
-		}
-		e := v_1.Args[0]
-		mem := v.Args[2]
-		v.reset(OpStore)
-		v.AuxInt = size
-		v.AddArg(dst)
-		v.AddArg(e)
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpStringLen(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StringLen (StringMake _ (Const64 <t> [c])))
-	// cond:
-	// result: (Const64 <t> [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpStringMake {
-			break
-		}
-		v_0_1 := v_0.Args[1]
-		if v_0_1.Op != OpConst64 {
-			break
-		}
-		t := v_0_1.Type
-		c := v_0_1.AuxInt
-		v.reset(OpConst64)
-		v.Type = t
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpStringPtr(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StringPtr (StringMake (Const64 <t> [c]) _))
-	// cond:
-	// result: (Const64 <t> [c])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpStringMake {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst64 {
-			break
-		}
-		t := v_0_0.Type
-		c := v_0_0.AuxInt
-		v.reset(OpConst64)
-		v.Type = t
-		v.AuxInt = c
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpStructSelect(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (StructSelect (StructMake1 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake1 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [0] (StructMake2 x _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake2 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [1] (StructMake2 _ x))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake2 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [0] (StructMake3 x _ _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake3 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [1] (StructMake3 _ x _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake3 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [2] (StructMake3 _ _ x))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake3 {
-			break
-		}
-		x := v_0.Args[2]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [0] (StructMake4 x _ _ _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake4 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [1] (StructMake4 _ x _ _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 1 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake4 {
-			break
-		}
-		x := v_0.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [2] (StructMake4 _ _ x _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 2 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake4 {
-			break
-		}
-		x := v_0.Args[2]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [3] (StructMake4 _ _ _ x))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 3 {
-			break
-		}
-		v_0 := v.Args[0]
-		if v_0.Op != OpStructMake4 {
-			break
-		}
-		x := v_0.Args[3]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (StructSelect [i] x:(Load <t> ptr mem))
-	// cond: !config.fe.CanSSA(t)
-	// result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
-	for {
-		i := v.AuxInt
-		x := v.Args[0]
-		if x.Op != OpLoad {
-			break
-		}
-		t := x.Type
-		ptr := x.Args[0]
-		mem := x.Args[1]
-		if !(!config.fe.CanSSA(t)) {
-			break
-		}
-		b = x.Block
-		v0 := b.NewValue0(v.Line, OpLoad, v.Type)
-		v.reset(OpCopy)
-		v.AddArg(v0)
-		v1 := b.NewValue0(v.Line, OpOffPtr, v.Type.PtrTo())
-		v1.AuxInt = t.FieldOff(int(i))
-		v1.AddArg(ptr)
-		v0.AddArg(v1)
-		v0.AddArg(mem)
-		return true
-	}
-	// match: (StructSelect [0] x:(IData _))
-	// cond:
-	// result: x
-	for {
-		if v.AuxInt != 0 {
-			break
-		}
-		x := v.Args[0]
-		if x.Op != OpIData {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSub16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub16  (Const16 [c]) (Const16 [d]))
-	// cond:
-	// result: (Const16 [int64(int16(c-d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c - d))
-		return true
-	}
-	// match: (Sub16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Add16 (Const16 <t> [int64(int16(-c))]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpAdd16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = int64(int16(-c))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Sub16 x x)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Sub16 (Add16 x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Sub16 (Add16 x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSub32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32  (Const32 [c]) (Const32 [d]))
-	// cond:
-	// result: (Const32 [int64(int32(c-d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c - d))
-		return true
-	}
-	// match: (Sub32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Add32 (Const32 <t> [int64(int32(-c))]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpAdd32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = int64(int32(-c))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Sub32 x x)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Sub32 (Add32 x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Sub32 (Add32 x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSub32F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub32F (Const32F [c]) (Const32F [d]))
-	// cond:
-	// result: (Const32F [f2i(float64(i2f32(c) - i2f32(d)))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32F {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst32F)
-		v.AuxInt = f2i(float64(i2f32(c) - i2f32(d)))
-		return true
-	}
-	// match: (Sub32F x (Const32F [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32F {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSub64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64  (Const64 [c]) (Const64 [d]))
-	// cond:
-	// result: (Const64 [c-d])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64)
-		v.AuxInt = c - d
-		return true
-	}
-	// match: (Sub64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Add64 (Const64 <t> [-c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpAdd64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = -c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Sub64 x x)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Sub64 (Add64 x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Sub64 (Add64 x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSub64F(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub64F (Const64F [c]) (Const64F [d]))
-	// cond:
-	// result: (Const64F [f2i(i2f(c) - i2f(d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64F {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst64F)
-		v.AuxInt = f2i(i2f(c) - i2f(d))
-		return true
-	}
-	// match: (Sub64F x (Const64F [0]))
-	// cond:
-	// result: x
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64F {
-			break
-		}
-		if v_1.AuxInt != 0 {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpSub8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Sub8   (Const8 [c]) (Const8 [d]))
-	// cond:
-	// result: (Const8 [int64(int8(c-d))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		c := v_0.AuxInt
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		d := v_1.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c - d))
-		return true
-	}
-	// match: (Sub8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (Add8  (Const8  <t> [int64(int8(-c))]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpAdd8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = int64(int8(-c))
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Sub8  x x)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Sub8  (Add8  x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Sub8  (Add8  x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAdd8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpTrunc16to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc16to8  (Const16 [c]))
-	// cond:
-	// result: (Const8   [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	// match: (Trunc16to8  (ZeroExt8to16  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to16 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc16to8  (SignExt8to16  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt8to16 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc16to8  (And16 (Const16 [y]) x))
-	// cond: y&0xFF == 0xFF
-	// result: (Trunc16to8 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd16 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst16 {
-			break
-		}
-		y := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(y&0xFF == 0xFF) {
-			break
-		}
-		v.reset(OpTrunc16to8)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpTrunc32to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to16 (Const32 [c]))
-	// cond:
-	// result: (Const16  [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	// match: (Trunc32to16 (ZeroExt8to32  x))
-	// cond:
-	// result: (ZeroExt8to16  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpZeroExt8to16)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc32to16 (ZeroExt16to32 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt16to32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc32to16 (SignExt8to32  x))
-	// cond:
-	// result: (SignExt8to16  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt8to32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpSignExt8to16)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc32to16 (SignExt16to32 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt16to32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc32to16 (And32 (Const32 [y]) x))
-	// cond: y&0xFFFF == 0xFFFF
-	// result: (Trunc32to16 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd32 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst32 {
-			break
-		}
-		y := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(y&0xFFFF == 0xFFFF) {
-			break
-		}
-		v.reset(OpTrunc32to16)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpTrunc32to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc32to8  (Const32 [c]))
-	// cond:
-	// result: (Const8   [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	// match: (Trunc32to8  (ZeroExt8to32  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc32to8  (SignExt8to32  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt8to32 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc32to8  (And32 (Const32 [y]) x))
-	// cond: y&0xFF == 0xFF
-	// result: (Trunc32to8 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd32 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst32 {
-			break
-		}
-		y := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(y&0xFF == 0xFF) {
-			break
-		}
-		v.reset(OpTrunc32to8)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpTrunc64to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to16 (Const64 [c]))
-	// cond:
-	// result: (Const16  [int64(int16(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst16)
-		v.AuxInt = int64(int16(c))
-		return true
-	}
-	// match: (Trunc64to16 (ZeroExt8to64  x))
-	// cond:
-	// result: (ZeroExt8to16  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpZeroExt8to16)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to16 (ZeroExt16to64 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt16to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to16 (SignExt8to64  x))
-	// cond:
-	// result: (SignExt8to16  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt8to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpSignExt8to16)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to16 (SignExt16to64 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt16to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to16 (And64 (Const64 [y]) x))
-	// cond: y&0xFFFF == 0xFFFF
-	// result: (Trunc64to16 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst64 {
-			break
-		}
-		y := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(y&0xFFFF == 0xFFFF) {
-			break
-		}
-		v.reset(OpTrunc64to16)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpTrunc64to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to32 (Const64 [c]))
-	// cond:
-	// result: (Const32  [int64(int32(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst32)
-		v.AuxInt = int64(int32(c))
-		return true
-	}
-	// match: (Trunc64to32 (ZeroExt8to64  x))
-	// cond:
-	// result: (ZeroExt8to32  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpZeroExt8to32)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to32 (ZeroExt16to64 x))
-	// cond:
-	// result: (ZeroExt16to32 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt16to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpZeroExt16to32)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to32 (ZeroExt32to64 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt32to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to32 (SignExt8to64  x))
-	// cond:
-	// result: (SignExt8to32  x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt8to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpSignExt8to32)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to32 (SignExt16to64 x))
-	// cond:
-	// result: (SignExt16to32 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt16to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpSignExt16to32)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to32 (SignExt32to64 x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt32to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to32 (And64 (Const64 [y]) x))
-	// cond: y&0xFFFFFFFF == 0xFFFFFFFF
-	// result: (Trunc64to32 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst64 {
-			break
-		}
-		y := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
-			break
-		}
-		v.reset(OpTrunc64to32)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpTrunc64to8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Trunc64to8  (Const64 [c]))
-	// cond:
-	// result: (Const8   [int64(int8(c))])
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		c := v_0.AuxInt
-		v.reset(OpConst8)
-		v.AuxInt = int64(int8(c))
-		return true
-	}
-	// match: (Trunc64to8  (ZeroExt8to64  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpZeroExt8to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to8  (SignExt8to64  x))
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpSignExt8to64 {
-			break
-		}
-		x := v_0.Args[0]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Trunc64to8  (And64 (Const64 [y]) x))
-	// cond: y&0xFF == 0xFF
-	// result: (Trunc64to8 x)
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpAnd64 {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpConst64 {
-			break
-		}
-		y := v_0_0.AuxInt
-		x := v_0.Args[1]
-		if !(y&0xFF == 0xFF) {
-			break
-		}
-		v.reset(OpTrunc64to8)
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpXor16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor16 x (Const16 <t> [c]))
-	// cond: x.Op != OpConst16
-	// result: (Xor16 (Const16 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst16 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst16) {
-			break
-		}
-		v.reset(OpXor16)
-		v0 := b.NewValue0(v.Line, OpConst16, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor16 x x)
-	// cond:
-	// result: (Const16 [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst16)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Xor16 (Const16 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst16 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor16 x (Xor16 x y))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor16 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor16 x (Xor16 y x))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor16 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor16 (Xor16 x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor16 (Xor16 x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor16 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpXor32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor32 x (Const32 <t> [c]))
-	// cond: x.Op != OpConst32
-	// result: (Xor32 (Const32 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst32 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst32) {
-			break
-		}
-		v.reset(OpXor32)
-		v0 := b.NewValue0(v.Line, OpConst32, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor32 x x)
-	// cond:
-	// result: (Const32 [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst32)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Xor32 (Const32 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst32 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor32 x (Xor32 x y))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor32 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor32 x (Xor32 y x))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor32 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor32 (Xor32 x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor32 (Xor32 x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor32 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpXor64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor64 x (Const64 <t> [c]))
-	// cond: x.Op != OpConst64
-	// result: (Xor64 (Const64 <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst64 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst64) {
-			break
-		}
-		v.reset(OpXor64)
-		v0 := b.NewValue0(v.Line, OpConst64, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor64 x x)
-	// cond:
-	// result: (Const64 [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst64)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Xor64 (Const64 [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst64 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor64 x (Xor64 x y))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor64 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor64 x (Xor64 y x))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor64 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor64 (Xor64 x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor64 (Xor64 x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor64 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Xor8  x (Const8  <t> [c]))
-	// cond: x.Op != OpConst8
-	// result: (Xor8  (Const8  <t> [c]) x)
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpConst8 {
-			break
-		}
-		t := v_1.Type
-		c := v_1.AuxInt
-		if !(x.Op != OpConst8) {
-			break
-		}
-		v.reset(OpXor8)
-		v0 := b.NewValue0(v.Line, OpConst8, t)
-		v0.AuxInt = c
-		v.AddArg(v0)
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor8  x x)
-	// cond:
-	// result: (Const8  [0])
-	for {
-		x := v.Args[0]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpConst8)
-		v.AuxInt = 0
-		return true
-	}
-	// match: (Xor8  (Const8  [0]) x)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpConst8 {
-			break
-		}
-		if v_0.AuxInt != 0 {
-			break
-		}
-		x := v.Args[1]
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	// match: (Xor8  x (Xor8  x y))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor8 {
-			break
-		}
-		if x != v_1.Args[0] {
-			break
-		}
-		y := v_1.Args[1]
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor8  x (Xor8  y x))
-	// cond:
-	// result: y
-	for {
-		x := v.Args[0]
-		v_1 := v.Args[1]
-		if v_1.Op != OpXor8 {
-			break
-		}
-		y := v_1.Args[0]
-		if x != v_1.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor8  (Xor8  x y) x)
-	// cond:
-	// result: y
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if x != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = y.Type
-		v.AddArg(y)
-		return true
-	}
-	// match: (Xor8  (Xor8  x y) y)
-	// cond:
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpXor8 {
-			break
-		}
-		x := v_0.Args[0]
-		y := v_0.Args[1]
-		if y != v.Args[1] {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZero(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (Zero (Load (OffPtr [c] (SP)) mem) mem)
-	// cond: mem.Op == OpStaticCall 	&& isSameSym(mem.Aux, "runtime.newobject") 	&& c == config.ctxt.FixedFrameSize() + config.PtrSize
-	// result: mem
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpLoad {
-			break
-		}
-		v_0_0 := v_0.Args[0]
-		if v_0_0.Op != OpOffPtr {
-			break
-		}
-		c := v_0_0.AuxInt
-		v_0_0_0 := v_0_0.Args[0]
-		if v_0_0_0.Op != OpSP {
-			break
-		}
-		mem := v_0.Args[1]
-		if mem != v.Args[1] {
-			break
-		}
-		if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.PtrSize) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = mem.Type
-		v.AddArg(mem)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZeroExt16to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s]))))
-	// cond: s >= 16
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc32to16 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh32Ux64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 16) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZeroExt16to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s]))))
-	// cond: s >= 48
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc64to16 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh64Ux64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 48) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZeroExt32to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s]))))
-	// cond: s >= 32
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc64to32 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh64Ux64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 32) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZeroExt8to16(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to16  (Trunc16to8  x:(Rsh16Ux64 _ (Const64 [s]))))
-	// cond: s >= 8
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc16to8 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh16Ux64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 8) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZeroExt8to32(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to32  (Trunc32to8  x:(Rsh32Ux64 _ (Const64 [s]))))
-	// cond: s >= 24
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc32to8 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh32Ux64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 24) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteValuegeneric_OpZeroExt8to64(v *Value, config *Config) bool {
-	b := v.Block
-	_ = b
-	// match: (ZeroExt8to64  (Trunc64to8  x:(Rsh64Ux64 _ (Const64 [s]))))
-	// cond: s >= 56
-	// result: x
-	for {
-		v_0 := v.Args[0]
-		if v_0.Op != OpTrunc64to8 {
-			break
-		}
-		x := v_0.Args[0]
-		if x.Op != OpRsh64Ux64 {
-			break
-		}
-		x_1 := x.Args[1]
-		if x_1.Op != OpConst64 {
-			break
-		}
-		s := x_1.AuxInt
-		if !(s >= 56) {
-			break
-		}
-		v.reset(OpCopy)
-		v.Type = x.Type
-		v.AddArg(x)
-		return true
-	}
-	return false
-}
-func rewriteBlockgeneric(b *Block, config *Config) bool {
-	switch b.Kind {
-	case BlockIf:
-		// match: (If (Not cond) yes no)
-		// cond:
-		// result: (If cond no yes)
-		for {
-			v := b.Control
-			if v.Op != OpNot {
-				break
-			}
-			cond := v.Args[0]
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			b.Kind = BlockIf
-			b.SetControl(cond)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-		// match: (If (ConstBool [c]) yes no)
-		// cond: c == 1
-		// result: (First nil yes no)
-		for {
-			v := b.Control
-			if v.Op != OpConstBool {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c == 1) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			_ = yes
-			_ = no
-			return true
-		}
-		// match: (If (ConstBool [c]) yes no)
-		// cond: c == 0
-		// result: (First nil no yes)
-		for {
-			v := b.Control
-			if v.Op != OpConstBool {
-				break
-			}
-			c := v.AuxInt
-			yes := b.Succs[0]
-			no := b.Succs[1]
-			if !(c == 0) {
-				break
-			}
-			b.Kind = BlockFirst
-			b.SetControl(nil)
-			b.swapSuccessors()
-			_ = no
-			_ = yes
-			return true
-		}
-	}
-	return false
-}
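Note on the file deleted above: the rewriteValuegeneric_* functions are machine-generated pattern matchers. Each "// match / cond / result" comment describes one algebraic simplification, and the loop beneath it is the hand-unrolled matcher for that pattern; in the real tree these matchers are regenerated from the rule files under cmd/compile/internal/ssa/gen, so the rules, not the generated Go, are the source of truth. As a rough standalone illustration only — the Op and Value types below are toy stand-ins invented here, not the compiler's ssa package — this sketch hand-writes two of the Sub64 rules in the same shape as the generated code:

package main

import "fmt"

type Op int

const (
	OpConst64 Op = iota
	OpSub64
)

// Value is a minimal stand-in for an SSA value: an opcode, an integer
// auxiliary payload, and argument edges.
type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

// rewriteSub64 mirrors the structure of rewriteValuegeneric_OpSub64 above:
// try each pattern in turn, rewrite v in place on a match, report success.
func rewriteSub64(v *Value) bool {
	// match: (Sub64 x x)
	// result: (Const64 [0])
	for {
		x := v.Args[0]
		if x != v.Args[1] {
			break
		}
		v.Op = OpConst64
		v.AuxInt = 0
		v.Args = nil
		return true
	}
	// match: (Sub64 (Const64 [c]) (Const64 [d]))
	// result: (Const64 [c-d])
	for {
		v_0, v_1 := v.Args[0], v.Args[1]
		if v_0.Op != OpConst64 || v_1.Op != OpConst64 {
			break
		}
		v.Op = OpConst64
		v.AuxInt = v_0.AuxInt - v_1.AuxInt
		v.Args = nil
		return true
	}
	return false
}

func main() {
	x := &Value{Op: OpConst64, AuxInt: 7}
	v := &Value{Op: OpSub64, Args: []*Value{x, x}}
	fmt.Println(rewriteSub64(v), v.Op == OpConst64, v.AuxInt) // true true 0
}
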
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/schedule.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/schedule.go
deleted file mode 100644
index 59f24f1..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/schedule.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/schedule.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/schedule.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "container/heap"
-
-const (
-	ScorePhi = iota // towards top of block
-	ScoreNilCheck
-	ScoreReadTuple
-	ScoreVarDef
-	ScoreMemory
-	ScoreDefault
-	ScoreFlags
-	ScoreControl // towards bottom of block
-)
-
-type ValHeap struct {
-	a     []*Value
-	score []int8
-}
-
-func (h ValHeap) Len() int      { return len(h.a) }
-func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
-
-func (h *ValHeap) Push(x interface{}) {
-	// Push and Pop use pointer receivers because they modify the slice's length,
-	// not just its contents.
-	v := x.(*Value)
-	h.a = append(h.a, v)
-}
-func (h *ValHeap) Pop() interface{} {
-	old := h.a
-	n := len(old)
-	x := old[n-1]
-	h.a = old[0 : n-1]
-	return x
-}
-func (h ValHeap) Less(i, j int) bool {
-	x := h.a[i]
-	y := h.a[j]
-	sx := h.score[x.ID]
-	sy := h.score[y.ID]
-	if c := sx - sy; c != 0 {
-		return c > 0 // higher score comes later.
-	}
-	if x.Line != y.Line { // Favor in-order line stepping
-		return x.Line > y.Line
-	}
-	if x.Op != OpPhi {
-		if c := len(x.Args) - len(y.Args); c != 0 {
-			return c < 0 // smaller args comes later
-		}
-	}
-	return x.ID > y.ID
-}
-
-// Schedule the Values in each Block. After this phase returns, the
-// order of b.Values matters and is the order in which those values
-// will appear in the assembly output. For now it generates a
-// reasonable valid schedule using a priority queue. TODO(khr):
-// schedule smarter.
-func schedule(f *Func) {
-	// For each value, the number of times it is used in the block
-	// by values that have not been scheduled yet.
-	uses := make([]int32, f.NumValues())
-
-	// reusable priority queue
-	priq := new(ValHeap)
-
-	// "priority" for a value
-	score := make([]int8, f.NumValues())
-
-	// scheduling order. We queue values in this list in reverse order.
-	var order []*Value
-
-	// maps mem values to the next live memory value
-	nextMem := make([]*Value, f.NumValues())
-	// additional pretend arguments for each Value. Used to enforce load/store ordering.
-	additionalArgs := make([][]*Value, f.NumValues())
-
-	for _, b := range f.Blocks {
-		// Compute score. Larger numbers are scheduled closer to the end of the block.
-		for _, v := range b.Values {
-			switch {
-			case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
-				v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
-				v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
-				v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr:
-				// We score LoweredGetClosurePtr as early as possible to ensure that the
-				// context register is not stomped. LoweredGetClosurePtr should only appear
-				// in the entry block where there are no phi functions, so there is no
-				// conflict or ambiguity here.
-				if b != f.Entry {
-					f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
-				}
-				score[v.ID] = ScorePhi
-			case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck ||
-				v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
-				v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
-				v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck:
-				// Nil checks must come before loads from the same address.
-				score[v.ID] = ScoreNilCheck
-			case v.Op == OpPhi:
-				// We want all the phis first.
-				score[v.ID] = ScorePhi
-			case v.Op == OpVarDef:
-				// We want all the vardefs next.
-				score[v.ID] = ScoreVarDef
-			case v.Type.IsMemory():
-				// Schedule stores as early as possible. This tends to
-				// reduce register pressure. It also helps make sure
-				// VARDEF ops are scheduled before the corresponding LEA.
-				score[v.ID] = ScoreMemory
-			case v.Op == OpSelect0 || v.Op == OpSelect1:
-				// Schedule the pseudo-op of reading part of a tuple
-				// immediately after the tuple-generating op, since
-				// this value is already live. This also removes its
-				// false dependency on the other part of the tuple.
-				// Also ensures tuple is never spilled.
-				score[v.ID] = ScoreReadTuple
-			case v.Type.IsFlags() || v.Type.IsTuple():
-				// Schedule flag register generation as late as possible.
-				// This makes sure that we only have one live flags
-				// value at a time.
-				score[v.ID] = ScoreFlags
-			default:
-				score[v.ID] = ScoreDefault
-			}
-		}
-	}
-
-	for _, b := range f.Blocks {
-		// Find store chain for block.
-		// Store chains for different blocks overwrite each other, so
-		// the calculated store chain is good only for this block.
-		for _, v := range b.Values {
-			if v.Op != OpPhi && v.Type.IsMemory() {
-				mem := v
-				if v.Op == OpSelect1 {
-					v = v.Args[0]
-				}
-				for _, w := range v.Args {
-					if w.Type.IsMemory() {
-						nextMem[w.ID] = mem
-					}
-				}
-			}
-		}
-
-		// Compute uses.
-		for _, v := range b.Values {
-			if v.Op == OpPhi {
-				// If a value is used by a phi, it does not induce
-				// a scheduling edge because that use is from the
-				// previous iteration.
-				continue
-			}
-			for _, w := range v.Args {
-				if w.Block == b {
-					uses[w.ID]++
-				}
-				// Any load must come before the following store.
-				if v.Type.IsMemory() || !w.Type.IsMemory() {
-					continue // not a load
-				}
-				s := nextMem[w.ID]
-				if s == nil || s.Block != b {
-					continue
-				}
-				additionalArgs[s.ID] = append(additionalArgs[s.ID], v)
-				uses[v.ID]++
-			}
-		}
-
-		if b.Control != nil && b.Control.Op != OpPhi {
-			// Force the control value to be scheduled at the end,
-			// unless it is a phi value (which must be first).
-			score[b.Control.ID] = ScoreControl
-
-			// Schedule values dependent on the control value at the end.
-			// This reduces the number of register spills. We don't find
-			// all values that depend on the control, just values with a
-			// direct dependency. This is cheaper and in testing there
-			// was no difference in the number of spills.
-			for _, v := range b.Values {
-				if v.Op != OpPhi {
-					for _, a := range v.Args {
-						if a == b.Control {
-							score[v.ID] = ScoreControl
-						}
-					}
-				}
-			}
-		}
-
-		// To put things into a priority queue
-		// The values that should come last are least.
-		priq.score = score
-		priq.a = priq.a[:0]
-
-		// Initialize priority queue with schedulable values.
-		for _, v := range b.Values {
-			if uses[v.ID] == 0 {
-				heap.Push(priq, v)
-			}
-		}
-
-		// Schedule highest priority value, update use counts, repeat.
-		order = order[:0]
-		tuples := make(map[ID][]*Value)
-		for {
-			// Find highest priority schedulable value.
-			// Note that schedule is assembled backwards.
-
-			if priq.Len() == 0 {
-				break
-			}
-
-			v := heap.Pop(priq).(*Value)
-
-			// Add it to the schedule.
-			// Do not emit tuple-reading ops until we're ready to emit the tuple-generating op.
-			//TODO: maybe remove ReadTuple score above, if it does not help on performance
-			switch {
-			case v.Op == OpSelect0:
-				if tuples[v.Args[0].ID] == nil {
-					tuples[v.Args[0].ID] = make([]*Value, 2)
-				}
-				tuples[v.Args[0].ID][0] = v
-			case v.Op == OpSelect1:
-				if tuples[v.Args[0].ID] == nil {
-					tuples[v.Args[0].ID] = make([]*Value, 2)
-				}
-				tuples[v.Args[0].ID][1] = v
-			case v.Type.IsTuple() && tuples[v.ID] != nil:
-				if tuples[v.ID][1] != nil {
-					order = append(order, tuples[v.ID][1])
-				}
-				if tuples[v.ID][0] != nil {
-					order = append(order, tuples[v.ID][0])
-				}
-				delete(tuples, v.ID)
-				fallthrough
-			default:
-				order = append(order, v)
-			}
-
-			// Update use counts of arguments.
-			for _, w := range v.Args {
-				if w.Block != b {
-					continue
-				}
-				uses[w.ID]--
-				if uses[w.ID] == 0 {
-					// All uses scheduled, w is now schedulable.
-					heap.Push(priq, w)
-				}
-			}
-			for _, w := range additionalArgs[v.ID] {
-				uses[w.ID]--
-				if uses[w.ID] == 0 {
-					// All uses scheduled, w is now schedulable.
-					heap.Push(priq, w)
-				}
-			}
-		}
-		if len(order) != len(b.Values) {
-			f.Fatalf("schedule does not include all values")
-		}
-		for i := 0; i < len(b.Values); i++ {
-			b.Values[i] = order[len(b.Values)-1-i]
-		}
-	}
-
-	f.scheduled = true
-}
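Note on the file deleted above: schedule.go drives block-local scheduling with container/heap, where the ordering lives in a score table indexed by value ID rather than in the elements themselves, and Less is written so that heap.Pop hands back the highest-scoring value first (the schedule is assembled in reverse). A minimal standalone sketch of that pattern — the item type here is invented for illustration and is not the compiler's *Value:

package main

import (
	"container/heap"
	"fmt"
)

// item stands in for *ssa.Value: just an id used to index the score table.
type item struct{ id int }

// scoredHeap mirrors ValHeap: the heap holds items, the priorities live in
// an external score slice indexed by id.
type scoredHeap struct {
	a     []*item
	score []int8
}

func (h scoredHeap) Len() int      { return len(h.a) }
func (h scoredHeap) Swap(i, j int) { h.a[i], h.a[j] = h.a[j], h.a[i] }
func (h scoredHeap) Less(i, j int) bool {
	// Higher score sorts "less" so heap.Pop returns it first, matching
	// ValHeap.Less above; ties are broken by larger id.
	sx, sy := h.score[h.a[i].id], h.score[h.a[j].id]
	if sx != sy {
		return sx > sy
	}
	return h.a[i].id > h.a[j].id
}
func (h *scoredHeap) Push(x interface{}) { h.a = append(h.a, x.(*item)) }
func (h *scoredHeap) Pop() interface{} {
	old := h.a
	n := len(old)
	x := old[n-1]
	h.a = old[:n-1]
	return x
}

func main() {
	h := &scoredHeap{score: []int8{2, 7, 7, 1}} // score indexed by item id
	for id := 0; id < 4; id++ {
		heap.Push(h, &item{id: id})
	}
	for h.Len() > 0 {
		fmt.Print(heap.Pop(h).(*item).id, " ") // 2 1 0 3 — highest score first
	}
	fmt.Println()
}
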
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/schedule_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/schedule_test.go
deleted file mode 100644
index 1bbd277..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/schedule_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/schedule_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/schedule_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-func TestSchedule(t *testing.T) {
-	c := testConfig(t)
-	cases := []fun{
-		Fun(c, "entry",
-			Bloc("entry",
-				Valu("mem0", OpInitMem, TypeMem, 0, nil),
-				Valu("ptr", OpConst64, TypeInt64, 0xABCD, nil),
-				Valu("v", OpConst64, TypeInt64, 12, nil),
-				Valu("mem1", OpStore, TypeMem, 8, nil, "ptr", "v", "mem0"),
-				Valu("mem2", OpStore, TypeMem, 8, nil, "ptr", "v", "mem1"),
-				Valu("mem3", OpStore, TypeInt64, 8, nil, "ptr", "sum", "mem2"),
-				Valu("l1", OpLoad, TypeInt64, 0, nil, "ptr", "mem1"),
-				Valu("l2", OpLoad, TypeInt64, 0, nil, "ptr", "mem2"),
-				Valu("sum", OpAdd64, TypeInt64, 0, nil, "l1", "l2"),
-				Goto("exit")),
-			Bloc("exit",
-				Exit("mem3"))),
-	}
-	for _, c := range cases {
-		schedule(c.f)
-		if !isSingleLiveMem(c.f) {
-			t.Error("single-live-mem restriction not enforced by schedule for func:")
-			printFunc(c.f)
-		}
-	}
-}
-
-func isSingleLiveMem(f *Func) bool {
-	for _, b := range f.Blocks {
-		var liveMem *Value
-		for _, v := range b.Values {
-			for _, w := range v.Args {
-				if w.Type.IsMemory() {
-					if liveMem == nil {
-						liveMem = w
-						continue
-					}
-					if w != liveMem {
-						return false
-					}
-				}
-			}
-			if v.Type.IsMemory() {
-				liveMem = v
-			}
-		}
-	}
-	return true
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shift_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shift_test.go
deleted file mode 100644
index 4cd7863..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shift_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/shift_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/shift_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"testing"
-)
-
-func TestShiftConstAMD64(t *testing.T) {
-	c := testConfig(t)
-	fun := makeConstShiftFunc(c, 18, OpLsh64x64, TypeUInt64)
-	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
-	fun = makeConstShiftFunc(c, 66, OpLsh64x64, TypeUInt64)
-	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
-	fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, TypeUInt64)
-	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
-	fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, TypeUInt64)
-	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
-	fun.f.Free()
-	fun = makeConstShiftFunc(c, 18, OpRsh64x64, TypeInt64)
-	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
-	fun.f.Free()
-	fun = makeConstShiftFunc(c, 66, OpRsh64x64, TypeInt64)
-	checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
-	fun.f.Free()
-}
-
-func makeConstShiftFunc(c *Config, amount int64, op Op, typ Type) fun {
-	ptyp := &TypeImpl{Size_: 8, Ptr: true, Name: "ptr"}
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("SP", OpSP, TypeUInt64, 0, nil),
-			Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
-			Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
-			Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
-			Valu("c", OpConst64, TypeUInt64, amount, nil),
-			Valu("shift", op, typ, 0, nil, "load", "c"),
-			Valu("store", OpStore, TypeMem, 8, nil, "resptr", "shift", "mem"),
-			Exit("store")))
-	Compile(fun.f)
-	return fun
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shortcircuit.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shortcircuit.go
deleted file mode 100644
index 1f7cf93..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shortcircuit.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/shortcircuit.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/shortcircuit.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// Shortcircuit finds situations where branch directions
-// are always correlated and rewrites the CFG to take
-// advantage of that fact.
-// This optimization is useful for compiling && and || expressions.
-func shortcircuit(f *Func) {
-	// Step 1: Replace a phi arg with a constant if that arg
-	// is the control value of a preceding If block.
-	// b1:
-	//    If a goto b2 else b3
-	// b2: <- b1 ...
-	//    x = phi(a, ...)
-	//
-	// We can replace the "a" in the phi with the constant true.
-	ct := f.ConstBool(f.Entry.Line, f.Config.fe.TypeBool(), true)
-	cf := f.ConstBool(f.Entry.Line, f.Config.fe.TypeBool(), false)
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				continue
-			}
-			if !v.Type.IsBoolean() {
-				continue
-			}
-			for i, a := range v.Args {
-				e := b.Preds[i]
-				p := e.b
-				if p.Kind != BlockIf {
-					continue
-				}
-				if p.Control != a {
-					continue
-				}
-				if e.i == 0 {
-					v.SetArg(i, ct)
-				} else {
-					v.SetArg(i, cf)
-				}
-			}
-		}
-	}
-
-	// Step 2: Compute which values are live across blocks.
-	live := make([]bool, f.NumValues())
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for _, a := range v.Args {
-				if a.Block != v.Block {
-					live[a.ID] = true
-				}
-			}
-		}
-		if b.Control != nil && b.Control.Block != b {
-			live[b.Control.ID] = true
-		}
-	}
-
-	// Step 3: Redirect control flow around known branches.
-	// p:
-	//   ... goto b ...
-	// b: <- p ...
-	//   v = phi(true, ...)
-	//   if v goto t else u
-	// We can redirect p to go directly to t instead of b.
-	// (If v is not live after b).
-	for _, b := range f.Blocks {
-		if b.Kind != BlockIf {
-			continue
-		}
-		if len(b.Values) != 1 {
-			continue
-		}
-		v := b.Values[0]
-		if v.Op != OpPhi {
-			continue
-		}
-		if b.Control != v {
-			continue
-		}
-		if live[v.ID] {
-			continue
-		}
-		for i := 0; i < len(v.Args); i++ {
-			a := v.Args[i]
-			if a.Op != OpConstBool {
-				continue
-			}
-
-			// The predecessor we come in from.
-			e1 := b.Preds[i]
-			p := e1.b
-			pi := e1.i
-
-			// The successor we always go to when coming in
-			// from that predecessor.
-			e2 := b.Succs[1-a.AuxInt]
-			t := e2.b
-			ti := e2.i
-
-			// Remove b's incoming edge from p.
-			b.removePred(i)
-			n := len(b.Preds)
-			v.Args[i].Uses--
-			v.Args[i] = v.Args[n]
-			v.Args[n] = nil
-			v.Args = v.Args[:n]
-
-			// Redirect p's outgoing edge to t.
-			p.Succs[pi] = Edge{t, len(t.Preds)}
-
-			// Fix up t to have one more predecessor.
-			t.Preds = append(t.Preds, Edge{p, pi})
-			for _, w := range t.Values {
-				if w.Op != OpPhi {
-					continue
-				}
-				w.AddArg(w.Args[ti])
-			}
-
-			if len(b.Preds) == 1 {
-				v.Op = OpCopy
-				// No longer a phi, stop optimizing here.
-				break
-			}
-			i--
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shortcircuit_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shortcircuit_test.go
deleted file mode 100644
index 7edd1cb..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/shortcircuit_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/shortcircuit_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/shortcircuit_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-func TestShortCircuit(t *testing.T) {
-	c := testConfig(t)
-
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("mem", OpInitMem, TypeMem, 0, nil),
-			Valu("arg1", OpArg, TypeInt64, 0, nil),
-			Valu("arg2", OpArg, TypeInt64, 0, nil),
-			Valu("arg3", OpArg, TypeInt64, 0, nil),
-			Goto("b1")),
-		Bloc("b1",
-			Valu("cmp1", OpLess64, TypeBool, 0, nil, "arg1", "arg2"),
-			If("cmp1", "b2", "b3")),
-		Bloc("b2",
-			Valu("cmp2", OpLess64, TypeBool, 0, nil, "arg2", "arg3"),
-			Goto("b3")),
-		Bloc("b3",
-			Valu("phi2", OpPhi, TypeBool, 0, nil, "cmp1", "cmp2"),
-			If("phi2", "b4", "b5")),
-		Bloc("b4",
-			Valu("cmp3", OpLess64, TypeBool, 0, nil, "arg3", "arg1"),
-			Goto("b5")),
-		Bloc("b5",
-			Valu("phi3", OpPhi, TypeBool, 0, nil, "phi2", "cmp3"),
-			If("phi3", "b6", "b7")),
-		Bloc("b6",
-			Exit("mem")),
-		Bloc("b7",
-			Exit("mem")))
-
-	CheckFunc(fun.f)
-	shortcircuit(fun.f)
-	CheckFunc(fun.f)
-
-	for _, b := range fun.f.Blocks {
-		for _, v := range b.Values {
-			if v.Op == OpPhi {
-				t.Errorf("phi %s remains", v)
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sizeof_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sizeof_test.go
deleted file mode 100644
index 99697ef..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sizeof_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sizeof_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sizeof_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !nacl
-
-package ssa
-
-import (
-	"reflect"
-	"testing"
-	"unsafe"
-)
-
-// Assert that the size of important structures do not change unexpectedly.
-
-func TestSizeof(t *testing.T) {
-	const _64bit = unsafe.Sizeof(uintptr(0)) == 8
-
-	var tests = []struct {
-		val    interface{} // type as a value
-		_32bit uintptr     // size on 32bit platforms
-		_64bit uintptr     // size on 64bit platforms
-	}{
-		{Value{}, 68, 112},
-		{Block{}, 148, 288},
-	}
-
-	for _, tt := range tests {
-		want := tt._32bit
-		if _64bit {
-			want = tt._64bit
-		}
-		got := reflect.TypeOf(tt.val).Size()
-		if want != got {
-			t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsemap.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsemap.go
deleted file mode 100644
index a096df2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsemap.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparsemap.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparsemap.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// from http://research.swtch.com/sparse
-// in turn, from Briggs and Torczon
-
-type sparseEntry struct {
-	key ID
-	val int32
-	aux int32
-}
-
-type sparseMap struct {
-	dense  []sparseEntry
-	sparse []int32
-}
-
-// newSparseMap returns a sparseMap that can map
-// integers between 0 and n-1 to int32s.
-func newSparseMap(n int) *sparseMap {
-	return &sparseMap{dense: nil, sparse: make([]int32, n)}
-}
-
-func (s *sparseMap) size() int {
-	return len(s.dense)
-}
-
-func (s *sparseMap) contains(k ID) bool {
-	i := s.sparse[k]
-	return i < int32(len(s.dense)) && s.dense[i].key == k
-}
-
-// get returns the value for key k, or -1 if k does
-// not appear in the map.
-func (s *sparseMap) get(k ID) int32 {
-	i := s.sparse[k]
-	if i < int32(len(s.dense)) && s.dense[i].key == k {
-		return s.dense[i].val
-	}
-	return -1
-}
-
-func (s *sparseMap) set(k ID, v, a int32) {
-	i := s.sparse[k]
-	if i < int32(len(s.dense)) && s.dense[i].key == k {
-		s.dense[i].val = v
-		s.dense[i].aux = a
-		return
-	}
-	s.dense = append(s.dense, sparseEntry{k, v, a})
-	s.sparse[k] = int32(len(s.dense)) - 1
-}
-
-// setBit sets the v'th bit of k's value, where 0 <= v < 32
-func (s *sparseMap) setBit(k ID, v uint) {
-	if v >= 32 {
-		panic("bit index too large.")
-	}
-	i := s.sparse[k]
-	if i < int32(len(s.dense)) && s.dense[i].key == k {
-		s.dense[i].val |= 1 << v
-		return
-	}
-	s.dense = append(s.dense, sparseEntry{k, 1 << v, 0})
-	s.sparse[k] = int32(len(s.dense)) - 1
-}
-
-func (s *sparseMap) remove(k ID) {
-	i := s.sparse[k]
-	if i < int32(len(s.dense)) && s.dense[i].key == k {
-		y := s.dense[len(s.dense)-1]
-		s.dense[i] = y
-		s.sparse[y.key] = i
-		s.dense = s.dense[:len(s.dense)-1]
-	}
-}
-
-func (s *sparseMap) clear() {
-	s.dense = s.dense[:0]
-}
-
-func (s *sparseMap) contents() []sparseEntry {
-	return s.dense
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparseset.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparseset.go
deleted file mode 100644
index 5d324fe..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparseset.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparseset.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparseset.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// from http://research.swtch.com/sparse
-// in turn, from Briggs and Torczon
-
-type sparseSet struct {
-	dense  []ID
-	sparse []int32
-}
-
-// newSparseSet returns a sparseSet that can represent
-// integers between 0 and n-1
-func newSparseSet(n int) *sparseSet {
-	return &sparseSet{dense: nil, sparse: make([]int32, n)}
-}
-
-func (s *sparseSet) cap() int {
-	return len(s.sparse)
-}
-
-func (s *sparseSet) size() int {
-	return len(s.dense)
-}
-
-func (s *sparseSet) contains(x ID) bool {
-	i := s.sparse[x]
-	return i < int32(len(s.dense)) && s.dense[i] == x
-}
-
-func (s *sparseSet) add(x ID) {
-	i := s.sparse[x]
-	if i < int32(len(s.dense)) && s.dense[i] == x {
-		return
-	}
-	s.dense = append(s.dense, x)
-	s.sparse[x] = int32(len(s.dense)) - 1
-}
-
-func (s *sparseSet) addAll(a []ID) {
-	for _, x := range a {
-		s.add(x)
-	}
-}
-
-func (s *sparseSet) addAllValues(a []*Value) {
-	for _, v := range a {
-		s.add(v.ID)
-	}
-}
-
-func (s *sparseSet) remove(x ID) {
-	i := s.sparse[x]
-	if i < int32(len(s.dense)) && s.dense[i] == x {
-		y := s.dense[len(s.dense)-1]
-		s.dense[i] = y
-		s.sparse[y] = i
-		s.dense = s.dense[:len(s.dense)-1]
-	}
-}
-
-// pop removes an arbitrary element from the set.
-// The set must be nonempty.
-func (s *sparseSet) pop() ID {
-	x := s.dense[len(s.dense)-1]
-	s.dense = s.dense[:len(s.dense)-1]
-	return x
-}
-
-func (s *sparseSet) clear() {
-	s.dense = s.dense[:0]
-}
-
-func (s *sparseSet) contents() []ID {
-	return s.dense
-}
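Note on the two files deleted above: sparsemap.go and sparseset.go both use the Briggs–Torczon sparse representation referenced at research.swtch.com/sparse. The sparse array is indexed by key and never needs initialization, because contains cross-checks that dense[sparse[k]] really holds k, and clear simply truncates the dense slice, so all operations are O(1). A self-contained sketch of the set variant, using plain int32 keys in place of the ssa ID type:

package main

import "fmt"

// sparseSet is a minimal standalone copy of the idea used above: sparse maps
// a key to its slot in dense; a stale or garbage slot is harmless because
// contains also checks that dense[slot] really holds the key.
type sparseSet struct {
	dense  []int32
	sparse []int32
}

// newSparseSet returns a set that can hold integers in [0, n).
func newSparseSet(n int) *sparseSet {
	return &sparseSet{sparse: make([]int32, n)}
}

func (s *sparseSet) contains(x int32) bool {
	i := s.sparse[x]
	return i < int32(len(s.dense)) && s.dense[i] == x
}

func (s *sparseSet) add(x int32) {
	if s.contains(x) {
		return
	}
	s.dense = append(s.dense, x)
	s.sparse[x] = int32(len(s.dense)) - 1
}

func (s *sparseSet) remove(x int32) {
	i := s.sparse[x]
	if i < int32(len(s.dense)) && s.dense[i] == x {
		// Move the last element into the vacated slot.
		y := s.dense[len(s.dense)-1]
		s.dense[i] = y
		s.sparse[y] = i
		s.dense = s.dense[:len(s.dense)-1]
	}
}

func (s *sparseSet) clear() { s.dense = s.dense[:0] }

func main() {
	s := newSparseSet(10)
	s.add(3)
	s.add(7)
	s.remove(3)
	fmt.Println(s.contains(3), s.contains(7)) // false true
	s.clear()
	fmt.Println(s.contains(7)) // false
}
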
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsetree.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsetree.go
deleted file mode 100644
index cb7f974..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsetree.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparsetree.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparsetree.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"strings"
-)
-
-type SparseTreeNode struct {
-	child   *Block
-	sibling *Block
-	parent  *Block
-
-	// Every block has 6 numbers associated with it:
-	// entry-1, entry, entry+1, exit-1, and exit, exit+1.
-	// entry and exit are conceptually the top of the block (phi functions)
-	// entry+1 and exit-1 are conceptually the bottom of the block (ordinary defs)
-	// entry-1 and exit+1 are conceptually "just before" the block (conditions flowing in)
-	//
-	// This simplifies life if we wish to query information about x
-	// when x is both an input to and output of a block.
-	entry, exit int32
-}
-
-func (s *SparseTreeNode) String() string {
-	return fmt.Sprintf("[%d,%d]", s.entry, s.exit)
-}
-
-func (s *SparseTreeNode) Entry() int32 {
-	return s.entry
-}
-
-func (s *SparseTreeNode) Exit() int32 {
-	return s.exit
-}
-
-const (
-	// When used to lookup up definitions in a sparse tree,
-	// these adjustments to a block's entry (+adjust) and
-	// exit (-adjust) numbers allow a distinction to be made
-	// between assignments (typically branch-dependent
-	// conditionals) occurring "before" the block (e.g., as inputs
-	// to the block and its phi functions), "within" the block,
-	// and "after" the block.
-	AdjustBefore = -1 // defined before phi
-	AdjustWithin = 0  // defined by phi
-	AdjustAfter  = 1  // defined within block
-)
-
-// A SparseTree is a tree of Blocks.
-// It allows rapid ancestor queries,
-// such as whether one block dominates another.
-type SparseTree []SparseTreeNode
-
-// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
-func newSparseTree(f *Func, parentOf []*Block) SparseTree {
-	t := make(SparseTree, f.NumBlocks())
-	for _, b := range f.Blocks {
-		n := &t[b.ID]
-		if p := parentOf[b.ID]; p != nil {
-			n.parent = p
-			n.sibling = t[p.ID].child
-			t[p.ID].child = b
-		}
-	}
-	t.numberBlock(f.Entry, 1)
-	return t
-}
-
-// treestructure provides a string description of the dominator
-// tree and flow structure of block b and all blocks that it
-// dominates.
-func (t SparseTree) treestructure(b *Block) string {
-	return t.treestructure1(b, 0)
-}
-func (t SparseTree) treestructure1(b *Block, i int) string {
-	s := "\n" + strings.Repeat("\t", i) + b.String() + "->["
-	for i, e := range b.Succs {
-		if i > 0 {
-			s = s + ","
-		}
-		s = s + e.b.String()
-	}
-	s += "]"
-	if c0 := t[b.ID].child; c0 != nil {
-		s += "("
-		for c := c0; c != nil; c = t[c.ID].sibling {
-			if c != c0 {
-				s += " "
-			}
-			s += t.treestructure1(c, i+1)
-		}
-		s += ")"
-	}
-	return s
-}
-
-// numberBlock assigns entry and exit numbers for b and b's
-// children in an in-order walk from a gappy sequence, where n
-// is the first number not yet assigned or reserved. N should
-// be larger than zero. For each entry and exit number, the
-// values one larger and smaller are reserved to indicate
-// "strictly above" and "strictly below". numberBlock returns
-// the smallest number not yet assigned or reserved (i.e., the
-// exit number of the last block visited, plus two, because
-// last.exit+1 is a reserved value.)
-//
-// examples:
-//
-// single node tree Root, call with n=1
-//         entry=2 Root exit=5; returns 7
-//
-// two node tree, Root->Child, call with n=1
-//         entry=2 Root exit=11; returns 13
-//         entry=5 Child exit=8
-//
-// three node tree, Root->(Left, Right), call with n=1
-//         entry=2 Root exit=17; returns 19
-// entry=5 Left exit=8;  entry=11 Right exit=14
-//
-// This is the in-order sequence of assigned and reserved numbers
-// for the last example:
-//   root     left     left      right       right       root
-//  1 2e 3 | 4 5e 6 | 7 8x 9 | 10 11e 12 | 13 14x 15 | 16 17x 18
-
-func (t SparseTree) numberBlock(b *Block, n int32) int32 {
-	// reserve n for entry-1, assign n+1 to entry
-	n++
-	t[b.ID].entry = n
-	// reserve n+1 for entry+1, n+2 is next free number
-	n += 2
-	for c := t[b.ID].child; c != nil; c = t[c.ID].sibling {
-		n = t.numberBlock(c, n) // preserves n = next free number
-	}
-	// reserve n for exit-1, assign n+1 to exit
-	n++
-	t[b.ID].exit = n
-	// reserve n+1 for exit+1, n+2 is next free number, returned.
-	return n + 2
-}
-
-// Sibling returns a sibling of x in the dominator tree (i.e.,
-// a node with the same immediate dominator) or nil if there
-// are no remaining siblings in the arbitrary but repeatable
-// order chosen. Because the Child-Sibling order is used
-// to assign entry and exit numbers in the treewalk, those
-// numbers are also consistent with this order (i.e.,
-// Sibling(x) has entry number larger than x's exit number).
-func (t SparseTree) Sibling(x *Block) *Block {
-	return t[x.ID].sibling
-}
-
-// Child returns a child of x in the dominator tree, or
-// nil if there are none. The choice of first child is
-// arbitrary but repeatable.
-func (t SparseTree) Child(x *Block) *Block {
-	return t[x.ID].child
-}
-
-// isAncestorEq reports whether x is an ancestor of or equal to y.
-func (t SparseTree) isAncestorEq(x, y *Block) bool {
-	if x == y {
-		return true
-	}
-	xx := &t[x.ID]
-	yy := &t[y.ID]
-	return xx.entry <= yy.entry && yy.exit <= xx.exit
-}
-
-// isAncestor reports whether x is a strict ancestor of y.
-func (t SparseTree) isAncestor(x, y *Block) bool {
-	if x == y {
-		return false
-	}
-	xx := &t[x.ID]
-	yy := &t[y.ID]
-	return xx.entry < yy.entry && yy.exit < xx.exit
-}
-
-// domorder returns a value for dominator-oriented sorting.
-// Block domination does not provide a total ordering,
-// but domorder has two useful properties.
-// (1) If domorder(x) > domorder(y) then x does not dominate y.
-// (2) If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
-//     then x does not dominate z.
-// Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
-// Property (2) allows searches for dominated blocks to exit early.
-func (t SparseTree) domorder(x *Block) int32 {
-	// Here is an argument that entry(x) provides the properties documented above.
-	//
-	// Entry and exit values are assigned in a depth-first dominator tree walk.
-	// For all blocks x and y, one of the following holds:
-	//
-	// (x-dom-y) x dominates y => entry(x) < entry(y) < exit(y) < exit(x)
-	// (y-dom-x) y dominates x => entry(y) < entry(x) < exit(x) < exit(y)
-	// (x-then-y) neither x nor y dominates the other and x walked before y => entry(x) < exit(x) < entry(y) < exit(y)
-	// (y-then-x) neither x nor y dominates the other and y walked before x => entry(y) < exit(y) < entry(x) < exit(x)
-	//
-	// entry(x) > entry(y) eliminates case x-dom-y. This provides property (1) above.
-	//
-	// For property (2), assume entry(x) < entry(y) and entry(y) < entry(z) and x does not dominate y.
-	// entry(x) < entry(y) allows cases x-dom-y and x-then-y.
-	// But by supposition, x does not dominate y. So we have x-then-y.
-	//
-	// For contradiction, assume x dominates z.
-	// Then entry(x) < entry(z) < exit(z) < exit(x).
-	// But we know x-then-y, so entry(x) < exit(x) < entry(y) < exit(y).
-	// Combining those, entry(x) < entry(z) < exit(z) < exit(x) < entry(y) < exit(y).
-	// By supposition, entry(y) < entry(z), which allows cases y-dom-z and y-then-z.
-	// y-dom-z requires entry(y) < entry(z), but we have entry(z) < entry(y).
-	// y-then-z requires exit(y) < entry(z), but we have entry(z) < exit(y).
-	// We have a contradiction, so x does not dominate z, as required.
-	return t[x.ID].entry
-}
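
The entry/exit numbering and interval-containment test deleted above can be checked on a toy tree. The standalone sketch below (not part of the deleted file; node and number are hypothetical names) reproduces numberBlock's gappy numbering for the three-node example in its comment and uses it to answer ancestor queries:

package main

import "fmt"

// node is a hypothetical stand-in for *Block; only the tree shape matters.
type node struct {
	children    []*node
	entry, exit int32
}

// number mirrors numberBlock: reserve entry-1, assign entry, reserve entry+1,
// recurse into children, then reserve exit-1, assign exit, reserve exit+1.
func number(n *node, next int32) int32 {
	next++
	n.entry = next
	next += 2
	for _, c := range n.children {
		next = number(c, next)
	}
	next++
	n.exit = next
	return next + 2
}

// isAncestorEq is the interval test used by SparseTree.isAncestorEq.
func isAncestorEq(x, y *node) bool {
	return x.entry <= y.entry && y.exit <= x.exit
}

func main() {
	left, right := &node{}, &node{}
	root := &node{children: []*node{left, right}}
	number(root, 1)
	fmt.Println(root.entry, root.exit)     // 2 17, matching the comment's three-node example
	fmt.Println(left.entry, left.exit)     // 5 8
	fmt.Println(right.entry, right.exit)   // 11 14
	fmt.Println(isAncestorEq(root, left))  // true
	fmt.Println(isAncestorEq(left, right)) // false
}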
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsetreemap.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsetreemap.go
deleted file mode 100644
index 16041f7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/sparsetreemap.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparsetreemap.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/sparsetreemap.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "fmt"
-
-// A SparseTreeMap encodes a subset of nodes within a tree
-// used for sparse-ancestor queries.
-//
-// Combined with a SparseTreeHelper, this supports an Insert
-// to add a tree node to the set and a Find operation to locate
-// the nearest tree ancestor of a given node such that the
-// ancestor is also in the set.
-//
-// Given a set of blocks {B1, B2, B3} within the dominator tree, established
-// by stm.Insert()ing B1, B2, B3, etc, a query at block B
-// (performed with stm.Find(stm, B, adjust, helper))
-// will return the member of the set that is the nearest strict
-// ancestor of B within the dominator tree, or nil if none exists.
-// The expected complexity of this operation is the log of the size of
-// the set, given certain assumptions about sparsity (the log complexity
-// could be guaranteed with additional data structures whose constant-
-// factor overhead has not yet been justified.)
-//
-// The adjust parameter allows positioning of the insertion
-// and lookup points within a block -- one of
-// AdjustBefore, AdjustWithin, AdjustAfter,
-// where lookups at AdjustWithin can find insertions at
-// AdjustBefore in the same block, and lookups at AdjustAfter
-// can find insertions at either AdjustBefore or AdjustWithin
-// in the same block.  (Note that this assumes a gappy numbering
-// such that each entry number or exit number is separated from its
-// nearest neighbor by at least 3).
-//
-// The Sparse Tree lookup algorithm is described by
-// Paul F. Dietz. Maintaining order in a linked list. In
-// Proceedings of the Fourteenth Annual ACM Symposium on
-// Theory of Computing, pages 122–127, May 1982.
-// and by
-// Ben Wegbreit. Faster retrieval from context trees.
-// Communications of the ACM, 19(9):526–529, September 1976.
-type SparseTreeMap RBTint32
-
-// A SparseTreeHelper contains indexing and allocation data
-// structures common to a collection of SparseTreeMaps, as well
-// as exposing some useful control-flow-related data to other
-// packages, such as gc.
-type SparseTreeHelper struct {
-	Sdom   []SparseTreeNode // indexed by block.ID
-	Po     []*Block         // exported data; the blocks, in a post-order
-	Dom    []*Block         // exported data; the dominator of this block.
-	Ponums []int32          // exported data; Po[Ponums[b.ID]] == b; the index of b in Po
-}
-
-// NewSparseTreeHelper returns a SparseTreeHelper for use
-// in the gc package, for example in phi-function placement.
-func NewSparseTreeHelper(f *Func) *SparseTreeHelper {
-	dom := f.Idom()
-	ponums := make([]int32, f.NumBlocks())
-	po := postorderWithNumbering(f, ponums)
-	return makeSparseTreeHelper(newSparseTree(f, dom), dom, po, ponums)
-}
-
-func (h *SparseTreeHelper) NewTree() *SparseTreeMap {
-	return &SparseTreeMap{}
-}
-
-func makeSparseTreeHelper(sdom SparseTree, dom, po []*Block, ponums []int32) *SparseTreeHelper {
-	helper := &SparseTreeHelper{Sdom: []SparseTreeNode(sdom),
-		Dom:    dom,
-		Po:     po,
-		Ponums: ponums,
-	}
-	return helper
-}
-
-// A sparseTreeMapEntry contains the data stored in a binary search
-// data structure indexed by (dominator tree walk) entry and exit numbers.
-// Each entry is added twice, once keyed by entry-1/entry/entry+1 and
-// once keyed by exit+1/exit/exit-1.
-//
-// Within a sparse tree, the two entries added bracket all their descendant
-// entries within the tree; the first insertion is keyed by entry number,
-// which comes before all the entry and exit numbers of descendants, and
-// the second insertion is keyed by exit number, which comes after all the
-// entry and exit numbers of the descendants.
-type sparseTreeMapEntry struct {
-	index        *SparseTreeNode // references the entry and exit numbers for a block in the sparse tree
-	block        *Block          // TODO: store this in a separate index.
-	data         interface{}
-	sparseParent *sparseTreeMapEntry // references the nearest ancestor of this block in the sparse tree.
-	adjust       int32               // at what adjustment was this node entered into the sparse tree? The same block may be entered more than once, but at different adjustments.
-}
-
-// Insert creates a definition within b with data x.
-// adjust indicates where in the block the definition should be inserted:
-// AdjustBefore means defined at a phi function (visible Within or After in the same block)
-// AdjustWithin means defined within the block (visible After in the same block)
-// AdjustAfter means after the block (visible within child blocks)
-func (m *SparseTreeMap) Insert(b *Block, adjust int32, x interface{}, helper *SparseTreeHelper) {
-	rbtree := (*RBTint32)(m)
-	blockIndex := &helper.Sdom[b.ID]
-	if blockIndex.entry == 0 {
-		// assert unreachable
-		return
-	}
-	// sp will be the sparse parent in this sparse tree (nearest ancestor in the larger tree that is also in this sparse tree)
-	sp := m.findEntry(b, adjust, helper)
-	entry := &sparseTreeMapEntry{index: blockIndex, block: b, data: x, sparseParent: sp, adjust: adjust}
-
-	right := blockIndex.exit - adjust
-	_ = rbtree.Insert(right, entry)
-
-	left := blockIndex.entry + adjust
-	_ = rbtree.Insert(left, entry)
-
-	// This newly inserted block may now be the sparse parent of some existing nodes (the new sparse children of this block)
-	// Iterate over nodes bracketed by this new node to correct their parent, but not over the proper sparse descendants of those nodes.
-	_, d := rbtree.Lub(left) // Lub (not EQ) of left is either right or a sparse child
-	for tme := d.(*sparseTreeMapEntry); tme != entry; tme = d.(*sparseTreeMapEntry) {
-		tme.sparseParent = entry
-		// all descendants of tme are unchanged;
-		// next sparse sibling (or right-bracketing sparse parent == entry) is first node after tme.index.exit - tme.adjust
-		_, d = rbtree.Lub(tme.index.exit - tme.adjust)
-	}
-}
-
-// Find returns the definition visible from block b, or nil if none can be found.
-// Adjust indicates where the block should be searched.
-// AdjustBefore searches before the phi functions of b.
-// AdjustWithin searches starting at the phi functions of b.
-// AdjustAfter searches starting at the exit from the block, including normal within-block definitions.
-//
-// Note that Finds are properly nested with Inserts:
-// m.Insert(b, a) followed by m.Find(b, a) will not return the result of the insert,
-// but m.Insert(b, AdjustBefore) followed by m.Find(b, AdjustWithin) will.
-//
-// Another way to think of this is that Find searches for inputs, Insert defines outputs.
-func (m *SparseTreeMap) Find(b *Block, adjust int32, helper *SparseTreeHelper) interface{} {
-	v := m.findEntry(b, adjust, helper)
-	if v == nil {
-		return nil
-	}
-	return v.data
-}
-
-func (m *SparseTreeMap) findEntry(b *Block, adjust int32, helper *SparseTreeHelper) *sparseTreeMapEntry {
-	rbtree := (*RBTint32)(m)
-	if rbtree == nil {
-		return nil
-	}
-	blockIndex := &helper.Sdom[b.ID]
-
-	// The Glb (not EQ) of this probe is either the entry-indexed end of a sparse parent
-	// or the exit-indexed end of a sparse sibling
-	_, v := rbtree.Glb(blockIndex.entry + adjust)
-
-	if v == nil {
-		return nil
-	}
-
-	otherEntry := v.(*sparseTreeMapEntry)
-	if otherEntry.index.exit >= blockIndex.exit { // otherEntry exit after blockIndex exit; therefore, brackets
-		return otherEntry
-	}
-	// otherEntry is a sparse Sibling, and shares the same sparse parent (nearest ancestor within larger tree)
-	sp := otherEntry.sparseParent
-	if sp != nil {
-		if sp.index.exit < blockIndex.exit { // no ancestor found
-			return nil
-		}
-		return sp
-	}
-	return nil
-}
-
-func (m *SparseTreeMap) String() string {
-	tree := (*RBTint32)(m)
-	return tree.String()
-}
-
-func (e *sparseTreeMapEntry) String() string {
-	if e == nil {
-		return "nil"
-	}
-	return fmt.Sprintf("(index=%v, block=%v, data=%v)->%v", e.index, e.block, e.data, e.sparseParent)
-}
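
The Insert/Find key arithmetic in the deleted file is easy to lose track of. The following standalone sketch (a sorted slice stands in for the RBTint32 the real code uses; the store type, glb helper and the entry/exit values are hypothetical) works through one block with entry=5 and exit=8, showing why an Insert at AdjustBefore is visible to a Find at AdjustWithin or AdjustAfter but not at AdjustBefore:

package main

import (
	"fmt"
	"sort"
)

const (
	AdjustBefore = -1
	AdjustWithin = 0
	AdjustAfter  = 1
)

// store is a hypothetical stand-in for the RBTint32 keyed by block numbers.
type store struct{ keys []int32 }

// insert records the two bracketing keys the real Insert uses:
// entry+adjust and exit-adjust.
func (s *store) insert(entry, exit, adjust int32) {
	s.keys = append(s.keys, entry+adjust, exit-adjust)
	sort.Slice(s.keys, func(i, j int) bool { return s.keys[i] < s.keys[j] })
}

// glb returns the greatest stored key strictly less than k, mirroring the
// "Glb (not EQ)" probe in findEntry.
func (s *store) glb(k int32) (int32, bool) {
	i := sort.Search(len(s.keys), func(i int) bool { return s.keys[i] >= k })
	if i == 0 {
		return 0, false
	}
	return s.keys[i-1], true
}

func main() {
	var m store
	entry, exit := int32(5), int32(8)
	m.insert(entry, exit, AdjustBefore) // stores keys 4 and 9
	for _, adjust := range []int32{AdjustBefore, AdjustWithin, AdjustAfter} {
		k, ok := m.glb(entry + adjust)
		fmt.Printf("Find at adjust %+d probes Glb(%d) -> %v %v\n", adjust, entry+adjust, k, ok)
	}
	// Output:
	// Find at adjust -1 probes Glb(4) -> 0 false
	// Find at adjust +0 probes Glb(5) -> 4 true
	// Find at adjust +1 probes Glb(6) -> 4 true
}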
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/stackalloc.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/stackalloc.go
deleted file mode 100644
index c5a09b4..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/stackalloc.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/stackalloc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/stackalloc.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO: live at start of block instead?
-
-package ssa
-
-import "fmt"
-
-type stackAllocState struct {
-	f *Func
-
-	// live is the output of stackalloc.
-	// live[b.id] = live values at the end of block b.
-	live [][]ID
-
-	// The following slices are reused across multiple users
-	// of stackAllocState.
-	values    []stackValState
-	interfere [][]ID // interfere[v.id] = values that interfere with v.
-	names     []LocalSlot
-	slots     []int
-	used      []bool
-
-	nArgSlot, // Number of Values sourced to arg slot
-	nNotNeed, // Number of Values not needing a stack slot
-	nNamedSlot, // Number of Values using a named stack slot
-	nReuse, // Number of values reusing a stack slot
-	nAuto, // Number of autos allocated for stack slots.
-	nSelfInterfere int32 // Number of self-interferences
-}
-
-func newStackAllocState(f *Func) *stackAllocState {
-	s := f.Config.stackAllocState
-	if s == nil {
-		return new(stackAllocState)
-	}
-	if s.f != nil {
-		f.Config.Fatalf(0, "newStackAllocState called without previous free")
-	}
-	return s
-}
-
-func putStackAllocState(s *stackAllocState) {
-	for i := range s.values {
-		s.values[i] = stackValState{}
-	}
-	for i := range s.interfere {
-		s.interfere[i] = nil
-	}
-	for i := range s.names {
-		s.names[i] = LocalSlot{}
-	}
-	for i := range s.slots {
-		s.slots[i] = 0
-	}
-	for i := range s.used {
-		s.used[i] = false
-	}
-	s.f.Config.stackAllocState = s
-	s.f = nil
-	s.live = nil
-	s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
-}
-
-type stackValState struct {
-	typ      Type
-	spill    *Value
-	needSlot bool
-}
-
-// stackalloc allocates storage in the stack frame for
-// all Values that did not get a register.
-// Returns a map from block ID to the stack values live at the end of that block.
-func stackalloc(f *Func, spillLive [][]ID) [][]ID {
-	if f.pass.debug > stackDebug {
-		fmt.Println("before stackalloc")
-		fmt.Println(f.String())
-	}
-	s := newStackAllocState(f)
-	s.init(f, spillLive)
-	defer putStackAllocState(s)
-
-	s.stackalloc()
-	if f.pass.stats > 0 {
-		f.LogStat("stack_alloc_stats",
-			s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
-			s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
-			s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
-	}
-
-	return s.live
-}
-
-func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
-	s.f = f
-
-	// Initialize value information.
-	if n := f.NumValues(); cap(s.values) >= n {
-		s.values = s.values[:n]
-	} else {
-		s.values = make([]stackValState, n)
-	}
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			s.values[v.ID].typ = v.Type
-			s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable()
-			if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
-				fmt.Printf("%s needs a stack slot\n", v)
-			}
-			if v.Op == OpStoreReg {
-				s.values[v.Args[0].ID].spill = v
-			}
-		}
-	}
-
-	// Compute liveness info for values needing a slot.
-	s.computeLive(spillLive)
-
-	// Build interference graph among values needing a slot.
-	s.buildInterferenceGraph()
-}
-
-func (s *stackAllocState) stackalloc() {
-	f := s.f
-
-	// Build map from values to their names, if any.
-	// A value may be associated with more than one name (e.g. after
-	// the assignment i=j). This step picks one name per value arbitrarily.
-	if n := f.NumValues(); cap(s.names) >= n {
-		s.names = s.names[:n]
-	} else {
-		s.names = make([]LocalSlot, n)
-	}
-	names := s.names
-	for _, name := range f.Names {
-		// Note: not "range f.NamedValues" above, because
-		// that would be nondeterministic.
-		for _, v := range f.NamedValues[name] {
-			names[v.ID] = name
-		}
-	}
-
-	// Allocate args to their assigned locations.
-	for _, v := range f.Entry.Values {
-		if v.Op != OpArg {
-			continue
-		}
-		loc := LocalSlot{v.Aux.(GCNode), v.Type, v.AuxInt}
-		if f.pass.debug > stackDebug {
-			fmt.Printf("stackalloc %s to %s\n", v, loc.Name())
-		}
-		f.setHome(v, loc)
-	}
-
-	// For each type, we keep track of all the stack slots we
-	// have allocated for that type.
-	// TODO: share slots among equivalent types. We would need to
-	// only share among types with the same GC signature. See the
-	// type.Equal calls below for where this matters.
-	locations := map[Type][]LocalSlot{}
-
-	// Each time we assign a stack slot to a value v, we remember
-	// the slot we used via an index into locations[v.Type].
-	slots := s.slots
-	if n := f.NumValues(); cap(slots) >= n {
-		slots = slots[:n]
-	} else {
-		slots = make([]int, n)
-		s.slots = slots
-	}
-	for i := range slots {
-		slots[i] = -1
-	}
-
-	// Pick a stack slot for each value needing one.
-	var used []bool
-	if n := f.NumValues(); cap(s.used) >= n {
-		used = s.used[:n]
-	} else {
-		used = make([]bool, n)
-		s.used = used
-	}
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if !s.values[v.ID].needSlot {
-				s.nNotNeed++
-				continue
-			}
-			if v.Op == OpArg {
-				s.nArgSlot++
-				continue // already picked
-			}
-
-			// If this is a named value, try to use the name as
-			// the spill location.
-			var name LocalSlot
-			if v.Op == OpStoreReg {
-				name = names[v.Args[0].ID]
-			} else {
-				name = names[v.ID]
-			}
-			if name.N != nil && v.Type.Compare(name.Type) == CMPeq {
-				for _, id := range s.interfere[v.ID] {
-					h := f.getHome(id)
-					if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
-						// A variable can interfere with itself.
-						// It is rare, but it can happen.
-						s.nSelfInterfere++
-						goto noname
-					}
-				}
-				if f.pass.debug > stackDebug {
-					fmt.Printf("stackalloc %s to %s\n", v, name.Name())
-				}
-				s.nNamedSlot++
-				f.setHome(v, name)
-				continue
-			}
-
-		noname:
-			// Set of stack slots we could reuse.
-			locs := locations[v.Type]
-			// Mark all positions in locs used by interfering values.
-			for i := 0; i < len(locs); i++ {
-				used[i] = false
-			}
-			for _, xid := range s.interfere[v.ID] {
-				slot := slots[xid]
-				if slot >= 0 {
-					used[slot] = true
-				}
-			}
-			// Find an unused stack slot.
-			var i int
-			for i = 0; i < len(locs); i++ {
-				if !used[i] {
-					s.nReuse++
-					break
-				}
-			}
-			// If there is no unused stack slot, allocate a new one.
-			if i == len(locs) {
-				s.nAuto++
-				locs = append(locs, LocalSlot{N: f.Config.fe.Auto(v.Type), Type: v.Type, Off: 0})
-				locations[v.Type] = locs
-			}
-			// Use the stack variable at that index for v.
-			loc := locs[i]
-			if f.pass.debug > stackDebug {
-				fmt.Printf("stackalloc %s to %s\n", v, loc.Name())
-			}
-			f.setHome(v, loc)
-			slots[v.ID] = i
-		}
-	}
-}
-
-// computeLive computes a map from block ID to a list of
-// stack-slot-needing value IDs live at the end of that block.
-// TODO: this could be quadratic if lots of variables are live across lots of
-// basic blocks. Figure out a way to make this function (or, more precisely, the user
-// of this function) require only linear size & time.
-func (s *stackAllocState) computeLive(spillLive [][]ID) {
-	s.live = make([][]ID, s.f.NumBlocks())
-	var phis []*Value
-	live := s.f.newSparseSet(s.f.NumValues())
-	defer s.f.retSparseSet(live)
-	t := s.f.newSparseSet(s.f.NumValues())
-	defer s.f.retSparseSet(t)
-
-	// Instead of iterating over f.Blocks, iterate over their postordering.
-	// Liveness information flows backward, so starting at the end
-	// increases the probability that we will stabilize quickly.
-	po := s.f.postorder()
-	for {
-		changed := false
-		for _, b := range po {
-			// Start with known live values at the end of the block
-			live.clear()
-			live.addAll(s.live[b.ID])
-
-			// Propagate backwards to the start of the block
-			phis = phis[:0]
-			for i := len(b.Values) - 1; i >= 0; i-- {
-				v := b.Values[i]
-				live.remove(v.ID)
-				if v.Op == OpPhi {
-					// Save phi for later.
-					// Note: its args might need a stack slot even though
-					// the phi itself doesn't. So don't use needSlot.
-					if !v.Type.IsMemory() && !v.Type.IsVoid() {
-						phis = append(phis, v)
-					}
-					continue
-				}
-				for _, a := range v.Args {
-					if s.values[a.ID].needSlot {
-						live.add(a.ID)
-					}
-				}
-			}
-
-			// for each predecessor of b, expand its list of live-at-end values
-			// invariant: live contains the values live at the start of b (excluding phi inputs)
-			for i, e := range b.Preds {
-				p := e.b
-				t.clear()
-				t.addAll(s.live[p.ID])
-				t.addAll(live.contents())
-				t.addAll(spillLive[p.ID])
-				for _, v := range phis {
-					a := v.Args[i]
-					if s.values[a.ID].needSlot {
-						t.add(a.ID)
-					}
-					if spill := s.values[a.ID].spill; spill != nil {
-						//TODO: remove?  Subsumed by SpillUse?
-						t.add(spill.ID)
-					}
-				}
-				if t.size() == len(s.live[p.ID]) {
-					continue
-				}
-				// grow p's live set
-				s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
-				changed = true
-			}
-		}
-
-		if !changed {
-			break
-		}
-	}
-	if s.f.pass.debug > stackDebug {
-		for _, b := range s.f.Blocks {
-			fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
-		}
-	}
-}
-
-func (f *Func) getHome(vid ID) Location {
-	if int(vid) >= len(f.RegAlloc) {
-		return nil
-	}
-	return f.RegAlloc[vid]
-}
-
-func (f *Func) setHome(v *Value, loc Location) {
-	for v.ID >= ID(len(f.RegAlloc)) {
-		f.RegAlloc = append(f.RegAlloc, nil)
-	}
-	f.RegAlloc[v.ID] = loc
-}
-
-func (s *stackAllocState) buildInterferenceGraph() {
-	f := s.f
-	if n := f.NumValues(); cap(s.interfere) >= n {
-		s.interfere = s.interfere[:n]
-	} else {
-		s.interfere = make([][]ID, n)
-	}
-	live := f.newSparseSet(f.NumValues())
-	defer f.retSparseSet(live)
-	for _, b := range f.Blocks {
-		// Propagate liveness backwards to the start of the block.
-		// Two values interfere if one is defined while the other is live.
-		live.clear()
-		live.addAll(s.live[b.ID])
-		for i := len(b.Values) - 1; i >= 0; i-- {
-			v := b.Values[i]
-			if s.values[v.ID].needSlot {
-				live.remove(v.ID)
-				for _, id := range live.contents() {
-					if s.values[v.ID].typ.Compare(s.values[id].typ) == CMPeq {
-						s.interfere[v.ID] = append(s.interfere[v.ID], id)
-						s.interfere[id] = append(s.interfere[id], v.ID)
-					}
-				}
-			}
-			for _, a := range v.Args {
-				if s.values[a.ID].needSlot {
-					live.add(a.ID)
-				}
-			}
-			if v.Op == OpArg && s.values[v.ID].needSlot {
-				// OpArg is an input argument which is pre-spilled.
-				// We add back v.ID here because we want this value
-				// to appear live even before this point. Being live
-				// all the way to the start of the entry block prevents other
-				// values from being allocated to the same slot and clobbering
-				// the input value before we have a chance to load it.
-				live.add(v.ID)
-			}
-		}
-	}
-	if f.pass.debug > stackDebug {
-		for vid, i := range s.interfere {
-			if len(i) > 0 {
-				fmt.Printf("v%d interferes with", vid)
-				for _, x := range i {
-					fmt.Printf(" v%d", x)
-				}
-				fmt.Println()
-			}
-		}
-	}
-}
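
The slot-reuse policy in the deleted stackalloc boils down to: values of the same type share a pool of slots, and each value takes the first slot not already used by anything it interferes with, allocating a fresh slot only when every existing one clashes. A small standalone sketch of just that policy (the value IDs, interference lists and the assignSlots helper are made up for illustration):

package main

import "fmt"

// assignSlots greedily assigns each value, in order, the lowest slot index
// not used by any value it interferes with.
func assignSlots(interfere map[int][]int, order []int) map[int]int {
	slot := make(map[int]int) // value ID -> slot index
	nslots := 0
	for _, v := range order {
		used := make(map[int]bool)
		for _, x := range interfere[v] {
			if s, ok := slot[x]; ok {
				used[s] = true
			}
		}
		i := 0
		for used[i] {
			i++
		}
		slot[v] = i
		if i == nslots {
			nslots++ // a fresh slot, analogous to allocating a new auto
		}
	}
	return slot
}

func main() {
	// v1 interferes with v2; v3 interferes with neither, so it reuses slot 0.
	interfere := map[int][]int{1: {2}, 2: {1}, 3: nil}
	fmt.Println(assignSlots(interfere, []int{1, 2, 3})) // map[1:0 2:1 3:0]
}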
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/stackframe.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/stackframe.go
deleted file mode 100644
index ead7813..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/stackframe.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/stackframe.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/stackframe.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// stackframe calls back into the frontend to assign frame offsets.
-func stackframe(f *Func) {
-	f.Config.fe.AllocFrame(f)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/tighten.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/tighten.go
deleted file mode 100644
index 8c13984..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/tighten.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/tighten.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/tighten.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// tighten moves Values closer to the Blocks in which they are used.
-// This can reduce the amount of register spilling required,
-// if it doesn't also create more live values.
-// A Value can be moved to any block that
-// dominates all blocks in which it is used.
-func tighten(f *Func) {
-	canMove := make([]bool, f.NumValues())
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			switch v.Op {
-			case OpPhi, OpGetClosurePtr, OpArg, OpSelect0, OpSelect1:
-				// Phis need to stay in their block.
-				// GetClosurePtr & Arg must stay in the entry block.
-				// Tuple selectors must stay with the tuple generator.
-				continue
-			}
-			if len(v.Args) > 0 && v.Args[len(v.Args)-1].Type.IsMemory() {
-				// We can't move values which have a memory arg - it might
-				// make two memory values live across a block boundary.
-				continue
-			}
-			// Count arguments which will need a register.
-			narg := 0
-			for _, a := range v.Args {
-				switch a.Op {
-				case OpConst8, OpConst16, OpConst32, OpConst64, OpAddr:
-					// Probably foldable into v, don't count as an argument needing a register.
-					// TODO: move tighten to a machine-dependent phase and use v.rematerializeable()?
-				default:
-					narg++
-				}
-			}
-			if narg >= 2 && !v.Type.IsBoolean() {
-				// Don't move values with more than one input, as that may
-				// increase register pressure.
-				// We make an exception for boolean-typed values, as they will
-				// likely be converted to flags, and we want flag generators
-				// moved next to uses (because we only have 1 flag register).
-				continue
-			}
-			canMove[v.ID] = true
-		}
-	}
-
-	// Build data structure for fast least-common-ancestor queries.
-	lca := makeLCArange(f)
-
-	// For each moveable value, record the block that dominates all uses found so far.
-	target := make([]*Block, f.NumValues())
-
-	// Grab loop information.
-	// We use this to make sure we don't tighten a value into a (deeper) loop.
-	idom := f.Idom()
-	loops := f.loopnest()
-	loops.calculateDepths()
-
-	changed := true
-	for changed {
-		changed = false
-
-		// Reset target
-		for i := range target {
-			target[i] = nil
-		}
-
-		// Compute target locations (for moveable values only).
-		// target location = the least common ancestor of all uses in the dominator tree.
-		for _, b := range f.Blocks {
-			for _, v := range b.Values {
-				for i, a := range v.Args {
-					if !canMove[a.ID] {
-						continue
-					}
-					use := b
-					if v.Op == OpPhi {
-						use = b.Preds[i].b
-					}
-					if target[a.ID] == nil {
-						target[a.ID] = use
-					} else {
-						target[a.ID] = lca.find(target[a.ID], use)
-					}
-				}
-			}
-			if c := b.Control; c != nil {
-				if !canMove[c.ID] {
-					continue
-				}
-				if target[c.ID] == nil {
-					target[c.ID] = b
-				} else {
-					target[c.ID] = lca.find(target[c.ID], b)
-				}
-			}
-		}
-
-		// If the target location is inside a loop,
-		// move the target location up to just before the loop head.
-		for _, b := range f.Blocks {
-			origloop := loops.b2l[b.ID]
-			for _, v := range b.Values {
-				t := target[v.ID]
-				if t == nil {
-					continue
-				}
-				targetloop := loops.b2l[t.ID]
-				for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
-					t = idom[targetloop.header.ID]
-					target[v.ID] = t
-					targetloop = loops.b2l[t.ID]
-				}
-			}
-		}
-
-		// Move values to target locations.
-		for _, b := range f.Blocks {
-			for i := 0; i < len(b.Values); i++ {
-				v := b.Values[i]
-				t := target[v.ID]
-				if t == nil || t == b {
-					// v is not moveable, or is already in correct place.
-					continue
-				}
-				// Move v to the block which dominates its uses.
-				t.Values = append(t.Values, v)
-				v.Block = t
-				last := len(b.Values) - 1
-				b.Values[i] = b.Values[last]
-				b.Values[last] = nil
-				b.Values = b.Values[:last]
-				changed = true
-				i--
-			}
-		}
-	}
-}
-
-// phiTighten moves constants closer to phi users.
-// This pass avoids having lots of constants live for lots of the program.
-// See issue 16407.
-func phiTighten(f *Func) {
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			if v.Op != OpPhi {
-				continue
-			}
-			for i, a := range v.Args {
-				if !a.rematerializeable() {
-					continue // not a constant we can move around
-				}
-				if a.Block == b.Preds[i].b {
-					continue // already in the right place
-				}
-				// Make a copy of a, put in predecessor block.
-				v.SetArg(i, a.copyInto(b.Preds[i].b))
-			}
-		}
-	}
-}
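
tighten's target for a movable value is the least common ancestor, in the dominator tree, of all blocks that use it. The sketch below is standalone and uses a naive walk-up LCA over a small made-up dominator tree in place of the makeLCArange structure the real pass builds:

package main

import "fmt"

// A hypothetical dominator tree indexed by block ID: 0 -> 1 -> {2, 3}, 2 -> 4.
// Block 0 is the entry and is its own immediate dominator.
var idom = []int{0, 0, 1, 1, 2}
var depth = []int{0, 1, 2, 2, 3}

// lca walks both blocks up to equal depth, then up together.
func lca(a, b int) int {
	for depth[a] > depth[b] {
		a = idom[a]
	}
	for depth[b] > depth[a] {
		b = idom[b]
	}
	for a != b {
		a, b = idom[a], idom[b]
	}
	return a
}

// target folds lca over all use blocks, like the loop over v.Args in tighten.
func target(uses []int) int {
	t := uses[0]
	for _, u := range uses[1:] {
		t = lca(t, u)
	}
	return t
}

func main() {
	fmt.Println(target([]int{4, 3})) // 1: the deepest block dominating both uses
	fmt.Println(target([]int{4}))    // 4: a single use lets the value sink all the way
}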
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/trim.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/trim.go
deleted file mode 100644
index e58ebda..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/trim.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/trim.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/trim.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// trim removes blocks with no code in them.
-// These blocks were inserted to remove critical edges.
-func trim(f *Func) {
-	n := 0
-	for _, b := range f.Blocks {
-		if !trimmableBlock(b) {
-			f.Blocks[n] = b
-			n++
-			continue
-		}
-
-		// Splice b out of the graph. NOTE: `mergePhi` depends on the
-		// order in which the predecessor edges are merged here.
-		p, i := b.Preds[0].b, b.Preds[0].i
-		s, j := b.Succs[0].b, b.Succs[0].i
-		ns := len(s.Preds)
-		p.Succs[i] = Edge{s, j}
-		s.Preds[j] = Edge{p, i}
-
-		for _, e := range b.Preds[1:] {
-			p, i := e.b, e.i
-			p.Succs[i] = Edge{s, len(s.Preds)}
-			s.Preds = append(s.Preds, Edge{p, i})
-		}
-
-		// If `s` had more than one predecessor, update its phi-ops to
-		// account for the merge.
-		if ns > 1 {
-			for _, v := range s.Values {
-				if v.Op == OpPhi {
-					mergePhi(v, j, b)
-				}
-			}
-			// Remove the phi-ops from `b` if they were merged into the
-			// phi-ops of `s`.
-			k := 0
-			for _, v := range b.Values {
-				if v.Op == OpPhi {
-					if v.Uses == 0 {
-						v.resetArgs()
-						continue
-					}
-					// Pad the arguments of the remaining phi-ops, so
-					// they match the new predecessor count of `s`.
-					for len(v.Args) < len(s.Preds) {
-						v.AddArg(v.Args[0])
-					}
-				}
-				b.Values[k] = v
-				k++
-			}
-			b.Values = b.Values[:k]
-		}
-
-		// Merge the blocks' values.
-		for _, v := range b.Values {
-			v.Block = s
-		}
-		k := len(b.Values)
-		m := len(s.Values)
-		for i := 0; i < k; i++ {
-			s.Values = append(s.Values, nil)
-		}
-		copy(s.Values[k:], s.Values[:m])
-		copy(s.Values, b.Values)
-	}
-	if n < len(f.Blocks) {
-		f.invalidateCFG()
-		tail := f.Blocks[n:]
-		for i := range tail {
-			tail[i] = nil
-		}
-		f.Blocks = f.Blocks[:n]
-	}
-}
-
-// emptyBlock returns true if the block does not contain actual
-// instructions
-func emptyBlock(b *Block) bool {
-	for _, v := range b.Values {
-		if v.Op != OpPhi {
-			return false
-		}
-	}
-	return true
-}
-
-// trimmableBlock returns true if the block can be trimmed from the CFG,
-// subject to the following criteria:
-//  - it should not be the first block
-//  - it should be BlockPlain
-//  - it should not loop back to itself
-//  - it either is the single predecessor of the successor block or
-//    contains no actual instructions
-func trimmableBlock(b *Block) bool {
-	if b.Kind != BlockPlain || b == b.Func.Entry {
-		return false
-	}
-	s := b.Succs[0].b
-	return s != b && (len(s.Preds) == 1 || emptyBlock(b))
-}
-
-// mergePhi adjusts the arguments of `v` to account for the merge of `b`,
-// which was the `i`th predecessor of `v`'s block. Returns `v`.
-func mergePhi(v *Value, i int, b *Block) *Value {
-	u := v.Args[i]
-	if u.Block == b {
-		if u.Op != OpPhi {
-			b.Func.Fatalf("value %s is not a phi operation", u.LongString())
-		}
-		// If the original block contained u = φ(u0, u1, ..., un) and
-		// the current phi is
-		//    v = φ(v0, v1, ..., u, ..., vk)
-		// then the merged phi is
-		//    v = φ(v0, v1, ..., u0, ..., vk, u1, ..., un)
-		v.SetArg(i, u.Args[0])
-		v.AddArgs(u.Args[1:]...)
-	} else {
-		// If the original block contained u = φ(u0, u1, ..., un) and
-		// the current phi is
-		//    v = φ(v0, v1, ...,  vi, ..., vk)
-		// i.e. it does not use a value from the predecessor block,
-		// then the merged phi is
-		//    v = φ(v0, v1, ..., vk, vi, vi, ...)
-		for j := 1; j < len(b.Preds); j++ {
-			v.AddArg(v.Args[i])
-		}
-	}
-	return v
-}
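
The argument surgery mergePhi performs can be shown on plain string slices; the mergePhiArgs helper below is hypothetical and ignores the Uses bookkeeping. The slot fed by the trimmed block's phi u is replaced by u's first argument, and u's remaining arguments are appended, matching the order in which trim appends the extra predecessor edges:

package main

import "fmt"

// mergePhiArgs replaces position i of v's argument list with u's first
// argument and appends the rest of u's arguments.
func mergePhiArgs(v []string, i int, u []string) []string {
	v[i] = u[0]
	return append(v, u[1:]...)
}

func main() {
	v := []string{"v0", "u", "v2"} // v = φ(v0, u, v2), u comes from the trimmed block
	u := []string{"u0", "u1"}      // u = φ(u0, u1) in the trimmed block
	fmt.Println(mergePhiArgs(v, 1, u)) // [v0 u0 v2 u1]
}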
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/type.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/type.go
deleted file mode 100644
index d785fc8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/type.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/type.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/type.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// TODO: use go/types instead?
-
-// A type interface used to import cmd/internal/gc:Type
-// Type instances are not guaranteed to be canonical.
-type Type interface {
-	Size() int64 // return the size in bytes
-	Alignment() int64
-
-	IsBoolean() bool // is a named or unnamed boolean type
-	IsInteger() bool //  ... ditto for the others
-	IsSigned() bool
-	IsFloat() bool
-	IsComplex() bool
-	IsPtrShaped() bool
-	IsString() bool
-	IsSlice() bool
-	IsArray() bool
-	IsStruct() bool
-	IsInterface() bool
-
-	IsMemory() bool // special ssa-package-only types
-	IsFlags() bool
-	IsVoid() bool
-	IsTuple() bool
-
-	ElemType() Type // given []T or *T or [n]T, return T
-	PtrTo() Type    // given T, return *T
-
-	NumFields() int         // # of fields of a struct
-	FieldType(i int) Type   // type of ith field of the struct or ith part of a tuple
-	FieldOff(i int) int64   // offset of ith field of the struct
-	FieldName(i int) string // name of ith field of the struct
-
-	NumElem() int64 // # of elements of an array
-
-	String() string
-	SimpleString() string // a coarser generic description of T, e.g. T's underlying type
-	Compare(Type) Cmp     // compare types, returning one of CMPlt, CMPeq, CMPgt.
-}
-
-// Special compiler-only types.
-type CompilerType struct {
-	Name   string
-	size   int64
-	Memory bool
-	Flags  bool
-	Void   bool
-	Int128 bool
-}
-
-func (t *CompilerType) Size() int64            { return t.size } // Size in bytes
-func (t *CompilerType) Alignment() int64       { return 0 }
-func (t *CompilerType) IsBoolean() bool        { return false }
-func (t *CompilerType) IsInteger() bool        { return false }
-func (t *CompilerType) IsSigned() bool         { return false }
-func (t *CompilerType) IsFloat() bool          { return false }
-func (t *CompilerType) IsComplex() bool        { return false }
-func (t *CompilerType) IsPtrShaped() bool      { return false }
-func (t *CompilerType) IsString() bool         { return false }
-func (t *CompilerType) IsSlice() bool          { return false }
-func (t *CompilerType) IsArray() bool          { return false }
-func (t *CompilerType) IsStruct() bool         { return false }
-func (t *CompilerType) IsInterface() bool      { return false }
-func (t *CompilerType) IsMemory() bool         { return t.Memory }
-func (t *CompilerType) IsFlags() bool          { return t.Flags }
-func (t *CompilerType) IsVoid() bool           { return t.Void }
-func (t *CompilerType) IsTuple() bool          { return false }
-func (t *CompilerType) String() string         { return t.Name }
-func (t *CompilerType) SimpleString() string   { return t.Name }
-func (t *CompilerType) ElemType() Type         { panic("not implemented") }
-func (t *CompilerType) PtrTo() Type            { panic("not implemented") }
-func (t *CompilerType) NumFields() int         { panic("not implemented") }
-func (t *CompilerType) FieldType(i int) Type   { panic("not implemented") }
-func (t *CompilerType) FieldOff(i int) int64   { panic("not implemented") }
-func (t *CompilerType) FieldName(i int) string { panic("not implemented") }
-func (t *CompilerType) NumElem() int64         { panic("not implemented") }
-
-type TupleType struct {
-	first  Type
-	second Type
-	// Any tuple with a memory type must put that memory type second.
-}
-
-func (t *TupleType) Size() int64          { panic("not implemented") }
-func (t *TupleType) Alignment() int64     { panic("not implemented") }
-func (t *TupleType) IsBoolean() bool      { return false }
-func (t *TupleType) IsInteger() bool      { return false }
-func (t *TupleType) IsSigned() bool       { return false }
-func (t *TupleType) IsFloat() bool        { return false }
-func (t *TupleType) IsComplex() bool      { return false }
-func (t *TupleType) IsPtrShaped() bool    { return false }
-func (t *TupleType) IsString() bool       { return false }
-func (t *TupleType) IsSlice() bool        { return false }
-func (t *TupleType) IsArray() bool        { return false }
-func (t *TupleType) IsStruct() bool       { return false }
-func (t *TupleType) IsInterface() bool    { return false }
-func (t *TupleType) IsMemory() bool       { return false }
-func (t *TupleType) IsFlags() bool        { return false }
-func (t *TupleType) IsVoid() bool         { return false }
-func (t *TupleType) IsTuple() bool        { return true }
-func (t *TupleType) String() string       { return t.first.String() + "," + t.second.String() }
-func (t *TupleType) SimpleString() string { return "Tuple" }
-func (t *TupleType) ElemType() Type       { panic("not implemented") }
-func (t *TupleType) PtrTo() Type          { panic("not implemented") }
-func (t *TupleType) NumFields() int       { panic("not implemented") }
-func (t *TupleType) FieldType(i int) Type {
-	switch i {
-	case 0:
-		return t.first
-	case 1:
-		return t.second
-	default:
-		panic("bad tuple index")
-	}
-}
-func (t *TupleType) FieldOff(i int) int64   { panic("not implemented") }
-func (t *TupleType) FieldName(i int) string { panic("not implemented") }
-func (t *TupleType) NumElem() int64         { panic("not implemented") }
-
-// Cmp is a comparison between values a and b.
-// -1 if a < b
-//  0 if a == b
-//  1 if a > b
-type Cmp int8
-
-const (
-	CMPlt = Cmp(-1)
-	CMPeq = Cmp(0)
-	CMPgt = Cmp(1)
-)
-
-func (t *CompilerType) Compare(u Type) Cmp {
-	x, ok := u.(*CompilerType)
-	// ssa.CompilerType is smaller than any other type
-	if !ok {
-		return CMPlt
-	}
-	if t == x {
-		return CMPeq
-	}
-	// desire fast sorting, not pretty sorting.
-	if len(t.Name) == len(x.Name) {
-		if t.Name == x.Name {
-			return CMPeq
-		}
-		if t.Name < x.Name {
-			return CMPlt
-		}
-		return CMPgt
-	}
-	if len(t.Name) > len(x.Name) {
-		return CMPgt
-	}
-	return CMPlt
-}
-
-func (t *TupleType) Compare(u Type) Cmp {
-	// ssa.TupleType is greater than ssa.CompilerType
-	if _, ok := u.(*CompilerType); ok {
-		return CMPgt
-	}
-	// ssa.TupleType is smaller than any other type
-	x, ok := u.(*TupleType)
-	if !ok {
-		return CMPlt
-	}
-	if t == x {
-		return CMPeq
-	}
-	if c := t.first.Compare(x.first); c != CMPeq {
-		return c
-	}
-	return t.second.Compare(x.second)
-}
-
-var (
-	TypeInvalid = &CompilerType{Name: "invalid"}
-	TypeMem     = &CompilerType{Name: "mem", Memory: true}
-	TypeFlags   = &CompilerType{Name: "flags", Flags: true}
-	TypeVoid    = &CompilerType{Name: "void", Void: true}
-	TypeInt128  = &CompilerType{Name: "int128", size: 16, Int128: true}
-)
-
-func MakeTuple(t0, t1 Type) *TupleType {
-	return &TupleType{first: t0, second: t1}
-}
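
CompilerType.Compare orders names by length first and lexically second ("fast sorting, not pretty sorting"). A standalone sketch of that ordering applied to plain strings (the cmpFast helper is hypothetical):

package main

import (
	"fmt"
	"sort"
)

// cmpFast compares like CompilerType.Compare: shorter names first, ties
// broken lexically.
func cmpFast(a, b string) int {
	if len(a) != len(b) {
		if len(a) < len(b) {
			return -1
		}
		return 1
	}
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	}
	return 0
}

func main() {
	names := []string{"mem", "flags", "void", "int128", "invalid"}
	sort.Slice(names, func(i, j int) bool { return cmpFast(names[i], names[j]) < 0 })
	fmt.Println(names) // [mem void flags int128 invalid]
}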
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/type_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/type_test.go
deleted file mode 100644
index e6bdc5e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/type_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/type_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/type_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// Stub implementation used for testing.
-type TypeImpl struct {
-	Size_   int64
-	Align   int64
-	Boolean bool
-	Integer bool
-	Signed  bool
-	Float   bool
-	Complex bool
-	Ptr     bool
-	string  bool
-	slice   bool
-	array   bool
-	struct_ bool
-	inter   bool
-	Elem_   Type
-
-	Name string
-}
-
-func (t *TypeImpl) Size() int64            { return t.Size_ }
-func (t *TypeImpl) Alignment() int64       { return t.Align }
-func (t *TypeImpl) IsBoolean() bool        { return t.Boolean }
-func (t *TypeImpl) IsInteger() bool        { return t.Integer }
-func (t *TypeImpl) IsSigned() bool         { return t.Signed }
-func (t *TypeImpl) IsFloat() bool          { return t.Float }
-func (t *TypeImpl) IsComplex() bool        { return t.Complex }
-func (t *TypeImpl) IsPtrShaped() bool      { return t.Ptr }
-func (t *TypeImpl) IsString() bool         { return t.string }
-func (t *TypeImpl) IsSlice() bool          { return t.slice }
-func (t *TypeImpl) IsArray() bool          { return t.array }
-func (t *TypeImpl) IsStruct() bool         { return t.struct_ }
-func (t *TypeImpl) IsInterface() bool      { return t.inter }
-func (t *TypeImpl) IsMemory() bool         { return false }
-func (t *TypeImpl) IsFlags() bool          { return false }
-func (t *TypeImpl) IsTuple() bool          { return false }
-func (t *TypeImpl) IsVoid() bool           { return false }
-func (t *TypeImpl) String() string         { return t.Name }
-func (t *TypeImpl) SimpleString() string   { return t.Name }
-func (t *TypeImpl) ElemType() Type         { return t.Elem_ }
-func (t *TypeImpl) PtrTo() Type            { return TypeBytePtr }
-func (t *TypeImpl) NumFields() int         { panic("not implemented") }
-func (t *TypeImpl) FieldType(i int) Type   { panic("not implemented") }
-func (t *TypeImpl) FieldOff(i int) int64   { panic("not implemented") }
-func (t *TypeImpl) FieldName(i int) string { panic("not implemented") }
-func (t *TypeImpl) NumElem() int64         { panic("not implemented") }
-
-func (t *TypeImpl) Equal(u Type) bool {
-	x, ok := u.(*TypeImpl)
-	if !ok {
-		return false
-	}
-	return x == t
-}
-
-func (t *TypeImpl) Compare(u Type) Cmp {
-	x, ok := u.(*TypeImpl)
-	// ssa.CompilerType < ssa.TypeImpl < gc.Type
-	if !ok {
-		_, ok := u.(*CompilerType)
-		if ok {
-			return CMPgt
-		}
-		return CMPlt
-	}
-	if t == x {
-		return CMPeq
-	}
-	if t.Name < x.Name {
-		return CMPlt
-	}
-	if t.Name > x.Name {
-		return CMPgt
-	}
-	return CMPeq
-
-}
-
-var (
-	// shortcuts for commonly used basic types
-	TypeInt8       = &TypeImpl{Size_: 1, Align: 1, Integer: true, Signed: true, Name: "int8"}
-	TypeInt16      = &TypeImpl{Size_: 2, Align: 2, Integer: true, Signed: true, Name: "int16"}
-	TypeInt32      = &TypeImpl{Size_: 4, Align: 4, Integer: true, Signed: true, Name: "int32"}
-	TypeInt64      = &TypeImpl{Size_: 8, Align: 8, Integer: true, Signed: true, Name: "int64"}
-	TypeFloat32    = &TypeImpl{Size_: 4, Align: 4, Float: true, Name: "float32"}
-	TypeFloat64    = &TypeImpl{Size_: 8, Align: 8, Float: true, Name: "float64"}
-	TypeComplex64  = &TypeImpl{Size_: 8, Align: 4, Complex: true, Name: "complex64"}
-	TypeComplex128 = &TypeImpl{Size_: 16, Align: 8, Complex: true, Name: "complex128"}
-	TypeUInt8      = &TypeImpl{Size_: 1, Align: 1, Integer: true, Name: "uint8"}
-	TypeUInt16     = &TypeImpl{Size_: 2, Align: 2, Integer: true, Name: "uint16"}
-	TypeUInt32     = &TypeImpl{Size_: 4, Align: 4, Integer: true, Name: "uint32"}
-	TypeUInt64     = &TypeImpl{Size_: 8, Align: 8, Integer: true, Name: "uint64"}
-	TypeBool       = &TypeImpl{Size_: 1, Align: 1, Boolean: true, Name: "bool"}
-	TypeBytePtr    = &TypeImpl{Size_: 8, Align: 8, Ptr: true, Name: "*byte"}
-	TypeInt64Ptr   = &TypeImpl{Size_: 8, Align: 8, Ptr: true, Name: "*int64"}
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/value.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/value.go
deleted file mode 100644
index d99f8c7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/value.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/value.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/value.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import (
-	"fmt"
-	"math"
-)
-
-// A Value represents a value in the SSA representation of the program.
-// The ID and Type fields must not be modified. The remainder may be modified
-// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
-type Value struct {
-	// A unique identifier for the value. For performance we allocate these IDs
-	// densely starting at 1.  There is no guarantee that there won't be occasional holes, though.
-	ID ID
-
-	// The operation that computes this value. See op.go.
-	Op Op
-
-	// The type of this value. Normally this will be a Go type, but there
-	// are a few other pseudo-types, see type.go.
-	Type Type
-
-	// Auxiliary info for this value. The type of this information depends on the opcode and type.
-	// AuxInt is used for integer values, Aux is used for other values.
-	// Floats are stored in AuxInt using math.Float64bits(f).
-	AuxInt int64
-	Aux    interface{}
-
-	// Arguments of this value
-	Args []*Value
-
-	// Containing basic block
-	Block *Block
-
-	// Source line number
-	Line int32
-
-	// Use count. Each appearance in Value.Args and Block.Control counts once.
-	Uses int32
-
-	// Storage for the first three args
-	argstorage [3]*Value
-}
-
-// Examples:
-// Opcode          aux   args
-//  OpAdd          nil      2
-//  OpConst     string      0    string constant
-//  OpConst      int64      0    int64 constant
-//  OpAddcq      int64      1    amd64 op: v = arg[0] + constant
-
-// short form print. Just v#.
-func (v *Value) String() string {
-	if v == nil {
-		return "nil" // should never happen, but not panicking helps with debugging
-	}
-	return fmt.Sprintf("v%d", v.ID)
-}
-
-func (v *Value) AuxInt8() int8 {
-	if opcodeTable[v.Op].auxType != auxInt8 {
-		v.Fatalf("op %s doesn't have an int8 aux field", v.Op)
-	}
-	return int8(v.AuxInt)
-}
-
-func (v *Value) AuxInt16() int16 {
-	if opcodeTable[v.Op].auxType != auxInt16 {
-		v.Fatalf("op %s doesn't have an int16 aux field", v.Op)
-	}
-	return int16(v.AuxInt)
-}
-
-func (v *Value) AuxInt32() int32 {
-	if opcodeTable[v.Op].auxType != auxInt32 {
-		v.Fatalf("op %s doesn't have an int32 aux field", v.Op)
-	}
-	return int32(v.AuxInt)
-}
-
-func (v *Value) AuxFloat() float64 {
-	if opcodeTable[v.Op].auxType != auxFloat32 && opcodeTable[v.Op].auxType != auxFloat64 {
-		v.Fatalf("op %s doesn't have a float aux field", v.Op)
-	}
-	return math.Float64frombits(uint64(v.AuxInt))
-}
-func (v *Value) AuxValAndOff() ValAndOff {
-	if opcodeTable[v.Op].auxType != auxSymValAndOff {
-		v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op)
-	}
-	return ValAndOff(v.AuxInt)
-}
-
-// long form print.  v# = opcode <type> [aux] args [: reg]
-func (v *Value) LongString() string {
-	s := fmt.Sprintf("v%d = %s", v.ID, v.Op)
-	s += " <" + v.Type.String() + ">"
-	s += v.auxString()
-	for _, a := range v.Args {
-		s += fmt.Sprintf(" %v", a)
-	}
-	r := v.Block.Func.RegAlloc
-	if int(v.ID) < len(r) && r[v.ID] != nil {
-		s += " : " + r[v.ID].Name()
-	}
-	return s
-}
-
-func (v *Value) auxString() string {
-	switch opcodeTable[v.Op].auxType {
-	case auxBool:
-		if v.AuxInt == 0 {
-			return " [false]"
-		} else {
-			return " [true]"
-		}
-	case auxInt8:
-		return fmt.Sprintf(" [%d]", v.AuxInt8())
-	case auxInt16:
-		return fmt.Sprintf(" [%d]", v.AuxInt16())
-	case auxInt32:
-		return fmt.Sprintf(" [%d]", v.AuxInt32())
-	case auxInt64, auxInt128:
-		return fmt.Sprintf(" [%d]", v.AuxInt)
-	case auxSizeAndAlign:
-		return fmt.Sprintf(" [%s]", SizeAndAlign(v.AuxInt))
-	case auxFloat32, auxFloat64:
-		return fmt.Sprintf(" [%g]", v.AuxFloat())
-	case auxString:
-		return fmt.Sprintf(" {%q}", v.Aux)
-	case auxSym:
-		if v.Aux != nil {
-			return fmt.Sprintf(" {%v}", v.Aux)
-		}
-	case auxSymOff, auxSymInt32:
-		s := ""
-		if v.Aux != nil {
-			s = fmt.Sprintf(" {%v}", v.Aux)
-		}
-		if v.AuxInt != 0 {
-			s += fmt.Sprintf(" [%v]", v.AuxInt)
-		}
-		return s
-	case auxSymValAndOff:
-		s := ""
-		if v.Aux != nil {
-			s = fmt.Sprintf(" {%v}", v.Aux)
-		}
-		return s + fmt.Sprintf(" [%s]", v.AuxValAndOff())
-	case auxSymSizeAndAlign:
-		s := ""
-		if v.Aux != nil {
-			s = fmt.Sprintf(" {%v}", v.Aux)
-		}
-		return s + fmt.Sprintf(" [%s]", SizeAndAlign(v.AuxInt))
-	}
-	return ""
-}
-
-func (v *Value) AddArg(w *Value) {
-	if v.Args == nil {
-		v.resetArgs() // use argstorage
-	}
-	v.Args = append(v.Args, w)
-	w.Uses++
-}
-func (v *Value) AddArgs(a ...*Value) {
-	if v.Args == nil {
-		v.resetArgs() // use argstorage
-	}
-	v.Args = append(v.Args, a...)
-	for _, x := range a {
-		x.Uses++
-	}
-}
-func (v *Value) SetArg(i int, w *Value) {
-	v.Args[i].Uses--
-	v.Args[i] = w
-	w.Uses++
-}
-func (v *Value) RemoveArg(i int) {
-	v.Args[i].Uses--
-	copy(v.Args[i:], v.Args[i+1:])
-	v.Args[len(v.Args)-1] = nil // aid GC
-	v.Args = v.Args[:len(v.Args)-1]
-}
-func (v *Value) SetArgs1(a *Value) {
-	v.resetArgs()
-	v.AddArg(a)
-}
-func (v *Value) SetArgs2(a *Value, b *Value) {
-	v.resetArgs()
-	v.AddArg(a)
-	v.AddArg(b)
-}
-
-func (v *Value) resetArgs() {
-	for _, a := range v.Args {
-		a.Uses--
-	}
-	v.argstorage[0] = nil
-	v.argstorage[1] = nil
-	v.argstorage[2] = nil
-	v.Args = v.argstorage[:0]
-}
-
-func (v *Value) reset(op Op) {
-	v.Op = op
-	v.resetArgs()
-	v.AuxInt = 0
-	v.Aux = nil
-}
-
-// copyInto makes a new value identical to v and adds it to the end of b.
-func (v *Value) copyInto(b *Block) *Value {
-	c := b.NewValue0(v.Line, v.Op, v.Type)
-	c.Aux = v.Aux
-	c.AuxInt = v.AuxInt
-	c.AddArgs(v.Args...)
-	for _, a := range v.Args {
-		if a.Type.IsMemory() {
-			v.Fatalf("can't move a value with a memory arg %s", v.LongString())
-		}
-	}
-	return c
-}
-
-func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) }
-func (v *Value) Log() bool                            { return v.Block.Log() }
-func (v *Value) Fatalf(msg string, args ...interface{}) {
-	v.Block.Func.Config.Fatalf(v.Line, msg, args...)
-}
-
-// isGenericIntConst returns whether v is a generic integer constant.
-func (v *Value) isGenericIntConst() bool {
-	return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8)
-}
-
-// ExternSymbol is an aux value that encodes a variable's
-// constant offset from the static base pointer.
-type ExternSymbol struct {
-	Typ Type         // Go type
-	Sym fmt.Stringer // A *gc.Sym referring to a global variable
-	// Note: the offset for an external symbol is not
-	// calculated until link time.
-}
-
-// ArgSymbol is an aux value that encodes an argument or result
-// variable's constant offset from FP (FP = SP + framesize).
-type ArgSymbol struct {
-	Typ  Type   // Go type
-	Node GCNode // A *gc.Node referring to the argument/result variable.
-}
-
-// AutoSymbol is an aux value that encodes a local variable's
-// constant offset from SP.
-type AutoSymbol struct {
-	Typ  Type   // Go type
-	Node GCNode // A *gc.Node referring to a local (auto) variable.
-}
-
-func (s *ExternSymbol) String() string {
-	return s.Sym.String()
-}
-
-func (s *ArgSymbol) String() string {
-	return s.Node.String()
-}
-
-func (s *AutoSymbol) String() string {
-	return s.Node.String()
-}
-
-// Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering.
-func (v *Value) Reg() int16 {
-	reg := v.Block.Func.RegAlloc[v.ID]
-	if reg == nil {
-		v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
-	}
-	return reg.(*Register).objNum
-}
-
-// Reg0 returns the register assigned to the first output of v, in cmd/internal/obj/$ARCH numbering.
-func (v *Value) Reg0() int16 {
-	reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[0]
-	if reg == nil {
-		v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
-	}
-	return reg.(*Register).objNum
-}
-
-// Reg1 returns the register assigned to the second output of v, in cmd/internal/obj/$ARCH numbering.
-func (v *Value) Reg1() int16 {
-	reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[1]
-	if reg == nil {
-		v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
-	}
-	return reg.(*Register).objNum
-}
-
-func (v *Value) RegName() string {
-	reg := v.Block.Func.RegAlloc[v.ID]
-	if reg == nil {
-		v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
-	}
-	return reg.(*Register).name
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/writebarrier.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/writebarrier.go
deleted file mode 100644
index 620ca37..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/writebarrier.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/writebarrier.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/writebarrier.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "fmt"
-
-// writebarrier expands write barrier ops (StoreWB, MoveWB, etc.) into
-// branches and runtime calls, like
-//
-// if writeBarrier.enabled {
-//   writebarrierptr(ptr, val)
-// } else {
-//   *ptr = val
-// }
-//
-// If ptr is the address of a stack slot, the write barrier will be removed
-// and a normal store will be used.
-// A sequence of WB stores for many pointer fields of a single type will
-// be emitted together, with a single branch.
-//
-// Expanding WB ops introduces new control flows, and we would need to
-// split a block into two if there were values after WB ops, which would
-// require scheduling the values. To avoid this complexity, when building
-// SSA, we make sure that WB ops are always at the end of a block. We do
-// this before fuse as it may merge blocks. It also helps to reduce the
-// number of blocks, as fuse merges blocks introduced in this phase.
-func writebarrier(f *Func) {
-	var sb, sp, wbaddr *Value
-	var writebarrierptr, typedmemmove, typedmemclr interface{} // *gc.Sym
-	var storeWBs, others []*Value
-	var wbs *sparseSet
-	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no WB stores
-	valueLoop:
-		for i, v := range b.Values {
-			switch v.Op {
-			case OpStoreWB, OpMoveWB, OpMoveWBVolatile, OpZeroWB:
-				if IsStackAddr(v.Args[0]) {
-					switch v.Op {
-					case OpStoreWB:
-						v.Op = OpStore
-					case OpMoveWB, OpMoveWBVolatile:
-						v.Op = OpMove
-						v.Aux = nil
-					case OpZeroWB:
-						v.Op = OpZero
-						v.Aux = nil
-					}
-					continue
-				}
-
-				if wbaddr == nil {
-					// initialize global values for write barrier test and calls
-					// find SB and SP values in entry block
-					initln := f.Entry.Line
-					for _, v := range f.Entry.Values {
-						if v.Op == OpSB {
-							sb = v
-						}
-						if v.Op == OpSP {
-							sp = v
-						}
-					}
-					if sb == nil {
-						sb = f.Entry.NewValue0(initln, OpSB, f.Config.fe.TypeUintptr())
-					}
-					if sp == nil {
-						sp = f.Entry.NewValue0(initln, OpSP, f.Config.fe.TypeUintptr())
-					}
-					wbsym := &ExternSymbol{Typ: f.Config.fe.TypeBool(), Sym: f.Config.fe.Syslook("writeBarrier").(fmt.Stringer)}
-					wbaddr = f.Entry.NewValue1A(initln, OpAddr, f.Config.fe.TypeUInt32().PtrTo(), wbsym, sb)
-					writebarrierptr = f.Config.fe.Syslook("writebarrierptr")
-					typedmemmove = f.Config.fe.Syslook("typedmemmove")
-					typedmemclr = f.Config.fe.Syslook("typedmemclr")
-
-					wbs = f.newSparseSet(f.NumValues())
-					defer f.retSparseSet(wbs)
-				}
-
-				line := v.Line
-
-				// there may be a sequence of WB stores in the current block. find them.
-				storeWBs = storeWBs[:0]
-				others = others[:0]
-				wbs.clear()
-				for _, w := range b.Values[i:] {
-					if w.Op == OpStoreWB || w.Op == OpMoveWB || w.Op == OpMoveWBVolatile || w.Op == OpZeroWB {
-						storeWBs = append(storeWBs, w)
-						wbs.add(w.ID)
-					} else {
-						others = append(others, w)
-					}
-				}
-
-				// make sure that no value in this block depends on WB stores
-				for _, w := range b.Values {
-					if w.Op == OpStoreWB || w.Op == OpMoveWB || w.Op == OpMoveWBVolatile || w.Op == OpZeroWB {
-						continue
-					}
-					for _, a := range w.Args {
-						if wbs.contains(a.ID) {
-							f.Fatalf("value %v depends on WB store %v in the same block %v", w, a, b)
-						}
-					}
-				}
-
-				// find the memory before the WB stores
-				// this memory is not a WB store but it is used in a WB store.
-				var mem *Value
-				for _, w := range storeWBs {
-					a := w.Args[len(w.Args)-1]
-					if wbs.contains(a.ID) {
-						continue
-					}
-					if mem != nil {
-						b.Fatalf("two stores live simultaneously: %s, %s", mem, a)
-					}
-					mem = a
-				}
-
-				b.Values = append(b.Values[:i], others...) // move WB ops out of this block
-
-				bThen := f.NewBlock(BlockPlain)
-				bElse := f.NewBlock(BlockPlain)
-				bEnd := f.NewBlock(b.Kind)
-				bThen.Line = line
-				bElse.Line = line
-				bEnd.Line = line
-
-				// set up control flow for end block
-				bEnd.SetControl(b.Control)
-				bEnd.Likely = b.Likely
-				for _, e := range b.Succs {
-					bEnd.Succs = append(bEnd.Succs, e)
-					e.b.Preds[e.i].b = bEnd
-				}
-
-				// set up control flow for write barrier test
-				// load word, test word, avoiding partial register write from load byte.
-				flag := b.NewValue2(line, OpLoad, f.Config.fe.TypeUInt32(), wbaddr, mem)
-				const0 := f.ConstInt32(line, f.Config.fe.TypeUInt32(), 0)
-				flag = b.NewValue2(line, OpNeq32, f.Config.fe.TypeBool(), flag, const0)
-				b.Kind = BlockIf
-				b.SetControl(flag)
-				b.Likely = BranchUnlikely
-				b.Succs = b.Succs[:0]
-				b.AddEdgeTo(bThen)
-				b.AddEdgeTo(bElse)
-				bThen.AddEdgeTo(bEnd)
-				bElse.AddEdgeTo(bEnd)
-
-				memThen := mem
-				memElse := mem
-				for _, w := range storeWBs {
-					var val *Value
-					ptr := w.Args[0]
-					siz := w.AuxInt
-					typ := w.Aux // only non-nil for MoveWB, MoveWBVolatile, ZeroWB
-
-					var op Op
-					var fn interface{} // *gc.Sym
-					switch w.Op {
-					case OpStoreWB:
-						op = OpStore
-						fn = writebarrierptr
-						val = w.Args[1]
-					case OpMoveWB, OpMoveWBVolatile:
-						op = OpMove
-						fn = typedmemmove
-						val = w.Args[1]
-					case OpZeroWB:
-						op = OpZero
-						fn = typedmemclr
-					}
-
-					// then block: emit write barrier call
-					memThen = wbcall(line, bThen, fn, typ, ptr, val, memThen, sp, sb, w.Op == OpMoveWBVolatile)
-
-					// else block: normal store
-					if op == OpZero {
-						memElse = bElse.NewValue2I(line, op, TypeMem, siz, ptr, memElse)
-					} else {
-						memElse = bElse.NewValue3I(line, op, TypeMem, siz, ptr, val, memElse)
-					}
-				}
-
-				// merge memory
-				// Splice memory Phi into the last memory of the original sequence,
-				// which may be used in subsequent blocks. Other memories in the
-				// sequence must be dead after this block since there can be only
-				// one memory live.
-				last := storeWBs[0]
-				if len(storeWBs) > 1 {
-					// find the last store
-					last = nil
-					wbs.clear() // we reuse wbs to record WB stores that are used in another WB store
-					for _, w := range storeWBs {
-						wbs.add(w.Args[len(w.Args)-1].ID)
-					}
-					for _, w := range storeWBs {
-						if wbs.contains(w.ID) {
-							continue
-						}
-						if last != nil {
-							b.Fatalf("two stores live simultaneously: %s, %s", last, w)
-						}
-						last = w
-					}
-				}
-				bEnd.Values = append(bEnd.Values, last)
-				last.Block = bEnd
-				last.reset(OpPhi)
-				last.Type = TypeMem
-				last.AddArg(memThen)
-				last.AddArg(memElse)
-				for _, w := range storeWBs {
-					if w != last {
-						w.resetArgs()
-					}
-				}
-				for _, w := range storeWBs {
-					if w != last {
-						f.freeValue(w)
-					}
-				}
-
-				if f.Config.fe.Debug_wb() {
-					f.Config.Warnl(line, "write barrier")
-				}
-
-				break valueLoop
-			}
-		}
-	}
-}
-
-// wbcall emits a write barrier runtime call in b and returns the memory.
-// If valIsVolatile, it moves val into temp space before making the call.
-func wbcall(line int32, b *Block, fn interface{}, typ interface{}, ptr, val, mem, sp, sb *Value, valIsVolatile bool) *Value {
-	config := b.Func.Config
-
-	var tmp GCNode
-	if valIsVolatile {
-		// Copy to temp location if the source is volatile (will be clobbered by
-		// a function call). Marshaling the args to typedmemmove might clobber the
-		// value we're trying to move.
-		t := val.Type.ElemType()
-		tmp = config.fe.Auto(t)
-		aux := &AutoSymbol{Typ: t, Node: tmp}
-		mem = b.NewValue1A(line, OpVarDef, TypeMem, tmp, mem)
-		tmpaddr := b.NewValue1A(line, OpAddr, t.PtrTo(), aux, sp)
-		siz := MakeSizeAndAlign(t.Size(), t.Alignment()).Int64()
-		mem = b.NewValue3I(line, OpMove, TypeMem, siz, tmpaddr, val, mem)
-		val = tmpaddr
-	}
-
-	// put arguments on stack
-	off := config.ctxt.FixedFrameSize()
-
-	if typ != nil { // for typedmemmove
-		taddr := b.NewValue1A(line, OpAddr, config.fe.TypeUintptr(), typ, sb)
-		off = round(off, taddr.Type.Alignment())
-		arg := b.NewValue1I(line, OpOffPtr, taddr.Type.PtrTo(), off, sp)
-		mem = b.NewValue3I(line, OpStore, TypeMem, ptr.Type.Size(), arg, taddr, mem)
-		off += taddr.Type.Size()
-	}
-
-	off = round(off, ptr.Type.Alignment())
-	arg := b.NewValue1I(line, OpOffPtr, ptr.Type.PtrTo(), off, sp)
-	mem = b.NewValue3I(line, OpStore, TypeMem, ptr.Type.Size(), arg, ptr, mem)
-	off += ptr.Type.Size()
-
-	if val != nil {
-		off = round(off, val.Type.Alignment())
-		arg = b.NewValue1I(line, OpOffPtr, val.Type.PtrTo(), off, sp)
-		mem = b.NewValue3I(line, OpStore, TypeMem, val.Type.Size(), arg, val, mem)
-		off += val.Type.Size()
-	}
-	off = round(off, config.PtrSize)
-
-	// issue call
-	mem = b.NewValue1A(line, OpStaticCall, TypeMem, fn, mem)
-	mem.AuxInt = off - config.ctxt.FixedFrameSize()
-
-	if valIsVolatile {
-		mem = b.NewValue1A(line, OpVarKill, TypeMem, tmp, mem) // mark temp dead
-	}
-
-	return mem
-}
-
-// round rounds o up to a multiple of r; r must be a power of 2.
-func round(o int64, r int64) int64 {
-	return (o + r - 1) &^ (r - 1)
-}
-
-// IsStackAddr returns whether v is known to be an address of a stack slot
-func IsStackAddr(v *Value) bool {
-	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
-		v = v.Args[0]
-	}
-	switch v.Op {
-	case OpSP:
-		return true
-	case OpAddr:
-		return v.Args[0].Op == OpSP
-	}
-	return false
-}
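For orientation, the expansion performed by the writebarrier pass above corresponds, at the source level, to the branch shape sketched below. This is a minimal, self-contained illustration; writeBarrierEnabled and writebarrierptrSketch are hypothetical stand-ins for the runtime's writeBarrier flag and writebarrierptr helper, not the real internals.

package main

import "fmt"

// Hypothetical stand-in for the runtime's writeBarrier.enabled flag (illustration only).
var writeBarrierEnabled bool

// writebarrierptrSketch stands in for the runtime's writebarrierptr. A real write
// barrier would also record the pointer write for the GC; here we only do the store.
func writebarrierptrSketch(ptr *int, val int) {
	*ptr = val
}

// storePointer shows the branch shape the pass emits for a single *ptr = val:
// test the enabled flag once, then either call the barrier or do a plain store.
func storePointer(ptr *int, val int) {
	if writeBarrierEnabled {
		writebarrierptrSketch(ptr, val)
	} else {
		*ptr = val
	}
}

func main() {
	var x int
	storePointer(&x, 42)
	fmt.Println(x) // 42
}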
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/writebarrier_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/writebarrier_test.go
deleted file mode 100644
index 0f3b0e0..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/writebarrier_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/writebarrier_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/writebarrier_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-import "testing"
-
-func TestWriteBarrierStoreOrder(t *testing.T) {
-	// Make sure the writebarrier phase works even if StoreWB ops are not in dependency order
-	c := testConfig(t)
-	ptrType := &TypeImpl{Size_: 8, Ptr: true, Name: "testptr"} // dummy for testing
-	fun := Fun(c, "entry",
-		Bloc("entry",
-			Valu("start", OpInitMem, TypeMem, 0, nil),
-			Valu("sb", OpSB, TypeInvalid, 0, nil),
-			Valu("sp", OpSP, TypeInvalid, 0, nil),
-			Valu("v", OpConstNil, ptrType, 0, nil),
-			Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
-			Valu("wb2", OpStoreWB, TypeMem, 8, nil, "addr1", "v", "wb1"),
-			Valu("wb1", OpStoreWB, TypeMem, 8, nil, "addr1", "v", "start"), // wb1 and wb2 are out of order
-			Goto("exit")),
-		Bloc("exit",
-			Exit("wb2")))
-
-	CheckFunc(fun.f)
-	writebarrier(fun.f)
-	CheckFunc(fun.f)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/zcse.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/zcse.go
deleted file mode 100644
index 2656d11..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/ssa/zcse.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/zcse.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/ssa/zcse.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssa
-
-// zcse does an initial pass of common-subexpression elimination on the
-// function for values with zero arguments to allow the more expensive cse
-// to begin with a reduced number of values. Values are just relinked,
-// nothing is deleted. A subsequent deadcode pass is required to actually
-// remove duplicate expressions.
-func zcse(f *Func) {
-	vals := make(map[vkey]*Value)
-
-	for _, b := range f.Blocks {
-		for i := 0; i < len(b.Values); {
-			v := b.Values[i]
-			next := true
-			if opcodeTable[v.Op].argLen == 0 {
-				key := vkey{v.Op, keyFor(v), v.Aux, v.Type}
-				if vals[key] == nil {
-					vals[key] = v
-					if b != f.Entry {
-						// Move v to the entry block so it will dominate every block
-						// where we might use it. This prevents the need for any dominator
-						// calculations in this pass.
-						v.Block = f.Entry
-						f.Entry.Values = append(f.Entry.Values, v)
-						last := len(b.Values) - 1
-						b.Values[i] = b.Values[last]
-						b.Values[last] = nil
-						b.Values = b.Values[:last]
-
-						// process b.Values[i] again
-						next = false
-					}
-				}
-			}
-			if next {
-				i++
-			}
-		}
-	}
-
-	for _, b := range f.Blocks {
-		for _, v := range b.Values {
-			for i, a := range v.Args {
-				if opcodeTable[a.Op].argLen == 0 {
-					key := vkey{a.Op, keyFor(a), a.Aux, a.Type}
-					if rv, ok := vals[key]; ok {
-						v.SetArg(i, rv)
-					}
-				}
-			}
-		}
-	}
-}
-
-// vkey is a type used to uniquely identify a zero arg value.
-type vkey struct {
-	op Op
-	ai int64       // aux int
-	ax interface{} // aux
-	t  Type        // type
-}
-
-// keyFor returns the AuxInt portion of a key structure uniquely identifying a
-// zero arg value for the supported ops.
-func keyFor(v *Value) int64 {
-	switch v.Op {
-	case OpConst64, OpConst64F, OpConst32F:
-		return v.AuxInt
-	case OpConst32:
-		return int64(int32(v.AuxInt))
-	case OpConst16:
-		return int64(int16(v.AuxInt))
-	case OpConst8, OpConstBool:
-		return int64(int8(v.AuxInt))
-	default:
-		return v.AuxInt
-	}
-}
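The idea behind zcse above, deduplicating zero-argument values through a map keyed on their identifying fields and relinking uses to a canonical copy, can be sketched on a simplified value representation as follows. The value and vkey types here are illustrative, not the compiler's real types.

package main

import "fmt"

// Simplified stand-ins for the compiler's Value and key types (illustration only).
type value struct {
	op     string
	auxInt int64
	args   []*value
}

type vkey struct {
	op     string
	auxInt int64
}

// zcseSketch deduplicates zero-argument values: the first value seen for a key
// becomes canonical, and every use of an equivalent value is relinked to it.
// Duplicates are left in place and would be removed by a later deadcode pass.
func zcseSketch(vals []*value) {
	canon := map[vkey]*value{}
	for _, v := range vals {
		if len(v.args) == 0 {
			k := vkey{v.op, v.auxInt}
			if canon[k] == nil {
				canon[k] = v
			}
		}
	}
	for _, v := range vals {
		for i, a := range v.args {
			if len(a.args) == 0 {
				if c := canon[vkey{a.op, a.auxInt}]; c != nil {
					v.args[i] = c
				}
			}
		}
	}
}

func main() {
	c1 := &value{op: "Const64", auxInt: 1}
	c2 := &value{op: "Const64", auxInt: 1}
	add := &value{op: "Add64", args: []*value{c1, c2}}
	zcseSketch([]*value{c1, c2, add})
	fmt.Println(add.args[0] == add.args[1]) // true: both args now point at the canonical constant
}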
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/dumper.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/dumper.go
deleted file mode 100644
index 0572411..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/dumper.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/dumper.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/dumper.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements printing of syntax tree structures.
-
-package syntax
-
-import (
-	"fmt"
-	"io"
-	"reflect"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Fdump dumps the structure of the syntax tree rooted at n to w.
-// It is intended for debugging purposes; no specific output format
-// is guaranteed.
-func Fdump(w io.Writer, n Node) (err error) {
-	p := dumper{
-		output: w,
-		ptrmap: make(map[Node]int),
-		last:   '\n', // force printing of line number on first line
-	}
-
-	defer func() {
-		if e := recover(); e != nil {
-			err = e.(localError).err // re-panics if it's not a localError
-		}
-	}()
-
-	if n == nil {
-		p.printf("nil\n")
-		return
-	}
-	p.dump(reflect.ValueOf(n), n)
-	p.printf("\n")
-
-	return
-}
-
-type dumper struct {
-	output io.Writer
-	ptrmap map[Node]int // node -> dump line number
-	indent int          // current indentation level
-	last   byte         // last byte processed by Write
-	line   int          // current line number
-}
-
-var indentBytes = []byte(".  ")
-
-func (p *dumper) Write(data []byte) (n int, err error) {
-	var m int
-	for i, b := range data {
-		// invariant: data[0:n] has been written
-		if b == '\n' {
-			m, err = p.output.Write(data[n : i+1])
-			n += m
-			if err != nil {
-				return
-			}
-		} else if p.last == '\n' {
-			p.line++
-			_, err = fmt.Fprintf(p.output, "%6d  ", p.line)
-			if err != nil {
-				return
-			}
-			for j := p.indent; j > 0; j-- {
-				_, err = p.output.Write(indentBytes)
-				if err != nil {
-					return
-				}
-			}
-		}
-		p.last = b
-	}
-	if len(data) > n {
-		m, err = p.output.Write(data[n:])
-		n += m
-	}
-	return
-}
-
-// localError wraps locally caught errors so we can distinguish
-// them from genuine panics which we don't want to return as errors.
-type localError struct {
-	err error
-}
-
-// printf is a convenience wrapper that takes care of print errors.
-func (p *dumper) printf(format string, args ...interface{}) {
-	if _, err := fmt.Fprintf(p, format, args...); err != nil {
-		panic(localError{err})
-	}
-}
-
-// dump prints the contents of x.
-// If x is the reflect.Value of a struct s, where &s
-// implements Node, then &s should be passed for n -
-// this permits printing of the unexported span and
-// comments fields of the embedded isNode field by
-// calling the Span() and Comment() methods instead of using
-// reflection.
-func (p *dumper) dump(x reflect.Value, n Node) {
-	switch x.Kind() {
-	case reflect.Interface:
-		if x.IsNil() {
-			p.printf("nil")
-			return
-		}
-		p.dump(x.Elem(), nil)
-
-	case reflect.Ptr:
-		if x.IsNil() {
-			p.printf("nil")
-			return
-		}
-
-		// special cases for identifiers w/o attached comments (common case)
-		if x, ok := x.Interface().(*Name); ok {
-			p.printf(x.Value)
-			return
-		}
-
-		p.printf("*")
-		// Fields may share type expressions, and declarations
-		// may share the same group - use ptrmap to keep track
-		// of nodes that have been printed already.
-		if ptr, ok := x.Interface().(Node); ok {
-			if line, exists := p.ptrmap[ptr]; exists {
-				p.printf("(Node @ %d)", line)
-				return
-			}
-			p.ptrmap[ptr] = p.line
-			n = ptr
-		}
-		p.dump(x.Elem(), n)
-
-	case reflect.Slice:
-		if x.IsNil() {
-			p.printf("nil")
-			return
-		}
-		p.printf("%s (%d entries) {", x.Type(), x.Len())
-		if x.Len() > 0 {
-			p.indent++
-			p.printf("\n")
-			for i, n := 0, x.Len(); i < n; i++ {
-				p.printf("%d: ", i)
-				p.dump(x.Index(i), nil)
-				p.printf("\n")
-			}
-			p.indent--
-		}
-		p.printf("}")
-
-	case reflect.Struct:
-		typ := x.Type()
-
-		// if span, ok := x.Interface().(lexical.Span); ok {
-		// 	p.printf("%s", &span)
-		// 	return
-		// }
-
-		p.printf("%s {", typ)
-		p.indent++
-
-		first := true
-		if n != nil {
-			p.printf("\n")
-			first = false
-			// p.printf("Span: %s\n", n.Span())
-			// if c := *n.Comments(); c != nil {
-			// 	p.printf("Comments: ")
-			// 	p.dump(reflect.ValueOf(c), nil) // a Comment is not a Node
-			// 	p.printf("\n")
-			// }
-		}
-
-		for i, n := 0, typ.NumField(); i < n; i++ {
-			// Exclude non-exported fields because their
-			// values cannot be accessed via reflection.
-			if name := typ.Field(i).Name; isExported(name) {
-				if first {
-					p.printf("\n")
-					first = false
-				}
-				p.printf("%s: ", name)
-				p.dump(x.Field(i), nil)
-				p.printf("\n")
-			}
-		}
-
-		p.indent--
-		p.printf("}")
-
-	default:
-		switch x := x.Interface().(type) {
-		case string:
-			// print strings in quotes
-			p.printf("%q", x)
-		default:
-			p.printf("%v", x)
-		}
-	}
-}
-
-func isExported(name string) bool {
-	ch, _ := utf8.DecodeRuneInString(name)
-	return unicode.IsUpper(ch)
-}
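The line-numbering Write method in the dumper above can be illustrated on its own with a small io.Writer wrapper that prefixes each fresh output line with a running counter. lineWriter is a hypothetical name for this sketch, not the dumper's actual type.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// lineWriter prefixes every new line of output with a running line number,
// mirroring the dumper's trick of tracking the last byte written.
type lineWriter struct {
	out  io.Writer
	last byte
	line int
}

func (w *lineWriter) Write(data []byte) (n int, err error) {
	for _, b := range data {
		if w.last == '\n' {
			// Start of a new line: emit the line number before the content.
			w.line++
			if _, err = fmt.Fprintf(w.out, "%6d  ", w.line); err != nil {
				return n, err
			}
		}
		if _, err = w.out.Write([]byte{b}); err != nil {
			return n, err
		}
		w.last = b
		n++
	}
	return n, nil
}

func main() {
	w := &lineWriter{out: os.Stdout, last: '\n'} // last = '\n' forces a number on the first line
	io.Copy(w, strings.NewReader("first line\nsecond line\n"))
}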
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/dumper_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/dumper_test.go
deleted file mode 100644
index 291eb01..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/dumper_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/dumper_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/dumper_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"os"
-	"testing"
-)
-
-func TestDump(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode")
-	}
-
-	ast, err := ParseFile(*src, nil, nil, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	Fdump(os.Stdout, ast)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/nodes.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/nodes.go
deleted file mode 100644
index 17e9b7a..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/nodes.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/nodes.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/nodes.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-// ----------------------------------------------------------------------------
-// Nodes
-
-type Node interface {
-	Line() uint32
-	aNode()
-	init(p *parser)
-}
-
-type node struct {
-	// commented out for now since not yet used
-	// doc  *Comment // nil means no comment(s) attached
-	pos  uint32
-	line uint32
-}
-
-func (*node) aNode() {}
-
-func (n *node) Line() uint32 {
-	return n.line
-}
-
-func (n *node) init(p *parser) {
-	n.pos = uint32(p.pos)
-	n.line = uint32(p.line)
-}
-
-// ----------------------------------------------------------------------------
-// Files
-
-// package PkgName; DeclList[0], DeclList[1], ...
-type File struct {
-	PkgName  *Name
-	DeclList []Decl
-	Lines    int
-	node
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-type (
-	Decl interface {
-		Node
-		aDecl()
-	}
-
-	//              Path
-	// LocalPkgName Path
-	ImportDecl struct {
-		LocalPkgName *Name // including "."; nil means no rename present
-		Path         *BasicLit
-		Group        *Group // nil means not part of a group
-		decl
-	}
-
-	// NameList
-	// NameList      = Values
-	// NameList Type = Values
-	ConstDecl struct {
-		NameList []*Name
-		Type     Expr   // nil means no type
-		Values   Expr   // nil means no values
-		Group    *Group // nil means not part of a group
-		decl
-	}
-
-	// Name Type
-	TypeDecl struct {
-		Name   *Name
-		Type   Expr
-		Group  *Group // nil means not part of a group
-		Pragma Pragma
-		decl
-	}
-
-	// NameList Type
-	// NameList Type = Values
-	// NameList      = Values
-	VarDecl struct {
-		NameList []*Name
-		Type     Expr   // nil means no type
-		Values   Expr   // nil means no values
-		Group    *Group // nil means not part of a group
-		decl
-	}
-
-	// func          Name Type { Body }
-	// func          Name Type
-	// func Receiver Name Type { Body }
-	// func Receiver Name Type
-	FuncDecl struct {
-		Attr    map[string]bool // go:attr map
-		Recv    *Field          // nil means regular function
-		Name    *Name
-		Type    *FuncType
-		Body    []Stmt // nil means no body (forward declaration)
-		Pragma  Pragma // TODO(mdempsky): Cleaner solution.
-		EndLine uint32 // TODO(mdempsky): Cleaner solution.
-		decl
-	}
-)
-
-type decl struct{ node }
-
-func (*decl) aDecl() {}
-
-// All declarations belonging to the same group point to the same Group node.
-type Group struct {
-	dummy int // not empty so we are guaranteed different Group instances
-}
-
-// ----------------------------------------------------------------------------
-// Expressions
-
-type (
-	Expr interface {
-		Node
-		aExpr()
-	}
-
-	// Value
-	Name struct {
-		Value string
-		expr
-	}
-
-	// Value
-	BasicLit struct {
-		Value string
-		Kind  LitKind
-		expr
-	}
-
-	// Type { ElemList[0], ElemList[1], ... }
-	CompositeLit struct {
-		Type     Expr // nil means no literal type
-		ElemList []Expr
-		NKeys    int    // number of elements with keys
-		EndLine  uint32 // TODO(mdempsky): Cleaner solution.
-		expr
-	}
-
-	// Key: Value
-	KeyValueExpr struct {
-		Key, Value Expr
-		expr
-	}
-
-	// func Type { Body }
-	FuncLit struct {
-		Type    *FuncType
-		Body    []Stmt
-		EndLine uint32 // TODO(mdempsky): Cleaner solution.
-		expr
-	}
-
-	// (X)
-	ParenExpr struct {
-		X Expr
-		expr
-	}
-
-	// X.Sel
-	SelectorExpr struct {
-		X   Expr
-		Sel *Name
-		expr
-	}
-
-	// X[Index]
-	IndexExpr struct {
-		X     Expr
-		Index Expr
-		expr
-	}
-
-	// X[Index[0] : Index[1] : Index[2]]
-	SliceExpr struct {
-		X     Expr
-		Index [3]Expr
-		// Full indicates whether this is a simple or full slice expression.
-		// In a valid AST, this is equivalent to Index[2] != nil.
-		// TODO(mdempsky): This is only needed to report the "3-index
-		// slice of string" error when Index[2] is missing.
-		Full bool
-		expr
-	}
-
-	// X.(Type)
-	AssertExpr struct {
-		X Expr
-		// TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments)
-		Type Expr
-		expr
-	}
-
-	Operation struct {
-		Op   Operator
-		X, Y Expr // Y == nil means unary expression
-		expr
-	}
-
-	// Fun(ArgList[0], ArgList[1], ...)
-	CallExpr struct {
-		Fun     Expr
-		ArgList []Expr
-		HasDots bool // last argument is followed by ...
-		expr
-	}
-
-	// ElemList[0], ElemList[1], ...
-	ListExpr struct {
-		ElemList []Expr
-		expr
-	}
-
-	// [Len]Elem
-	ArrayType struct {
-		// TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments)
-		Len  Expr // nil means Len is ...
-		Elem Expr
-		expr
-	}
-
-	// []Elem
-	SliceType struct {
-		Elem Expr
-		expr
-	}
-
-	// ...Elem
-	DotsType struct {
-		Elem Expr
-		expr
-	}
-
-	// struct { FieldList[0] TagList[0]; FieldList[1] TagList[1]; ... }
-	StructType struct {
-		FieldList []*Field
-		TagList   []*BasicLit // i >= len(TagList) || TagList[i] == nil means no tag for field i
-		expr
-	}
-
-	// Name Type
-	//      Type
-	Field struct {
-		Name *Name // nil means anonymous field/parameter (structs/parameters), or embedded interface (interfaces)
-		Type Expr  // field names declared in a list share the same Type (identical pointers)
-		node
-	}
-
-	// interface { MethodList[0]; MethodList[1]; ... }
-	InterfaceType struct {
-		MethodList []*Field
-		expr
-	}
-
-	FuncType struct {
-		ParamList  []*Field
-		ResultList []*Field
-		expr
-	}
-
-	// map[Key]Value
-	MapType struct {
-		Key   Expr
-		Value Expr
-		expr
-	}
-
-	//   chan Elem
-	// <-chan Elem
-	// chan<- Elem
-	ChanType struct {
-		Dir  ChanDir // 0 means no direction
-		Elem Expr
-		expr
-	}
-)
-
-type expr struct{ node }
-
-func (*expr) aExpr() {}
-
-type ChanDir uint
-
-const (
-	_ ChanDir = iota
-	SendOnly
-	RecvOnly
-)
-
-// ----------------------------------------------------------------------------
-// Statements
-
-type (
-	Stmt interface {
-		Node
-		aStmt()
-	}
-
-	SimpleStmt interface {
-		Stmt
-		aSimpleStmt()
-	}
-
-	EmptyStmt struct {
-		simpleStmt
-	}
-
-	LabeledStmt struct {
-		Label *Name
-		Stmt  Stmt
-		stmt
-	}
-
-	BlockStmt struct {
-		Body []Stmt
-		stmt
-	}
-
-	ExprStmt struct {
-		X Expr
-		simpleStmt
-	}
-
-	SendStmt struct {
-		Chan, Value Expr // Chan <- Value
-		simpleStmt
-	}
-
-	DeclStmt struct {
-		DeclList []Decl
-		stmt
-	}
-
-	AssignStmt struct {
-		Op       Operator // 0 means no operation
-		Lhs, Rhs Expr     // Rhs == ImplicitOne means Lhs++ (Op == Add) or Lhs-- (Op == Sub)
-		simpleStmt
-	}
-
-	BranchStmt struct {
-		Tok   token // Break, Continue, Fallthrough, or Goto
-		Label *Name
-		stmt
-	}
-
-	CallStmt struct {
-		Tok  token // Go or Defer
-		Call *CallExpr
-		stmt
-	}
-
-	ReturnStmt struct {
-		Results Expr // nil means no explicit return values
-		stmt
-	}
-
-	IfStmt struct {
-		Init SimpleStmt
-		Cond Expr
-		Then []Stmt
-		Else Stmt // either *IfStmt or *BlockStmt
-		stmt
-	}
-
-	ForStmt struct {
-		Init SimpleStmt // incl. *RangeClause
-		Cond Expr
-		Post SimpleStmt
-		Body []Stmt
-		stmt
-	}
-
-	SwitchStmt struct {
-		Init SimpleStmt
-		Tag  Expr
-		Body []*CaseClause
-		stmt
-	}
-
-	SelectStmt struct {
-		Body []*CommClause
-		stmt
-	}
-)
-
-type (
-	RangeClause struct {
-		Lhs Expr // nil means no Lhs = or Lhs :=
-		Def bool // means :=
-		X   Expr // range X
-		simpleStmt
-	}
-
-	TypeSwitchGuard struct {
-		// TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments)
-		Lhs *Name // nil means no Lhs :=
-		X   Expr  // X.(type)
-		expr
-	}
-
-	CaseClause struct {
-		Cases Expr // nil means default clause
-		Body  []Stmt
-		node
-	}
-
-	CommClause struct {
-		Comm SimpleStmt // send or receive stmt; nil means default clause
-		Body []Stmt
-		node
-	}
-)
-
-type stmt struct{ node }
-
-func (stmt) aStmt() {}
-
-type simpleStmt struct {
-	stmt
-}
-
-func (simpleStmt) aSimpleStmt() {}
-
-// ----------------------------------------------------------------------------
-// Comments
-
-// TODO(gri) Consider renaming to CommentPos, CommentPlacement, etc.
-//           Kind = Above doesn't make much sense.
-type CommentKind uint
-
-const (
-	Above CommentKind = iota
-	Below
-	Left
-	Right
-)
-
-type Comment struct {
-	Kind CommentKind
-	Text string
-	Next *Comment
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/parser.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/parser.go
deleted file mode 100644
index 15157fb..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/parser.go
+++ /dev/null
@@ -1,2144 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/parser.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/parser.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"fmt"
-	"io"
-	"strings"
-)
-
-const debug = false
-const trace = false
-
-// The old gc parser assigned line numbers very inconsistently depending
-// on when it happened to construct AST nodes. To make transitioning to the
-// new AST easier, we try to mimic the behavior as much as possible.
-const gcCompat = true
-
-type parser struct {
-	scanner
-
-	fnest  int    // function nesting level (for error handling)
-	xnest  int    // expression nesting level (for complit ambiguity resolution)
-	indent []byte // tracing support
-}
-
-func (p *parser) init(src io.Reader, errh ErrorHandler, pragh PragmaHandler) {
-	p.scanner.init(src, errh, pragh)
-
-	p.fnest = 0
-	p.xnest = 0
-	p.indent = nil
-}
-
-func (p *parser) got(tok token) bool {
-	if p.tok == tok {
-		p.next()
-		return true
-	}
-	return false
-}
-
-func (p *parser) want(tok token) {
-	if !p.got(tok) {
-		p.syntax_error("expecting " + tok.String())
-		p.advance()
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Error handling
-
-// syntax_error reports a syntax error at the current line.
-func (p *parser) syntax_error(msg string) {
-	p.syntax_error_at(p.pos, p.line, msg)
-}
-
-// Like syntax_error, but reports the error at the given line rather than at the current lexer line.
-func (p *parser) syntax_error_at(pos, line int, msg string) {
-	if trace {
-		defer p.trace("syntax_error (" + msg + ")")()
-	}
-
-	if p.tok == _EOF && p.first != nil {
-		return // avoid meaningless follow-up errors
-	}
-
-	// add punctuation etc. as needed to msg
-	switch {
-	case msg == "":
-		// nothing to do
-	case strings.HasPrefix(msg, "in"), strings.HasPrefix(msg, "at"), strings.HasPrefix(msg, "after"):
-		msg = " " + msg
-	case strings.HasPrefix(msg, "expecting"):
-		msg = ", " + msg
-	default:
-		// plain error - we don't care about current token
-		p.error_at(pos, line, "syntax error: "+msg)
-		return
-	}
-
-	// determine token string
-	var tok string
-	switch p.tok {
-	case _Name:
-		tok = p.lit
-	case _Literal:
-		tok = "literal " + p.lit
-	case _Operator:
-		tok = p.op.String()
-	case _AssignOp:
-		tok = p.op.String() + "="
-	case _IncOp:
-		tok = p.op.String()
-		tok += tok
-	default:
-		tok = tokstring(p.tok)
-	}
-
-	p.error_at(pos, line, "syntax error: unexpected "+tok+msg)
-}
-
-// The stopset contains keywords that start a statement.
-// They are good synchronization points in case of syntax
-// errors and (usually) shouldn't be skipped over.
-const stopset uint64 = 1<<_Break |
-	1<<_Const |
-	1<<_Continue |
-	1<<_Defer |
-	1<<_Fallthrough |
-	1<<_For |
-	1<<_Func |
-	1<<_Go |
-	1<<_Goto |
-	1<<_If |
-	1<<_Return |
-	1<<_Select |
-	1<<_Switch |
-	1<<_Type |
-	1<<_Var
-
-// advance consumes tokens until it finds a token of the stopset or followlist.
-// The stopset is only considered if we are inside a function (p.fnest > 0).
-// The followlist is the list of valid tokens that can follow a production;
-// if it is empty, exactly one token is consumed to ensure progress.
-func (p *parser) advance(followlist ...token) {
-	if len(followlist) == 0 {
-		p.next()
-		return
-	}
-
-	// compute follow set
-	// TODO(gri) the args are constants - do as constant expressions?
-	var followset uint64 = 1 << _EOF // never skip over EOF
-	for _, tok := range followlist {
-		followset |= 1 << tok
-	}
-
-	for !(contains(followset, p.tok) || p.fnest > 0 && contains(stopset, p.tok)) {
-		p.next()
-	}
-}
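The stopset/followlist machinery above packs token kinds into a uint64 bitset so that membership tests during error recovery are a single AND. A minimal standalone sketch of that representation, with illustrative token values rather than the parser's real ones, looks like this.

package main

import "fmt"

// token mirrors the idea of small integer token kinds; the values are illustrative.
type token uint

const (
	_EOF token = iota
	_Semi
	_Rbrace
	_If
	_For
)

// contains reports whether tok is a member of the bitset, one bit per token kind.
func contains(set uint64, tok token) bool {
	return set&(1<<tok) != 0
}

func main() {
	// A follow set built the same way the parser builds followset: OR of 1<<tok.
	var followset uint64 = 1<<_EOF | 1<<_Semi | 1<<_Rbrace
	fmt.Println(contains(followset, _Semi)) // true
	fmt.Println(contains(followset, _For))  // false
}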
-
-func tokstring(tok token) string {
-	switch tok {
-	case _EOF:
-		return "EOF"
-	case _Comma:
-		return "comma"
-	case _Semi:
-		return "semicolon or newline"
-	}
-	return tok.String()
-}
-
-// usage: defer p.trace(msg)()
-func (p *parser) trace(msg string) func() {
-	fmt.Printf("%5d: %s%s (\n", p.line, p.indent, msg)
-	const tab = ". "
-	p.indent = append(p.indent, tab...)
-	return func() {
-		p.indent = p.indent[:len(p.indent)-len(tab)]
-		if x := recover(); x != nil {
-			panic(x) // skip print_trace
-		}
-		fmt.Printf("%5d: %s)\n", p.line, p.indent)
-	}
-}
-
-// ----------------------------------------------------------------------------
-// Package files
-//
-// Parse methods are annotated with matching Go productions as appropriate.
-// The annotations are intended as guidelines only since a single Go grammar
-// rule may be covered by multiple parse methods and vice versa.
-
-// SourceFile = PackageClause ";" { ImportDecl ";" } { TopLevelDecl ";" } .
-func (p *parser) file() *File {
-	if trace {
-		defer p.trace("file")()
-	}
-
-	f := new(File)
-	f.init(p)
-
-	// PackageClause
-	if !p.got(_Package) {
-		p.syntax_error("package statement must be first")
-		return nil
-	}
-	f.PkgName = p.name()
-	p.want(_Semi)
-
-	// don't bother continuing if package clause has errors
-	if p.first != nil {
-		return nil
-	}
-
-	// { ImportDecl ";" }
-	for p.got(_Import) {
-		f.DeclList = p.appendGroup(f.DeclList, p.importDecl)
-		p.want(_Semi)
-	}
-
-	// { TopLevelDecl ";" }
-	for p.tok != _EOF {
-		switch p.tok {
-		case _Const:
-			p.next()
-			f.DeclList = p.appendGroup(f.DeclList, p.constDecl)
-
-		case _Type:
-			p.next()
-			f.DeclList = p.appendGroup(f.DeclList, p.typeDecl)
-
-		case _Var:
-			p.next()
-			f.DeclList = p.appendGroup(f.DeclList, p.varDecl)
-
-		case _Func:
-			p.next()
-			f.DeclList = append(f.DeclList, p.funcDecl())
-
-		default:
-			if p.tok == _Lbrace && len(f.DeclList) > 0 && emptyFuncDecl(f.DeclList[len(f.DeclList)-1]) {
-				// opening { of function declaration on next line
-				p.syntax_error("unexpected semicolon or newline before {")
-			} else {
-				p.syntax_error("non-declaration statement outside function body")
-			}
-			p.advance(_Const, _Type, _Var, _Func)
-			continue
-		}
-
-		// Reset p.pragma BEFORE advancing to the next token (consuming ';')
-		// since comments before may set pragmas for the next function decl.
-		p.pragma = 0
-
-		if p.tok != _EOF && !p.got(_Semi) {
-			p.syntax_error("after top level declaration")
-			p.advance(_Const, _Type, _Var, _Func)
-		}
-	}
-	// p.tok == _EOF
-
-	f.Lines = p.source.line
-
-	return f
-}
-
-func emptyFuncDecl(dcl Decl) bool {
-	f, ok := dcl.(*FuncDecl)
-	return ok && f.Body == nil
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-// appendGroup(f) = f | "(" { f ";" } ")" .
-func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl {
-	if p.got(_Lparen) {
-		g := new(Group)
-		for p.tok != _EOF && p.tok != _Rparen {
-			list = append(list, f(g))
-			if !p.osemi(_Rparen) {
-				break
-			}
-		}
-		p.want(_Rparen)
-		return list
-	}
-
-	return append(list, f(nil))
-}
-
-func (p *parser) importDecl(group *Group) Decl {
-	if trace {
-		defer p.trace("importDecl")()
-	}
-
-	d := new(ImportDecl)
-	d.init(p)
-
-	switch p.tok {
-	case _Name:
-		d.LocalPkgName = p.name()
-	case _Dot:
-		n := new(Name)
-		n.init(p)
-		n.Value = "."
-		d.LocalPkgName = n
-		p.next()
-	}
-	if p.tok == _Literal && (gcCompat || p.kind == StringLit) {
-		d.Path = p.oliteral()
-	} else {
-		p.syntax_error("missing import path; require quoted string")
-		p.advance(_Semi, _Rparen)
-	}
-	d.Group = group
-
-	return d
-}
-
-// ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] .
-func (p *parser) constDecl(group *Group) Decl {
-	if trace {
-		defer p.trace("constDecl")()
-	}
-
-	d := new(ConstDecl)
-	d.init(p)
-
-	d.NameList = p.nameList(p.name())
-	if p.tok != _EOF && p.tok != _Semi && p.tok != _Rparen {
-		d.Type = p.tryType()
-		if p.got(_Assign) {
-			d.Values = p.exprList()
-		}
-	}
-	d.Group = group
-
-	return d
-}
-
-// TypeSpec = identifier Type .
-func (p *parser) typeDecl(group *Group) Decl {
-	if trace {
-		defer p.trace("typeDecl")()
-	}
-
-	d := new(TypeDecl)
-	d.init(p)
-
-	d.Name = p.name()
-	d.Type = p.tryType()
-	if d.Type == nil {
-		p.syntax_error("in type declaration")
-		p.advance(_Semi, _Rparen)
-	}
-	d.Group = group
-	d.Pragma = p.pragma
-
-	return d
-}
-
-// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
-func (p *parser) varDecl(group *Group) Decl {
-	if trace {
-		defer p.trace("varDecl")()
-	}
-
-	d := new(VarDecl)
-	d.init(p)
-
-	d.NameList = p.nameList(p.name())
-	if p.got(_Assign) {
-		d.Values = p.exprList()
-	} else {
-		d.Type = p.type_()
-		if p.got(_Assign) {
-			d.Values = p.exprList()
-		}
-	}
-	d.Group = group
-	if gcCompat {
-		d.init(p)
-	}
-
-	return d
-}
-
-// FunctionDecl = "func" FunctionName ( Function | Signature ) .
-// FunctionName = identifier .
-// Function     = Signature FunctionBody .
-// MethodDecl   = "func" Receiver MethodName ( Function | Signature ) .
-// Receiver     = Parameters .
-func (p *parser) funcDecl() *FuncDecl {
-	if trace {
-		defer p.trace("funcDecl")()
-	}
-
-	f := new(FuncDecl)
-	f.init(p)
-
-	badRecv := false
-	if p.tok == _Lparen {
-		rcvr := p.paramList()
-		switch len(rcvr) {
-		case 0:
-			p.error("method has no receiver")
-			badRecv = true
-		case 1:
-			f.Recv = rcvr[0]
-		default:
-			p.error("method has multiple receivers")
-			badRecv = true
-		}
-	}
-
-	if p.tok != _Name {
-		p.syntax_error("expecting name or (")
-		p.advance(_Lbrace, _Semi)
-		return nil
-	}
-
-	// TODO(gri) check for regular functions only
-	// if name.Sym.Name == "init" {
-	// 	name = renameinit()
-	// 	if params != nil || result != nil {
-	// 		p.error("func init must have no arguments and no return values")
-	// 	}
-	// }
-
-	// if localpkg.Name == "main" && name.Name == "main" {
-	// 	if params != nil || result != nil {
-	// 		p.error("func main must have no arguments and no return values")
-	// 	}
-	// }
-
-	f.Name = p.name()
-	f.Type = p.funcType()
-	if gcCompat {
-		f.node = f.Type.node
-	}
-	f.Body = p.funcBody()
-
-	f.Pragma = p.pragma
-	f.EndLine = uint32(p.line)
-
-	// TODO(gri) deal with function properties
-	// if noescape && body != nil {
-	// 	p.error("can only use //go:noescape with external func implementations")
-	// }
-
-	if badRecv {
-		return nil // TODO(gri) better solution
-	}
-	return f
-}
-
-// ----------------------------------------------------------------------------
-// Expressions
-
-func (p *parser) expr() Expr {
-	if trace {
-		defer p.trace("expr")()
-	}
-
-	return p.binaryExpr(0)
-}
-
-// Expression = UnaryExpr | Expression binary_op Expression .
-func (p *parser) binaryExpr(prec int) Expr {
-	// don't trace binaryExpr - only leads to overly nested trace output
-
-	x := p.unaryExpr()
-	for (p.tok == _Operator || p.tok == _Star) && p.prec > prec {
-		t := new(Operation)
-		t.init(p)
-		t.Op = p.op
-		t.X = x
-		tprec := p.prec
-		p.next()
-		t.Y = p.binaryExpr(tprec)
-		if gcCompat {
-			t.init(p)
-		}
-		x = t
-	}
-	return x
-}
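binaryExpr above is a precedence-climbing parser: parse one unary operand, then keep folding in operators whose precedence is strictly greater than the caller's. The standalone sketch below evaluates instead of building AST nodes, with made-up tokens and precedences, to show why 2 + 3 * 4 groups as 2 + (3 * 4).

package main

import "fmt"

// tok is an illustrative token: either a number (op == 0) or an operator with a precedence.
type tok struct {
	op   byte // '+', '*', or 0 for a number / end of input
	val  int
	prec int
}

var toks []tok
var pos int

func next() tok { t := toks[pos]; pos++; return t }
func peek() tok { return toks[pos] }

// unary parses a single operand; in this sketch that is just a number.
func unary() int { return next().val }

// binaryExpr folds operators whose precedence is strictly greater than prec,
// recursing with the operator's own precedence for its right-hand side.
func binaryExpr(prec int) int {
	x := unary()
	for peek().op != 0 && peek().prec > prec {
		t := next()
		y := binaryExpr(t.prec)
		if t.op == '+' {
			x += y
		} else {
			x *= y
		}
	}
	return x
}

func main() {
	// 2 + 3 * 4 == 14 because '*' binds tighter than '+'.
	toks = []tok{{val: 2}, {op: '+', prec: 1}, {val: 3}, {op: '*', prec: 2}, {val: 4}, {}}
	fmt.Println(binaryExpr(0))
}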
-
-// UnaryExpr = PrimaryExpr | unary_op UnaryExpr .
-func (p *parser) unaryExpr() Expr {
-	if trace {
-		defer p.trace("unaryExpr")()
-	}
-
-	switch p.tok {
-	case _Operator, _Star:
-		switch p.op {
-		case Mul, Add, Sub, Not, Xor:
-			x := new(Operation)
-			x.init(p)
-			x.Op = p.op
-			p.next()
-			x.X = p.unaryExpr()
-			if gcCompat {
-				x.init(p)
-			}
-			return x
-
-		case And:
-			p.next()
-			x := new(Operation)
-			x.init(p)
-			x.Op = And
-			// unaryExpr may have returned a parenthesized composite literal
-			// (see comment in operand) - remove parentheses if any
-			x.X = unparen(p.unaryExpr())
-			return x
-		}
-
-	case _Arrow:
-		// receive op (<-x) or receive-only channel (<-chan E)
-		p.next()
-
-		// If the next token is _Chan we still don't know if it is
-		// a channel (<-chan int) or a receive op (<-chan int(ch)).
-		// We only know once we have found the end of the unaryExpr.
-
-		x := p.unaryExpr()
-
-		// There are two cases:
-		//
-		//   <-chan...  => <-x is a channel type
-		//   <-x        => <-x is a receive operation
-		//
-		// In the first case, <- must be re-associated with
-		// the channel type parsed already:
-		//
-		//   <-(chan E)   =>  (<-chan E)
-		//   <-(chan<-E)  =>  (<-chan (<-E))
-
-		if _, ok := x.(*ChanType); ok {
-			// x is a channel type => re-associate <-
-			dir := SendOnly
-			t := x
-			for dir == SendOnly {
-				c, ok := t.(*ChanType)
-				if !ok {
-					break
-				}
-				dir = c.Dir
-				if dir == RecvOnly {
-					// t is type <-chan E but <-<-chan E is not permitted
-					// (report same error as for "type _ <-<-chan E")
-					p.syntax_error("unexpected <-, expecting chan")
-					// already progressed, no need to advance
-				}
-				c.Dir = RecvOnly
-				t = c.Elem
-			}
-			if dir == SendOnly {
-				// channel dir is <- but channel element E is not a channel
-				// (report same error as for "type _ <-chan<-E")
-				p.syntax_error(fmt.Sprintf("unexpected %s, expecting chan", String(t)))
-				// already progressed, no need to advance
-			}
-			return x
-		}
-
-		// x is not a channel type => we have a receive op
-		return &Operation{Op: Recv, X: x}
-	}
-
-	// TODO(mdempsky): We need parens here so we can report an
-	// error for "(x) := true". It should be possible to detect
-	// and reject that more efficiently though.
-	return p.pexpr(true)
-}
-
-// callStmt parses call-like statements that can be preceded by 'defer' and 'go'.
-func (p *parser) callStmt() *CallStmt {
-	if trace {
-		defer p.trace("callStmt")()
-	}
-
-	s := new(CallStmt)
-	s.init(p)
-	s.Tok = p.tok
-	p.next()
-
-	x := p.pexpr(p.tok == _Lparen) // keep_parens so we can report error below
-	switch x := x.(type) {
-	case *CallExpr:
-		s.Call = x
-		if gcCompat {
-			s.node = x.node
-		}
-	case *ParenExpr:
-		p.error(fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
-		// already progressed, no need to advance
-	default:
-		p.error(fmt.Sprintf("expression in %s must be function call", s.Tok))
-		// already progressed, no need to advance
-	}
-
-	return s // TODO(gri) should we return nil in case of failure?
-}
-
-// Operand     = Literal | OperandName | MethodExpr | "(" Expression ")" .
-// Literal     = BasicLit | CompositeLit | FunctionLit .
-// BasicLit    = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
-// OperandName = identifier | QualifiedIdent.
-func (p *parser) operand(keep_parens bool) Expr {
-	if trace {
-		defer p.trace("operand " + p.tok.String())()
-	}
-
-	switch p.tok {
-	case _Name:
-		return p.name()
-
-	case _Literal:
-		return p.oliteral()
-
-	case _Lparen:
-		p.next()
-		p.xnest++
-		x := p.expr() // expr_or_type
-		p.xnest--
-		p.want(_Rparen)
-
-		// Optimization: Record presence of ()'s only where needed
-		// for error reporting. Don't bother in other cases; it is
-		// just a waste of memory and time.
-
-		// Parentheses are not permitted on lhs of := .
-		// switch x.Op {
-		// case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
-		// 	keep_parens = true
-		// }
-
-		// Parentheses are not permitted around T in a composite
-		// literal T{}. If the next token is a {, assume x is a
-		// composite literal type T (it may not be, { could be
-		// the opening brace of a block, but we don't know yet).
-		if p.tok == _Lbrace {
-			keep_parens = true
-		}
-
-		// Parentheses are also not permitted around the expression
-		// in a go/defer statement. In that case, operand is called
-		// with keep_parens set.
-		if keep_parens {
-			x = &ParenExpr{X: x}
-		}
-		return x
-
-	case _Func:
-		p.next()
-		t := p.funcType()
-		if p.tok == _Lbrace {
-			p.fnest++
-			p.xnest++
-			f := new(FuncLit)
-			f.init(p)
-			f.Type = t
-			f.Body = p.funcBody()
-			f.EndLine = uint32(p.line)
-			p.xnest--
-			p.fnest--
-			return f
-		}
-		return t
-
-	case _Lbrack, _Chan, _Map, _Struct, _Interface:
-		return p.type_() // othertype
-
-	case _Lbrace:
-		// common case: p.header is missing simpleStmt before { in if, for, switch
-		p.syntax_error("missing operand")
-		// '{' will be consumed in pexpr - no need to consume it here
-		return nil
-
-	default:
-		p.syntax_error("expecting expression")
-		p.advance()
-		return nil
-	}
-
-	// Syntactically, composite literals are operands. Because a complit
-	// type may be a qualified identifier which is handled by pexpr
-	// (together with selector expressions), complits are parsed there
-	// as well (operand is only called from pexpr).
-}
-
-// PrimaryExpr =
-// 	Operand |
-// 	Conversion |
-// 	PrimaryExpr Selector |
-// 	PrimaryExpr Index |
-// 	PrimaryExpr Slice |
-// 	PrimaryExpr TypeAssertion |
-// 	PrimaryExpr Arguments .
-//
-// Selector       = "." identifier .
-// Index          = "[" Expression "]" .
-// Slice          = "[" ( [ Expression ] ":" [ Expression ] ) |
-//                      ( [ Expression ] ":" Expression ":" Expression )
-//                  "]" .
-// TypeAssertion  = "." "(" Type ")" .
-// Arguments      = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
-func (p *parser) pexpr(keep_parens bool) Expr {
-	if trace {
-		defer p.trace("pexpr")()
-	}
-
-	x := p.operand(keep_parens)
-
-loop:
-	for {
-		switch p.tok {
-		case _Dot:
-			p.next()
-			switch p.tok {
-			case _Name:
-				// pexpr '.' sym
-				t := new(SelectorExpr)
-				t.init(p)
-				t.X = x
-				t.Sel = p.name()
-				x = t
-
-			case _Lparen:
-				p.next()
-				if p.got(_Type) {
-					t := new(TypeSwitchGuard)
-					t.init(p)
-					t.X = x
-					x = t
-				} else {
-					t := new(AssertExpr)
-					t.init(p)
-					t.X = x
-					t.Type = p.expr()
-					x = t
-				}
-				p.want(_Rparen)
-
-			default:
-				p.syntax_error("expecting name or (")
-				p.advance(_Semi, _Rparen)
-			}
-			if gcCompat {
-				x.init(p)
-			}
-
-		case _Lbrack:
-			p.next()
-			p.xnest++
-
-			var i Expr
-			if p.tok != _Colon {
-				i = p.expr()
-				if p.got(_Rbrack) {
-					// x[i]
-					t := new(IndexExpr)
-					t.init(p)
-					t.X = x
-					t.Index = i
-					x = t
-					p.xnest--
-					break
-				}
-			}
-
-			// x[i:...
-			t := new(SliceExpr)
-			t.init(p)
-			t.X = x
-			t.Index[0] = i
-			p.want(_Colon)
-			if p.tok != _Colon && p.tok != _Rbrack {
-				// x[i:j...
-				t.Index[1] = p.expr()
-			}
-			if p.got(_Colon) {
-				t.Full = true
-				// x[i:j:...]
-				if t.Index[1] == nil {
-					p.error("middle index required in 3-index slice")
-				}
-				if p.tok != _Rbrack {
-					// x[i:j:k...
-					t.Index[2] = p.expr()
-				} else {
-					p.error("final index required in 3-index slice")
-				}
-			}
-			p.want(_Rbrack)
-
-			x = t
-			p.xnest--
-
-		case _Lparen:
-			x = p.call(x)
-
-		case _Lbrace:
-			// operand may have returned a parenthesized complit
-			// type; accept it but complain if we have a complit
-			t := unparen(x)
-			// determine if '{' belongs to a complit or a compound_stmt
-			complit_ok := false
-			switch t.(type) {
-			case *Name, *SelectorExpr:
-				if p.xnest >= 0 {
-					// x is considered a comptype
-					complit_ok = true
-				}
-			case *ArrayType, *SliceType, *StructType, *MapType:
-				// x is a comptype
-				complit_ok = true
-			}
-			if !complit_ok {
-				break loop
-			}
-			if t != x {
-				p.syntax_error("cannot parenthesize type in composite literal")
-				// already progressed, no need to advance
-			}
-			n := p.complitexpr()
-			n.Type = x
-			x = n
-
-		default:
-			break loop
-		}
-	}
-
-	return x
-}
-
-// Element = Expression | LiteralValue .
-func (p *parser) bare_complitexpr() Expr {
-	if trace {
-		defer p.trace("bare_complitexpr")()
-	}
-
-	if p.tok == _Lbrace {
-		// '{' start_complit braced_keyval_list '}'
-		return p.complitexpr()
-	}
-
-	return p.expr()
-}
-
-// LiteralValue = "{" [ ElementList [ "," ] ] "}" .
-func (p *parser) complitexpr() *CompositeLit {
-	if trace {
-		defer p.trace("complitexpr")()
-	}
-
-	x := new(CompositeLit)
-	x.init(p)
-
-	p.want(_Lbrace)
-	p.xnest++
-
-	for p.tok != _EOF && p.tok != _Rbrace {
-		// value
-		e := p.bare_complitexpr()
-		if p.got(_Colon) {
-			// key ':' value
-			l := new(KeyValueExpr)
-			l.init(p)
-			l.Key = e
-			l.Value = p.bare_complitexpr()
-			if gcCompat {
-				l.init(p)
-			}
-			e = l
-			x.NKeys++
-		}
-		x.ElemList = append(x.ElemList, e)
-		if !p.ocomma(_Rbrace) {
-			break
-		}
-	}
-
-	x.EndLine = uint32(p.line)
-	p.xnest--
-	p.want(_Rbrace)
-
-	return x
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-func (p *parser) type_() Expr {
-	if trace {
-		defer p.trace("type_")()
-	}
-
-	if typ := p.tryType(); typ != nil {
-		return typ
-	}
-
-	p.syntax_error("")
-	p.advance()
-	return nil
-}
-
-func indirect(typ Expr) Expr {
-	return &Operation{Op: Mul, X: typ}
-}
-
-// tryType is like type_ but it returns nil if there was no type
-// instead of reporting an error.
-//
-// Type     = TypeName | TypeLit | "(" Type ")" .
-// TypeName = identifier | QualifiedIdent .
-// TypeLit  = ArrayType | StructType | PointerType | FunctionType | InterfaceType |
-// 	      SliceType | MapType | Channel_Type .
-func (p *parser) tryType() Expr {
-	if trace {
-		defer p.trace("tryType")()
-	}
-
-	switch p.tok {
-	case _Star:
-		// ptrtype
-		p.next()
-		return indirect(p.type_())
-
-	case _Arrow:
-		// recvchantype
-		p.next()
-		p.want(_Chan)
-		t := new(ChanType)
-		t.init(p)
-		t.Dir = RecvOnly
-		t.Elem = p.chanElem()
-		return t
-
-	case _Func:
-		// fntype
-		p.next()
-		return p.funcType()
-
-	case _Lbrack:
-		// '[' oexpr ']' ntype
-		// '[' _DotDotDot ']' ntype
-		p.next()
-		p.xnest++
-		if p.got(_Rbrack) {
-			// []T
-			p.xnest--
-			t := new(SliceType)
-			t.init(p)
-			t.Elem = p.type_()
-			return t
-		}
-
-		// [n]T
-		t := new(ArrayType)
-		t.init(p)
-		if !p.got(_DotDotDot) {
-			t.Len = p.expr()
-		}
-		p.want(_Rbrack)
-		p.xnest--
-		t.Elem = p.type_()
-		return t
-
-	case _Chan:
-		// _Chan non_recvchantype
-		// _Chan _Comm ntype
-		p.next()
-		t := new(ChanType)
-		t.init(p)
-		if p.got(_Arrow) {
-			t.Dir = SendOnly
-		}
-		t.Elem = p.chanElem()
-		return t
-
-	case _Map:
-		// _Map '[' ntype ']' ntype
-		p.next()
-		p.want(_Lbrack)
-		t := new(MapType)
-		t.init(p)
-		t.Key = p.type_()
-		p.want(_Rbrack)
-		t.Value = p.type_()
-		return t
-
-	case _Struct:
-		return p.structType()
-
-	case _Interface:
-		return p.interfaceType()
-
-	case _Name:
-		return p.dotname(p.name())
-
-	case _Lparen:
-		p.next()
-		t := p.type_()
-		p.want(_Rparen)
-		return t
-	}
-
-	return nil
-}
-
-func (p *parser) funcType() *FuncType {
-	if trace {
-		defer p.trace("funcType")()
-	}
-
-	typ := new(FuncType)
-	typ.init(p)
-	typ.ParamList = p.paramList()
-	typ.ResultList = p.funcResult()
-	if gcCompat {
-		typ.init(p)
-	}
-	return typ
-}
-
-func (p *parser) chanElem() Expr {
-	if trace {
-		defer p.trace("chanElem")()
-	}
-
-	if typ := p.tryType(); typ != nil {
-		return typ
-	}
-
-	p.syntax_error("missing channel element type")
-	// assume element type is simply absent - don't advance
-	return nil
-}
-
-func (p *parser) dotname(name *Name) Expr {
-	if trace {
-		defer p.trace("dotname")()
-	}
-
-	if p.got(_Dot) {
-		s := new(SelectorExpr)
-		s.init(p)
-		s.X = name
-		s.Sel = p.name()
-		return s
-	}
-	return name
-}
-
-// StructType = "struct" "{" { FieldDecl ";" } "}" .
-func (p *parser) structType() *StructType {
-	if trace {
-		defer p.trace("structType")()
-	}
-
-	typ := new(StructType)
-	typ.init(p)
-
-	p.want(_Struct)
-	p.want(_Lbrace)
-	for p.tok != _EOF && p.tok != _Rbrace {
-		p.fieldDecl(typ)
-		if !p.osemi(_Rbrace) {
-			break
-		}
-	}
-	if gcCompat {
-		typ.init(p)
-	}
-	p.want(_Rbrace)
-
-	return typ
-}
-
-// InterfaceType = "interface" "{" { MethodSpec ";" } "}" .
-func (p *parser) interfaceType() *InterfaceType {
-	if trace {
-		defer p.trace("interfaceType")()
-	}
-
-	typ := new(InterfaceType)
-	typ.init(p)
-
-	p.want(_Interface)
-	p.want(_Lbrace)
-	for p.tok != _EOF && p.tok != _Rbrace {
-		if m := p.methodDecl(); m != nil {
-			typ.MethodList = append(typ.MethodList, m)
-		}
-		if !p.osemi(_Rbrace) {
-			break
-		}
-	}
-	if gcCompat {
-		typ.init(p)
-	}
-	p.want(_Rbrace)
-
-	return typ
-}
-
-// FunctionBody = Block .
-func (p *parser) funcBody() []Stmt {
-	if trace {
-		defer p.trace("funcBody")()
-	}
-
-	if p.got(_Lbrace) {
-		p.fnest++
-		body := p.stmtList()
-		p.fnest--
-		p.want(_Rbrace)
-		if body == nil {
-			body = []Stmt{new(EmptyStmt)}
-		}
-		return body
-	}
-
-	return nil
-}
-
-// Result = Parameters | Type .
-func (p *parser) funcResult() []*Field {
-	if trace {
-		defer p.trace("funcResult")()
-	}
-
-	if p.tok == _Lparen {
-		return p.paramList()
-	}
-
-	if result := p.tryType(); result != nil {
-		f := new(Field)
-		f.init(p)
-		f.Type = result
-		return []*Field{f}
-	}
-
-	return nil
-}
-
-func (p *parser) addField(styp *StructType, name *Name, typ Expr, tag *BasicLit) {
-	if tag != nil {
-		for i := len(styp.FieldList) - len(styp.TagList); i > 0; i-- {
-			styp.TagList = append(styp.TagList, nil)
-		}
-		styp.TagList = append(styp.TagList, tag)
-	}
-
-	f := new(Field)
-	f.init(p)
-	f.Name = name
-	f.Type = typ
-	styp.FieldList = append(styp.FieldList, f)
-
-	if gcCompat && name != nil {
-		f.node = name.node
-	}
-
-	if debug && tag != nil && len(styp.FieldList) != len(styp.TagList) {
-		panic("inconsistent struct field list")
-	}
-}
-
-// FieldDecl      = (IdentifierList Type | AnonymousField) [ Tag ] .
-// AnonymousField = [ "*" ] TypeName .
-// Tag            = string_lit .
-func (p *parser) fieldDecl(styp *StructType) {
-	if trace {
-		defer p.trace("fieldDecl")()
-	}
-
-	var name *Name
-	switch p.tok {
-	case _Name:
-		name = p.name()
-		if p.tok == _Dot || p.tok == _Literal || p.tok == _Semi || p.tok == _Rbrace {
-			// embed oliteral
-			typ := p.qualifiedName(name)
-			tag := p.oliteral()
-			p.addField(styp, nil, typ, tag)
-			return
-		}
-
-		// new_name_list ntype oliteral
-		names := p.nameList(name)
-		typ := p.type_()
-		tag := p.oliteral()
-
-		for _, name := range names {
-			p.addField(styp, name, typ, tag)
-		}
-
-	case _Lparen:
-		p.next()
-		if p.tok == _Star {
-			// '(' '*' embed ')' oliteral
-			p.next()
-			typ := indirect(p.qualifiedName(nil))
-			p.want(_Rparen)
-			tag := p.oliteral()
-			p.addField(styp, nil, typ, tag)
-			p.error("cannot parenthesize embedded type")
-
-		} else {
-			// '(' embed ')' oliteral
-			typ := p.qualifiedName(nil)
-			p.want(_Rparen)
-			tag := p.oliteral()
-			p.addField(styp, nil, typ, tag)
-			p.error("cannot parenthesize embedded type")
-		}
-
-	case _Star:
-		p.next()
-		if p.got(_Lparen) {
-			// '*' '(' embed ')' oliteral
-			typ := indirect(p.qualifiedName(nil))
-			p.want(_Rparen)
-			tag := p.oliteral()
-			p.addField(styp, nil, typ, tag)
-			p.error("cannot parenthesize embedded type")
-
-		} else {
-			// '*' embed oliteral
-			typ := indirect(p.qualifiedName(nil))
-			tag := p.oliteral()
-			p.addField(styp, nil, typ, tag)
-		}
-
-	default:
-		p.syntax_error("expecting field name or embedded type")
-		p.advance(_Semi, _Rbrace)
-	}
-}
-
-func (p *parser) oliteral() *BasicLit {
-	if p.tok == _Literal {
-		b := new(BasicLit)
-		b.init(p)
-		b.Value = p.lit
-		b.Kind = p.kind
-		p.next()
-		return b
-	}
-	return nil
-}
-
-// MethodSpec        = MethodName Signature | InterfaceTypeName .
-// MethodName        = identifier .
-// InterfaceTypeName = TypeName .
-func (p *parser) methodDecl() *Field {
-	if trace {
-		defer p.trace("methodDecl")()
-	}
-
-	switch p.tok {
-	case _Name:
-		name := p.name()
-
-		// accept potential name list but complain
-		hasNameList := false
-		for p.got(_Comma) {
-			p.name()
-			hasNameList = true
-		}
-		if hasNameList {
-			p.syntax_error("name list not allowed in interface type")
-			// already progressed, no need to advance
-		}
-
-		f := new(Field)
-		f.init(p)
-		if p.tok != _Lparen {
-			// packname
-			f.Type = p.qualifiedName(name)
-			return f
-		}
-
-		f.Name = name
-		f.Type = p.funcType()
-		return f
-
-	case _Lparen:
-		p.next()
-		f := new(Field)
-		f.init(p)
-		f.Type = p.qualifiedName(nil)
-		p.want(_Rparen)
-		p.error("cannot parenthesize embedded type")
-		return f
-
-	default:
-		p.syntax_error("")
-		p.advance(_Semi, _Rbrace)
-		return nil
-	}
-}
-
-// ParameterDecl = [ IdentifierList ] [ "..." ] Type .
-func (p *parser) paramDecl() *Field {
-	if trace {
-		defer p.trace("paramDecl")()
-	}
-
-	f := new(Field)
-	f.init(p)
-
-	switch p.tok {
-	case _Name:
-		f.Name = p.name()
-		switch p.tok {
-		case _Name, _Star, _Arrow, _Func, _Lbrack, _Chan, _Map, _Struct, _Interface, _Lparen:
-			// sym name_or_type
-			f.Type = p.type_()
-
-		case _DotDotDot:
-			// sym dotdotdot
-			f.Type = p.dotsType()
-
-		case _Dot:
-			// name_or_type
-			// from dotname
-			f.Type = p.dotname(f.Name)
-			f.Name = nil
-		}
-
-	case _Arrow, _Star, _Func, _Lbrack, _Chan, _Map, _Struct, _Interface, _Lparen:
-		// name_or_type
-		f.Type = p.type_()
-
-	case _DotDotDot:
-		// dotdotdot
-		f.Type = p.dotsType()
-
-	default:
-		p.syntax_error("expecting )")
-		p.advance(_Comma, _Rparen)
-		return nil
-	}
-
-	return f
-}
-
-// ...Type
-func (p *parser) dotsType() *DotsType {
-	if trace {
-		defer p.trace("dotsType")()
-	}
-
-	t := new(DotsType)
-	t.init(p)
-
-	p.want(_DotDotDot)
-	t.Elem = p.tryType()
-	if t.Elem == nil {
-		p.error("final argument in variadic function missing type")
-	}
-
-	return t
-}
-
-// Parameters    = "(" [ ParameterList [ "," ] ] ")" .
-// ParameterList = ParameterDecl { "," ParameterDecl } .
-func (p *parser) paramList() (list []*Field) {
-	if trace {
-		defer p.trace("paramList")()
-	}
-
-	p.want(_Lparen)
-
-	var named int // number of parameters that have an explicit name and type
-	for p.tok != _EOF && p.tok != _Rparen {
-		if par := p.paramDecl(); par != nil {
-			if debug && par.Name == nil && par.Type == nil {
-				panic("parameter without name or type")
-			}
-			if par.Name != nil && par.Type != nil {
-				named++
-			}
-			list = append(list, par)
-		}
-		if !p.ocomma(_Rparen) {
-			break
-		}
-	}
-
-	// distribute parameter types
-	if named == 0 {
-		// all unnamed => found names are named types
-		for _, par := range list {
-			if typ := par.Name; typ != nil {
-				par.Type = typ
-				par.Name = nil
-			}
-		}
-	} else if named != len(list) {
-		// some named => all must be named
-		var typ Expr
-		for i := len(list) - 1; i >= 0; i-- {
-			if par := list[i]; par.Type != nil {
-				typ = par.Type
-				if par.Name == nil {
-					typ = nil // error
-				}
-			} else {
-				par.Type = typ
-			}
-			if typ == nil {
-				p.syntax_error("mixed named and unnamed function parameters")
-				break
-			}
-		}
-	}
-
-	p.want(_Rparen)
-	return
-}
-
-// ----------------------------------------------------------------------------
-// Statements
-
-// We represent x++, x-- as assignments x += ImplicitOne, x -= ImplicitOne.
-// ImplicitOne should not be used elsewhere.
-var ImplicitOne = &BasicLit{Value: "1"}
-
-// SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | ShortVarDecl .
-//
-// simpleStmt may return missing_stmt if labelOk is set.
-func (p *parser) simpleStmt(lhs Expr, rangeOk bool) SimpleStmt {
-	if trace {
-		defer p.trace("simpleStmt")()
-	}
-
-	if rangeOk && p.got(_Range) {
-		// _Range expr
-		if debug && lhs != nil {
-			panic("invalid call of simpleStmt")
-		}
-		return p.rangeClause(nil, false)
-	}
-
-	if lhs == nil {
-		lhs = p.exprList()
-	}
-
-	if _, ok := lhs.(*ListExpr); !ok && p.tok != _Assign && p.tok != _Define {
-		// expr
-		switch p.tok {
-		case _AssignOp:
-			// lhs op= rhs
-			op := p.op
-			p.next()
-			return p.newAssignStmt(op, lhs, p.expr())
-
-		case _IncOp:
-			// lhs++ or lhs--
-			op := p.op
-			p.next()
-			return p.newAssignStmt(op, lhs, ImplicitOne)
-
-		case _Arrow:
-			// lhs <- rhs
-			p.next()
-			s := new(SendStmt)
-			s.init(p)
-			s.Chan = lhs
-			s.Value = p.expr()
-			if gcCompat {
-				s.init(p)
-			}
-			return s
-
-		default:
-			// expr
-			return &ExprStmt{X: lhs}
-		}
-	}
-
-	// expr_list
-	switch p.tok {
-	case _Assign:
-		p.next()
-
-		if rangeOk && p.got(_Range) {
-			// expr_list '=' _Range expr
-			return p.rangeClause(lhs, false)
-		}
-
-		// expr_list '=' expr_list
-		return p.newAssignStmt(0, lhs, p.exprList())
-
-	case _Define:
-		var n node
-		n.init(p)
-		p.next()
-
-		if rangeOk && p.got(_Range) {
-			// expr_list ':=' range expr
-			return p.rangeClause(lhs, true)
-		}
-
-		// expr_list ':=' expr_list
-		rhs := p.exprList()
-
-		if x, ok := rhs.(*TypeSwitchGuard); ok {
-			switch lhs := lhs.(type) {
-			case *Name:
-				x.Lhs = lhs
-			case *ListExpr:
-				p.error(fmt.Sprintf("argument count mismatch: %d = %d", len(lhs.ElemList), 1))
-			default:
-				// TODO(mdempsky): Have Expr types implement Stringer?
-				p.error(fmt.Sprintf("invalid variable name %s in type switch", lhs))
-			}
-			return &ExprStmt{X: x}
-		}
-
-		as := p.newAssignStmt(Def, lhs, rhs)
-		if gcCompat {
-			as.node = n
-		}
-		return as
-
-	default:
-		p.syntax_error("expecting := or = or comma")
-		p.advance(_Semi, _Rbrace)
-		return nil
-	}
-}
-
-func (p *parser) rangeClause(lhs Expr, def bool) *RangeClause {
-	r := new(RangeClause)
-	r.init(p)
-	r.Lhs = lhs
-	r.Def = def
-	r.X = p.expr()
-	if gcCompat {
-		r.init(p)
-	}
-	return r
-}
-
-func (p *parser) newAssignStmt(op Operator, lhs, rhs Expr) *AssignStmt {
-	a := new(AssignStmt)
-	a.init(p)
-	a.Op = op
-	a.Lhs = lhs
-	a.Rhs = rhs
-	return a
-}
-
-func (p *parser) labeledStmt(label *Name) Stmt {
-	if trace {
-		defer p.trace("labeledStmt")()
-	}
-
-	s := new(LabeledStmt)
-	s.init(p)
-	s.Label = label
-
-	p.want(_Colon)
-
-	if p.tok != _Rbrace && p.tok != _EOF {
-		s.Stmt = p.stmt()
-		if s.Stmt == missing_stmt {
-			// report error at line of ':' token
-			p.syntax_error_at(int(label.pos), int(label.line), "missing statement after label")
-			// we are already at the end of the labeled statement - no need to advance
-			return missing_stmt
-		}
-	}
-
-	return s
-}
-
-func (p *parser) blockStmt() *BlockStmt {
-	if trace {
-		defer p.trace("blockStmt")()
-	}
-
-	s := new(BlockStmt)
-	s.init(p)
-	p.want(_Lbrace)
-	s.Body = p.stmtList()
-	p.want(_Rbrace)
-
-	return s
-}
-
-func (p *parser) declStmt(f func(*Group) Decl) *DeclStmt {
-	if trace {
-		defer p.trace("declStmt")()
-	}
-
-	s := new(DeclStmt)
-	s.init(p)
-
-	p.next() // _Const, _Type, or _Var
-	s.DeclList = p.appendGroup(nil, f)
-
-	return s
-}
-
-func (p *parser) forStmt() Stmt {
-	if trace {
-		defer p.trace("forStmt")()
-	}
-
-	s := new(ForStmt)
-	s.init(p)
-
-	p.want(_For)
-	s.Init, s.Cond, s.Post = p.header(true)
-	if gcCompat {
-		s.init(p)
-	}
-	s.Body = p.stmtBody("for clause")
-
-	return s
-}
-
-// stmtBody parses if and for statement bodies.
-func (p *parser) stmtBody(context string) []Stmt {
-	if trace {
-		defer p.trace("stmtBody")()
-	}
-
-	if !p.got(_Lbrace) {
-		p.syntax_error("missing { after " + context)
-		p.advance(_Name, _Rbrace)
-	}
-
-	body := p.stmtList()
-	p.want(_Rbrace)
-
-	return body
-}
-
-func (p *parser) header(forStmt bool) (init SimpleStmt, cond Expr, post SimpleStmt) {
-	if p.tok == _Lbrace {
-		return
-	}
-
-	outer := p.xnest
-	p.xnest = -1
-
-	if p.tok != _Semi {
-		// accept potential varDecl but complain
-		if forStmt && p.got(_Var) {
-			p.error("var declaration not allowed in for initializer")
-		}
-		init = p.simpleStmt(nil, forStmt)
-		// If we have a range clause, we are done.
-		if _, ok := init.(*RangeClause); ok {
-			p.xnest = outer
-			return
-		}
-	}
-
-	var condStmt SimpleStmt
-	if p.got(_Semi) {
-		if forStmt {
-			if p.tok != _Semi {
-				condStmt = p.simpleStmt(nil, false)
-			}
-			p.want(_Semi)
-			if p.tok != _Lbrace {
-				post = p.simpleStmt(nil, false)
-			}
-		} else if p.tok != _Lbrace {
-			condStmt = p.simpleStmt(nil, false)
-		}
-	} else {
-		condStmt = init
-		init = nil
-	}
-
-	// unpack condStmt
-	switch s := condStmt.(type) {
-	case nil:
-		// nothing to do
-	case *ExprStmt:
-		cond = s.X
-	default:
-		p.error("invalid condition, tag, or type switch guard")
-	}
-
-	p.xnest = outer
-	return
-}
-
-func (p *parser) ifStmt() *IfStmt {
-	if trace {
-		defer p.trace("ifStmt")()
-	}
-
-	s := new(IfStmt)
-	s.init(p)
-
-	p.want(_If)
-	s.Init, s.Cond, _ = p.header(false)
-	if s.Cond == nil {
-		p.error("missing condition in if statement")
-	}
-
-	if gcCompat {
-		s.init(p)
-	}
-
-	s.Then = p.stmtBody("if clause")
-
-	if p.got(_Else) {
-		switch p.tok {
-		case _If:
-			s.Else = p.ifStmt()
-		case _Lbrace:
-			s.Else = p.blockStmt()
-		default:
-			p.error("else must be followed by if or statement block")
-			p.advance(_Name, _Rbrace)
-		}
-	}
-
-	return s
-}
-
-func (p *parser) switchStmt() *SwitchStmt {
-	if trace {
-		defer p.trace("switchStmt")()
-	}
-
-	p.want(_Switch)
-	s := new(SwitchStmt)
-	s.init(p)
-
-	s.Init, s.Tag, _ = p.header(false)
-
-	if !p.got(_Lbrace) {
-		p.syntax_error("missing { after switch clause")
-		p.advance(_Case, _Default, _Rbrace)
-	}
-	for p.tok != _EOF && p.tok != _Rbrace {
-		s.Body = append(s.Body, p.caseClause())
-	}
-	p.want(_Rbrace)
-
-	return s
-}
-
-func (p *parser) selectStmt() *SelectStmt {
-	if trace {
-		defer p.trace("selectStmt")()
-	}
-
-	p.want(_Select)
-	s := new(SelectStmt)
-	s.init(p)
-
-	if !p.got(_Lbrace) {
-		p.syntax_error("missing { after select clause")
-		p.advance(_Case, _Default, _Rbrace)
-	}
-	for p.tok != _EOF && p.tok != _Rbrace {
-		s.Body = append(s.Body, p.commClause())
-	}
-	p.want(_Rbrace)
-
-	return s
-}
-
-func (p *parser) caseClause() *CaseClause {
-	if trace {
-		defer p.trace("caseClause")()
-	}
-
-	c := new(CaseClause)
-	c.init(p)
-
-	switch p.tok {
-	case _Case:
-		p.next()
-		c.Cases = p.exprList()
-
-	case _Default:
-		p.next()
-
-	default:
-		p.syntax_error("expecting case or default or }")
-		p.advance(_Case, _Default, _Rbrace)
-	}
-
-	if gcCompat {
-		c.init(p)
-	}
-	p.want(_Colon)
-	c.Body = p.stmtList()
-
-	return c
-}
-
-func (p *parser) commClause() *CommClause {
-	if trace {
-		defer p.trace("commClause")()
-	}
-
-	c := new(CommClause)
-	c.init(p)
-
-	switch p.tok {
-	case _Case:
-		p.next()
-		c.Comm = p.simpleStmt(nil, false)
-
-		// The syntax restricts the possible simple statements here to:
-		//
-		//     lhs <- x (send statement)
-		//     <-x
-		//     lhs = <-x
-		//     lhs := <-x
-		//
-		// All these (and more) are recognized by simpleStmt and invalid
-		// syntax trees are flagged later, during type checking.
-		// TODO(gri) eventually may want to restrict valid syntax trees
-		// here.
-
-	case _Default:
-		p.next()
-
-	default:
-		p.syntax_error("expecting case or default or }")
-		p.advance(_Case, _Default, _Rbrace)
-	}
-
-	if gcCompat {
-		c.init(p)
-	}
-	p.want(_Colon)
-	c.Body = p.stmtList()
-
-	return c
-}
-
-// TODO(gri) find a better solution
-var missing_stmt Stmt = new(EmptyStmt) // = nod(OXXX, nil, nil)
-
-// Statement =
-// 	Declaration | LabeledStmt | SimpleStmt |
-// 	GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt |
-// 	FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt |
-// 	DeferStmt .
-//
-// stmt may return missing_stmt.
-func (p *parser) stmt() Stmt {
-	if trace {
-		defer p.trace("stmt " + p.tok.String())()
-	}
-
-	// Most statements (assignments) start with an identifier;
-	// look for it first before doing anything more expensive.
-	if p.tok == _Name {
-		lhs := p.exprList()
-		if label, ok := lhs.(*Name); ok && p.tok == _Colon {
-			return p.labeledStmt(label)
-		}
-		return p.simpleStmt(lhs, false)
-	}
-
-	switch p.tok {
-	case _Lbrace:
-		return p.blockStmt()
-
-	case _Var:
-		return p.declStmt(p.varDecl)
-
-	case _Const:
-		return p.declStmt(p.constDecl)
-
-	case _Type:
-		return p.declStmt(p.typeDecl)
-
-	case _Operator, _Star:
-		switch p.op {
-		case Add, Sub, Mul, And, Xor, Not:
-			return p.simpleStmt(nil, false) // unary operators
-		}
-
-	case _Literal, _Func, _Lparen, // operands
-		_Lbrack, _Struct, _Map, _Chan, _Interface, // composite types
-		_Arrow: // receive operator
-		return p.simpleStmt(nil, false)
-
-	case _For:
-		return p.forStmt()
-
-	case _Switch:
-		return p.switchStmt()
-
-	case _Select:
-		return p.selectStmt()
-
-	case _If:
-		return p.ifStmt()
-
-	case _Fallthrough:
-		p.next()
-		s := new(BranchStmt)
-		s.init(p)
-		s.Tok = _Fallthrough
-		return s
-		// // will be converted to OFALL
-		// stmt := nod(OXFALL, nil, nil)
-		// stmt.Xoffset = int64(block)
-		// return stmt
-
-	case _Break, _Continue:
-		tok := p.tok
-		p.next()
-		s := new(BranchStmt)
-		s.init(p)
-		s.Tok = tok
-		if p.tok == _Name {
-			s.Label = p.name()
-		}
-		return s
-
-	case _Go, _Defer:
-		return p.callStmt()
-
-	case _Goto:
-		p.next()
-		s := new(BranchStmt)
-		s.init(p)
-		s.Tok = _Goto
-		s.Label = p.name()
-		return s
-		// stmt := nod(OGOTO, p.new_name(p.name()), nil)
-		// stmt.Sym = dclstack // context, for goto restrictions
-		// return stmt
-
-	case _Return:
-		p.next()
-		s := new(ReturnStmt)
-		s.init(p)
-		if p.tok != _Semi && p.tok != _Rbrace {
-			s.Results = p.exprList()
-		}
-		if gcCompat {
-			s.init(p)
-		}
-		return s
-
-	case _Semi:
-		s := new(EmptyStmt)
-		s.init(p)
-		return s
-	}
-
-	return missing_stmt
-}
-
-// StatementList = { Statement ";" } .
-func (p *parser) stmtList() (l []Stmt) {
-	if trace {
-		defer p.trace("stmtList")()
-	}
-
-	for p.tok != _EOF && p.tok != _Rbrace && p.tok != _Case && p.tok != _Default {
-		s := p.stmt()
-		if s == missing_stmt {
-			break
-		}
-		l = append(l, s)
-		// customized version of osemi:
-		// ';' is optional before a closing ')' or '}'
-		if p.tok == _Rparen || p.tok == _Rbrace {
-			continue
-		}
-		if !p.got(_Semi) {
-			p.syntax_error("at end of statement")
-			p.advance(_Semi, _Rbrace)
-		}
-	}
-	return
-}
-
-// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
-func (p *parser) call(fun Expr) *CallExpr {
-	if trace {
-		defer p.trace("call")()
-	}
-
-	// call or conversion
-	// convtype '(' expr ocomma ')'
-	c := new(CallExpr)
-	c.init(p)
-	c.Fun = fun
-
-	p.want(_Lparen)
-	p.xnest++
-
-	for p.tok != _EOF && p.tok != _Rparen {
-		c.ArgList = append(c.ArgList, p.expr()) // expr_or_type
-		c.HasDots = p.got(_DotDotDot)
-		if !p.ocomma(_Rparen) || c.HasDots {
-			break
-		}
-	}
-
-	p.xnest--
-	if gcCompat {
-		c.init(p)
-	}
-	p.want(_Rparen)
-
-	return c
-}
-
-// ----------------------------------------------------------------------------
-// Common productions
-
-func (p *parser) name() *Name {
-	// no tracing to avoid overly verbose output
-
-	n := new(Name)
-	n.init(p)
-
-	if p.tok == _Name {
-		n.Value = p.lit
-		p.next()
-	} else {
-		n.Value = "_"
-		p.syntax_error("expecting name")
-		p.advance()
-	}
-
-	return n
-}
-
-// IdentifierList = identifier { "," identifier } .
-// The first name must be provided.
-func (p *parser) nameList(first *Name) []*Name {
-	if trace {
-		defer p.trace("nameList")()
-	}
-
-	if debug && first == nil {
-		panic("first name not provided")
-	}
-
-	l := []*Name{first}
-	for p.got(_Comma) {
-		l = append(l, p.name())
-	}
-
-	return l
-}
-
-// The first name may be provided, or nil.
-func (p *parser) qualifiedName(name *Name) Expr {
-	if trace {
-		defer p.trace("qualifiedName")()
-	}
-
-	switch {
-	case name != nil:
-		// name is provided
-	case p.tok == _Name:
-		name = p.name()
-	default:
-		name = new(Name)
-		name.init(p)
-		p.syntax_error("expecting name")
-		p.advance(_Dot, _Semi, _Rbrace)
-	}
-
-	return p.dotname(name)
-}
-
-// ExpressionList = Expression { "," Expression } .
-func (p *parser) exprList() Expr {
-	if trace {
-		defer p.trace("exprList")()
-	}
-
-	x := p.expr()
-	if p.got(_Comma) {
-		list := []Expr{x, p.expr()}
-		for p.got(_Comma) {
-			list = append(list, p.expr())
-		}
-		t := new(ListExpr)
-		t.init(p) // TODO(gri) what is the correct thing here?
-		t.ElemList = list
-		x = t
-	}
-	return x
-}
-
-// osemi parses an optional semicolon.
-func (p *parser) osemi(follow token) bool {
-	switch p.tok {
-	case _Semi:
-		p.next()
-		return true
-
-	case _Rparen, _Rbrace:
-		// semicolon is optional before ) or }
-		return true
-	}
-
-	p.syntax_error("expecting semicolon, newline, or " + tokstring(follow))
-	p.advance(follow)
-	return false
-}
-
-// ocomma parses an optional comma.
-func (p *parser) ocomma(follow token) bool {
-	switch p.tok {
-	case _Comma:
-		p.next()
-		return true
-
-	case _Rparen, _Rbrace:
-		// comma is optional before ) or }
-		return true
-	}
-
-	p.syntax_error("expecting comma or " + tokstring(follow))
-	p.advance(follow)
-	return false
-}
-
-// unparen removes all parentheses around an expression.
-func unparen(x Expr) Expr {
-	for {
-		p, ok := x.(*ParenExpr)
-		if !ok {
-			break
-		}
-		x = p.X
-	}
-	return x
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/parser_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/parser_test.go
deleted file mode 100644
index eb32616..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/parser_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/parser_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/parser_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"sync"
-	"testing"
-	"time"
-)
-
-var fast = flag.Bool("fast", false, "parse package files in parallel")
-var src = flag.String("src", "parser.go", "source file to parse")
-var verify = flag.Bool("verify", false, "verify idempotent printing")
-
-func TestParse(t *testing.T) {
-	_, err := ParseFile(*src, nil, nil, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestStdLib(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode")
-	}
-
-	var m1 runtime.MemStats
-	runtime.ReadMemStats(&m1)
-	start := time.Now()
-
-	type parseResult struct {
-		filename string
-		lines    int
-	}
-
-	results := make(chan parseResult)
-	go func() {
-		defer close(results)
-		for _, dir := range []string{
-			runtime.GOROOT(),
-		} {
-			walkDirs(t, dir, func(filename string) {
-				if debug {
-					fmt.Printf("parsing %s\n", filename)
-				}
-				ast, err := ParseFile(filename, nil, nil, 0)
-				if err != nil {
-					t.Error(err)
-					return
-				}
-				if *verify {
-					verifyPrint(filename, ast)
-				}
-				results <- parseResult{filename, ast.Lines}
-			})
-		}
-	}()
-
-	var count, lines int
-	for res := range results {
-		count++
-		lines += res.lines
-		if testing.Verbose() {
-			fmt.Printf("%5d  %s (%d lines)\n", count, res.filename, res.lines)
-		}
-	}
-
-	dt := time.Since(start)
-	var m2 runtime.MemStats
-	runtime.ReadMemStats(&m2)
-	dm := float64(m2.TotalAlloc-m1.TotalAlloc) / 1e6
-
-	fmt.Printf("parsed %d lines (%d files) in %v (%d lines/s)\n", lines, count, dt, int64(float64(lines)/dt.Seconds()))
-	fmt.Printf("allocated %.3fMb (%.3fMb/s)\n", dm, dm/dt.Seconds())
-}
-
-func walkDirs(t *testing.T, dir string, action func(string)) {
-	fis, err := ioutil.ReadDir(dir)
-	if err != nil {
-		t.Error(err)
-		return
-	}
-
-	var files, dirs []string
-	for _, fi := range fis {
-		if fi.Mode().IsRegular() {
-			if strings.HasSuffix(fi.Name(), ".go") {
-				path := filepath.Join(dir, fi.Name())
-				files = append(files, path)
-			}
-		} else if fi.IsDir() && fi.Name() != "testdata" {
-			path := filepath.Join(dir, fi.Name())
-			if !strings.HasSuffix(path, "/test") {
-				dirs = append(dirs, path)
-			}
-		}
-	}
-
-	if *fast {
-		var wg sync.WaitGroup
-		wg.Add(len(files))
-		for _, filename := range files {
-			go func(filename string) {
-				defer wg.Done()
-				action(filename)
-			}(filename)
-		}
-		wg.Wait()
-	} else {
-		for _, filename := range files {
-			action(filename)
-		}
-	}
-
-	for _, dir := range dirs {
-		walkDirs(t, dir, action)
-	}
-}
-
-func verifyPrint(filename string, ast1 *File) {
-	var buf1 bytes.Buffer
-	_, err := Fprint(&buf1, ast1, true)
-	if err != nil {
-		panic(err)
-	}
-
-	ast2, err := ParseBytes(buf1.Bytes(), nil, nil, 0)
-	if err != nil {
-		panic(err)
-	}
-
-	var buf2 bytes.Buffer
-	_, err = Fprint(&buf2, ast2, true)
-	if err != nil {
-		panic(err)
-	}
-
-	if bytes.Compare(buf1.Bytes(), buf2.Bytes()) != 0 {
-		fmt.Printf("--- %s ---\n", filename)
-		fmt.Printf("%s\n", buf1.Bytes())
-		fmt.Println()
-
-		fmt.Printf("--- %s ---\n", filename)
-		fmt.Printf("%s\n", buf2.Bytes())
-		fmt.Println()
-		panic("not equal")
-	}
-}
-
-func TestIssue17697(t *testing.T) {
-	_, err := ParseBytes(nil, nil, nil, 0) // return with parser error, don't panic
-	if err == nil {
-		t.Errorf("no error reported")
-	}
-}
-
-func TestParseFile(t *testing.T) {
-	_, err := ParseFile("", nil, nil, 0)
-	if err == nil {
-		t.Error("missing io error")
-	}
-
-	var first error
-	_, err = ParseFile("", func(err error) {
-		if first == nil {
-			first = err
-		}
-	}, nil, 0)
-	if err == nil || first == nil {
-		t.Error("missing io error")
-	}
-	if err != first {
-		t.Errorf("got %v; want first error %v", err, first)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/printer.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/printer.go
deleted file mode 100644
index 406135b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/printer.go
+++ /dev/null
@@ -1,945 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/printer.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/printer.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements printing of syntax trees in source format.
-
-package syntax
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-)
-
-// TODO(gri) Consider removing the linebreaks flag from this signature.
-// Its likely rarely used in common cases.
-
-func Fprint(w io.Writer, x Node, linebreaks bool) (n int, err error) {
-	p := printer{
-		output:     w,
-		linebreaks: linebreaks,
-	}
-
-	defer func() {
-		n = p.written
-		if e := recover(); e != nil {
-			err = e.(localError).err // re-panics if it's not a localError
-		}
-	}()
-
-	p.print(x)
-	p.flush(_EOF)
-
-	return
-}
-
-func String(n Node) string {
-	var buf bytes.Buffer
-	_, err := Fprint(&buf, n, false)
-	if err != nil {
-		panic(err) // TODO(gri) print something sensible into buf instead
-	}
-	return buf.String()
-}
-
-type ctrlSymbol int
-
-const (
-	none ctrlSymbol = iota
-	semi
-	blank
-	newline
-	indent
-	outdent
-	// comment
-	// eolComment
-)
-
-type whitespace struct {
-	last token
-	kind ctrlSymbol
-	//text string // comment text (possibly ""); valid if kind == comment
-}
-
-type printer struct {
-	output     io.Writer
-	written    int  // number of bytes written
-	linebreaks bool // print linebreaks instead of semis
-
-	indent  int // current indentation level
-	nlcount int // number of consecutive newlines
-
-	pending []whitespace // pending whitespace
-	lastTok token        // last token (after any pending semi) processed by print
-}
-
-// write is a thin wrapper around p.output.Write
-// that takes care of accounting and error handling.
-func (p *printer) write(data []byte) {
-	n, err := p.output.Write(data)
-	p.written += n
-	if err != nil {
-		panic(localError{err})
-	}
-}
-
-var (
-	tabBytes    = []byte("\t\t\t\t\t\t\t\t")
-	newlineByte = []byte("\n")
-	blankByte   = []byte(" ")
-)
-
-func (p *printer) writeBytes(data []byte) {
-	if len(data) == 0 {
-		panic("expected non-empty []byte")
-	}
-	if p.nlcount > 0 && p.indent > 0 {
-		// write indentation
-		n := p.indent
-		for n > len(tabBytes) {
-			p.write(tabBytes)
-			n -= len(tabBytes)
-		}
-		p.write(tabBytes[:n])
-	}
-	p.write(data)
-	p.nlcount = 0
-}
-
-func (p *printer) writeString(s string) {
-	p.writeBytes([]byte(s))
-}
-
-// If impliesSemi returns true for a non-blank line's final token tok,
-// a semicolon is automatically inserted. Vice versa, a semicolon may
-// be omitted in those cases.
-func impliesSemi(tok token) bool {
-	switch tok {
-	case _Name,
-		_Break, _Continue, _Fallthrough, _Return,
-		/*_Inc, _Dec,*/ _Rparen, _Rbrack, _Rbrace: // TODO(gri) fix this
-		return true
-	}
-	return false
-}
-
-// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion
-
-func lineComment(text string) bool {
-	return strings.HasPrefix(text, "//")
-}
-
-func (p *printer) addWhitespace(kind ctrlSymbol, text string) {
-	p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/})
-	switch kind {
-	case semi:
-		p.lastTok = _Semi
-	case newline:
-		p.lastTok = 0
-		// TODO(gri) do we need to handle /*-style comments containing newlines here?
-	}
-}
-
-func (p *printer) flush(next token) {
-	// eliminate semis and redundant whitespace
-	sawNewline := next == _EOF
-	sawParen := next == _Rparen || next == _Rbrace
-	for i := len(p.pending) - 1; i >= 0; i-- {
-		switch p.pending[i].kind {
-		case semi:
-			k := semi
-			if sawParen {
-				sawParen = false
-				k = none // eliminate semi
-			} else if sawNewline && impliesSemi(p.pending[i].last) {
-				sawNewline = false
-				k = none // eliminate semi
-			}
-			p.pending[i].kind = k
-		case newline:
-			sawNewline = true
-		case blank, indent, outdent:
-			// nothing to do
-		// case comment:
-		// 	// A multi-line comment acts like a newline; and a ""
-		// 	// comment implies by definition at least one newline.
-		// 	if text := p.pending[i].text; strings.HasPrefix(text, "/*") && strings.ContainsRune(text, '\n') {
-		// 		sawNewline = true
-		// 	}
-		// case eolComment:
-		// 	// TODO(gri) act depending on sawNewline
-		default:
-			panic("unreachable")
-		}
-	}
-
-	// print pending
-	prev := none
-	for i := range p.pending {
-		switch p.pending[i].kind {
-		case none:
-			// nothing to do
-		case semi:
-			p.writeString(";")
-			p.nlcount = 0
-			prev = semi
-		case blank:
-			if prev != blank {
-				// at most one blank
-				p.writeBytes(blankByte)
-				p.nlcount = 0
-				prev = blank
-			}
-		case newline:
-			const maxEmptyLines = 1
-			if p.nlcount <= maxEmptyLines {
-				p.write(newlineByte)
-				p.nlcount++
-				prev = newline
-			}
-		case indent:
-			p.indent++
-		case outdent:
-			p.indent--
-			if p.indent < 0 {
-				panic("negative indentation")
-			}
-		// case comment:
-		// 	if text := p.pending[i].text; text != "" {
-		// 		p.writeString(text)
-		// 		p.nlcount = 0
-		// 		prev = comment
-		// 	}
-		// 	// TODO(gri) should check that line comments are always followed by newline
-		default:
-			panic("unreachable")
-		}
-	}
-
-	p.pending = p.pending[:0] // re-use underlying array
-}
-
-func mayCombine(prev token, next byte) (b bool) {
-	return // for now
-	// switch prev {
-	// case lexical.Int:
-	// 	b = next == '.' // 1.
-	// case lexical.Add:
-	// 	b = next == '+' // ++
-	// case lexical.Sub:
-	// 	b = next == '-' // --
-	// case lexical.Quo:
-	// 	b = next == '*' // /*
-	// case lexical.Lss:
-	// 	b = next == '-' || next == '<' // <- or <<
-	// case lexical.And:
-	// 	b = next == '&' || next == '^' // && or &^
-	// }
-	// return
-}
-
-func (p *printer) print(args ...interface{}) {
-	for i := 0; i < len(args); i++ {
-		switch x := args[i].(type) {
-		case nil:
-			// we should not reach here but don't crash
-
-		case Node:
-			p.printNode(x)
-
-		case token:
-			// _Name implies an immediately following string
-			// argument which is the actual value to print.
-			var s string
-			if x == _Name {
-				i++
-				if i >= len(args) {
-					panic("missing string argument after _Name")
-				}
-				s = args[i].(string)
-			} else {
-				s = x.String()
-			}
-
-			// TODO(gri) This check seems at the wrong place since it doesn't
-			//           take into account pending white space.
-			if mayCombine(p.lastTok, s[0]) {
-				panic("adjacent tokens combine without whitespace")
-			}
-
-			if x == _Semi {
-				// delay printing of semi
-				p.addWhitespace(semi, "")
-			} else {
-				p.flush(x)
-				p.writeString(s)
-				p.nlcount = 0
-				p.lastTok = x
-			}
-
-		case Operator:
-			if x != 0 {
-				p.flush(_Operator)
-				p.writeString(x.String())
-			}
-
-		case ctrlSymbol:
-			switch x {
-			case none, semi /*, comment*/ :
-				panic("unreachable")
-			case newline:
-				// TODO(gri) need to handle mandatory newlines after a //-style comment
-				if !p.linebreaks {
-					x = blank
-				}
-			}
-			p.addWhitespace(x, "")
-
-		// case *Comment: // comments are not Nodes
-		// 	p.addWhitespace(comment, x.Text)
-
-		default:
-			panic(fmt.Sprintf("unexpected argument %v (%T)", x, x))
-		}
-	}
-}
-
-func (p *printer) printNode(n Node) {
-	// ncom := *n.Comments()
-	// if ncom != nil {
-	// 	// TODO(gri) in general we cannot make assumptions about whether
-	// 	// a comment is a /*- or a //-style comment since the syntax
-	// 	// tree may have been manipulated. Need to make sure the correct
-	// 	// whitespace is emitted.
-	// 	for _, c := range ncom.Alone {
-	// 		p.print(c, newline)
-	// 	}
-	// 	for _, c := range ncom.Before {
-	// 		if c.Text == "" || lineComment(c.Text) {
-	// 			panic("unexpected empty line or //-style 'before' comment")
-	// 		}
-	// 		p.print(c, blank)
-	// 	}
-	// }
-
-	p.printRawNode(n)
-
-	// if ncom != nil && len(ncom.After) > 0 {
-	// 	for i, c := range ncom.After {
-	// 		if i+1 < len(ncom.After) {
-	// 			if c.Text == "" || lineComment(c.Text) {
-	// 				panic("unexpected empty line or //-style non-final 'after' comment")
-	// 			}
-	// 		}
-	// 		p.print(blank, c)
-	// 	}
-	// 	//p.print(newline)
-	// }
-}
-
-func (p *printer) printRawNode(n Node) {
-	switch n := n.(type) {
-	// expressions and types
-	case *Name:
-		p.print(_Name, n.Value) // _Name requires actual value following immediately
-
-	case *BasicLit:
-		p.print(_Name, n.Value) // _Name requires actual value following immediately
-
-	case *FuncLit:
-		p.print(n.Type, blank)
-		p.printBody(n.Body)
-
-	case *CompositeLit:
-		if n.Type != nil {
-			p.print(n.Type)
-		}
-		p.print(_Lbrace)
-		if n.NKeys > 0 && n.NKeys == len(n.ElemList) {
-			p.printExprLines(n.ElemList)
-		} else {
-			p.printExprList(n.ElemList)
-		}
-		p.print(_Rbrace)
-
-	case *ParenExpr:
-		p.print(_Lparen, n.X, _Rparen)
-
-	case *SelectorExpr:
-		p.print(n.X, _Dot, n.Sel)
-
-	case *IndexExpr:
-		p.print(n.X, _Lbrack, n.Index, _Rbrack)
-
-	case *SliceExpr:
-		p.print(n.X, _Lbrack)
-		if i := n.Index[0]; i != nil {
-			p.printNode(i)
-		}
-		p.print(_Colon)
-		if j := n.Index[1]; j != nil {
-			p.printNode(j)
-		}
-		if k := n.Index[2]; k != nil {
-			p.print(_Colon, k)
-		}
-		p.print(_Rbrack)
-
-	case *AssertExpr:
-		p.print(n.X, _Dot, _Lparen)
-		if n.Type != nil {
-			p.printNode(n.Type)
-		} else {
-			p.print(_Type)
-		}
-		p.print(_Rparen)
-
-	case *CallExpr:
-		p.print(n.Fun, _Lparen)
-		p.printExprList(n.ArgList)
-		if n.HasDots {
-			p.print(_DotDotDot)
-		}
-		p.print(_Rparen)
-
-	case *Operation:
-		if n.Y == nil {
-			// unary expr
-			p.print(n.Op)
-			// if n.Op == lexical.Range {
-			// 	p.print(blank)
-			// }
-			p.print(n.X)
-		} else {
-			// binary expr
-			// TODO(gri) eventually take precedence into account
-			// to control possibly missing parentheses
-			p.print(n.X, blank, n.Op, blank, n.Y)
-		}
-
-	case *KeyValueExpr:
-		p.print(n.Key, _Colon, blank, n.Value)
-
-	case *ListExpr:
-		p.printExprList(n.ElemList)
-
-	case *ArrayType:
-		var len interface{} = _DotDotDot
-		if n.Len != nil {
-			len = n.Len
-		}
-		p.print(_Lbrack, len, _Rbrack, n.Elem)
-
-	case *SliceType:
-		p.print(_Lbrack, _Rbrack, n.Elem)
-
-	case *DotsType:
-		p.print(_DotDotDot, n.Elem)
-
-	case *StructType:
-		p.print(_Struct)
-		if len(n.FieldList) > 0 && p.linebreaks {
-			p.print(blank)
-		}
-		p.print(_Lbrace)
-		if len(n.FieldList) > 0 {
-			p.print(newline, indent)
-			p.printFieldList(n.FieldList, n.TagList)
-			p.print(outdent, newline)
-		}
-		p.print(_Rbrace)
-
-	case *FuncType:
-		p.print(_Func)
-		p.printSignature(n)
-
-	case *InterfaceType:
-		p.print(_Interface)
-		if len(n.MethodList) > 0 && p.linebreaks {
-			p.print(blank)
-		}
-		p.print(_Lbrace)
-		if len(n.MethodList) > 0 {
-			p.print(newline, indent)
-			p.printMethodList(n.MethodList)
-			p.print(outdent, newline)
-		}
-		p.print(_Rbrace)
-
-	case *MapType:
-		p.print(_Map, _Lbrack, n.Key, _Rbrack, n.Value)
-
-	case *ChanType:
-		if n.Dir == RecvOnly {
-			p.print(_Arrow)
-		}
-		p.print(_Chan)
-		if n.Dir == SendOnly {
-			p.print(_Arrow)
-		}
-		p.print(blank, n.Elem)
-
-	// statements
-	case *DeclStmt:
-		p.printDecl(n.DeclList)
-
-	case *EmptyStmt:
-		// nothing to print
-
-	case *LabeledStmt:
-		p.print(outdent, n.Label, _Colon, indent, newline, n.Stmt)
-
-	case *ExprStmt:
-		p.print(n.X)
-
-	case *SendStmt:
-		p.print(n.Chan, blank, _Arrow, blank, n.Value)
-
-	case *AssignStmt:
-		p.print(n.Lhs)
-		if n.Rhs == ImplicitOne {
-			// TODO(gri) This is going to break the mayCombine
-			//           check once we enable that again.
-			p.print(n.Op, n.Op) // ++ or --
-		} else {
-			p.print(blank, n.Op, _Assign, blank)
-			p.print(n.Rhs)
-		}
-
-	case *CallStmt:
-		p.print(n.Tok, blank, n.Call)
-
-	case *ReturnStmt:
-		p.print(_Return)
-		if n.Results != nil {
-			p.print(blank, n.Results)
-		}
-
-	case *BranchStmt:
-		p.print(n.Tok)
-		if n.Label != nil {
-			p.print(blank, n.Label)
-		}
-
-	case *BlockStmt:
-		p.printBody(n.Body)
-
-	case *IfStmt:
-		p.print(_If, blank)
-		if n.Init != nil {
-			p.print(n.Init, _Semi, blank)
-		}
-		p.print(n.Cond, blank)
-		p.printBody(n.Then)
-		if n.Else != nil {
-			p.print(blank, _Else, blank, n.Else)
-		}
-
-	case *SwitchStmt:
-		p.print(_Switch, blank)
-		if n.Init != nil {
-			p.print(n.Init, _Semi, blank)
-		}
-		if n.Tag != nil {
-			p.print(n.Tag, blank)
-		}
-		p.printSwitchBody(n.Body)
-
-	case *TypeSwitchGuard:
-		if n.Lhs != nil {
-			p.print(n.Lhs, blank, _Define, blank)
-		}
-		p.print(n.X, _Dot, _Lparen, _Type, _Rparen)
-
-	case *SelectStmt:
-		p.print(_Select, blank) // for now
-		p.printSelectBody(n.Body)
-
-	case *RangeClause:
-		if n.Lhs != nil {
-			tok := _Assign
-			if n.Def {
-				tok = _Define
-			}
-			p.print(n.Lhs, blank, tok, blank)
-		}
-		p.print(_Range, blank, n.X)
-
-	case *ForStmt:
-		p.print(_For, blank)
-		if n.Init == nil && n.Post == nil {
-			if n.Cond != nil {
-				p.print(n.Cond, blank)
-			}
-		} else {
-			if n.Init != nil {
-				p.print(n.Init)
-				// TODO(gri) clean this up
-				if _, ok := n.Init.(*RangeClause); ok {
-					p.print(blank)
-					p.printBody(n.Body)
-					break
-				}
-			}
-			p.print(_Semi, blank)
-			if n.Cond != nil {
-				p.print(n.Cond)
-			}
-			p.print(_Semi, blank)
-			if n.Post != nil {
-				p.print(n.Post, blank)
-			}
-		}
-		p.printBody(n.Body)
-
-	case *ImportDecl:
-		if n.Group == nil {
-			p.print(_Import, blank)
-		}
-		if n.LocalPkgName != nil {
-			p.print(n.LocalPkgName, blank)
-		}
-		p.print(n.Path)
-
-	case *ConstDecl:
-		if n.Group == nil {
-			p.print(_Const, blank)
-		}
-		p.printNameList(n.NameList)
-		if n.Type != nil {
-			p.print(blank, n.Type)
-		}
-		if n.Values != nil {
-			p.print(blank, _Assign, blank, n.Values)
-		}
-
-	case *TypeDecl:
-		if n.Group == nil {
-			p.print(_Type, blank)
-		}
-		p.print(n.Name, blank, n.Type)
-
-	case *VarDecl:
-		if n.Group == nil {
-			p.print(_Var, blank)
-		}
-		p.printNameList(n.NameList)
-		if n.Type != nil {
-			p.print(blank, n.Type)
-		}
-		if n.Values != nil {
-			p.print(blank, _Assign, blank, n.Values)
-		}
-
-	case *FuncDecl:
-		p.print(_Func, blank)
-		if r := n.Recv; r != nil {
-			p.print(_Lparen)
-			if r.Name != nil {
-				p.print(r.Name, blank)
-			}
-			p.printNode(r.Type)
-			p.print(_Rparen, blank)
-		}
-		p.print(n.Name)
-		p.printSignature(n.Type)
-		if n.Body != nil {
-			p.print(blank)
-			p.printBody(n.Body)
-		}
-
-	case *printGroup:
-		p.print(n.Tok, blank, _Lparen)
-		if len(n.Decls) > 0 {
-			p.print(newline, indent)
-			for _, d := range n.Decls {
-				p.printNode(d)
-				p.print(_Semi, newline)
-			}
-			p.print(outdent)
-		}
-		p.print(_Rparen)
-
-	// files
-	case *File:
-		p.print(_Package, blank, n.PkgName)
-		if len(n.DeclList) > 0 {
-			p.print(_Semi, newline, newline)
-			p.printDeclList(n.DeclList)
-		}
-
-	default:
-		panic(fmt.Sprintf("syntax.Iterate: unexpected node type %T", n))
-	}
-}
-
-func (p *printer) printFields(fields []*Field, tags []*BasicLit, i, j int) {
-	if i+1 == j && fields[i].Name == nil {
-		// anonymous field
-		p.printNode(fields[i].Type)
-	} else {
-		for k, f := range fields[i:j] {
-			if k > 0 {
-				p.print(_Comma, blank)
-			}
-			p.printNode(f.Name)
-		}
-		p.print(blank)
-		p.printNode(fields[i].Type)
-	}
-	if i < len(tags) && tags[i] != nil {
-		p.print(blank)
-		p.printNode(tags[i])
-	}
-}
-
-func (p *printer) printFieldList(fields []*Field, tags []*BasicLit) {
-	i0 := 0
-	var typ Expr
-	for i, f := range fields {
-		if f.Name == nil || f.Type != typ {
-			if i0 < i {
-				p.printFields(fields, tags, i0, i)
-				p.print(_Semi, newline)
-				i0 = i
-			}
-			typ = f.Type
-		}
-	}
-	p.printFields(fields, tags, i0, len(fields))
-}
-
-func (p *printer) printMethodList(methods []*Field) {
-	for i, m := range methods {
-		if i > 0 {
-			p.print(_Semi, newline)
-		}
-		if m.Name != nil {
-			p.printNode(m.Name)
-			p.printSignature(m.Type.(*FuncType))
-		} else {
-			p.printNode(m.Type)
-		}
-	}
-}
-
-func (p *printer) printNameList(list []*Name) {
-	for i, x := range list {
-		if i > 0 {
-			p.print(_Comma, blank)
-		}
-		p.printNode(x)
-	}
-}
-
-func (p *printer) printExprList(list []Expr) {
-	for i, x := range list {
-		if i > 0 {
-			p.print(_Comma, blank)
-		}
-		p.printNode(x)
-	}
-}
-
-func (p *printer) printExprLines(list []Expr) {
-	if len(list) > 0 {
-		p.print(newline, indent)
-		for _, x := range list {
-			p.print(x, _Comma, newline)
-		}
-		p.print(outdent)
-	}
-}
-
-func groupFor(d Decl) (token, *Group) {
-	switch d := d.(type) {
-	case *ImportDecl:
-		return _Import, d.Group
-	case *ConstDecl:
-		return _Const, d.Group
-	case *TypeDecl:
-		return _Type, d.Group
-	case *VarDecl:
-		return _Var, d.Group
-	case *FuncDecl:
-		return _Func, nil
-	default:
-		panic("unreachable")
-	}
-}
-
-type printGroup struct {
-	node
-	Tok   token
-	Decls []Decl
-}
-
-func (p *printer) printDecl(list []Decl) {
-	tok, group := groupFor(list[0])
-
-	if group == nil {
-		if len(list) != 1 {
-			panic("unreachable")
-		}
-		p.printNode(list[0])
-		return
-	}
-
-	// if _, ok := list[0].(*EmptyDecl); ok {
-	// 	if len(list) != 1 {
-	// 		panic("unreachable")
-	// 	}
-	// 	// TODO(gri) if there are comments inside the empty
-	// 	// group, we may need to keep the list non-nil
-	// 	list = nil
-	// }
-
-	// printGroup is here for consistent comment handling
-	// (this is not yet used)
-	var pg printGroup
-	// *pg.Comments() = *group.Comments()
-	pg.Tok = tok
-	pg.Decls = list
-	p.printNode(&pg)
-}
-
-func (p *printer) printDeclList(list []Decl) {
-	i0 := 0
-	var tok token
-	var group *Group
-	for i, x := range list {
-		if s, g := groupFor(x); g == nil || g != group {
-			if i0 < i {
-				p.printDecl(list[i0:i])
-				p.print(_Semi, newline)
-				// print empty line between different declaration groups,
-				// different kinds of declarations, or between functions
-				if g != group || s != tok || s == _Func {
-					p.print(newline)
-				}
-				i0 = i
-			}
-			tok, group = s, g
-		}
-	}
-	p.printDecl(list[i0:])
-}
-
-func (p *printer) printSignature(sig *FuncType) {
-	p.printParameterList(sig.ParamList)
-	if list := sig.ResultList; list != nil {
-		p.print(blank)
-		if len(list) == 1 && list[0].Name == nil {
-			p.printNode(list[0].Type)
-		} else {
-			p.printParameterList(list)
-		}
-	}
-}
-
-func (p *printer) printParameterList(list []*Field) {
-	p.print(_Lparen)
-	if len(list) > 0 {
-		for i, f := range list {
-			if i > 0 {
-				p.print(_Comma, blank)
-			}
-			if f.Name != nil {
-				p.printNode(f.Name)
-				if i+1 < len(list) {
-					f1 := list[i+1]
-					if f1.Name != nil && f1.Type == f.Type {
-						continue // no need to print type
-					}
-				}
-				p.print(blank)
-			}
-			p.printNode(f.Type)
-		}
-	}
-	p.print(_Rparen)
-}
-
-func (p *printer) printStmtList(list []Stmt, braces bool) {
-	for i, x := range list {
-		p.print(x, _Semi)
-		if i+1 < len(list) {
-			p.print(newline)
-		} else if braces {
-			// Print an extra semicolon if the last statement is
-			// an empty statement and we are in a braced block
-			// because one semicolon is automatically removed.
-			if _, ok := x.(*EmptyStmt); ok {
-				p.print(x, _Semi)
-			}
-		}
-	}
-}
-
-func (p *printer) printBody(list []Stmt) {
-	p.print(_Lbrace)
-	if len(list) > 0 {
-		p.print(newline, indent)
-		p.printStmtList(list, true)
-		p.print(outdent, newline)
-	}
-	p.print(_Rbrace)
-}
-
-func (p *printer) printSwitchBody(list []*CaseClause) {
-	p.print(_Lbrace)
-	if len(list) > 0 {
-		p.print(newline)
-		for i, c := range list {
-			p.printCaseClause(c, i+1 == len(list))
-			p.print(newline)
-		}
-	}
-	p.print(_Rbrace)
-}
-
-func (p *printer) printSelectBody(list []*CommClause) {
-	p.print(_Lbrace)
-	if len(list) > 0 {
-		p.print(newline)
-		for i, c := range list {
-			p.printCommClause(c, i+1 == len(list))
-			p.print(newline)
-		}
-	}
-	p.print(_Rbrace)
-}
-
-func (p *printer) printCaseClause(c *CaseClause, braces bool) {
-	if c.Cases != nil {
-		p.print(_Case, blank, c.Cases)
-	} else {
-		p.print(_Default)
-	}
-	p.print(_Colon)
-	if len(c.Body) > 0 {
-		p.print(newline, indent)
-		p.printStmtList(c.Body, braces)
-		p.print(outdent)
-	}
-}
-
-func (p *printer) printCommClause(c *CommClause, braces bool) {
-	if c.Comm != nil {
-		p.print(_Case, blank)
-		p.print(c.Comm)
-	} else {
-		p.print(_Default)
-	}
-	p.print(_Colon)
-	if len(c.Body) > 0 {
-		p.print(newline, indent)
-		p.printStmtList(c.Body, braces)
-		p.print(outdent)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/printer_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/printer_test.go
deleted file mode 100644
index 489e15a..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/printer_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/printer_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/printer_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"fmt"
-	"os"
-	"testing"
-)
-
-func TestPrint(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode")
-	}
-
-	ast, err := ParseFile(*src, nil, nil, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	Fprint(os.Stdout, ast, true)
-	fmt.Println()
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/scanner.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/scanner.go
deleted file mode 100644
index 630a055..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/scanner.go
+++ /dev/null
@@ -1,667 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/scanner.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/scanner.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"fmt"
-	"io"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-type scanner struct {
-	source
-	nlsemi bool // if set '\n' and EOF translate to ';'
-	pragma Pragma
-
-	// current token, valid after calling next()
-	pos, line int
-	tok       token
-	lit       string   // valid if tok is _Name or _Literal
-	kind      LitKind  // valid if tok is _Literal
-	op        Operator // valid if tok is _Operator, _AssignOp, or _IncOp
-	prec      int      // valid if tok is _Operator, _AssignOp, or _IncOp
-
-	pragh PragmaHandler
-}
-
-func (s *scanner) init(src io.Reader, errh ErrorHandler, pragh PragmaHandler) {
-	s.source.init(src, errh)
-	s.nlsemi = false
-	s.pragh = pragh
-}
-
-func (s *scanner) next() {
-	nlsemi := s.nlsemi
-	s.nlsemi = false
-
-redo:
-	// skip white space
-	c := s.getr()
-	for c == ' ' || c == '\t' || c == '\n' && !nlsemi || c == '\r' {
-		c = s.getr()
-	}
-
-	// token start
-	s.pos, s.line = s.source.pos0(), s.source.line0
-
-	if isLetter(c) || c >= utf8.RuneSelf && (unicode.IsLetter(c) || s.isCompatRune(c, true)) {
-		s.ident()
-		return
-	}
-
-	switch c {
-	case -1:
-		if nlsemi {
-			s.tok = _Semi
-			break
-		}
-		s.tok = _EOF
-
-	case '\n':
-		s.tok = _Semi
-
-	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-		s.number(c)
-
-	case '"':
-		s.stdString()
-
-	case '`':
-		s.rawString()
-
-	case '\'':
-		s.rune()
-
-	case '(':
-		s.tok = _Lparen
-
-	case '[':
-		s.tok = _Lbrack
-
-	case '{':
-		s.tok = _Lbrace
-
-	case ',':
-		s.tok = _Comma
-
-	case ';':
-		s.tok = _Semi
-
-	case ')':
-		s.nlsemi = true
-		s.tok = _Rparen
-
-	case ']':
-		s.nlsemi = true
-		s.tok = _Rbrack
-
-	case '}':
-		s.nlsemi = true
-		s.tok = _Rbrace
-
-	case ':':
-		if s.getr() == '=' {
-			s.tok = _Define
-			break
-		}
-		s.ungetr()
-		s.tok = _Colon
-
-	case '.':
-		c = s.getr()
-		if isDigit(c) {
-			s.ungetr()
-			s.source.r0-- // make sure '.' is part of literal (line cannot have changed)
-			s.number('.')
-			break
-		}
-		if c == '.' {
-			c = s.getr()
-			if c == '.' {
-				s.tok = _DotDotDot
-				break
-			}
-			s.ungetr()
-			s.source.r0-- // make next ungetr work (line cannot have changed)
-		}
-		s.ungetr()
-		s.tok = _Dot
-
-	case '+':
-		s.op, s.prec = Add, precAdd
-		c = s.getr()
-		if c != '+' {
-			goto assignop
-		}
-		s.nlsemi = true
-		s.tok = _IncOp
-
-	case '-':
-		s.op, s.prec = Sub, precAdd
-		c = s.getr()
-		if c != '-' {
-			goto assignop
-		}
-		s.nlsemi = true
-		s.tok = _IncOp
-
-	case '*':
-		s.op, s.prec = Mul, precMul
-		// don't goto assignop - want _Star token
-		if s.getr() == '=' {
-			s.tok = _AssignOp
-			break
-		}
-		s.ungetr()
-		s.tok = _Star
-
-	case '/':
-		c = s.getr()
-		if c == '/' {
-			s.lineComment()
-			goto redo
-		}
-		if c == '*' {
-			s.fullComment()
-			if s.source.line > s.line && nlsemi {
-				// A multi-line comment acts like a newline;
-				// it translates to a ';' if nlsemi is set.
-				s.tok = _Semi
-				break
-			}
-			goto redo
-		}
-		s.op, s.prec = Div, precMul
-		goto assignop
-
-	case '%':
-		s.op, s.prec = Rem, precMul
-		c = s.getr()
-		goto assignop
-
-	case '&':
-		c = s.getr()
-		if c == '&' {
-			s.op, s.prec = AndAnd, precAndAnd
-			s.tok = _Operator
-			break
-		}
-		s.op, s.prec = And, precMul
-		if c == '^' {
-			s.op = AndNot
-			c = s.getr()
-		}
-		goto assignop
-
-	case '|':
-		c = s.getr()
-		if c == '|' {
-			s.op, s.prec = OrOr, precOrOr
-			s.tok = _Operator
-			break
-		}
-		s.op, s.prec = Or, precAdd
-		goto assignop
-
-	case '~':
-		s.error("bitwise complement operator is ^")
-		fallthrough
-
-	case '^':
-		s.op, s.prec = Xor, precAdd
-		c = s.getr()
-		goto assignop
-
-	case '<':
-		c = s.getr()
-		if c == '=' {
-			s.op, s.prec = Leq, precCmp
-			s.tok = _Operator
-			break
-		}
-		if c == '<' {
-			s.op, s.prec = Shl, precMul
-			c = s.getr()
-			goto assignop
-		}
-		if c == '-' {
-			s.tok = _Arrow
-			break
-		}
-		s.ungetr()
-		s.op, s.prec = Lss, precCmp
-		s.tok = _Operator
-
-	case '>':
-		c = s.getr()
-		if c == '=' {
-			s.op, s.prec = Geq, precCmp
-			s.tok = _Operator
-			break
-		}
-		if c == '>' {
-			s.op, s.prec = Shr, precMul
-			c = s.getr()
-			goto assignop
-		}
-		s.ungetr()
-		s.op, s.prec = Gtr, precCmp
-		s.tok = _Operator
-
-	case '=':
-		if s.getr() == '=' {
-			s.op, s.prec = Eql, precCmp
-			s.tok = _Operator
-			break
-		}
-		s.ungetr()
-		s.tok = _Assign
-
-	case '!':
-		if s.getr() == '=' {
-			s.op, s.prec = Neq, precCmp
-			s.tok = _Operator
-			break
-		}
-		s.ungetr()
-		s.op, s.prec = Not, 0
-		s.tok = _Operator
-
-	default:
-		s.tok = 0
-		s.error(fmt.Sprintf("illegal character %#U", c))
-		goto redo
-	}
-
-	return
-
-assignop:
-	if c == '=' {
-		s.tok = _AssignOp
-		return
-	}
-	s.ungetr()
-	s.tok = _Operator
-}
-
-func isLetter(c rune) bool {
-	return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_'
-}
-
-func isDigit(c rune) bool {
-	return '0' <= c && c <= '9'
-}
-
-func (s *scanner) ident() {
-	s.startLit()
-
-	// accelerate common case (7bit ASCII)
-	c := s.getr()
-	for isLetter(c) || isDigit(c) {
-		c = s.getr()
-	}
-
-	// general case
-	if c >= utf8.RuneSelf {
-		for unicode.IsLetter(c) || c == '_' || unicode.IsDigit(c) || s.isCompatRune(c, false) {
-			c = s.getr()
-		}
-	}
-	s.ungetr()
-
-	lit := s.stopLit()
-
-	// possibly a keyword
-	if len(lit) >= 2 {
-		if tok := keywordMap[hash(lit)]; tok != 0 && tokstrings[tok] == string(lit) {
-			s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok)
-			s.tok = tok
-			return
-		}
-	}
-
-	s.nlsemi = true
-	s.lit = string(lit)
-	s.tok = _Name
-}
-
-func (s *scanner) isCompatRune(c rune, start bool) bool {
-	if !gcCompat || c < utf8.RuneSelf {
-		return false
-	}
-	if start && unicode.IsNumber(c) {
-		s.error(fmt.Sprintf("identifier cannot begin with digit %#U", c))
-	} else {
-		s.error(fmt.Sprintf("invalid identifier character %#U", c))
-	}
-	return true
-}
-
-// hash is a perfect hash function for keywords.
-// It assumes that s has at least length 2.
-func hash(s []byte) uint {
-	return (uint(s[0])<<4 ^ uint(s[1]) + uint(len(s))) & uint(len(keywordMap)-1)
-}
-
-var keywordMap [1 << 6]token // size must be power of two
-
-func init() {
-	// populate keywordMap
-	for tok := _Break; tok <= _Var; tok++ {
-		h := hash([]byte(tokstrings[tok]))
-		if keywordMap[h] != 0 {
-			panic("imperfect hash")
-		}
-		keywordMap[h] = tok
-	}
-}
-
-func (s *scanner) number(c rune) {
-	s.startLit()
-
-	if c != '.' {
-		s.kind = IntLit // until proven otherwise
-		if c == '0' {
-			c = s.getr()
-			if c == 'x' || c == 'X' {
-				// hex
-				c = s.getr()
-				hasDigit := false
-				for isDigit(c) || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
-					c = s.getr()
-					hasDigit = true
-				}
-				if !hasDigit {
-					s.error("malformed hex constant")
-				}
-				goto done
-			}
-
-			// decimal 0, octal, or float
-			has8or9 := false
-			for isDigit(c) {
-				if c > '7' {
-					has8or9 = true
-				}
-				c = s.getr()
-			}
-			if c != '.' && c != 'e' && c != 'E' && c != 'i' {
-				// octal
-				if has8or9 {
-					s.error("malformed octal constant")
-				}
-				goto done
-			}
-
-		} else {
-			// decimal or float
-			for isDigit(c) {
-				c = s.getr()
-			}
-		}
-	}
-
-	// float
-	if c == '.' {
-		s.kind = FloatLit
-		c = s.getr()
-		for isDigit(c) {
-			c = s.getr()
-		}
-	}
-
-	// exponent
-	if c == 'e' || c == 'E' {
-		s.kind = FloatLit
-		c = s.getr()
-		if c == '-' || c == '+' {
-			c = s.getr()
-		}
-		if !isDigit(c) {
-			s.error("malformed floating-point constant exponent")
-		}
-		for isDigit(c) {
-			c = s.getr()
-		}
-	}
-
-	// complex
-	if c == 'i' {
-		s.kind = ImagLit
-		s.getr()
-	}
-
-done:
-	s.ungetr()
-	s.nlsemi = true
-	s.lit = string(s.stopLit())
-	s.tok = _Literal
-}
-
-func (s *scanner) stdString() {
-	s.startLit()
-
-	for {
-		r := s.getr()
-		if r == '"' {
-			break
-		}
-		if r == '\\' {
-			s.escape('"')
-			continue
-		}
-		if r == '\n' {
-			s.ungetr() // assume newline is not part of literal
-			s.error("newline in string")
-			break
-		}
-		if r < 0 {
-			s.error_at(s.pos, s.line, "string not terminated")
-			break
-		}
-	}
-
-	s.nlsemi = true
-	s.lit = string(s.stopLit())
-	s.kind = StringLit
-	s.tok = _Literal
-}
-
-func (s *scanner) rawString() {
-	s.startLit()
-
-	for {
-		r := s.getr()
-		if r == '`' {
-			break
-		}
-		if r < 0 {
-			s.error_at(s.pos, s.line, "string not terminated")
-			break
-		}
-	}
-	// We leave CRs in the string since they are part of the
-	// literal (even though they are not part of the literal
-	// value).
-
-	s.nlsemi = true
-	s.lit = string(s.stopLit())
-	s.kind = StringLit
-	s.tok = _Literal
-}
-
-func (s *scanner) rune() {
-	s.startLit()
-
-	r := s.getr()
-	ok := false
-	if r == '\'' {
-		s.error("empty character literal or unescaped ' in character literal")
-	} else if r == '\n' {
-		s.ungetr() // assume newline is not part of literal
-		s.error("newline in character literal")
-	} else {
-		ok = true
-		if r == '\\' {
-			ok = s.escape('\'')
-		}
-	}
-
-	r = s.getr()
-	if r != '\'' {
-		// only report error if we're ok so far
-		if ok {
-			s.error("missing '")
-		}
-		s.ungetr()
-	}
-
-	s.nlsemi = true
-	s.lit = string(s.stopLit())
-	s.kind = RuneLit
-	s.tok = _Literal
-}
-
-func (s *scanner) lineComment() {
-	// recognize pragmas
-	var prefix string
-	r := s.getr()
-	if s.pragh == nil {
-		goto skip
-	}
-
-	switch r {
-	case 'g':
-		prefix = "go:"
-	case 'l':
-		prefix = "line "
-	default:
-		goto skip
-	}
-
-	s.startLit()
-	for _, m := range prefix {
-		if r != m {
-			s.stopLit()
-			goto skip
-		}
-		r = s.getr()
-	}
-
-	for r >= 0 {
-		if r == '\n' {
-			s.ungetr()
-			break
-		}
-		r = s.getr()
-	}
-	s.pragma |= s.pragh(0, s.line, strings.TrimSuffix(string(s.stopLit()), "\r"))
-	return
-
-skip:
-	// consume line
-	for r != '\n' && r >= 0 {
-		r = s.getr()
-	}
-	s.ungetr() // don't consume '\n' - needed for nlsemi logic
-}
-
-func (s *scanner) fullComment() {
-	for {
-		r := s.getr()
-		for r == '*' {
-			r = s.getr()
-			if r == '/' {
-				return
-			}
-		}
-		if r < 0 {
-			s.error_at(s.pos, s.line, "comment not terminated")
-			return
-		}
-	}
-}
-
-func (s *scanner) escape(quote rune) bool {
-	var n int
-	var base, max uint32
-
-	c := s.getr()
-	switch c {
-	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
-		return true
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		n, base, max = 3, 8, 255
-	case 'x':
-		c = s.getr()
-		n, base, max = 2, 16, 255
-	case 'u':
-		c = s.getr()
-		n, base, max = 4, 16, unicode.MaxRune
-	case 'U':
-		c = s.getr()
-		n, base, max = 8, 16, unicode.MaxRune
-	default:
-		if c < 0 {
-			return true // complain in caller about EOF
-		}
-		s.error("unknown escape sequence")
-		return false
-	}
-
-	var x uint32
-	for i := n; i > 0; i-- {
-		d := base
-		switch {
-		case isDigit(c):
-			d = uint32(c) - '0'
-		case 'a' <= c && c <= 'f':
-			d = uint32(c) - ('a' - 10)
-		case 'A' <= c && c <= 'F':
-			d = uint32(c) - ('A' - 10)
-		}
-		if d >= base {
-			if c < 0 {
-				return true // complain in caller about EOF
-			}
-			if gcCompat {
-				name := "hex"
-				if base == 8 {
-					name = "octal"
-				}
-				s.error(fmt.Sprintf("non-%s character in escape sequence: %c", name, c))
-			} else {
-				if c != quote {
-					s.error(fmt.Sprintf("illegal character %#U in escape sequence", c))
-				} else {
-					s.error("escape sequence incomplete")
-				}
-			}
-			s.ungetr()
-			return false
-		}
-		// d < base
-		x = x*base + d
-		c = s.getr()
-	}
-	s.ungetr()
-
-	if x > max && base == 8 {
-		s.error(fmt.Sprintf("octal escape value > 255: %d", x))
-		return false
-	}
-
-	if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ {
-		s.error("escape sequence is invalid Unicode code point")
-		return false
-	}
-
-	return true
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/scanner_test.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/scanner_test.go
deleted file mode 100644
index c4202fe..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/scanner_test.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/scanner_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/scanner_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"fmt"
-	"os"
-	"testing"
-)
-
-func TestScanner(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode")
-	}
-
-	src, err := os.Open("parser.go")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer src.Close()
-
-	var s scanner
-	s.init(src, nil, nil)
-	for {
-		s.next()
-		if s.tok == _EOF {
-			break
-		}
-		switch s.tok {
-		case _Name:
-			fmt.Println(s.line, s.tok, "=>", s.lit)
-		case _Operator:
-			fmt.Println(s.line, s.tok, "=>", s.op, s.prec)
-		default:
-			fmt.Println(s.line, s.tok)
-		}
-	}
-}
-
-func TestTokens(t *testing.T) {
-	// make source
-	var buf []byte
-	for i, s := range sampleTokens {
-		buf = append(buf, "\t\t\t\t"[:i&3]...)     // leading indentation
-		buf = append(buf, s.src...)                // token
-		buf = append(buf, "        "[:i&7]...)     // trailing spaces
-		buf = append(buf, "/* foo */ // bar\n"...) // comments
-	}
-
-	// scan source
-	var got scanner
-	got.init(&bytesReader{buf}, nil, nil)
-	got.next()
-	for i, want := range sampleTokens {
-		nlsemi := false
-
-		if got.line != i+1 {
-			t.Errorf("got line %d; want %d", got.line, i+1)
-		}
-
-		if got.tok != want.tok {
-			t.Errorf("got tok = %s; want %s", got.tok, want.tok)
-			continue
-		}
-
-		switch want.tok {
-		case _Name, _Literal:
-			if got.lit != want.src {
-				t.Errorf("got lit = %q; want %q", got.lit, want.src)
-				continue
-			}
-			nlsemi = true
-
-		case _Operator, _AssignOp, _IncOp:
-			if got.op != want.op {
-				t.Errorf("got op = %s; want %s", got.op, want.op)
-				continue
-			}
-			if got.prec != want.prec {
-				t.Errorf("got prec = %d; want %d", got.prec, want.prec)
-				continue
-			}
-			nlsemi = want.tok == _IncOp
-
-		case _Rparen, _Rbrack, _Rbrace, _Break, _Continue, _Fallthrough, _Return:
-			nlsemi = true
-		}
-
-		if nlsemi {
-			got.next()
-			if got.tok != _Semi {
-				t.Errorf("got tok = %s; want ;", got.tok)
-				continue
-			}
-		}
-
-		got.next()
-	}
-
-	if got.tok != _EOF {
-		t.Errorf("got %q; want _EOF", got.tok)
-	}
-}
-
-var sampleTokens = [...]struct {
-	tok  token
-	src  string
-	op   Operator
-	prec int
-}{
-	// name samples
-	{_Name, "x", 0, 0},
-	{_Name, "X123", 0, 0},
-	{_Name, "foo", 0, 0},
-	{_Name, "Foo123", 0, 0},
-	{_Name, "foo_bar", 0, 0},
-	{_Name, "_", 0, 0},
-	{_Name, "_foobar", 0, 0},
-	{_Name, "a۰۱۸", 0, 0},
-	{_Name, "foo६४", 0, 0},
-	{_Name, "bar9876", 0, 0},
-	{_Name, "ŝ", 0, 0},
-	{_Name, "ŝfoo", 0, 0},
-
-	// literal samples
-	{_Literal, "0", 0, 0},
-	{_Literal, "1", 0, 0},
-	{_Literal, "12345", 0, 0},
-	{_Literal, "123456789012345678890123456789012345678890", 0, 0},
-	{_Literal, "01234567", 0, 0},
-	{_Literal, "0x0", 0, 0},
-	{_Literal, "0xcafebabe", 0, 0},
-	{_Literal, "0.", 0, 0},
-	{_Literal, "0.e0", 0, 0},
-	{_Literal, "0.e-1", 0, 0},
-	{_Literal, "0.e+123", 0, 0},
-	{_Literal, ".0", 0, 0},
-	{_Literal, ".0E00", 0, 0},
-	{_Literal, ".0E-0123", 0, 0},
-	{_Literal, ".0E+12345678901234567890", 0, 0},
-	{_Literal, ".45e1", 0, 0},
-	{_Literal, "3.14159265", 0, 0},
-	{_Literal, "1e0", 0, 0},
-	{_Literal, "1e+100", 0, 0},
-	{_Literal, "1e-100", 0, 0},
-	{_Literal, "2.71828e-1000", 0, 0},
-	{_Literal, "0i", 0, 0},
-	{_Literal, "1i", 0, 0},
-	{_Literal, "012345678901234567889i", 0, 0},
-	{_Literal, "123456789012345678890i", 0, 0},
-	{_Literal, "0.i", 0, 0},
-	{_Literal, ".0i", 0, 0},
-	{_Literal, "3.14159265i", 0, 0},
-	{_Literal, "1e0i", 0, 0},
-	{_Literal, "1e+100i", 0, 0},
-	{_Literal, "1e-100i", 0, 0},
-	{_Literal, "2.71828e-1000i", 0, 0},
-	{_Literal, "'a'", 0, 0},
-	{_Literal, "'\\000'", 0, 0},
-	{_Literal, "'\\xFF'", 0, 0},
-	{_Literal, "'\\uff16'", 0, 0},
-	{_Literal, "'\\U0000ff16'", 0, 0},
-	{_Literal, "`foobar`", 0, 0},
-	{_Literal, "`foo\tbar`", 0, 0},
-	{_Literal, "`\r`", 0, 0},
-
-	// operators
-	{_Operator, "||", OrOr, precOrOr},
-
-	{_Operator, "&&", AndAnd, precAndAnd},
-
-	{_Operator, "==", Eql, precCmp},
-	{_Operator, "!=", Neq, precCmp},
-	{_Operator, "<", Lss, precCmp},
-	{_Operator, "<=", Leq, precCmp},
-	{_Operator, ">", Gtr, precCmp},
-	{_Operator, ">=", Geq, precCmp},
-
-	{_Operator, "+", Add, precAdd},
-	{_Operator, "-", Sub, precAdd},
-	{_Operator, "|", Or, precAdd},
-	{_Operator, "^", Xor, precAdd},
-
-	{_Star, "*", Mul, precMul},
-	{_Operator, "/", Div, precMul},
-	{_Operator, "%", Rem, precMul},
-	{_Operator, "&", And, precMul},
-	{_Operator, "&^", AndNot, precMul},
-	{_Operator, "<<", Shl, precMul},
-	{_Operator, ">>", Shr, precMul},
-
-	// assignment operations
-	{_AssignOp, "+=", Add, precAdd},
-	{_AssignOp, "-=", Sub, precAdd},
-	{_AssignOp, "|=", Or, precAdd},
-	{_AssignOp, "^=", Xor, precAdd},
-
-	{_AssignOp, "*=", Mul, precMul},
-	{_AssignOp, "/=", Div, precMul},
-	{_AssignOp, "%=", Rem, precMul},
-	{_AssignOp, "&=", And, precMul},
-	{_AssignOp, "&^=", AndNot, precMul},
-	{_AssignOp, "<<=", Shl, precMul},
-	{_AssignOp, ">>=", Shr, precMul},
-
-	// other operations
-	{_IncOp, "++", Add, precAdd},
-	{_IncOp, "--", Sub, precAdd},
-	{_Assign, "=", 0, 0},
-	{_Define, ":=", 0, 0},
-	{_Arrow, "<-", 0, 0},
-
-	// delimiters
-	{_Lparen, "(", 0, 0},
-	{_Lbrack, "[", 0, 0},
-	{_Lbrace, "{", 0, 0},
-	{_Rparen, ")", 0, 0},
-	{_Rbrack, "]", 0, 0},
-	{_Rbrace, "}", 0, 0},
-	{_Comma, ",", 0, 0},
-	{_Semi, ";", 0, 0},
-	{_Colon, ":", 0, 0},
-	{_Dot, ".", 0, 0},
-	{_DotDotDot, "...", 0, 0},
-
-	// keywords
-	{_Break, "break", 0, 0},
-	{_Case, "case", 0, 0},
-	{_Chan, "chan", 0, 0},
-	{_Const, "const", 0, 0},
-	{_Continue, "continue", 0, 0},
-	{_Default, "default", 0, 0},
-	{_Defer, "defer", 0, 0},
-	{_Else, "else", 0, 0},
-	{_Fallthrough, "fallthrough", 0, 0},
-	{_For, "for", 0, 0},
-	{_Func, "func", 0, 0},
-	{_Go, "go", 0, 0},
-	{_Goto, "goto", 0, 0},
-	{_If, "if", 0, 0},
-	{_Import, "import", 0, 0},
-	{_Interface, "interface", 0, 0},
-	{_Map, "map", 0, 0},
-	{_Package, "package", 0, 0},
-	{_Range, "range", 0, 0},
-	{_Return, "return", 0, 0},
-	{_Select, "select", 0, 0},
-	{_Struct, "struct", 0, 0},
-	{_Switch, "switch", 0, 0},
-	{_Type, "type", 0, 0},
-	{_Var, "var", 0, 0},
-}
-
-func TestScanErrors(t *testing.T) {
-	for _, test := range []struct {
-		src, msg  string
-		pos, line int
-	}{
-		// Note: Positions for lexical errors are the earliest position
-		// where the error is apparent, not the beginning of the respective
-		// token.
-
-		// rune-level errors
-		{"fo\x00o", "invalid NUL character", 2, 1},
-		{"foo\n\ufeff bar", "invalid BOM in the middle of the file", 4, 2},
-		{"foo\n\n\xff    ", "invalid UTF-8 encoding", 5, 3},
-
-		// token-level errors
-		{"x + ~y", "bitwise complement operator is ^", 4, 1},
-		{"foo$bar = 0", "illegal character U+0024 '$'", 3, 1},
-		{"const x = 0xyz", "malformed hex constant", 12, 1},
-		{"0123456789", "malformed octal constant", 10, 1},
-		{"0123456789. /* foobar", "comment not terminated", 12, 1},   // valid float constant
-		{"0123456789e0 /*\nfoobar", "comment not terminated", 13, 1}, // valid float constant
-		{"var a, b = 08, 07\n", "malformed octal constant", 13, 1},
-		{"(x + 1.0e+x)", "malformed floating-point constant exponent", 10, 1},
-
-		{`''`, "empty character literal or unescaped ' in character literal", 1, 1},
-		{"'\n", "newline in character literal", 1, 1},
-		{`'\`, "missing '", 2, 1},
-		{`'\'`, "missing '", 3, 1},
-		{`'\x`, "missing '", 3, 1},
-		{`'\x'`, "non-hex character in escape sequence: '", 3, 1},
-		{`'\y'`, "unknown escape sequence", 2, 1},
-		{`'\x0'`, "non-hex character in escape sequence: '", 4, 1},
-		{`'\00'`, "non-octal character in escape sequence: '", 4, 1},
-		{`'\377' /*`, "comment not terminated", 7, 1}, // valid octal escape
-		{`'\378`, "non-octal character in escape sequence: 8", 4, 1},
-		{`'\400'`, "octal escape value > 255: 256", 5, 1},
-		{`'xx`, "missing '", 2, 1},
-
-		{"\"\n", "newline in string", 1, 1},
-		{`"`, "string not terminated", 0, 1},
-		{`"foo`, "string not terminated", 0, 1},
-		{"`", "string not terminated", 0, 1},
-		{"`foo", "string not terminated", 0, 1},
-		{"/*/", "comment not terminated", 0, 1},
-		{"/*\n\nfoo", "comment not terminated", 0, 1},
-		{"/*\n\nfoo", "comment not terminated", 0, 1},
-		{`"\`, "string not terminated", 0, 1},
-		{`"\"`, "string not terminated", 0, 1},
-		{`"\x`, "string not terminated", 0, 1},
-		{`"\x"`, "non-hex character in escape sequence: \"", 3, 1},
-		{`"\y"`, "unknown escape sequence", 2, 1},
-		{`"\x0"`, "non-hex character in escape sequence: \"", 4, 1},
-		{`"\00"`, "non-octal character in escape sequence: \"", 4, 1},
-		{`"\377" /*`, "comment not terminated", 7, 1}, // valid octal escape
-		{`"\378"`, "non-octal character in escape sequence: 8", 4, 1},
-		{`"\400"`, "octal escape value > 255: 256", 5, 1},
-
-		{`s := "foo\z"`, "unknown escape sequence", 10, 1},
-		{`s := "foo\z00\nbar"`, "unknown escape sequence", 10, 1},
-		{`"\x`, "string not terminated", 0, 1},
-		{`"\x"`, "non-hex character in escape sequence: \"", 3, 1},
-		{`var s string = "\x"`, "non-hex character in escape sequence: \"", 18, 1},
-		{`return "\Uffffffff"`, "escape sequence is invalid Unicode code point", 18, 1},
-
-		// former problem cases
-		{"package p\n\n\xef", "invalid UTF-8 encoding", 11, 3},
-	} {
-		var s scanner
-		nerrors := 0
-		s.init(&bytesReader{[]byte(test.src)}, func(err error) {
-			nerrors++
-			// only check the first error
-			e := err.(Error) // we know it's an Error
-			if nerrors == 1 {
-				if e.Msg != test.msg {
-					t.Errorf("%q: got msg = %q; want %q", test.src, e.Msg, test.msg)
-				}
-				if e.Pos != test.pos {
-					t.Errorf("%q: got pos = %d; want %d", test.src, e.Pos, test.pos)
-				}
-				if e.Line != test.line {
-					t.Errorf("%q: got line = %d; want %d", test.src, e.Line, test.line)
-				}
-			} else if nerrors > 1 {
-				t.Errorf("%q: got unexpected %q at pos = %d, line = %d", test.src, e.Msg, e.Pos, e.Line)
-			}
-		}, nil)
-
-		for {
-			s.next()
-			if s.tok == _EOF {
-				break
-			}
-		}
-
-		if nerrors == 0 {
-			t.Errorf("%q: got no error; want %q", test.src, test.msg)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/source.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/source.go
deleted file mode 100644
index 5ca2a1e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/source.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/source.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/source.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"io"
-	"unicode/utf8"
-)
-
-// buf [...read...|...|...unread...|s|...free...]
-//         ^      ^   ^            ^
-//         |      |   |            |
-//        suf     r0  r            w
-
-type source struct {
-	src   io.Reader
-	errh  ErrorHandler
-	first error // first error encountered
-
-	// source buffer
-	buf         [4 << 10]byte
-	offs        int   // source offset of buf
-	r0, r, w    int   // previous/current read and write buf positions, excluding sentinel
-	line0, line int   // previous/current line
-	err         error // pending io error
-
-	// literal buffer
-	lit []byte // literal prefix
-	suf int    // literal suffix; suf >= 0 means we are scanning a literal
-}
-
-func (s *source) init(src io.Reader, errh ErrorHandler) {
-	s.src = src
-	s.errh = errh
-	s.first = nil
-
-	s.buf[0] = utf8.RuneSelf // terminate with sentinel
-	s.offs = 0
-	s.r0, s.r, s.w = 0, 0, 0
-	s.line0, s.line = 1, 1
-	s.err = nil
-
-	s.lit = s.lit[:0]
-	s.suf = -1
-}
-
-func (s *source) error(msg string) {
-	s.error_at(s.pos0(), s.line0, msg)
-}
-
-func (s *source) error_at(pos, line int, msg string) {
-	err := Error{pos, line, msg}
-	if s.first == nil {
-		s.first = err
-	}
-	if s.errh == nil {
-		panic(s.first)
-	}
-	s.errh(err)
-}
-
-// pos0 returns the byte position of the last character read.
-func (s *source) pos0() int {
-	return s.offs + s.r0
-}
-
-func (s *source) ungetr() {
-	s.r, s.line = s.r0, s.line0
-}
-
-func (s *source) getr() rune {
-redo:
-	s.r0, s.line0 = s.r, s.line
-
-	// We could avoid at least one test that is always taken in the
-	// for loop below by duplicating the common case code (ASCII)
-	// here since we always have at least the sentinel (utf8.RuneSelf)
-	// in the buffer. Measure and optimize if necessary.
-
-	// make sure we have at least one rune in buffer, or we are at EOF
-	for s.r+utf8.UTFMax > s.w && !utf8.FullRune(s.buf[s.r:s.w]) && s.err == nil && s.w-s.r < len(s.buf) {
-		s.fill() // s.w-s.r < len(s.buf) => buffer is not full
-	}
-
-	// common case: ASCII and enough bytes
-	// (invariant: s.buf[s.w] == utf8.RuneSelf)
-	if b := s.buf[s.r]; b < utf8.RuneSelf {
-		s.r++
-		if b == 0 {
-			s.error("invalid NUL character")
-			goto redo
-		}
-		if b == '\n' {
-			s.line++
-		}
-		return rune(b)
-	}
-
-	// EOF
-	if s.r == s.w {
-		if s.err != io.EOF {
-			s.error(s.err.Error())
-		}
-		return -1
-	}
-
-	// uncommon case: not ASCII
-	r, w := utf8.DecodeRune(s.buf[s.r:s.w])
-	s.r += w
-
-	if r == utf8.RuneError && w == 1 {
-		s.error("invalid UTF-8 encoding")
-		goto redo
-	}
-
-	// BOMs are only allowed as the first character in a file
-	const BOM = 0xfeff
-	if r == BOM {
-		if s.r0 > 0 { // s.r0 is always > 0 after 1st character (fill will set it to 1)
-			s.error("invalid BOM in the middle of the file")
-		}
-		goto redo
-	}
-
-	return r
-}
-
-func (s *source) fill() {
-	// Slide unread bytes to beginning but preserve last read char
-	// (for one ungetr call) plus one extra byte (for a 2nd ungetr
-	// call, only for ".." character sequence and float literals
-	// starting with ".").
-	if s.r0 > 1 {
-		// save literal prefix, if any
-		// (We see at most one ungetr call while reading
-		// a literal, so make sure s.r0 remains in buf.)
-		if s.suf >= 0 {
-			s.lit = append(s.lit, s.buf[s.suf:s.r0]...)
-			s.suf = 1 // == s.r0 after slide below
-		}
-		s.offs += s.r0 - 1
-		r := s.r - s.r0 + 1 // last read char plus one byte
-		s.w = r + copy(s.buf[r:], s.buf[s.r:s.w])
-		s.r = r
-		s.r0 = 1
-	}
-
-	// read more data: try a limited number of times
-	for i := 100; i > 0; i-- {
-		n, err := s.src.Read(s.buf[s.w : len(s.buf)-1]) // -1 to leave space for sentinel
-		if n < 0 {
-			panic("negative read") // incorrect underlying io.Reader implementation
-		}
-		s.w += n
-		if n > 0 || err != nil {
-			s.buf[s.w] = utf8.RuneSelf // sentinel
-			if err != nil {
-				s.err = err
-			}
-			return
-		}
-	}
-
-	s.err = io.ErrNoProgress
-}
-
-func (s *source) startLit() {
-	s.suf = s.r0
-	s.lit = s.lit[:0] // reuse lit
-}
-
-func (s *source) stopLit() []byte {
-	lit := s.buf[s.suf:s.r]
-	if len(s.lit) > 0 {
-		lit = append(s.lit, lit...)
-	}
-	s.suf = -1 // no pending literal
-	return lit
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/syntax.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/syntax.go
deleted file mode 100644
index 03a7bc7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/syntax.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/syntax.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/syntax.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import (
-	"fmt"
-	"io"
-	"os"
-)
-
-// Mode describes the parser mode.
-type Mode uint
-
-// Error describes a syntax error. Error implements the error interface.
-type Error struct {
-	// TODO(gri) decide what we really need here
-	Pos  int // byte offset from file start
-	Line int // line (starting with 1)
-	Msg  string
-}
-
-func (err Error) Error() string {
-	return fmt.Sprintf("%d: %s", err.Line, err.Msg)
-}
-
-var _ error = Error{} // verify that Error implements error
-
-// An ErrorHandler is called for each error encountered reading a .go file.
-type ErrorHandler func(err error)
-
-// A Pragma value is a set of flags that augment a function or
-// type declaration. Callers may assign meaning to the flags as
-// appropriate.
-type Pragma uint16
-
-// A PragmaHandler is used to process //line and //go: directives as
-// they're scanned. The returned Pragma value will be unioned into the
-// next FuncDecl node.
-type PragmaHandler func(pos, line int, text string) Pragma
-
-// Parse parses a single Go source file from src and returns the corresponding
-// syntax tree. If there are syntax errors, Parse will return the first error
-// encountered.
-//
-// If errh != nil, it is called with each error encountered, and Parse will
-// process as much source as possible. If errh is nil, Parse will terminate
-// immediately upon encountering an error.
-//
-// If a PragmaHandler is provided, it is called with each pragma encountered.
-//
-// The Mode argument is currently ignored.
-func Parse(src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, err error) {
-	defer func() {
-		if p := recover(); p != nil {
-			var ok bool
-			if err, ok = p.(Error); ok {
-				return
-			}
-			panic(p)
-		}
-	}()
-
-	var p parser
-	p.init(src, errh, pragh)
-	p.next()
-	return p.file(), p.first
-}
-
-// ParseBytes behaves like Parse but it reads the source from the []byte slice provided.
-func ParseBytes(src []byte, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) {
-	return Parse(&bytesReader{src}, errh, pragh, mode)
-}
-
-type bytesReader struct {
-	data []byte
-}
-
-func (r *bytesReader) Read(p []byte) (int, error) {
-	if len(r.data) > 0 {
-		n := copy(p, r.data)
-		r.data = r.data[n:]
-		return n, nil
-	}
-	return 0, io.EOF
-}
-
-// ParseFile behaves like Parse but it reads the source from the named file.
-func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) {
-	src, err := os.Open(filename)
-	if err != nil {
-		if errh != nil {
-			errh(err)
-		}
-		return nil, err
-	}
-	defer src.Close()
-	return Parse(src, errh, pragh, mode)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/tokens.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/tokens.go
deleted file mode 100644
index a651877..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/syntax/tokens.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/tokens.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/syntax/tokens.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syntax
-
-import "fmt"
-
-type token uint
-
-const (
-	_ token = iota
-	_EOF
-
-	// names and literals
-	_Name
-	_Literal
-
-	// operators and operations
-	_Operator // excluding '*' (_Star)
-	_AssignOp
-	_IncOp
-	_Assign
-	_Define
-	_Arrow
-	_Star
-
-	// delimiters
-	_Lparen
-	_Lbrack
-	_Lbrace
-	_Rparen
-	_Rbrack
-	_Rbrace
-	_Comma
-	_Semi
-	_Colon
-	_Dot
-	_DotDotDot
-
-	// keywords
-	_Break
-	_Case
-	_Chan
-	_Const
-	_Continue
-	_Default
-	_Defer
-	_Else
-	_Fallthrough
-	_For
-	_Func
-	_Go
-	_Goto
-	_If
-	_Import
-	_Interface
-	_Map
-	_Package
-	_Range
-	_Return
-	_Select
-	_Struct
-	_Switch
-	_Type
-	_Var
-
-	tokenCount
-)
-
-const (
-	// for BranchStmt
-	Break       = _Break
-	Continue    = _Continue
-	Fallthrough = _Fallthrough
-	Goto        = _Goto
-
-	// for CallStmt
-	Go    = _Go
-	Defer = _Defer
-)
-
-var tokstrings = [...]string{
-	// source control
-	_EOF: "EOF",
-
-	// names and literals
-	_Name:    "name",
-	_Literal: "literal",
-
-	// operators and operations
-	_Operator: "op",
-	_AssignOp: "op=",
-	_IncOp:    "opop",
-	_Assign:   "=",
-	_Define:   ":=",
-	_Arrow:    "<-",
-	_Star:     "*",
-
-	// delimiters
-	_Lparen:    "(",
-	_Lbrack:    "[",
-	_Lbrace:    "{",
-	_Rparen:    ")",
-	_Rbrack:    "]",
-	_Rbrace:    "}",
-	_Comma:     ",",
-	_Semi:      ";",
-	_Colon:     ":",
-	_Dot:       ".",
-	_DotDotDot: "...",
-
-	// keywords
-	_Break:       "break",
-	_Case:        "case",
-	_Chan:        "chan",
-	_Const:       "const",
-	_Continue:    "continue",
-	_Default:     "default",
-	_Defer:       "defer",
-	_Else:        "else",
-	_Fallthrough: "fallthrough",
-	_For:         "for",
-	_Func:        "func",
-	_Go:          "go",
-	_Goto:        "goto",
-	_If:          "if",
-	_Import:      "import",
-	_Interface:   "interface",
-	_Map:         "map",
-	_Package:     "package",
-	_Range:       "range",
-	_Return:      "return",
-	_Select:      "select",
-	_Struct:      "struct",
-	_Switch:      "switch",
-	_Type:        "type",
-	_Var:         "var",
-}
-
-func (tok token) String() string {
-	var s string
-	if 0 <= tok && int(tok) < len(tokstrings) {
-		s = tokstrings[tok]
-	}
-	if s == "" {
-		s = fmt.Sprintf("<tok-%d>", tok)
-	}
-	return s
-}
-
-// Make sure we have at most 64 tokens so we can use them in a set.
-const _ uint64 = 1 << (tokenCount - 1)
-
-// contains reports whether tok is in tokset.
-func contains(tokset uint64, tok token) bool {
-	return tokset&(1<<tok) != 0
-}
-
-type LitKind uint
-
-const (
-	IntLit LitKind = iota
-	FloatLit
-	ImagLit
-	RuneLit
-	StringLit
-)
-
-type Operator uint
-
-const (
-	_    Operator = iota
-	Def           // :=
-	Not           // !
-	Recv          // <-
-
-	// precOrOr
-	OrOr // ||
-
-	// precAndAnd
-	AndAnd // &&
-
-	// precCmp
-	Eql // ==
-	Neq // !=
-	Lss // <
-	Leq // <=
-	Gtr // >
-	Geq // >=
-
-	// precAdd
-	Add // +
-	Sub // -
-	Or  // |
-	Xor // ^
-
-	// precMul
-	Mul    // *
-	Div    // /
-	Rem    // %
-	And    // &
-	AndNot // &^
-	Shl    // <<
-	Shr    // >>
-)
-
-var opstrings = [...]string{
-	// prec == 0
-	Def:  ":", // : in :=
-	Not:  "!",
-	Recv: "<-",
-
-	// precOrOr
-	OrOr: "||",
-
-	// precAndAnd
-	AndAnd: "&&",
-
-	// precCmp
-	Eql: "==",
-	Neq: "!=",
-	Lss: "<",
-	Leq: "<=",
-	Gtr: ">",
-	Geq: ">=",
-
-	// precAdd
-	Add: "+",
-	Sub: "-",
-	Or:  "|",
-	Xor: "^",
-
-	// precMul
-	Mul:    "*",
-	Div:    "/",
-	Rem:    "%",
-	And:    "&",
-	AndNot: "&^",
-	Shl:    "<<",
-	Shr:    ">>",
-}
-
-func (op Operator) String() string {
-	var s string
-	if 0 <= op && int(op) < len(opstrings) {
-		s = opstrings[op]
-	}
-	if s == "" {
-		s = fmt.Sprintf("<op-%d>", op)
-	}
-	return s
-}
-
-// Operator precedences
-const (
-	_ = iota
-	precOrOr
-	precAndAnd
-	precCmp
-	precAdd
-	precMul
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/387.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/387.go
deleted file mode 100644
index b3b66d9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/387.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/387.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/387.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-	"math"
-)
-
-// Generates code for v using 387 instructions.  Reports whether
-// the instruction was handled by this routine.
-func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) bool {
-	// The SSA compiler pretends that it has an SSE backend.
-	// If we don't have one of those, we need to translate
-	// all the SSE ops to equivalent 387 ops. That's what this
-	// function does.
-
-	switch v.Op {
-	case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
-		p := gc.Prog(loadPush(v.Type))
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		popAndSave(s, v)
-		return true
-	case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
-		p := gc.Prog(loadPush(v.Type))
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8:
-		p := gc.Prog(loadPush(v.Type))
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		switch v.Op {
-		case ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
-			p.From.Scale = 1
-			p.From.Index = v.Args[1].Reg()
-		case ssa.Op386MOVSSloadidx4:
-			p.From.Scale = 4
-			p.From.Index = v.Args[1].Reg()
-		case ssa.Op386MOVSDloadidx8:
-			p.From.Scale = 8
-			p.From.Index = v.Args[1].Reg()
-		}
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore:
-		// Push to-be-stored value on top of stack.
-		push(s, v.Args[1])
-
-		// Pop and store value.
-		var op obj.As
-		switch v.Op {
-		case ssa.Op386MOVSSstore:
-			op = x86.AFMOVFP
-		case ssa.Op386MOVSDstore:
-			op = x86.AFMOVDP
-		}
-		p := gc.Prog(op)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-		return true
-
-	case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVSDstoreidx8:
-		push(s, v.Args[2])
-		var op obj.As
-		switch v.Op {
-		case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSSstoreidx4:
-			op = x86.AFMOVFP
-		case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8:
-			op = x86.AFMOVDP
-		}
-		p := gc.Prog(op)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-		switch v.Op {
-		case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
-			p.To.Scale = 1
-			p.To.Index = v.Args[1].Reg()
-		case ssa.Op386MOVSSstoreidx4:
-			p.To.Scale = 4
-			p.To.Index = v.Args[1].Reg()
-		case ssa.Op386MOVSDstoreidx8:
-			p.To.Scale = 8
-			p.To.Index = v.Args[1].Reg()
-		}
-		return true
-
-	case ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
-		ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD:
-		if v.Reg() != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-
-		// Push arg1 on top of stack
-		push(s, v.Args[1])
-
-		// Set precision if needed.  64 bits is the default.
-		switch v.Op {
-		case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
-			p := gc.Prog(x86.AFSTCW)
-			s.AddrScratch(&p.To)
-			p = gc.Prog(x86.AFLDCW)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Sym = gc.Linksym(gc.Pkglookup("controlWord32", gc.Runtimepkg))
-		}
-
-		var op obj.As
-		switch v.Op {
-		case ssa.Op386ADDSS, ssa.Op386ADDSD:
-			op = x86.AFADDDP
-		case ssa.Op386SUBSS, ssa.Op386SUBSD:
-			op = x86.AFSUBDP
-		case ssa.Op386MULSS, ssa.Op386MULSD:
-			op = x86.AFMULDP
-		case ssa.Op386DIVSS, ssa.Op386DIVSD:
-			op = x86.AFDIVDP
-		}
-		p := gc.Prog(op)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = s.SSEto387[v.Reg()] + 1
-
-		// Restore precision if needed.
-		switch v.Op {
-		case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
-			p := gc.Prog(x86.AFLDCW)
-			s.AddrScratch(&p.From)
-		}
-
-		return true
-
-	case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
-		push(s, v.Args[0])
-
-		// Compare.
-		p := gc.Prog(x86.AFUCOMP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1
-
-		// Save AX.
-		p = gc.Prog(x86.AMOVL)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_AX
-		s.AddrScratch(&p.To)
-
-		// Move status word into AX.
-		p = gc.Prog(x86.AFSTSW)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_AX
-
-		// Then move the flags we need to the integer flags.
-		gc.Prog(x86.ASAHF)
-
-		// Restore AX.
-		p = gc.Prog(x86.AMOVL)
-		s.AddrScratch(&p.From)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_AX
-
-		return true
-
-	case ssa.Op386SQRTSD:
-		push(s, v.Args[0])
-		gc.Prog(x86.AFSQRT)
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386FCHS:
-		push(s, v.Args[0])
-		gc.Prog(x86.AFCHS)
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD:
-		p := gc.Prog(x86.AMOVL)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		s.AddrScratch(&p.To)
-		p = gc.Prog(x86.AFMOVL)
-		s.AddrScratch(&p.From)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL:
-		push(s, v.Args[0])
-
-		// Save control word.
-		p := gc.Prog(x86.AFSTCW)
-		s.AddrScratch(&p.To)
-		p.To.Offset += 4
-
-		// Load control word which truncates (rounds towards zero).
-		p = gc.Prog(x86.AFLDCW)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_EXTERN
-		p.From.Sym = gc.Linksym(gc.Pkglookup("controlWord64trunc", gc.Runtimepkg))
-
-		// Now do the conversion.
-		p = gc.Prog(x86.AFMOVLP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		s.AddrScratch(&p.To)
-		p = gc.Prog(x86.AMOVL)
-		s.AddrScratch(&p.From)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-		// Restore control word.
-		p = gc.Prog(x86.AFLDCW)
-		s.AddrScratch(&p.From)
-		p.From.Offset += 4
-		return true
-
-	case ssa.Op386CVTSS2SD:
-		// float32 -> float64 is a nop
-		push(s, v.Args[0])
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386CVTSD2SS:
-		// Round to nearest float32.
-		push(s, v.Args[0])
-		p := gc.Prog(x86.AFMOVFP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		s.AddrScratch(&p.To)
-		p = gc.Prog(x86.AFMOVF)
-		s.AddrScratch(&p.From)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		popAndSave(s, v)
-		return true
-
-	case ssa.OpLoadReg:
-		if !v.Type.IsFloat() {
-			return false
-		}
-		// Load+push the value we need.
-		p := gc.Prog(loadPush(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		// Move the value to its assigned register.
-		popAndSave(s, v)
-		return true
-
-	case ssa.OpStoreReg:
-		if !v.Type.IsFloat() {
-			return false
-		}
-		push(s, v.Args[0])
-		var op obj.As
-		switch v.Type.Size() {
-		case 4:
-			op = x86.AFMOVFP
-		case 8:
-			op = x86.AFMOVDP
-		}
-		p := gc.Prog(op)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		gc.AddrAuto(&p.To, v)
-		return true
-
-	case ssa.OpCopy:
-		if !v.Type.IsFloat() {
-			return false
-		}
-		push(s, v.Args[0])
-		popAndSave(s, v)
-		return true
-
-	case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLdefer, ssa.Op386CALLgo, ssa.Op386CALLinter:
-		flush387(s)  // Calls must empty the FP stack.
-		return false // then issue the call as normal
-	}
-	return false
-}
-
-// push pushes v onto the floating-point stack.  v must be in a register.
-func push(s *gc.SSAGenState, v *ssa.Value) {
-	p := gc.Prog(x86.AFMOVD)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = s.SSEto387[v.Reg()]
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = x86.REG_F0
-}
-
-// popAndSave pops a value off of the floating-point stack and stores
-// it in the register assigned to v.
-func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
-	r := v.Reg()
-	if _, ok := s.SSEto387[r]; ok {
-		// Pop value, write to correct register.
-		p := gc.Prog(x86.AFMOVDP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = s.SSEto387[v.Reg()] + 1
-	} else {
-		// Don't actually pop value. This 387 register is now the
-		// new home for the not-yet-assigned-a-home SSE register.
-		// Increase the register mapping of all other registers by one.
-		for rSSE, r387 := range s.SSEto387 {
-			s.SSEto387[rSSE] = r387 + 1
-		}
-		s.SSEto387[r] = x86.REG_F0
-	}
-}
-
-// loadPush returns the opcode for load+push of the given type.
-func loadPush(t ssa.Type) obj.As {
-	if t.Size() == 4 {
-		return x86.AFMOVF
-	}
-	return x86.AFMOVD
-}
-
-// flush387 removes all entries from the 387 floating-point stack.
-func flush387(s *gc.SSAGenState) {
-	for k := range s.SSEto387 {
-		p := gc.Prog(x86.AFMOVDP)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_F0
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_F0
-		delete(s.SSEto387, k)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/galign.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/galign.go
deleted file mode 100644
index 813b811..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/galign.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/galign.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/galign.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-	"fmt"
-	"os"
-)
-
-func Init() {
-	gc.Thearch.LinkArch = &x86.Link386
-	gc.Thearch.REGSP = x86.REGSP
-	switch v := obj.GO386; v {
-	case "387":
-		gc.Thearch.Use387 = true
-	case "sse2":
-	default:
-		fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
-		gc.Exit(1)
-	}
-	gc.Thearch.MAXWIDTH = (1 << 32) - 1
-
-	gc.Thearch.Defframe = defframe
-	gc.Thearch.Proginfo = proginfo
-
-	gc.Thearch.SSAMarkMoves = ssaMarkMoves
-	gc.Thearch.SSAGenValue = ssaGenValue
-	gc.Thearch.SSAGenBlock = ssaGenBlock
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/ggen.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/ggen.go
deleted file mode 100644
index 866b84c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/ggen.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/ggen.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/ggen.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-func defframe(ptxt *obj.Prog) {
-	// fill in argument size, stack size
-	ptxt.To.Type = obj.TYPE_TEXTSIZE
-
-	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
-	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
-	ptxt.To.Offset = int64(frame)
-
-	// insert code to zero ambiguously live variables
-	// so that the garbage collector only sees initialized values
-	// when it looks for pointers.
-	p := ptxt
-
-	hi := int64(0)
-	lo := hi
-	ax := uint32(0)
-	for _, n := range gc.Curfn.Func.Dcl {
-		if !n.Name.Needzero {
-			continue
-		}
-		if n.Class != gc.PAUTO {
-			gc.Fatalf("needzero class %d", n.Class)
-		}
-		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
-			gc.Fatalf("var %L has size %d offset %d", n, int(n.Type.Width), int(n.Xoffset))
-		}
-		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
-			// merge with range we already have
-			lo = n.Xoffset
-
-			continue
-		}
-
-		// zero old range
-		p = zerorange(p, int64(frame), lo, hi, &ax)
-
-		// set new range
-		hi = n.Xoffset + n.Type.Width
-
-		lo = n.Xoffset
-	}
-
-	// zero final range
-	zerorange(p, int64(frame), lo, hi, &ax)
-}
-
-func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
-	cnt := hi - lo
-	if cnt == 0 {
-		return p
-	}
-	if *ax == 0 {
-		p = gc.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
-		*ax = 1
-	}
-
-	if cnt <= int64(4*gc.Widthreg) {
-		for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
-			p = gc.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
-		}
-	} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
-		p = gc.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
-		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-	} else {
-		p = gc.Appendpp(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
-		p = gc.Appendpp(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
-		p = gc.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-		p = gc.Appendpp(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-	}
-
-	return p
-}
-
-func ginsnop() {
-	p := gc.Prog(x86.AXCHGL)
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = x86.REG_AX
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = x86.REG_AX
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/prog.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/prog.go
deleted file mode 100644
index 764c142..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/prog.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/prog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/prog.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-const (
-	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
-	RightRdwr uint32 = gc.RightRead | gc.RightWrite
-)
-
-// This table gives the basic information about instruction
-// generated by the compiler and processed in the optimizer.
-// See opt.h for bit definitions.
-//
-// Instructions not generated need not be listed.
-// As an exception to that rule, we typically write down all the
-// size variants of an operation even if we just use a subset.
-//
-// The table is formatted for 8-space tabs.
-var progtable = [x86.ALAST & obj.AMask]gc.ProgInfo{
-	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
-	obj.ATEXT:     {Flags: gc.Pseudo},
-	obj.AFUNCDATA: {Flags: gc.Pseudo},
-	obj.APCDATA:   {Flags: gc.Pseudo},
-	obj.AUNDEF:    {Flags: gc.Break},
-	obj.AUSEFIELD: {Flags: gc.OK},
-	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
-	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
-
-	// NOP is an internal no-op that also stands
-	// for USED and SET annotations, not the Intel opcode.
-	obj.ANOP:                   {Flags: gc.LeftRead | gc.RightWrite},
-	x86.AADCL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.AADCW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.AADDB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AADDSD & obj.AMask:     {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.AADDSS & obj.AMask:     {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.AANDB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AANDL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AANDW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	obj.ACALL:                  {Flags: gc.RightAddr | gc.Call | gc.KillCarry},
-	x86.ACDQ & obj.AMask:       {Flags: gc.OK},
-	x86.ACWD & obj.AMask:       {Flags: gc.OK},
-	x86.ACLD & obj.AMask:       {Flags: gc.OK},
-	x86.ASTD & obj.AMask:       {Flags: gc.OK},
-	x86.ACMPB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACMPL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACMPW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACOMISD & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACOMISS & obj.AMask:    {Flags: gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ACVTSD2SL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSD2SS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSL2SD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSL2SS & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSS2SD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTSS2SL & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTTSD2SL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ACVTTSS2SL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.ADECB & obj.AMask:      {Flags: gc.SizeB | RightRdwr},
-	x86.ADECL & obj.AMask:      {Flags: gc.SizeL | RightRdwr},
-	x86.ADECW & obj.AMask:      {Flags: gc.SizeW | RightRdwr},
-	x86.ADIVB & obj.AMask:      {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.ADIVL & obj.AMask:      {Flags: gc.SizeL | gc.LeftRead | gc.SetCarry},
-	x86.ADIVW & obj.AMask:      {Flags: gc.SizeW | gc.LeftRead | gc.SetCarry},
-	x86.ADIVSD & obj.AMask:     {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ADIVSS & obj.AMask:     {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.AFLDCW & obj.AMask:     {Flags: gc.SizeW | gc.LeftAddr},
-	x86.AFSTCW & obj.AMask:     {Flags: gc.SizeW | gc.RightAddr},
-	x86.AFSTSW & obj.AMask:     {Flags: gc.SizeW | gc.RightAddr | gc.RightWrite},
-	x86.AFADDD & obj.AMask:     {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFADDDP & obj.AMask:    {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFADDF & obj.AMask:     {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
-	x86.AFCOMD & obj.AMask:     {Flags: gc.SizeD | gc.LeftAddr | gc.RightRead},
-	x86.AFCOMDP & obj.AMask:    {Flags: gc.SizeD | gc.LeftAddr | gc.RightRead},
-	x86.AFCOMDPP & obj.AMask:   {Flags: gc.SizeD | gc.LeftAddr | gc.RightRead},
-	x86.AFCOMF & obj.AMask:     {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
-	x86.AFCOMFP & obj.AMask:    {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
-	// NOTE(khr): don't use FUCOMI* instructions, not available
-	// on Pentium MMX.  See issue 13923.
-	//x86.AFUCOMIP&obj.AMask:   {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
-	x86.AFUCOMP & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	x86.AFUCOMPP & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	x86.AFCHS & obj.AMask:    {Flags: gc.SizeD | RightRdwr}, // also SizeF
-
-	x86.AFDIVDP & obj.AMask:  {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFDIVF & obj.AMask:   {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
-	x86.AFDIVD & obj.AMask:   {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFDIVRDP & obj.AMask: {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFDIVRF & obj.AMask:  {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
-	x86.AFDIVRD & obj.AMask:  {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFXCHD & obj.AMask:   {Flags: gc.SizeD | LeftRdwr | RightRdwr},
-	x86.AFSUBD & obj.AMask:   {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFSUBDP & obj.AMask:  {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFSUBF & obj.AMask:   {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
-	x86.AFSUBRD & obj.AMask:  {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFSUBRDP & obj.AMask: {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFSUBRF & obj.AMask:  {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
-	x86.AFMOVD & obj.AMask:   {Flags: gc.SizeD | gc.LeftAddr | gc.RightWrite},
-	x86.AFMOVF & obj.AMask:   {Flags: gc.SizeF | gc.LeftAddr | gc.RightWrite},
-	x86.AFMOVL & obj.AMask:   {Flags: gc.SizeL | gc.LeftAddr | gc.RightWrite},
-	x86.AFMOVW & obj.AMask:   {Flags: gc.SizeW | gc.LeftAddr | gc.RightWrite},
-	x86.AFMOVV & obj.AMask:   {Flags: gc.SizeQ | gc.LeftAddr | gc.RightWrite},
-
-	// These instructions are marked as RightAddr
-	// so that the register optimizer does not try to replace the
-	// memory references with integer register references.
-	// But they do not use the previous value at the address, so
-	// we also mark them RightWrite.
-	x86.AFMOVDP & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.RightAddr},
-	x86.AFMOVFP & obj.AMask:  {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.RightAddr},
-	x86.AFMOVLP & obj.AMask:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.RightAddr},
-	x86.AFMOVWP & obj.AMask:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.RightAddr},
-	x86.AFMOVVP & obj.AMask:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.RightAddr},
-	x86.AFMULD & obj.AMask:   {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFMULDP & obj.AMask:  {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
-	x86.AFMULF & obj.AMask:   {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
-	x86.AIDIVB & obj.AMask:   {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.AIDIVL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.SetCarry},
-	x86.AIDIVW & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.SetCarry},
-	x86.AIMULB & obj.AMask:   {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.AIMULL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry},
-	x86.AIMULW & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry},
-	x86.AINCB & obj.AMask:    {Flags: gc.SizeB | RightRdwr},
-	x86.AINCL & obj.AMask:    {Flags: gc.SizeL | RightRdwr},
-	x86.AINCW & obj.AMask:    {Flags: gc.SizeW | RightRdwr},
-	x86.AJCC & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJCS & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJEQ & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJGE & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJGT & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJHI & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJLE & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJLS & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJLT & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJMI & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJNE & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJOC & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJOS & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJPC & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJPL & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	x86.AJPS & obj.AMask:     {Flags: gc.Cjmp | gc.UseCarry},
-	obj.AJMP:                 {Flags: gc.Jump | gc.Break | gc.KillCarry},
-	x86.ALEAW & obj.AMask:    {Flags: gc.LeftAddr | gc.RightWrite},
-	x86.ALEAL & obj.AMask:    {Flags: gc.LeftAddr | gc.RightWrite},
-	x86.AMOVBLSX & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBLZX & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBWSX & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVBWZX & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVWLSX & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVWLZX & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
-	x86.AMOVB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVSB & obj.AMask:   {Flags: gc.OK},
-	x86.AMOVSL & obj.AMask:   {Flags: gc.OK},
-	x86.AMOVSW & obj.AMask:   {Flags: gc.OK},
-	obj.ADUFFCOPY:            {Flags: gc.OK},
-	x86.AMOVSD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMOVSS & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move},
-
-	// We use MOVAPD as a faster synonym for MOVSD.
-	x86.AMOVAPD & obj.AMask:  {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
-	x86.AMULB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | gc.SetCarry},
-	x86.AMULL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | gc.SetCarry},
-	x86.AMULW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | gc.SetCarry},
-	x86.AMULSD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.AMULSS & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.ANEGB & obj.AMask:    {Flags: gc.SizeB | RightRdwr | gc.SetCarry},
-	x86.ANEGL & obj.AMask:    {Flags: gc.SizeL | RightRdwr | gc.SetCarry},
-	x86.ANEGW & obj.AMask:    {Flags: gc.SizeW | RightRdwr | gc.SetCarry},
-	x86.ANOTB & obj.AMask:    {Flags: gc.SizeB | RightRdwr},
-	x86.ANOTL & obj.AMask:    {Flags: gc.SizeL | RightRdwr},
-	x86.ANOTW & obj.AMask:    {Flags: gc.SizeW | RightRdwr},
-	x86.AORB & obj.AMask:     {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AORL & obj.AMask:     {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AORW & obj.AMask:     {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.APOPL & obj.AMask:    {Flags: gc.SizeL | gc.RightWrite},
-	x86.APUSHL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead},
-	x86.APXOR & obj.AMask:    {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ARCLB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCLL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCLW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.ARCRW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry},
-	x86.AREP & obj.AMask:     {Flags: gc.OK},
-	x86.AREPN & obj.AMask:    {Flags: gc.OK},
-	obj.ARET:                 {Flags: gc.Break | gc.KillCarry},
-	x86.AROLB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.AROLL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.AROLW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ARORW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASAHF & obj.AMask:    {Flags: gc.OK},
-	x86.ASALB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASALL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASALW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASARW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASBBB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASBBL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASBBW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry},
-	x86.ASETCC & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETCS & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETEQ & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETGE & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETGT & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETHI & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETLE & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETLS & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETLT & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETMI & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETNE & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETOC & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETOS & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETPC & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETPL & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASETPS & obj.AMask:   {Flags: gc.SizeB | RightRdwr | gc.UseCarry},
-	x86.ASHLB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHLL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHLW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASHRW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry},
-	x86.ASTOSB & obj.AMask:   {Flags: gc.OK},
-	x86.ASTOSL & obj.AMask:   {Flags: gc.OK},
-	x86.ASTOSW & obj.AMask:   {Flags: gc.OK},
-	obj.ADUFFZERO:            {Flags: gc.OK},
-	x86.ASUBB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.ASUBSD & obj.AMask:   {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
-	x86.ASUBSS & obj.AMask:   {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
-	x86.ATESTB & obj.AMask:   {Flags: gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ATESTL & obj.AMask:   {Flags: gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.ATESTW & obj.AMask:   {Flags: gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry},
-	x86.AUCOMISD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
-	x86.AUCOMISS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
-	x86.AXCHGB & obj.AMask:   {Flags: gc.SizeB | LeftRdwr | RightRdwr},
-	x86.AXCHGL & obj.AMask:   {Flags: gc.SizeL | LeftRdwr | RightRdwr},
-	x86.AXCHGW & obj.AMask:   {Flags: gc.SizeW | LeftRdwr | RightRdwr},
-	x86.AXORB & obj.AMask:    {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AXORL & obj.AMask:    {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
-	x86.AXORW & obj.AMask:    {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
-}
-
-func proginfo(p *obj.Prog) gc.ProgInfo {
-	info := progtable[p.As&obj.AMask]
-	if info.Flags == 0 {
-		gc.Fatalf("unknown instruction %v", p)
-	}
-
-	if info.Flags&gc.ImulAXDX != 0 && p.To.Type != obj.TYPE_NONE {
-		info.Flags |= RightRdwr
-	}
-
-	return info
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/ssa.go b/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/ssa.go
deleted file mode 100644
index 64a83a0..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/internal/x86/ssa.go
+++ /dev/null
@@ -1,921 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/ssa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/x86/ssa.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package x86
-
-import (
-	"fmt"
-	"math"
-
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/ssa"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/obj/x86"
-)
-
-// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
-func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
-	flive := b.FlagsLiveAtEnd
-	if b.Control != nil && b.Control.Type.IsFlags() {
-		flive = true
-	}
-	for i := len(b.Values) - 1; i >= 0; i-- {
-		v := b.Values[i]
-		if flive && v.Op == ssa.Op386MOVLconst {
-			// The "mark" is any non-nil Aux value.
-			v.Aux = v
-		}
-		if v.Type.IsFlags() {
-			flive = false
-		}
-		for _, a := range v.Args {
-			if a.Type.IsFlags() {
-				flive = true
-			}
-		}
-	}
-}
-
-// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) obj.As {
-	// Avoid partial register write
-	if !t.IsFloat() && t.Size() <= 2 {
-		if t.Size() == 1 {
-			return x86.AMOVBLZX
-		} else {
-			return x86.AMOVWLZX
-		}
-	}
-	// Otherwise, there's no difference between load and store opcodes.
-	return storeByType(t)
-}
-
-// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) obj.As {
-	width := t.Size()
-	if t.IsFloat() {
-		switch width {
-		case 4:
-			return x86.AMOVSS
-		case 8:
-			return x86.AMOVSD
-		}
-	} else {
-		switch width {
-		case 1:
-			return x86.AMOVB
-		case 2:
-			return x86.AMOVW
-		case 4:
-			return x86.AMOVL
-		}
-	}
-	panic("bad store type")
-}
-
-// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) obj.As {
-	if t.IsFloat() {
-		switch t.Size() {
-		case 4:
-			return x86.AMOVSS
-		case 8:
-			return x86.AMOVSD
-		default:
-			panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t))
-		}
-	} else {
-		switch t.Size() {
-		case 1:
-			// Avoids partial register write
-			return x86.AMOVL
-		case 2:
-			return x86.AMOVL
-		case 4:
-			return x86.AMOVL
-		default:
-			panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
-		}
-	}
-}
-
-// opregreg emits instructions for
-//     dest := dest(To) op src(From)
-// and also returns the created obj.Prog so it
-// may be further adjusted (offset, scale, etc).
-func opregreg(op obj.As, dest, src int16) *obj.Prog {
-	p := gc.Prog(op)
-	p.From.Type = obj.TYPE_REG
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = dest
-	p.From.Reg = src
-	return p
-}
-
-func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
-	s.SetLineno(v.Line)
-
-	if gc.Thearch.Use387 {
-		if ssaGenValue387(s, v) {
-			return // v was handled by 387 generation.
-		}
-	}
-
-	switch v.Op {
-	case ssa.Op386ADDL:
-		r := v.Reg()
-		r1 := v.Args[0].Reg()
-		r2 := v.Args[1].Reg()
-		switch {
-		case r == r1:
-			p := gc.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = r2
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		case r == r2:
-			p := gc.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = r1
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		default:
-			p := gc.Prog(x86.ALEAL)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = r1
-			p.From.Scale = 1
-			p.From.Index = r2
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		}
-
-	// 2-address opcode arithmetic
-	case ssa.Op386SUBL,
-		ssa.Op386MULL,
-		ssa.Op386ANDL,
-		ssa.Op386ORL,
-		ssa.Op386XORL,
-		ssa.Op386SHLL,
-		ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
-		ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
-		ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
-		ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
-		ssa.Op386PXOR,
-		ssa.Op386ADCL,
-		ssa.Op386SBBL:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		opregreg(v.Op.Asm(), r, v.Args[1].Reg())
-
-	case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
-		// output 0 is carry/borrow, output 1 is the low 32 bits.
-		r := v.Reg0()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
-		}
-		opregreg(v.Op.Asm(), r, v.Args[1].Reg())
-
-	case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
-		// output 0 is carry/borrow, output 1 is the low 32 bits.
-		r := v.Reg0()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.Op386DIVL, ssa.Op386DIVW,
-		ssa.Op386DIVLU, ssa.Op386DIVWU,
-		ssa.Op386MODL, ssa.Op386MODW,
-		ssa.Op386MODLU, ssa.Op386MODWU:
-
-		// Arg[0] is already in AX as it's the only register we allow
-		// and AX is the only output
-		x := v.Args[1].Reg()
-
-		// CPU faults upon signed overflow, which occurs when the
-		// most negative int is divided by -1.
-		var j *obj.Prog
-		if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW ||
-			v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW {
-
-			var c *obj.Prog
-			switch v.Op {
-			case ssa.Op386DIVL, ssa.Op386MODL:
-				c = gc.Prog(x86.ACMPL)
-				j = gc.Prog(x86.AJEQ)
-				gc.Prog(x86.ACDQ) //TODO: fix
-
-			case ssa.Op386DIVW, ssa.Op386MODW:
-				c = gc.Prog(x86.ACMPW)
-				j = gc.Prog(x86.AJEQ)
-				gc.Prog(x86.ACWD)
-			}
-			c.From.Type = obj.TYPE_REG
-			c.From.Reg = x
-			c.To.Type = obj.TYPE_CONST
-			c.To.Offset = -1
-
-			j.To.Type = obj.TYPE_BRANCH
-		}
-
-		// for unsigned ints, we sign extend by setting DX = 0
-		// signed ints were sign extended above
-		if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
-			v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
-			c := gc.Prog(x86.AXORL)
-			c.From.Type = obj.TYPE_REG
-			c.From.Reg = x86.REG_DX
-			c.To.Type = obj.TYPE_REG
-			c.To.Reg = x86.REG_DX
-		}
-
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x
-
-		// signed division, rest of the check for -1 case
-		if j != nil {
-			j2 := gc.Prog(obj.AJMP)
-			j2.To.Type = obj.TYPE_BRANCH
-
-			var n *obj.Prog
-			if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
-				// n * -1 = -n
-				n = gc.Prog(x86.ANEGL)
-				n.To.Type = obj.TYPE_REG
-				n.To.Reg = x86.REG_AX
-			} else {
-				// n % -1 == 0
-				n = gc.Prog(x86.AXORL)
-				n.From.Type = obj.TYPE_REG
-				n.From.Reg = x86.REG_DX
-				n.To.Type = obj.TYPE_REG
-				n.To.Reg = x86.REG_DX
-			}
-
-			j.To.Val = n
-			j2.To.Val = s.Pc()
-		}
-
-	case ssa.Op386HMULL, ssa.Op386HMULW, ssa.Op386HMULB,
-		ssa.Op386HMULLU, ssa.Op386HMULWU, ssa.Op386HMULBU:
-		// the frontend rewrites constant division by 8/16/32 bit integers into
-		// HMUL by a constant
-		// SSA rewrites generate the 64 bit versions
-
-		// Arg[0] is already in AX as it's the only register we allow
-		// and DX is the only output we care about (the high bits)
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-
-		// IMULB puts the high portion in AH instead of DL,
-		// so move it to DL for consistency
-		if v.Type.Size() == 1 {
-			m := gc.Prog(x86.AMOVB)
-			m.From.Type = obj.TYPE_REG
-			m.From.Reg = x86.REG_AH
-			m.To.Type = obj.TYPE_REG
-			m.To.Reg = x86.REG_DX
-		}
-
-	case ssa.Op386MULLQU:
-		// AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-
-	case ssa.Op386ADDLconst:
-		r := v.Reg()
-		a := v.Args[0].Reg()
-		if r == a {
-			if v.AuxInt == 1 {
-				p := gc.Prog(x86.AINCL)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = r
-				return
-			}
-			if v.AuxInt == -1 {
-				p := gc.Prog(x86.ADECL)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = r
-				return
-			}
-			p := gc.Prog(v.Op.Asm())
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = v.AuxInt
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-			return
-		}
-		p := gc.Prog(x86.ALEAL)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = a
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-
-	case ssa.Op386MULLconst:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-		// TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2
-		// then we don't need to use resultInArg0 for these ops.
-		//p.From3 = new(obj.Addr)
-		//p.From3.Type = obj.TYPE_REG
-		//p.From3.Reg = v.Args[0].Reg()
-
-	case ssa.Op386SUBLconst,
-		ssa.Op386ADCLconst,
-		ssa.Op386SBBLconst,
-		ssa.Op386ANDLconst,
-		ssa.Op386ORLconst,
-		ssa.Op386XORLconst,
-		ssa.Op386SHLLconst,
-		ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst,
-		ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst,
-		ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.Op386SBBLcarrymask:
-		r := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		p := gc.Prog(x86.ALEAL)
-		switch v.Op {
-		case ssa.Op386LEAL1:
-			p.From.Scale = 1
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-		case ssa.Op386LEAL2:
-			p.From.Scale = 2
-		case ssa.Op386LEAL4:
-			p.From.Scale = 4
-		case ssa.Op386LEAL8:
-			p.From.Scale = 8
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r
-		p.From.Index = i
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386LEAL:
-		p := gc.Prog(x86.ALEAL)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
-		ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
-		opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
-	case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
-		// Go assembler has swapped operands for UCOMISx relative to CMP,
-		// must account for that right here.
-		opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
-	case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = v.AuxInt
-	case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-	case ssa.Op386MOVLconst:
-		x := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = v.AuxInt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x
-		// If flags are live at this instruction, suppress the
-		// MOV $0,AX -> XOR AX,AX optimization.
-		if v.Aux != nil {
-			p.Mark |= x86.PRESERVEFLAGS
-		}
-	case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
-		x := v.Reg()
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_FCONST
-		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x
-	case ssa.Op386MOVSSconst1, ssa.Op386MOVSDconst1:
-		var literal string
-		if v.Op == ssa.Op386MOVSDconst1 {
-			literal = fmt.Sprintf("$f64.%016x", uint64(v.AuxInt))
-		} else {
-			literal = fmt.Sprintf("$f32.%08x", math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt)))))
-		}
-		p := gc.Prog(x86.ALEAL)
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_EXTERN
-		p.From.Sym = obj.Linklookup(gc.Ctxt, literal, 0)
-		p.From.Sym.Set(obj.AttrLocal, true)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386MOVSDloadidx8:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.From.Scale = 8
-		p.From.Index = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.From.Scale = 4
-		p.From.Index = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386MOVWloadidx2:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.From, v)
-		p.From.Scale = 2
-		p.From.Index = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		if i == x86.REG_SP {
-			r, i = i, r
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = r
-		p.From.Scale = 1
-		p.From.Index = i
-		gc.AddAux(&p.From, v)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[1].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.Op386MOVSDstoreidx8:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Scale = 8
-		p.To.Index = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Scale = 4
-		p.To.Index = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.Op386MOVWstoreidx2:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		p.To.Scale = 2
-		p.To.Index = v.Args[1].Reg()
-		gc.AddAux(&p.To, v)
-	case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		if i == x86.REG_SP {
-			r, i = i, r
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[2].Reg()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = r
-		p.To.Scale = 1
-		p.To.Index = i
-		gc.AddAux(&p.To, v)
-	case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		sc := v.AuxValAndOff()
-		p.From.Offset = sc.Val()
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_CONST
-		sc := v.AuxValAndOff()
-		p.From.Offset = sc.Val()
-		r := v.Args[0].Reg()
-		i := v.Args[1].Reg()
-		switch v.Op {
-		case ssa.Op386MOVBstoreconstidx1, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVLstoreconstidx1:
-			p.To.Scale = 1
-			if i == x86.REG_SP {
-				r, i = i, r
-			}
-		case ssa.Op386MOVWstoreconstidx2:
-			p.To.Scale = 2
-		case ssa.Op386MOVLstoreconstidx4:
-			p.To.Scale = 4
-		}
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = r
-		p.To.Index = i
-		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
-		ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
-		ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
-		ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
-		opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
-	case ssa.Op386DUFFZERO:
-		p := gc.Prog(obj.ADUFFZERO)
-		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-	case ssa.Op386DUFFCOPY:
-		p := gc.Prog(obj.ADUFFCOPY)
-		p.To.Type = obj.TYPE_ADDR
-		p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
-		p.To.Offset = v.AuxInt
-
-	case ssa.OpCopy, ssa.Op386MOVLconvert: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
-		if v.Type.IsMemory() {
-			return
-		}
-		x := v.Args[0].Reg()
-		y := v.Reg()
-		if x != y {
-			opregreg(moveByType(v.Type), y, x)
-		}
-	case ssa.OpLoadReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("load flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(loadByType(v.Type))
-		gc.AddrAuto(&p.From, v.Args[0])
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.OpStoreReg:
-		if v.Type.IsFlags() {
-			v.Fatalf("store flags not implemented: %v", v.LongString())
-			return
-		}
-		p := gc.Prog(storeByType(v.Type))
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		gc.AddrAuto(&p.To, v)
-	case ssa.OpPhi:
-		gc.CheckLoweredPhi(v)
-	case ssa.OpInitMem:
-		// memory arg needs no code
-	case ssa.OpArg:
-		// input args need no code
-	case ssa.Op386LoweredGetClosurePtr:
-		// Closure pointer is DX.
-		gc.CheckLoweredGetClosurePtr(v)
-	case ssa.Op386LoweredGetG:
-		r := v.Reg()
-		// See the comments in cmd/internal/obj/x86/obj6.go
-		// near CanUse1InsnTLS for a detailed explanation of these instructions.
-		if x86.CanUse1InsnTLS(gc.Ctxt) {
-			// MOVL (TLS), r
-			p := gc.Prog(x86.AMOVL)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-		} else {
-			// MOVL TLS, r
-			// MOVL (r)(TLS*1), r
-			p := gc.Prog(x86.AMOVL)
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = x86.REG_TLS
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = r
-			q := gc.Prog(x86.AMOVL)
-			q.From.Type = obj.TYPE_MEM
-			q.From.Reg = r
-			q.From.Index = x86.REG_TLS
-			q.From.Scale = 1
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = r
-		}
-	case ssa.Op386CALLstatic:
-		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386CALLdefer:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386CALLgo:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386NEGL,
-		ssa.Op386BSWAPL,
-		ssa.Op386NOTL:
-		r := v.Reg()
-		if r != v.Args[0].Reg() {
-			v.Fatalf("input[0] and output not in same register %s", v.LongString())
-		}
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = r
-	case ssa.Op386BSFL, ssa.Op386BSFW,
-		ssa.Op386BSRL, ssa.Op386BSRW,
-		ssa.Op386SQRTSD:
-		p := gc.Prog(v.Op.Asm())
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = v.Args[0].Reg()
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-	case ssa.OpSP, ssa.OpSB, ssa.OpSelect0, ssa.OpSelect1:
-		// nothing to do
-	case ssa.Op386SETEQ, ssa.Op386SETNE,
-		ssa.Op386SETL, ssa.Op386SETLE,
-		ssa.Op386SETG, ssa.Op386SETGE,
-		ssa.Op386SETGF, ssa.Op386SETGEF,
-		ssa.Op386SETB, ssa.Op386SETBE,
-		ssa.Op386SETORD, ssa.Op386SETNAN,
-		ssa.Op386SETA, ssa.Op386SETAE:
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-
-	case ssa.Op386SETNEF:
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		q := gc.Prog(x86.ASETPS)
-		q.To.Type = obj.TYPE_REG
-		q.To.Reg = x86.REG_AX
-		opregreg(x86.AORL, v.Reg(), x86.REG_AX)
-
-	case ssa.Op386SETEQF:
-		p := gc.Prog(v.Op.Asm())
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Reg()
-		q := gc.Prog(x86.ASETPC)
-		q.To.Type = obj.TYPE_REG
-		q.To.Reg = x86.REG_AX
-		opregreg(x86.AANDL, v.Reg(), x86.REG_AX)
-
-	case ssa.Op386InvertFlags:
-		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
-	case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
-		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
-	case ssa.Op386REPSTOSL:
-		gc.Prog(x86.AREP)
-		gc.Prog(x86.ASTOSL)
-	case ssa.Op386REPMOVSL:
-		gc.Prog(x86.AREP)
-		gc.Prog(x86.AMOVSL)
-	case ssa.OpVarDef:
-		gc.Gvardef(v.Aux.(*gc.Node))
-	case ssa.OpVarKill:
-		gc.Gvarkill(v.Aux.(*gc.Node))
-	case ssa.OpVarLive:
-		gc.Gvarlive(v.Aux.(*gc.Node))
-	case ssa.OpKeepAlive:
-		gc.KeepAlive(v)
-	case ssa.Op386LoweredNilCheck:
-		// Issue a load which will fault if the input is nil.
-		// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
-		// Should we use the 3-byte TESTB $0, (reg) instead?  It is larger
-		// but it doesn't have false dependency on AX.
-		// Or maybe allocate an output register and use MOVL (reg),reg2 ?
-		// That trades clobbering flags for clobbering a register.
-		p := gc.Prog(x86.ATESTB)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_AX
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = v.Args[0].Reg()
-		gc.AddAux(&p.To, v)
-		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
-			gc.Warnl(v.Line, "generated nil check")
-		}
-	case ssa.Op386FCHS:
-		v.Fatalf("FCHS in non-387 mode")
-	default:
-		v.Fatalf("genValue not implemented: %s", v.LongString())
-	}
-}
-
-var blockJump = [...]struct {
-	asm, invasm obj.As
-}{
-	ssa.Block386EQ:  {x86.AJEQ, x86.AJNE},
-	ssa.Block386NE:  {x86.AJNE, x86.AJEQ},
-	ssa.Block386LT:  {x86.AJLT, x86.AJGE},
-	ssa.Block386GE:  {x86.AJGE, x86.AJLT},
-	ssa.Block386LE:  {x86.AJLE, x86.AJGT},
-	ssa.Block386GT:  {x86.AJGT, x86.AJLE},
-	ssa.Block386ULT: {x86.AJCS, x86.AJCC},
-	ssa.Block386UGE: {x86.AJCC, x86.AJCS},
-	ssa.Block386UGT: {x86.AJHI, x86.AJLS},
-	ssa.Block386ULE: {x86.AJLS, x86.AJHI},
-	ssa.Block386ORD: {x86.AJPC, x86.AJPS},
-	ssa.Block386NAN: {x86.AJPS, x86.AJPC},
-}
-
-var eqfJumps = [2][2]gc.FloatingEQNEJump{
-	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
-	{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
-}
-var nefJumps = [2][2]gc.FloatingEQNEJump{
-	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
-	{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
-}
-
-func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
-	s.SetLineno(b.Line)
-
-	if gc.Thearch.Use387 {
-		// Empty the 387's FP stack before the block ends.
-		flush387(s)
-	}
-
-	switch b.Kind {
-	case ssa.BlockPlain:
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockDefer:
-		// defer returns in AX:
-		// 0 if we should continue executing
-		// 1 if we should jump to deferreturn call
-		p := gc.Prog(x86.ATESTL)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = x86.REG_AX
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = x86.REG_AX
-		p = gc.Prog(x86.AJNE)
-		p.To.Type = obj.TYPE_BRANCH
-		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		if b.Succs[0].Block() != next {
-			p := gc.Prog(obj.AJMP)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		}
-	case ssa.BlockExit:
-		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
-	case ssa.BlockRet:
-		gc.Prog(obj.ARET)
-	case ssa.BlockRetJmp:
-		p := gc.Prog(obj.AJMP)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
-
-	case ssa.Block386EQF:
-		gc.SSAGenFPJump(s, b, next, &eqfJumps)
-
-	case ssa.Block386NEF:
-		gc.SSAGenFPJump(s, b, next, &nefJumps)
-
-	case ssa.Block386EQ, ssa.Block386NE,
-		ssa.Block386LT, ssa.Block386GE,
-		ssa.Block386LE, ssa.Block386GT,
-		ssa.Block386ULT, ssa.Block386UGT,
-		ssa.Block386ULE, ssa.Block386UGE:
-		jmp := blockJump[b.Kind]
-		likely := b.Likely
-		var p *obj.Prog
-		switch next {
-		case b.Succs[0].Block():
-			p = gc.Prog(jmp.invasm)
-			likely *= -1
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
-		case b.Succs[1].Block():
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-		default:
-			p = gc.Prog(jmp.asm)
-			p.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
-			q := gc.Prog(obj.AJMP)
-			q.To.Type = obj.TYPE_BRANCH
-			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
-		}
-
-		// liblink reorders the instruction stream as it sees fit.
-		// Pass along what we know so liblink can make use of it.
-		// TODO: Once we've fully switched to SSA,
-		// make liblink leave our output alone.
-		switch likely {
-		case ssa.BranchUnlikely:
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 0
-		case ssa.BranchLikely:
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = 1
-		}
-
-	default:
-		b.Fatalf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/compile/main.go b/pkg/bootstrap/src/bootstrap/cmd/compile/main.go
deleted file mode 100644
index 4bf61f4..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/compile/main.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/main.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/main.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"bootstrap/cmd/compile/internal/amd64"
-	"bootstrap/cmd/compile/internal/arm"
-	"bootstrap/cmd/compile/internal/arm64"
-	"bootstrap/cmd/compile/internal/gc"
-	"bootstrap/cmd/compile/internal/mips"
-	"bootstrap/cmd/compile/internal/mips64"
-	"bootstrap/cmd/compile/internal/ppc64"
-	"bootstrap/cmd/compile/internal/s390x"
-	"bootstrap/cmd/compile/internal/x86"
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"log"
-	"os"
-)
-
-func main() {
-	// disable timestamps for reproducible output
-	log.SetFlags(0)
-	log.SetPrefix("compile: ")
-
-	switch obj.GOARCH {
-	default:
-		fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", obj.GOARCH)
-		os.Exit(2)
-	case "386":
-		x86.Init()
-	case "amd64", "amd64p32":
-		amd64.Init()
-	case "arm":
-		arm.Init()
-	case "arm64":
-		arm64.Init()
-	case "mips", "mipsle":
-		mips.Init()
-	case "mips64", "mips64le":
-		mips64.Init()
-	case "ppc64", "ppc64le":
-		ppc64.Init()
-	case "s390x":
-		s390x.Init()
-	}
-
-	gc.Main()
-	gc.Exit(0)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/bio/buf.go b/pkg/bootstrap/src/bootstrap/cmd/internal/bio/buf.go
deleted file mode 100644
index 9473095..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/bio/buf.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/bio/buf.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/bio/buf.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bio implements common I/O abstractions used within the Go toolchain.
-package bio
-
-import (
-	"bufio"
-	"log"
-	"os"
-)
-
-// Reader implements a seekable buffered io.Reader.
-type Reader struct {
-	f *os.File
-	*bufio.Reader
-}
-
-// Writer implements a seekable buffered io.Writer.
-type Writer struct {
-	f *os.File
-	*bufio.Writer
-}
-
-// Create creates the file named name and returns a Writer
-// for that file.
-func Create(name string) (*Writer, error) {
-	f, err := os.Create(name)
-	if err != nil {
-		return nil, err
-	}
-	return &Writer{f: f, Writer: bufio.NewWriter(f)}, nil
-}
-
-// Open returns a Reader for the file named name.
-func Open(name string) (*Reader, error) {
-	f, err := os.Open(name)
-	if err != nil {
-		return nil, err
-	}
-	return &Reader{f: f, Reader: bufio.NewReader(f)}, nil
-}
-
-func (r *Reader) Seek(offset int64, whence int) int64 {
-	if whence == 1 {
-		offset -= int64(r.Buffered())
-	}
-	off, err := r.f.Seek(offset, whence)
-	if err != nil {
-		log.Fatalf("seeking in output: %v", err)
-	}
-	r.Reset(r.f)
-	return off
-}
-
-func (w *Writer) Seek(offset int64, whence int) int64 {
-	if err := w.Flush(); err != nil {
-		log.Fatalf("writing output: %v", err)
-	}
-	off, err := w.f.Seek(offset, whence)
-	if err != nil {
-		log.Fatalf("seeking in output: %v", err)
-	}
-	return off
-}
-
-func (r *Reader) Offset() int64 {
-	off, err := r.f.Seek(0, 1)
-	if err != nil {
-		log.Fatalf("seeking in output [0, 1]: %v", err)
-	}
-	off -= int64(r.Buffered())
-	return off
-}
-
-func (w *Writer) Offset() int64 {
-	if err := w.Flush(); err != nil {
-		log.Fatalf("writing output: %v", err)
-	}
-	off, err := w.f.Seek(0, 1)
-	if err != nil {
-		log.Fatalf("seeking in output [0, 1]: %v", err)
-	}
-	return off
-}
-
-func (r *Reader) Close() error {
-	return r.f.Close()
-}
-
-func (w *Writer) Close() error {
-	err := w.Flush()
-	err1 := w.f.Close()
-	if err == nil {
-		err = err1
-	}
-	return err
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/bio/must.go b/pkg/bootstrap/src/bootstrap/cmd/internal/bio/must.go
deleted file mode 100644
index 5de6e80..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/bio/must.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/bio/must.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/bio/must.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package bio
-
-import (
-	"io"
-	"log"
-)
-
-// MustClose closes Closer c and calls log.Fatal if it returns a non-nil error.
-func MustClose(c io.Closer) {
-	if err := c.Close(); err != nil {
-		log.Fatal(err)
-	}
-}
-
-// MustWriter returns a Writer that wraps the provided Writer,
-// except that it calls log.Fatal instead of returning a non-nil error.
-func MustWriter(w io.Writer) io.Writer {
-	return mustWriter{w}
-}
-
-type mustWriter struct {
-	w io.Writer
-}
-
-func (w mustWriter) Write(b []byte) (int, error) {
-	n, err := w.w.Write(b)
-	if err != nil {
-		log.Fatal(err)
-	}
-	return n, nil
-}
-
-func (w mustWriter) WriteString(s string) (int, error) {
-	n, err := io.WriteString(w.w, s)
-	if err != nil {
-		log.Fatal(err)
-	}
-	return n, nil
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/dwarf/dwarf.go b/pkg/bootstrap/src/bootstrap/cmd/internal/dwarf/dwarf.go
deleted file mode 100644
index e87feb7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/dwarf/dwarf.go
+++ /dev/null
@@ -1,607 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/dwarf/dwarf.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/dwarf/dwarf.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package dwarf generates DWARF debugging information.
-// DWARF generation is split between the compiler and the linker,
-// this package contains the shared code.
-package dwarf
-
-import (
-	"fmt"
-	"strings"
-)
-
-// InfoPrefix is the prefix for all the symbols containing DWARF info entries.
-const InfoPrefix = "go.info."
-
-// Sym represents a symbol.
-type Sym interface {
-}
-
-// A Var represents a local variable or a function parameter.
-type Var struct {
-	Name   string
-	Abbrev int // Either DW_ABRV_AUTO or DW_ABRV_PARAM
-	Offset int32
-	Type   Sym
-	Link   *Var
-}
-
-// A Context specifies how to add data to a Sym.
-type Context interface {
-	PtrSize() int
-	AddInt(s Sym, size int, i int64)
-	AddBytes(s Sym, b []byte)
-	AddAddress(s Sym, t interface{}, ofs int64)
-	AddSectionOffset(s Sym, size int, t interface{}, ofs int64)
-	AddString(s Sym, v string)
-	SymValue(s Sym) int64
-}
-
-// AppendUleb128 appends v to b using DWARF's unsigned LEB128 encoding.
-func AppendUleb128(b []byte, v uint64) []byte {
-	for {
-		c := uint8(v & 0x7f)
-		v >>= 7
-		if v != 0 {
-			c |= 0x80
-		}
-		b = append(b, c)
-		if c&0x80 == 0 {
-			break
-		}
-	}
-	return b
-}
-
-// AppendSleb128 appends v to b using DWARF's signed LEB128 encoding.
-func AppendSleb128(b []byte, v int64) []byte {
-	for {
-		c := uint8(v & 0x7f)
-		s := uint8(v & 0x40)
-		v >>= 7
-		if (v != -1 || s == 0) && (v != 0 || s != 0) {
-			c |= 0x80
-		}
-		b = append(b, c)
-		if c&0x80 == 0 {
-			break
-		}
-	}
-	return b
-}
-
-var encbuf [20]byte
-
-// Uleb128put appends v to s using DWARF's unsigned LEB128 encoding.
-func Uleb128put(ctxt Context, s Sym, v int64) {
-	b := AppendUleb128(encbuf[:0], uint64(v))
-	ctxt.AddBytes(s, b)
-}
-
-// Sleb128put appends v to s using DWARF's signed LEB128 encoding.
-func Sleb128put(ctxt Context, s Sym, v int64) {
-	b := AppendSleb128(encbuf[:0], v)
-	ctxt.AddBytes(s, b)
-}
-
-/*
- * Defining Abbrevs.  This is hardcoded, and there will be
- * only a handful of them.  The DWARF spec places no restriction on
- * the ordering of attributes in the Abbrevs and DIEs, and we will
- * always write them out in the order of declaration in the abbrev.
- */
-type dwAttrForm struct {
-	attr uint16
-	form uint8
-}
-
-// Go-specific type attributes.
-const (
-	DW_AT_go_kind = 0x2900
-	DW_AT_go_key  = 0x2901
-	DW_AT_go_elem = 0x2902
-
-	DW_AT_internal_location = 253 // params and locals; not emitted
-)
-
-// Index into the abbrevs table below.
-// Keep in sync with ispubname() and ispubtype() below.
-// ispubtype considers >= NULLTYPE public
-const (
-	DW_ABRV_NULL = iota
-	DW_ABRV_COMPUNIT
-	DW_ABRV_FUNCTION
-	DW_ABRV_VARIABLE
-	DW_ABRV_AUTO
-	DW_ABRV_PARAM
-	DW_ABRV_STRUCTFIELD
-	DW_ABRV_FUNCTYPEPARAM
-	DW_ABRV_DOTDOTDOT
-	DW_ABRV_ARRAYRANGE
-	DW_ABRV_NULLTYPE
-	DW_ABRV_BASETYPE
-	DW_ABRV_ARRAYTYPE
-	DW_ABRV_CHANTYPE
-	DW_ABRV_FUNCTYPE
-	DW_ABRV_IFACETYPE
-	DW_ABRV_MAPTYPE
-	DW_ABRV_PTRTYPE
-	DW_ABRV_BARE_PTRTYPE // only for void*, no DW_AT_type attr to please gdb 6.
-	DW_ABRV_SLICETYPE
-	DW_ABRV_STRINGTYPE
-	DW_ABRV_STRUCTTYPE
-	DW_ABRV_TYPEDECL
-	DW_NABRV
-)
-
-type dwAbbrev struct {
-	tag      uint8
-	children uint8
-	attr     []dwAttrForm
-}
-
-var abbrevs = [DW_NABRV]dwAbbrev{
-	/* The mandatory DW_ABRV_NULL entry. */
-	{0, 0, []dwAttrForm{}},
-
-	/* COMPUNIT */
-	{
-		DW_TAG_compile_unit,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_language, DW_FORM_data1},
-			{DW_AT_low_pc, DW_FORM_addr},
-			{DW_AT_high_pc, DW_FORM_addr},
-			{DW_AT_stmt_list, DW_FORM_data4},
-			{DW_AT_comp_dir, DW_FORM_string},
-		},
-	},
-
-	/* FUNCTION */
-	{
-		DW_TAG_subprogram,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_low_pc, DW_FORM_addr},
-			{DW_AT_high_pc, DW_FORM_addr},
-			{DW_AT_external, DW_FORM_flag},
-		},
-	},
-
-	/* VARIABLE */
-	{
-		DW_TAG_variable,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_location, DW_FORM_block1},
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_external, DW_FORM_flag},
-		},
-	},
-
-	/* AUTO */
-	{
-		DW_TAG_variable,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_location, DW_FORM_block1},
-			{DW_AT_type, DW_FORM_ref_addr},
-		},
-	},
-
-	/* PARAM */
-	{
-		DW_TAG_formal_parameter,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_location, DW_FORM_block1},
-			{DW_AT_type, DW_FORM_ref_addr},
-		},
-	},
-
-	/* STRUCTFIELD */
-	{
-		DW_TAG_member,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_data_member_location, DW_FORM_block1},
-			{DW_AT_type, DW_FORM_ref_addr},
-		},
-	},
-
-	/* FUNCTYPEPARAM */
-	{
-		DW_TAG_formal_parameter,
-		DW_CHILDREN_no,
-
-		// No name!
-		[]dwAttrForm{
-			{DW_AT_type, DW_FORM_ref_addr},
-		},
-	},
-
-	/* DOTDOTDOT */
-	{
-		DW_TAG_unspecified_parameters,
-		DW_CHILDREN_no,
-		[]dwAttrForm{},
-	},
-
-	/* ARRAYRANGE */
-	{
-		DW_TAG_subrange_type,
-		DW_CHILDREN_no,
-
-		// No name!
-		[]dwAttrForm{
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_count, DW_FORM_udata},
-		},
-	},
-
-	// Below here are the types considered public by ispubtype
-	/* NULLTYPE */
-	{
-		DW_TAG_unspecified_type,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-		},
-	},
-
-	/* BASETYPE */
-	{
-		DW_TAG_base_type,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_encoding, DW_FORM_data1},
-			{DW_AT_byte_size, DW_FORM_data1},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* ARRAYTYPE */
-	// child is subrange with upper bound
-	{
-		DW_TAG_array_type,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_byte_size, DW_FORM_udata},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* CHANTYPE */
-	{
-		DW_TAG_typedef,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_go_kind, DW_FORM_data1},
-			{DW_AT_go_elem, DW_FORM_ref_addr},
-		},
-	},
-
-	/* FUNCTYPE */
-	{
-		DW_TAG_subroutine_type,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			// {DW_AT_type,	DW_FORM_ref_addr},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* IFACETYPE */
-	{
-		DW_TAG_typedef,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* MAPTYPE */
-	{
-		DW_TAG_typedef,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_go_kind, DW_FORM_data1},
-			{DW_AT_go_key, DW_FORM_ref_addr},
-			{DW_AT_go_elem, DW_FORM_ref_addr},
-		},
-	},
-
-	/* PTRTYPE */
-	{
-		DW_TAG_pointer_type,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_type, DW_FORM_ref_addr},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* BARE_PTRTYPE */
-	{
-		DW_TAG_pointer_type,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-		},
-	},
-
-	/* SLICETYPE */
-	{
-		DW_TAG_structure_type,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_byte_size, DW_FORM_udata},
-			{DW_AT_go_kind, DW_FORM_data1},
-			{DW_AT_go_elem, DW_FORM_ref_addr},
-		},
-	},
-
-	/* STRINGTYPE */
-	{
-		DW_TAG_structure_type,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_byte_size, DW_FORM_udata},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* STRUCTTYPE */
-	{
-		DW_TAG_structure_type,
-		DW_CHILDREN_yes,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_byte_size, DW_FORM_udata},
-			{DW_AT_go_kind, DW_FORM_data1},
-		},
-	},
-
-	/* TYPEDECL */
-	{
-		DW_TAG_typedef,
-		DW_CHILDREN_no,
-		[]dwAttrForm{
-			{DW_AT_name, DW_FORM_string},
-			{DW_AT_type, DW_FORM_ref_addr},
-		},
-	},
-}
-
-// GetAbbrev returns the contents of the .debug_abbrev section.
-func GetAbbrev() []byte {
-	var buf []byte
-	for i := 1; i < DW_NABRV; i++ {
-		// See section 7.5.3
-		buf = AppendUleb128(buf, uint64(i))
-
-		buf = AppendUleb128(buf, uint64(abbrevs[i].tag))
-		buf = append(buf, byte(abbrevs[i].children))
-		for _, f := range abbrevs[i].attr {
-			buf = AppendUleb128(buf, uint64(f.attr))
-			buf = AppendUleb128(buf, uint64(f.form))
-		}
-		buf = append(buf, 0, 0)
-	}
-	return append(buf, 0)
-}
-
-/*
- * Debugging Information Entries and their attributes.
- */
-
-// DWAttr represents an attribute of a DWDie.
-//
-// For DW_CLS_string and _block, value should contain the length, and
-// data the data, for _reference, value is 0 and data is a DWDie* to
-// the referenced instance, for all others, value is the whole thing
-// and data is null.
-type DWAttr struct {
-	Link  *DWAttr
-	Atr   uint16 // DW_AT_
-	Cls   uint8  // DW_CLS_
-	Value int64
-	Data  interface{}
-}
-
-// DWDie represents a DWARF debug info entry.
-type DWDie struct {
-	Abbrev int
-	Link   *DWDie
-	Child  *DWDie
-	Attr   *DWAttr
-	Sym    Sym
-}
-
-func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, data interface{}) error {
-	switch form {
-	case DW_FORM_addr: // address
-		ctxt.AddAddress(s, data, value)
-
-	case DW_FORM_block1: // block
-		if cls == DW_CLS_ADDRESS {
-			ctxt.AddInt(s, 1, int64(1+ctxt.PtrSize()))
-			ctxt.AddInt(s, 1, DW_OP_addr)
-			ctxt.AddAddress(s, data, 0)
-			break
-		}
-
-		value &= 0xff
-		ctxt.AddInt(s, 1, value)
-		p := data.([]byte)[:value]
-		ctxt.AddBytes(s, p)
-
-	case DW_FORM_block2: // block
-		value &= 0xffff
-
-		ctxt.AddInt(s, 2, value)
-		p := data.([]byte)[:value]
-		ctxt.AddBytes(s, p)
-
-	case DW_FORM_block4: // block
-		value &= 0xffffffff
-
-		ctxt.AddInt(s, 4, value)
-		p := data.([]byte)[:value]
-		ctxt.AddBytes(s, p)
-
-	case DW_FORM_block: // block
-		Uleb128put(ctxt, s, value)
-
-		p := data.([]byte)[:value]
-		ctxt.AddBytes(s, p)
-
-	case DW_FORM_data1: // constant
-		ctxt.AddInt(s, 1, value)
-
-	case DW_FORM_data2: // constant
-		ctxt.AddInt(s, 2, value)
-
-	case DW_FORM_data4: // constant, {line,loclist,mac,rangelist}ptr
-		if cls == DW_CLS_PTR { // DW_AT_stmt_list
-			ctxt.AddSectionOffset(s, 4, data, 0)
-			break
-		}
-		ctxt.AddInt(s, 4, value)
-
-	case DW_FORM_data8: // constant, {line,loclist,mac,rangelist}ptr
-		ctxt.AddInt(s, 8, value)
-
-	case DW_FORM_sdata: // constant
-		Sleb128put(ctxt, s, value)
-
-	case DW_FORM_udata: // constant
-		Uleb128put(ctxt, s, value)
-
-	case DW_FORM_string: // string
-		str := data.(string)
-		ctxt.AddString(s, str)
-		// TODO(ribrdb): verify padded strings are never used and remove this
-		for i := int64(len(str)); i < value; i++ {
-			ctxt.AddInt(s, 1, 0)
-		}
-
-	case DW_FORM_flag: // flag
-		if value != 0 {
-			ctxt.AddInt(s, 1, 1)
-		} else {
-			ctxt.AddInt(s, 1, 0)
-		}
-
-	// In DWARF 2 (which is what we claim to generate),
-	// the ref_addr is the same size as a normal address.
-	// In DWARF 3 it is always 32 bits, unless emitting a large
-	// (> 4 GB of debug info aka "64-bit") unit, which we don't implement.
-	case DW_FORM_ref_addr: // reference to a DIE in the .info section
-		if data == nil {
-			return fmt.Errorf("dwarf: null reference in %d", abbrev)
-		} else {
-			ctxt.AddSectionOffset(s, ctxt.PtrSize(), data, 0)
-		}
-
-	case DW_FORM_ref1, // reference within the compilation unit
-		DW_FORM_ref2,      // reference
-		DW_FORM_ref4,      // reference
-		DW_FORM_ref8,      // reference
-		DW_FORM_ref_udata, // reference
-
-		DW_FORM_strp,     // string
-		DW_FORM_indirect: // (see Section 7.5.3)
-		fallthrough
-	default:
-		return fmt.Errorf("dwarf: unsupported attribute form %d / class %d", form, cls)
-	}
-	return nil
-}
-
-// PutAttrs writes the attributes for a DIE to symbol 's'.
-//
-// Note that we can (and do) add arbitrary attributes to a DIE, but
-// only the ones actually listed in the Abbrev will be written out.
-func PutAttrs(ctxt Context, s Sym, abbrev int, attr *DWAttr) {
-Outer:
-	for _, f := range abbrevs[abbrev].attr {
-		for ap := attr; ap != nil; ap = ap.Link {
-			if ap.Atr == f.attr {
-				putattr(ctxt, s, abbrev, int(f.form), int(ap.Cls), ap.Value, ap.Data)
-				continue Outer
-			}
-		}
-
-		putattr(ctxt, s, abbrev, int(f.form), 0, 0, nil)
-	}
-}
-
-// HasChildren returns true if 'die' uses an abbrev that supports children.
-func HasChildren(die *DWDie) bool {
-	return abbrevs[die.Abbrev].children != 0
-}
-
-// PutFunc writes a DIE for a function to s.
-// It also writes child DIEs for each variable in vars.
-func PutFunc(ctxt Context, s Sym, name string, external bool, startPC Sym, size int64, vars *Var) {
-	Uleb128put(ctxt, s, DW_ABRV_FUNCTION)
-	putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name)
-	putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_addr, DW_CLS_ADDRESS, 0, startPC)
-	putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_addr, DW_CLS_ADDRESS, size+ctxt.SymValue(startPC), startPC)
-	var ev int64
-	if external {
-		ev = 1
-	}
-	putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_flag, DW_CLS_FLAG, ev, 0)
-	names := make(map[string]bool)
-	for v := vars; v != nil; v = v.Link {
-		if strings.Contains(v.Name, ".autotmp_") {
-			continue
-		}
-		var n string
-		if names[v.Name] {
-			n = fmt.Sprintf("%s#%d", v.Name, len(names))
-		} else {
-			n = v.Name
-		}
-		names[n] = true
-
-		Uleb128put(ctxt, s, int64(v.Abbrev))
-		putattr(ctxt, s, v.Abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n)
-		loc := append(encbuf[:0], DW_OP_call_frame_cfa)
-		if v.Offset != 0 {
-			loc = append(loc, DW_OP_consts)
-			loc = AppendSleb128(loc, int64(v.Offset))
-			loc = append(loc, DW_OP_plus)
-		}
-		putattr(ctxt, s, v.Abbrev, DW_FORM_block1, DW_CLS_BLOCK, int64(len(loc)), loc)
-		putattr(ctxt, s, v.Abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type)
-
-	}
-	Uleb128put(ctxt, s, 0)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/dwarf/dwarf_defs.go b/pkg/bootstrap/src/bootstrap/cmd/internal/dwarf/dwarf_defs.go
deleted file mode 100644
index c5aa952..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/dwarf/dwarf_defs.go
+++ /dev/null
@@ -1,486 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/dwarf/dwarf_defs.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/dwarf/dwarf_defs.go:1
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package dwarf
-
-// Cut, pasted, tr-and-awk'ed from tables in
-// http://dwarfstd.org/doc/Dwarf3.pdf
-
-// Table 18
-const (
-	DW_TAG_array_type               = 0x01
-	DW_TAG_class_type               = 0x02
-	DW_TAG_entry_point              = 0x03
-	DW_TAG_enumeration_type         = 0x04
-	DW_TAG_formal_parameter         = 0x05
-	DW_TAG_imported_declaration     = 0x08
-	DW_TAG_label                    = 0x0a
-	DW_TAG_lexical_block            = 0x0b
-	DW_TAG_member                   = 0x0d
-	DW_TAG_pointer_type             = 0x0f
-	DW_TAG_reference_type           = 0x10
-	DW_TAG_compile_unit             = 0x11
-	DW_TAG_string_type              = 0x12
-	DW_TAG_structure_type           = 0x13
-	DW_TAG_subroutine_type          = 0x15
-	DW_TAG_typedef                  = 0x16
-	DW_TAG_union_type               = 0x17
-	DW_TAG_unspecified_parameters   = 0x18
-	DW_TAG_variant                  = 0x19
-	DW_TAG_common_block             = 0x1a
-	DW_TAG_common_inclusion         = 0x1b
-	DW_TAG_inheritance              = 0x1c
-	DW_TAG_inlined_subroutine       = 0x1d
-	DW_TAG_module                   = 0x1e
-	DW_TAG_ptr_to_member_type       = 0x1f
-	DW_TAG_set_type                 = 0x20
-	DW_TAG_subrange_type            = 0x21
-	DW_TAG_with_stmt                = 0x22
-	DW_TAG_access_declaration       = 0x23
-	DW_TAG_base_type                = 0x24
-	DW_TAG_catch_block              = 0x25
-	DW_TAG_const_type               = 0x26
-	DW_TAG_constant                 = 0x27
-	DW_TAG_enumerator               = 0x28
-	DW_TAG_file_type                = 0x29
-	DW_TAG_friend                   = 0x2a
-	DW_TAG_namelist                 = 0x2b
-	DW_TAG_namelist_item            = 0x2c
-	DW_TAG_packed_type              = 0x2d
-	DW_TAG_subprogram               = 0x2e
-	DW_TAG_template_type_parameter  = 0x2f
-	DW_TAG_template_value_parameter = 0x30
-	DW_TAG_thrown_type              = 0x31
-	DW_TAG_try_block                = 0x32
-	DW_TAG_variant_part             = 0x33
-	DW_TAG_variable                 = 0x34
-	DW_TAG_volatile_type            = 0x35
-	// Dwarf3
-	DW_TAG_dwarf_procedure  = 0x36
-	DW_TAG_restrict_type    = 0x37
-	DW_TAG_interface_type   = 0x38
-	DW_TAG_namespace        = 0x39
-	DW_TAG_imported_module  = 0x3a
-	DW_TAG_unspecified_type = 0x3b
-	DW_TAG_partial_unit     = 0x3c
-	DW_TAG_imported_unit    = 0x3d
-	DW_TAG_condition        = 0x3f
-	DW_TAG_shared_type      = 0x40
-	// Dwarf4
-	DW_TAG_type_unit             = 0x41
-	DW_TAG_rvalue_reference_type = 0x42
-	DW_TAG_template_alias        = 0x43
-
-	// User defined
-	DW_TAG_lo_user = 0x4080
-	DW_TAG_hi_user = 0xffff
-)
-
-// Table 19
-const (
-	DW_CHILDREN_no  = 0x00
-	DW_CHILDREN_yes = 0x01
-)
-
-// Not from the spec, but logically belongs here
-const (
-	DW_CLS_ADDRESS = 0x01 + iota
-	DW_CLS_BLOCK
-	DW_CLS_CONSTANT
-	DW_CLS_FLAG
-	DW_CLS_PTR // lineptr, loclistptr, macptr, rangelistptr
-	DW_CLS_REFERENCE
-	DW_CLS_ADDRLOC
-	DW_CLS_STRING
-)
-
-// Table 20
-const (
-	DW_AT_sibling              = 0x01 // reference
-	DW_AT_location             = 0x02 // block, loclistptr
-	DW_AT_name                 = 0x03 // string
-	DW_AT_ordering             = 0x09 // constant
-	DW_AT_byte_size            = 0x0b // block, constant, reference
-	DW_AT_bit_offset           = 0x0c // block, constant, reference
-	DW_AT_bit_size             = 0x0d // block, constant, reference
-	DW_AT_stmt_list            = 0x10 // lineptr
-	DW_AT_low_pc               = 0x11 // address
-	DW_AT_high_pc              = 0x12 // address
-	DW_AT_language             = 0x13 // constant
-	DW_AT_discr                = 0x15 // reference
-	DW_AT_discr_value          = 0x16 // constant
-	DW_AT_visibility           = 0x17 // constant
-	DW_AT_import               = 0x18 // reference
-	DW_AT_string_length        = 0x19 // block, loclistptr
-	DW_AT_common_reference     = 0x1a // reference
-	DW_AT_comp_dir             = 0x1b // string
-	DW_AT_const_value          = 0x1c // block, constant, string
-	DW_AT_containing_type      = 0x1d // reference
-	DW_AT_default_value        = 0x1e // reference
-	DW_AT_inline               = 0x20 // constant
-	DW_AT_is_optional          = 0x21 // flag
-	DW_AT_lower_bound          = 0x22 // block, constant, reference
-	DW_AT_producer             = 0x25 // string
-	DW_AT_prototyped           = 0x27 // flag
-	DW_AT_return_addr          = 0x2a // block, loclistptr
-	DW_AT_start_scope          = 0x2c // constant
-	DW_AT_bit_stride           = 0x2e // constant
-	DW_AT_upper_bound          = 0x2f // block, constant, reference
-	DW_AT_abstract_origin      = 0x31 // reference
-	DW_AT_accessibility        = 0x32 // constant
-	DW_AT_address_class        = 0x33 // constant
-	DW_AT_artificial           = 0x34 // flag
-	DW_AT_base_types           = 0x35 // reference
-	DW_AT_calling_convention   = 0x36 // constant
-	DW_AT_count                = 0x37 // block, constant, reference
-	DW_AT_data_member_location = 0x38 // block, constant, loclistptr
-	DW_AT_decl_column          = 0x39 // constant
-	DW_AT_decl_file            = 0x3a // constant
-	DW_AT_decl_line            = 0x3b // constant
-	DW_AT_declaration          = 0x3c // flag
-	DW_AT_discr_list           = 0x3d // block
-	DW_AT_encoding             = 0x3e // constant
-	DW_AT_external             = 0x3f // flag
-	DW_AT_frame_base           = 0x40 // block, loclistptr
-	DW_AT_friend               = 0x41 // reference
-	DW_AT_identifier_case      = 0x42 // constant
-	DW_AT_macro_info           = 0x43 // macptr
-	DW_AT_namelist_item        = 0x44 // block
-	DW_AT_priority             = 0x45 // reference
-	DW_AT_segment              = 0x46 // block, loclistptr
-	DW_AT_specification        = 0x47 // reference
-	DW_AT_static_link          = 0x48 // block, loclistptr
-	DW_AT_type                 = 0x49 // reference
-	DW_AT_use_location         = 0x4a // block, loclistptr
-	DW_AT_variable_parameter   = 0x4b // flag
-	DW_AT_virtuality           = 0x4c // constant
-	DW_AT_vtable_elem_location = 0x4d // block, loclistptr
-	// Dwarf3
-	DW_AT_allocated      = 0x4e // block, constant, reference
-	DW_AT_associated     = 0x4f // block, constant, reference
-	DW_AT_data_location  = 0x50 // block
-	DW_AT_byte_stride    = 0x51 // block, constant, reference
-	DW_AT_entry_pc       = 0x52 // address
-	DW_AT_use_UTF8       = 0x53 // flag
-	DW_AT_extension      = 0x54 // reference
-	DW_AT_ranges         = 0x55 // rangelistptr
-	DW_AT_trampoline     = 0x56 // address, flag, reference, string
-	DW_AT_call_column    = 0x57 // constant
-	DW_AT_call_file      = 0x58 // constant
-	DW_AT_call_line      = 0x59 // constant
-	DW_AT_description    = 0x5a // string
-	DW_AT_binary_scale   = 0x5b // constant
-	DW_AT_decimal_scale  = 0x5c // constant
-	DW_AT_small          = 0x5d // reference
-	DW_AT_decimal_sign   = 0x5e // constant
-	DW_AT_digit_count    = 0x5f // constant
-	DW_AT_picture_string = 0x60 // string
-	DW_AT_mutable        = 0x61 // flag
-	DW_AT_threads_scaled = 0x62 // flag
-	DW_AT_explicit       = 0x63 // flag
-	DW_AT_object_pointer = 0x64 // reference
-	DW_AT_endianity      = 0x65 // constant
-	DW_AT_elemental      = 0x66 // flag
-	DW_AT_pure           = 0x67 // flag
-	DW_AT_recursive      = 0x68 // flag
-
-	DW_AT_lo_user = 0x2000 // ---
-	DW_AT_hi_user = 0x3fff // ---
-)
-
-// Table 21
-const (
-	DW_FORM_addr      = 0x01 // address
-	DW_FORM_block2    = 0x03 // block
-	DW_FORM_block4    = 0x04 // block
-	DW_FORM_data2     = 0x05 // constant
-	DW_FORM_data4     = 0x06 // constant, lineptr, loclistptr, macptr, rangelistptr
-	DW_FORM_data8     = 0x07 // constant, lineptr, loclistptr, macptr, rangelistptr
-	DW_FORM_string    = 0x08 // string
-	DW_FORM_block     = 0x09 // block
-	DW_FORM_block1    = 0x0a // block
-	DW_FORM_data1     = 0x0b // constant
-	DW_FORM_flag      = 0x0c // flag
-	DW_FORM_sdata     = 0x0d // constant
-	DW_FORM_strp      = 0x0e // string
-	DW_FORM_udata     = 0x0f // constant
-	DW_FORM_ref_addr  = 0x10 // reference
-	DW_FORM_ref1      = 0x11 // reference
-	DW_FORM_ref2      = 0x12 // reference
-	DW_FORM_ref4      = 0x13 // reference
-	DW_FORM_ref8      = 0x14 // reference
-	DW_FORM_ref_udata = 0x15 // reference
-	DW_FORM_indirect  = 0x16 // (see Section 7.5.3)
-)
-
-// Table 24 (#operands, notes)
-const (
-	DW_OP_addr                = 0x03 // 1 constant address (size target specific)
-	DW_OP_deref               = 0x06 // 0
-	DW_OP_const1u             = 0x08 // 1 1-byte constant
-	DW_OP_const1s             = 0x09 // 1 1-byte constant
-	DW_OP_const2u             = 0x0a // 1 2-byte constant
-	DW_OP_const2s             = 0x0b // 1 2-byte constant
-	DW_OP_const4u             = 0x0c // 1 4-byte constant
-	DW_OP_const4s             = 0x0d // 1 4-byte constant
-	DW_OP_const8u             = 0x0e // 1 8-byte constant
-	DW_OP_const8s             = 0x0f // 1 8-byte constant
-	DW_OP_constu              = 0x10 // 1 ULEB128 constant
-	DW_OP_consts              = 0x11 // 1 SLEB128 constant
-	DW_OP_dup                 = 0x12 // 0
-	DW_OP_drop                = 0x13 // 0
-	DW_OP_over                = 0x14 // 0
-	DW_OP_pick                = 0x15 // 1 1-byte stack index
-	DW_OP_swap                = 0x16 // 0
-	DW_OP_rot                 = 0x17 // 0
-	DW_OP_xderef              = 0x18 // 0
-	DW_OP_abs                 = 0x19 // 0
-	DW_OP_and                 = 0x1a // 0
-	DW_OP_div                 = 0x1b // 0
-	DW_OP_minus               = 0x1c // 0
-	DW_OP_mod                 = 0x1d // 0
-	DW_OP_mul                 = 0x1e // 0
-	DW_OP_neg                 = 0x1f // 0
-	DW_OP_not                 = 0x20 // 0
-	DW_OP_or                  = 0x21 // 0
-	DW_OP_plus                = 0x22 // 0
-	DW_OP_plus_uconst         = 0x23 // 1 ULEB128 addend
-	DW_OP_shl                 = 0x24 // 0
-	DW_OP_shr                 = 0x25 // 0
-	DW_OP_shra                = 0x26 // 0
-	DW_OP_xor                 = 0x27 // 0
-	DW_OP_skip                = 0x2f // 1 signed 2-byte constant
-	DW_OP_bra                 = 0x28 // 1 signed 2-byte constant
-	DW_OP_eq                  = 0x29 // 0
-	DW_OP_ge                  = 0x2a // 0
-	DW_OP_gt                  = 0x2b // 0
-	DW_OP_le                  = 0x2c // 0
-	DW_OP_lt                  = 0x2d // 0
-	DW_OP_ne                  = 0x2e // 0
-	DW_OP_lit0                = 0x30 // 0 ...
-	DW_OP_lit31               = 0x4f // 0 literals 0..31 = (DW_OP_lit0 + literal)
-	DW_OP_reg0                = 0x50 // 0 ..
-	DW_OP_reg31               = 0x6f // 0 reg 0..31 = (DW_OP_reg0 + regnum)
-	DW_OP_breg0               = 0x70 // 1 ...
-	DW_OP_breg31              = 0x8f // 1 SLEB128 offset base register 0..31 = (DW_OP_breg0 + regnum)
-	DW_OP_regx                = 0x90 // 1 ULEB128 register
-	DW_OP_fbreg               = 0x91 // 1 SLEB128 offset
-	DW_OP_bregx               = 0x92 // 2 ULEB128 register followed by SLEB128 offset
-	DW_OP_piece               = 0x93 // 1 ULEB128 size of piece addressed
-	DW_OP_deref_size          = 0x94 // 1 1-byte size of data retrieved
-	DW_OP_xderef_size         = 0x95 // 1 1-byte size of data retrieved
-	DW_OP_nop                 = 0x96 // 0
-	DW_OP_push_object_address = 0x97 // 0
-	DW_OP_call2               = 0x98 // 1 2-byte offset of DIE
-	DW_OP_call4               = 0x99 // 1 4-byte offset of DIE
-	DW_OP_call_ref            = 0x9a // 1 4- or 8-byte offset of DIE
-	DW_OP_form_tls_address    = 0x9b // 0
-	DW_OP_call_frame_cfa      = 0x9c // 0
-	DW_OP_bit_piece           = 0x9d // 2
-	DW_OP_lo_user             = 0xe0
-	DW_OP_hi_user             = 0xff
-)
-
-// Table 25
-const (
-	DW_ATE_address         = 0x01
-	DW_ATE_boolean         = 0x02
-	DW_ATE_complex_float   = 0x03
-	DW_ATE_float           = 0x04
-	DW_ATE_signed          = 0x05
-	DW_ATE_signed_char     = 0x06
-	DW_ATE_unsigned        = 0x07
-	DW_ATE_unsigned_char   = 0x08
-	DW_ATE_imaginary_float = 0x09
-	DW_ATE_packed_decimal  = 0x0a
-	DW_ATE_numeric_string  = 0x0b
-	DW_ATE_edited          = 0x0c
-	DW_ATE_signed_fixed    = 0x0d
-	DW_ATE_unsigned_fixed  = 0x0e
-	DW_ATE_decimal_float   = 0x0f
-	DW_ATE_lo_user         = 0x80
-	DW_ATE_hi_user         = 0xff
-)
-
-// Table 26
-const (
-	DW_DS_unsigned           = 0x01
-	DW_DS_leading_overpunch  = 0x02
-	DW_DS_trailing_overpunch = 0x03
-	DW_DS_leading_separate   = 0x04
-	DW_DS_trailing_separate  = 0x05
-)
-
-// Table 27
-const (
-	DW_END_default = 0x00
-	DW_END_big     = 0x01
-	DW_END_little  = 0x02
-	DW_END_lo_user = 0x40
-	DW_END_hi_user = 0xff
-)
-
-// Table 28
-const (
-	DW_ACCESS_public    = 0x01
-	DW_ACCESS_protected = 0x02
-	DW_ACCESS_private   = 0x03
-)
-
-// Table 29
-const (
-	DW_VIS_local     = 0x01
-	DW_VIS_exported  = 0x02
-	DW_VIS_qualified = 0x03
-)
-
-// Table 30
-const (
-	DW_VIRTUALITY_none         = 0x00
-	DW_VIRTUALITY_virtual      = 0x01
-	DW_VIRTUALITY_pure_virtual = 0x02
-)
-
-// Table 31
-const (
-	DW_LANG_C89         = 0x0001
-	DW_LANG_C           = 0x0002
-	DW_LANG_Ada83       = 0x0003
-	DW_LANG_C_plus_plus = 0x0004
-	DW_LANG_Cobol74     = 0x0005
-	DW_LANG_Cobol85     = 0x0006
-	DW_LANG_Fortran77   = 0x0007
-	DW_LANG_Fortran90   = 0x0008
-	DW_LANG_Pascal83    = 0x0009
-	DW_LANG_Modula2     = 0x000a
-	// Dwarf3
-	DW_LANG_Java           = 0x000b
-	DW_LANG_C99            = 0x000c
-	DW_LANG_Ada95          = 0x000d
-	DW_LANG_Fortran95      = 0x000e
-	DW_LANG_PLI            = 0x000f
-	DW_LANG_ObjC           = 0x0010
-	DW_LANG_ObjC_plus_plus = 0x0011
-	DW_LANG_UPC            = 0x0012
-	DW_LANG_D              = 0x0013
-	// Dwarf4
-	DW_LANG_Python = 0x0014
-	// Dwarf5
-	DW_LANG_Go = 0x0016
-
-	DW_LANG_lo_user = 0x8000
-	DW_LANG_hi_user = 0xffff
-)
-
-// Table 32
-const (
-	DW_ID_case_sensitive   = 0x00
-	DW_ID_up_case          = 0x01
-	DW_ID_down_case        = 0x02
-	DW_ID_case_insensitive = 0x03
-)
-
-// Table 33
-const (
-	DW_CC_normal  = 0x01
-	DW_CC_program = 0x02
-	DW_CC_nocall  = 0x03
-	DW_CC_lo_user = 0x40
-	DW_CC_hi_user = 0xff
-)
-
-// Table 34
-const (
-	DW_INL_not_inlined          = 0x00
-	DW_INL_inlined              = 0x01
-	DW_INL_declared_not_inlined = 0x02
-	DW_INL_declared_inlined     = 0x03
-)
-
-// Table 35
-const (
-	DW_ORD_row_major = 0x00
-	DW_ORD_col_major = 0x01
-)
-
-// Table 36
-const (
-	DW_DSC_label = 0x00
-	DW_DSC_range = 0x01
-)
-
-// Table 37
-const (
-	DW_LNS_copy             = 0x01
-	DW_LNS_advance_pc       = 0x02
-	DW_LNS_advance_line     = 0x03
-	DW_LNS_set_file         = 0x04
-	DW_LNS_set_column       = 0x05
-	DW_LNS_negate_stmt      = 0x06
-	DW_LNS_set_basic_block  = 0x07
-	DW_LNS_const_add_pc     = 0x08
-	DW_LNS_fixed_advance_pc = 0x09
-	// Dwarf3
-	DW_LNS_set_prologue_end   = 0x0a
-	DW_LNS_set_epilogue_begin = 0x0b
-	DW_LNS_set_isa            = 0x0c
-)
-
-// Table 38
-const (
-	DW_LNE_end_sequence = 0x01
-	DW_LNE_set_address  = 0x02
-	DW_LNE_define_file  = 0x03
-	DW_LNE_lo_user      = 0x80
-	DW_LNE_hi_user      = 0xff
-)
-
-// Table 39
-const (
-	DW_MACINFO_define     = 0x01
-	DW_MACINFO_undef      = 0x02
-	DW_MACINFO_start_file = 0x03
-	DW_MACINFO_end_file   = 0x04
-	DW_MACINFO_vendor_ext = 0xff
-)
-
-// Table 40.
-const (
-	// operand,...
-	DW_CFA_nop              = 0x00
-	DW_CFA_set_loc          = 0x01 // address
-	DW_CFA_advance_loc1     = 0x02 // 1-byte delta
-	DW_CFA_advance_loc2     = 0x03 // 2-byte delta
-	DW_CFA_advance_loc4     = 0x04 // 4-byte delta
-	DW_CFA_offset_extended  = 0x05 // ULEB128 register, ULEB128 offset
-	DW_CFA_restore_extended = 0x06 // ULEB128 register
-	DW_CFA_undefined        = 0x07 // ULEB128 register
-	DW_CFA_same_value       = 0x08 // ULEB128 register
-	DW_CFA_register         = 0x09 // ULEB128 register, ULEB128 register
-	DW_CFA_remember_state   = 0x0a
-	DW_CFA_restore_state    = 0x0b
-
-	DW_CFA_def_cfa            = 0x0c // ULEB128 register, ULEB128 offset
-	DW_CFA_def_cfa_register   = 0x0d // ULEB128 register
-	DW_CFA_def_cfa_offset     = 0x0e // ULEB128 offset
-	DW_CFA_def_cfa_expression = 0x0f // BLOCK
-	DW_CFA_expression         = 0x10 // ULEB128 register, BLOCK
-	DW_CFA_offset_extended_sf = 0x11 // ULEB128 register, SLEB128 offset
-	DW_CFA_def_cfa_sf         = 0x12 // ULEB128 register, SLEB128 offset
-	DW_CFA_def_cfa_offset_sf  = 0x13 // SLEB128 offset
-	DW_CFA_val_offset         = 0x14 // ULEB128, ULEB128
-	DW_CFA_val_offset_sf      = 0x15 // ULEB128, SLEB128
-	DW_CFA_val_expression     = 0x16 // ULEB128, BLOCK
-
-	DW_CFA_lo_user = 0x1c
-	DW_CFA_hi_user = 0x3f
-
-	// Opcodes that take an addend operand.
-	DW_CFA_advance_loc = 0x1 << 6 // +delta
-	DW_CFA_offset      = 0x2 << 6 // +register (ULEB128 offset)
-	DW_CFA_restore     = 0x3 << 6 // +register
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/gcprog/gcprog.go b/pkg/bootstrap/src/bootstrap/cmd/internal/gcprog/gcprog.go
deleted file mode 100644
index 3ebf57d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/gcprog/gcprog.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/gcprog/gcprog.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/gcprog/gcprog.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package gcprog implements an encoder for packed GC pointer bitmaps,
-// known as GC programs.
-//
-// Program Format
-//
-// The GC program encodes a sequence of 0 and 1 bits indicating scalar or pointer words in an object.
-// The encoding is a simple Lempel-Ziv program, with codes to emit literal bits and to repeat the
-// last n bits c times.
-//
-// The possible codes are:
-//
-//	00000000: stop
-//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes, least significant bit first
-//	10000000 n c: repeat the previous n bits c times; n, c are varints
-//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
-//
-// The numbers n and c, when they follow a code, are encoded as varints
-// using the same encoding as encoding/binary's Uvarint.
-//
-package gcprog
-
-import (
-	"fmt"
-	"io"
-)
-
-const progMaxLiteral = 127 // maximum n for literal n bit code
-
-// A Writer is an encoder for GC programs.
-//
-// The typical use of a Writer is to call Init, maybe call Debug,
-// make a sequence of Ptr, Advance, Repeat, and Append calls
-// to describe the data type, and then finally call End.
-type Writer struct {
-	writeByte func(byte)
-	index     int64
-	b         [progMaxLiteral]byte
-	nb        int
-	debug     io.Writer
-	debugBuf  []byte
-}
-
-// Init initializes w to write a new GC program
-// by calling writeByte for each byte in the program.
-func (w *Writer) Init(writeByte func(byte)) {
-	w.writeByte = writeByte
-}
-
-// Debug causes the writer to print a debugging trace to out
-// during future calls to methods like Ptr, Advance, and End.
-// It also enables debugging checks during the encoding.
-func (w *Writer) Debug(out io.Writer) {
-	w.debug = out
-}
-
-// BitIndex returns the number of bits written to the bit stream so far.
-func (w *Writer) BitIndex() int64 {
-	return w.index
-}
-
-// byte writes the byte x to the output.
-func (w *Writer) byte(x byte) {
-	if w.debug != nil {
-		w.debugBuf = append(w.debugBuf, x)
-	}
-	w.writeByte(x)
-}
-
-// End marks the end of the program, writing any remaining bytes.
-func (w *Writer) End() {
-	w.flushlit()
-	w.byte(0)
-	if w.debug != nil {
-		index := progbits(w.debugBuf)
-		if index != w.index {
-			println("gcprog: End wrote program for", index, "bits, but current index is", w.index)
-			panic("gcprog: out of sync")
-		}
-	}
-}
-
-// Ptr emits a 1 into the bit stream at the given bit index.
-// that is, it records that the index'th word in the object memory is a pointer.
-// Any bits between the current index and the new index
-// are set to zero, meaning the corresponding words are scalars.
-func (w *Writer) Ptr(index int64) {
-	if index < w.index {
-		println("gcprog: Ptr at index", index, "but current index is", w.index)
-		panic("gcprog: invalid Ptr index")
-	}
-	w.ZeroUntil(index)
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "gcprog: ptr at %d\n", index)
-	}
-	w.lit(1)
-}
-
-// ShouldRepeat reports whether it would be worthwhile to
-// use a Repeat to describe c elements of n bits each,
-// compared to just emitting c copies of the n-bit description.
-func (w *Writer) ShouldRepeat(n, c int64) bool {
-	// Should we lay out the bits directly instead of
-	// encoding them as a repetition? Certainly if count==1,
-	// since there's nothing to repeat, but also if the total
-	// size of the plain pointer bits for the type will fit in
-	// 4 or fewer bytes, since using a repetition will require
-	// flushing the current bits plus at least one byte for
-	// the repeat size and one for the repeat count.
-	return c > 1 && c*n > 4*8
-}
-
-// Repeat emits an instruction to repeat the description
-// of the last n words c times (including the initial description, c+1 times in total).
-func (w *Writer) Repeat(n, c int64) {
-	if n == 0 || c == 0 {
-		return
-	}
-	w.flushlit()
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "gcprog: repeat %d × %d\n", n, c)
-	}
-	if n < 128 {
-		w.byte(0x80 | byte(n))
-	} else {
-		w.byte(0x80)
-		w.varint(n)
-	}
-	w.varint(c)
-	w.index += n * c
-}
-
-// ZeroUntil adds zeros to the bit stream until reaching the given index;
-// that is, it records that the words from the most recent pointer until
-// the index'th word are scalars.
-// ZeroUntil is usually called in preparation for a call to Repeat, Append, or End.
-func (w *Writer) ZeroUntil(index int64) {
-	if index < w.index {
-		println("gcprog: Advance", index, "but index is", w.index)
-		panic("gcprog: invalid Advance index")
-	}
-	skip := (index - w.index)
-	if skip == 0 {
-		return
-	}
-	if skip < 4*8 {
-		if w.debug != nil {
-			fmt.Fprintf(w.debug, "gcprog: advance to %d by literals\n", index)
-		}
-		for i := int64(0); i < skip; i++ {
-			w.lit(0)
-		}
-		return
-	}
-
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "gcprog: advance to %d by repeat\n", index)
-	}
-	w.lit(0)
-	w.flushlit()
-	w.Repeat(1, skip-1)
-}
-
-// Append emits the given GC program into the current output.
-// The caller asserts that the program emits n bits (describes n words),
-// and Append panics if that is not true.
-func (w *Writer) Append(prog []byte, n int64) {
-	w.flushlit()
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "gcprog: append prog for %d ptrs\n", n)
-		fmt.Fprintf(w.debug, "\t")
-	}
-	n1 := progbits(prog)
-	if n1 != n {
-		panic("gcprog: wrong bit count in append")
-	}
-	// The last byte of the prog terminates the program.
-	// Don't emit that, or else our own program will end.
-	for i, x := range prog[:len(prog)-1] {
-		if w.debug != nil {
-			if i > 0 {
-				fmt.Fprintf(w.debug, " ")
-			}
-			fmt.Fprintf(w.debug, "%02x", x)
-		}
-		w.byte(x)
-	}
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "\n")
-	}
-	w.index += n
-}
-
-// progbits returns the length of the bit stream encoded by the program p.
-func progbits(p []byte) int64 {
-	var n int64
-	for len(p) > 0 {
-		x := p[0]
-		p = p[1:]
-		if x == 0 {
-			break
-		}
-		if x&0x80 == 0 {
-			count := x &^ 0x80
-			n += int64(count)
-			p = p[(count+7)/8:]
-			continue
-		}
-		nbit := int64(x &^ 0x80)
-		if nbit == 0 {
-			nbit, p = readvarint(p)
-		}
-		var count int64
-		count, p = readvarint(p)
-		n += nbit * count
-	}
-	if len(p) > 0 {
-		println("gcprog: found end instruction after", n, "ptrs, with", len(p), "bytes remaining")
-		panic("gcprog: extra data at end of program")
-	}
-	return n
-}
-
-// readvarint reads a varint from p, returning the value and the remainder of p.
-func readvarint(p []byte) (int64, []byte) {
-	var v int64
-	var nb uint
-	for {
-		c := p[0]
-		p = p[1:]
-		v |= int64(c&^0x80) << nb
-		nb += 7
-		if c&0x80 == 0 {
-			break
-		}
-	}
-	return v, p
-}
-
-// lit adds a single literal bit to w.
-func (w *Writer) lit(x byte) {
-	if w.nb == progMaxLiteral {
-		w.flushlit()
-	}
-	w.b[w.nb] = x
-	w.nb++
-	w.index++
-}
-
-// varint emits the varint encoding of x.
-func (w *Writer) varint(x int64) {
-	if x < 0 {
-		panic("gcprog: negative varint")
-	}
-	for x >= 0x80 {
-		w.byte(byte(0x80 | x))
-		x >>= 7
-	}
-	w.byte(byte(x))
-}
-
-// flushlit flushes any pending literal bits.
-func (w *Writer) flushlit() {
-	if w.nb == 0 {
-		return
-	}
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "gcprog: flush %d literals\n", w.nb)
-		fmt.Fprintf(w.debug, "\t%v\n", w.b[:w.nb])
-		fmt.Fprintf(w.debug, "\t%02x", byte(w.nb))
-	}
-	w.byte(byte(w.nb))
-	var bits uint8
-	for i := 0; i < w.nb; i++ {
-		bits |= w.b[i] << uint(i%8)
-		if (i+1)%8 == 0 {
-			if w.debug != nil {
-				fmt.Fprintf(w.debug, " %02x", bits)
-			}
-			w.byte(bits)
-			bits = 0
-		}
-	}
-	if w.nb%8 != 0 {
-		if w.debug != nil {
-			fmt.Fprintf(w.debug, " %02x", bits)
-		}
-		w.byte(bits)
-	}
-	if w.debug != nil {
-		fmt.Fprintf(w.debug, "\n")
-	}
-	w.nb = 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/addrtype_string.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/addrtype_string.go
deleted file mode 100644
index a6cda3c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/addrtype_string.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/addrtype_string.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/addrtype_string.go:1
-// Code generated by "stringer -type AddrType cmd/internal/obj"; DO NOT EDIT
-
-package obj
-
-import "fmt"
-
-const (
-	_AddrType_name_0 = "TYPE_NONE"
-	_AddrType_name_1 = "TYPE_BRANCHTYPE_TEXTSIZETYPE_MEMTYPE_CONSTTYPE_FCONSTTYPE_SCONSTTYPE_REGTYPE_ADDRTYPE_SHIFTTYPE_REGREGTYPE_REGREG2TYPE_INDIRTYPE_REGLIST"
-)
-
-var (
-	_AddrType_index_0 = [...]uint8{0, 9}
-	_AddrType_index_1 = [...]uint8{0, 11, 24, 32, 42, 53, 64, 72, 81, 91, 102, 114, 124, 136}
-)
-
-func (i AddrType) String() string {
-	switch {
-	case i == 0:
-		return _AddrType_name_0
-	case 6 <= i && i <= 18:
-		i -= 6
-		return _AddrType_name_1[_AddrType_index_1[i]:_AddrType_index_1[i+1]]
-	default:
-		return fmt.Sprintf("AddrType(%d)", i)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/a.out.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/a.out.go
deleted file mode 100644
index e2ada77..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/a.out.go
+++ /dev/null
@@ -1,341 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/a.out.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/a.out.go:1
-// Inferno utils/5c/5.out.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5c/5.out.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import "bootstrap/cmd/internal/obj"
-
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p arm
-
-const (
-	NSNAME = 8
-	NSYM   = 50
-	NREG   = 16
-)
-
-/* -1 disables use of REGARG */
-const (
-	REGARG = -1
-)
-
-const (
-	REG_R0 = obj.RBaseARM + iota // must be 16-aligned
-	REG_R1
-	REG_R2
-	REG_R3
-	REG_R4
-	REG_R5
-	REG_R6
-	REG_R7
-	REG_R8
-	REG_R9
-	REG_R10
-	REG_R11
-	REG_R12
-	REG_R13
-	REG_R14
-	REG_R15
-
-	REG_F0 // must be 16-aligned
-	REG_F1
-	REG_F2
-	REG_F3
-	REG_F4
-	REG_F5
-	REG_F6
-	REG_F7
-	REG_F8
-	REG_F9
-	REG_F10
-	REG_F11
-	REG_F12
-	REG_F13
-	REG_F14
-	REG_F15
-
-	REG_FPSR // must be 2-aligned
-	REG_FPCR
-
-	REG_CPSR // must be 2-aligned
-	REG_SPSR
-
-	MAXREG
-	REGRET = REG_R0
-	/* compiler allocates R1 up as temps */
-	/* compiler allocates register variables R3 up */
-	/* compiler allocates external registers R10 down */
-	REGEXT = REG_R10
-	/* these two registers are declared in runtime.h */
-	REGG = REGEXT - 0
-	REGM = REGEXT - 1
-
-	REGCTXT = REG_R7
-	REGTMP  = REG_R11
-	REGSP   = REG_R13
-	REGLINK = REG_R14
-	REGPC   = REG_R15
-
-	NFREG = 16
-	/* compiler allocates register variables F0 up */
-	/* compiler allocates external registers F7 down */
-	FREGRET = REG_F0
-	FREGEXT = REG_F7
-	FREGTMP = REG_F15
-)
-
-const (
-	C_NONE = iota
-	C_REG
-	C_REGREG
-	C_REGREG2
-	C_REGLIST
-	C_SHIFT
-	C_FREG
-	C_PSR
-	C_FCR
-
-	C_RCON /* 0xff rotated */
-	C_NCON /* ~RCON */
-	C_SCON /* 0xffff */
-	C_LCON
-	C_LCONADDR
-	C_ZFCON
-	C_SFCON
-	C_LFCON
-
-	C_RACON
-	C_LACON
-
-	C_SBRA
-	C_LBRA
-
-	C_HAUTO  /* halfword insn offset (-0xff to 0xff) */
-	C_FAUTO  /* float insn offset (0 to 0x3fc, word aligned) */
-	C_HFAUTO /* both H and F */
-	C_SAUTO  /* -0xfff to 0xfff */
-	C_LAUTO
-
-	C_HOREG
-	C_FOREG
-	C_HFOREG
-	C_SOREG
-	C_ROREG
-	C_SROREG /* both nil and R */
-	C_LOREG
-
-	C_PC
-	C_SP
-	C_HREG
-
-	C_ADDR /* reference to relocatable address */
-
-	// TLS "var" in local exec mode: will become a constant offset from
-	// thread local base that is ultimately chosen by the program linker.
-	C_TLS_LE
-
-	// TLS "var" in initial exec mode: will become a memory address (chosen
-	// by the program linker) that the dynamic linker will fill with the
-	// offset from the thread local base.
-	C_TLS_IE
-
-	C_TEXTSIZE
-
-	C_GOK
-
-	C_NCLASS /* must be the last */
-)
-
-const (
-	AAND = obj.ABaseARM + obj.A_ARCHSPECIFIC + iota
-	AEOR
-	ASUB
-	ARSB
-	AADD
-	AADC
-	ASBC
-	ARSC
-	ATST
-	ATEQ
-	ACMP
-	ACMN
-	AORR
-	ABIC
-
-	AMVN
-
-	/*
-	 * Do not reorder or fragment the conditional branch
-	 * opcodes, or the predication code will break
-	 */
-	ABEQ
-	ABNE
-	ABCS
-	ABHS
-	ABCC
-	ABLO
-	ABMI
-	ABPL
-	ABVS
-	ABVC
-	ABHI
-	ABLS
-	ABGE
-	ABLT
-	ABGT
-	ABLE
-
-	AMOVWD
-	AMOVWF
-	AMOVDW
-	AMOVFW
-	AMOVFD
-	AMOVDF
-	AMOVF
-	AMOVD
-
-	ACMPF
-	ACMPD
-	AADDF
-	AADDD
-	ASUBF
-	ASUBD
-	AMULF
-	AMULD
-	ADIVF
-	ADIVD
-	ASQRTF
-	ASQRTD
-	AABSF
-	AABSD
-	ANEGF
-	ANEGD
-
-	ASRL
-	ASRA
-	ASLL
-	AMULU
-	ADIVU
-	AMUL
-	ADIV
-	AMOD
-	AMODU
-
-	AMOVB
-	AMOVBS
-	AMOVBU
-	AMOVH
-	AMOVHS
-	AMOVHU
-	AMOVW
-	AMOVM
-	ASWPBU
-	ASWPW
-
-	ARFE
-	ASWI
-	AMULA
-
-	AWORD
-
-	AMULL
-	AMULAL
-	AMULLU
-	AMULALU
-
-	ABX
-	ABXRET
-	ADWORD
-
-	ALDREX
-	ASTREX
-	ALDREXD
-	ASTREXD
-
-	APLD
-
-	ACLZ
-
-	AMULWT
-	AMULWB
-	AMULAWT
-	AMULAWB
-
-	ADATABUNDLE
-	ADATABUNDLEEND
-
-	AMRC // MRC/MCR
-
-	ALAST
-
-	// aliases
-	AB  = obj.AJMP
-	ABL = obj.ACALL
-)
-
-/* scond byte */
-const (
-	C_SCOND = (1 << 4) - 1
-	C_SBIT  = 1 << 4
-	C_PBIT  = 1 << 5
-	C_WBIT  = 1 << 6
-	C_FBIT  = 1 << 7 /* psr flags-only */
-	C_UBIT  = 1 << 7 /* up bit, unsigned bit */
-
-	// These constants are the ARM condition codes encodings,
-	// XORed with 14 so that C_SCOND_NONE has value 0,
-	// so that a zeroed Prog.scond means "always execute".
-	C_SCOND_XOR = 14
-
-	C_SCOND_EQ   = 0 ^ C_SCOND_XOR
-	C_SCOND_NE   = 1 ^ C_SCOND_XOR
-	C_SCOND_HS   = 2 ^ C_SCOND_XOR
-	C_SCOND_LO   = 3 ^ C_SCOND_XOR
-	C_SCOND_MI   = 4 ^ C_SCOND_XOR
-	C_SCOND_PL   = 5 ^ C_SCOND_XOR
-	C_SCOND_VS   = 6 ^ C_SCOND_XOR
-	C_SCOND_VC   = 7 ^ C_SCOND_XOR
-	C_SCOND_HI   = 8 ^ C_SCOND_XOR
-	C_SCOND_LS   = 9 ^ C_SCOND_XOR
-	C_SCOND_GE   = 10 ^ C_SCOND_XOR
-	C_SCOND_LT   = 11 ^ C_SCOND_XOR
-	C_SCOND_GT   = 12 ^ C_SCOND_XOR
-	C_SCOND_LE   = 13 ^ C_SCOND_XOR
-	C_SCOND_NONE = 14 ^ C_SCOND_XOR
-	C_SCOND_NV   = 15 ^ C_SCOND_XOR
-
-	/* D_SHIFT type */
-	SHIFT_LL = 0 << 5
-	SHIFT_LR = 1 << 5
-	SHIFT_AR = 2 << 5
-	SHIFT_RR = 3 << 5
-)
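The C_SCOND_XOR comment in the deleted a.out.go above notes that the ARM condition-code encodings are stored XORed with 14 so that a zeroed Prog.scond means "always execute", and the deleted asm5.go below XORs the value back when shifting it into the instruction's condition field (bits 31:28). A small sketch of that round trip, assuming the constants defined in the deleted file:

	// Stored form: hardware condition code ^ 14, so AL (0b1110) stores as 0.
	scond := uint32(C_SCOND_EQ)           // stored value: 0 ^ 14 = 14
	hw := (scond & C_SCOND) ^ C_SCOND_XOR // recover the hardware code: 0 (EQ)
	insn := hw << 28                      // condition field occupies instruction bits 31:28
	_ = insn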
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/anames.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/anames.go
deleted file mode 100644
index c1d5486..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/anames.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/anames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/anames.go:1
-// Generated by stringer -i a.out.go -o anames.go -p arm
-// Do not edit.
-
-package arm
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-	obj.A_ARCHSPECIFIC: "AND",
-	"EOR",
-	"SUB",
-	"RSB",
-	"ADD",
-	"ADC",
-	"SBC",
-	"RSC",
-	"TST",
-	"TEQ",
-	"CMP",
-	"CMN",
-	"ORR",
-	"BIC",
-	"MVN",
-	"BEQ",
-	"BNE",
-	"BCS",
-	"BHS",
-	"BCC",
-	"BLO",
-	"BMI",
-	"BPL",
-	"BVS",
-	"BVC",
-	"BHI",
-	"BLS",
-	"BGE",
-	"BLT",
-	"BGT",
-	"BLE",
-	"MOVWD",
-	"MOVWF",
-	"MOVDW",
-	"MOVFW",
-	"MOVFD",
-	"MOVDF",
-	"MOVF",
-	"MOVD",
-	"CMPF",
-	"CMPD",
-	"ADDF",
-	"ADDD",
-	"SUBF",
-	"SUBD",
-	"MULF",
-	"MULD",
-	"DIVF",
-	"DIVD",
-	"SQRTF",
-	"SQRTD",
-	"ABSF",
-	"ABSD",
-	"NEGF",
-	"NEGD",
-	"SRL",
-	"SRA",
-	"SLL",
-	"MULU",
-	"DIVU",
-	"MUL",
-	"DIV",
-	"MOD",
-	"MODU",
-	"MOVB",
-	"MOVBS",
-	"MOVBU",
-	"MOVH",
-	"MOVHS",
-	"MOVHU",
-	"MOVW",
-	"MOVM",
-	"SWPBU",
-	"SWPW",
-	"RFE",
-	"SWI",
-	"MULA",
-	"WORD",
-	"MULL",
-	"MULAL",
-	"MULLU",
-	"MULALU",
-	"BX",
-	"BXRET",
-	"DWORD",
-	"LDREX",
-	"STREX",
-	"LDREXD",
-	"STREXD",
-	"PLD",
-	"CLZ",
-	"MULWT",
-	"MULWB",
-	"MULAWT",
-	"MULAWB",
-	"DATABUNDLE",
-	"DATABUNDLEEND",
-	"MRC",
-	"LAST",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/anames5.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/anames5.go
deleted file mode 100644
index 00f6cdb..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/anames5.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/anames5.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/anames5.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm
-
-var cnames5 = []string{
-	"NONE",
-	"REG",
-	"REGREG",
-	"REGREG2",
-	"REGLIST",
-	"SHIFT",
-	"FREG",
-	"PSR",
-	"FCR",
-	"RCON",
-	"NCON",
-	"SCON",
-	"LCON",
-	"LCONADDR",
-	"ZFCON",
-	"SFCON",
-	"LFCON",
-	"RACON",
-	"LACON",
-	"SBRA",
-	"LBRA",
-	"HAUTO",
-	"FAUTO",
-	"HFAUTO",
-	"SAUTO",
-	"LAUTO",
-	"HOREG",
-	"FOREG",
-	"HFOREG",
-	"SOREG",
-	"ROREG",
-	"SROREG",
-	"LOREG",
-	"PC",
-	"SP",
-	"HREG",
-	"ADDR",
-	"C_TLS_LE",
-	"C_TLS_IE",
-	"TEXTSIZE",
-	"GOK",
-	"NCLASS",
-	"SCOND = (1<<4)-1",
-	"SBIT = 1<<4",
-	"PBIT = 1<<5",
-	"WBIT = 1<<6",
-	"FBIT = 1<<7",
-	"UBIT = 1<<7",
-	"SCOND_XOR = 14",
-	"SCOND_EQ = 0 ^ C_SCOND_XOR",
-	"SCOND_NE = 1 ^ C_SCOND_XOR",
-	"SCOND_HS = 2 ^ C_SCOND_XOR",
-	"SCOND_LO = 3 ^ C_SCOND_XOR",
-	"SCOND_MI = 4 ^ C_SCOND_XOR",
-	"SCOND_PL = 5 ^ C_SCOND_XOR",
-	"SCOND_VS = 6 ^ C_SCOND_XOR",
-	"SCOND_VC = 7 ^ C_SCOND_XOR",
-	"SCOND_HI = 8 ^ C_SCOND_XOR",
-	"SCOND_LS = 9 ^ C_SCOND_XOR",
-	"SCOND_GE = 10 ^ C_SCOND_XOR",
-	"SCOND_LT = 11 ^ C_SCOND_XOR",
-	"SCOND_GT = 12 ^ C_SCOND_XOR",
-	"SCOND_LE = 13 ^ C_SCOND_XOR",
-	"SCOND_NONE = 14 ^ C_SCOND_XOR",
-	"SCOND_NV = 15 ^ C_SCOND_XOR",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/asm5.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/asm5.go
deleted file mode 100644
index 6a48be2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/asm5.go
+++ /dev/null
@@ -1,2848 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/asm5.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/asm5.go:1
-// Inferno utils/5l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"log"
-	"math"
-	"sort"
-)
-
-type Optab struct {
-	as       obj.As
-	a1       uint8
-	a2       int8
-	a3       uint8
-	type_    uint8
-	size     int8
-	param    int16
-	flag     int8
-	pcrelsiz uint8
-}
-
-type Opcross [32][2][32]uint8
-
-const (
-	LFROM  = 1 << 0
-	LTO    = 1 << 1
-	LPOOL  = 1 << 2
-	LPCREL = 1 << 3
-)
-
-var optab = []Optab{
-	/* struct Optab:
-	OPCODE,	from, prog->reg, to,		 type,size,param,flag */
-	{obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0},
-	{AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0},
-	{AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0},
-	{AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
-	{AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
-	{AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
-	{ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0},
-	{AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0},
-	{AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
-	{AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
-	{ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0},
-	{AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0},
-	{AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0},
-	{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
-	{ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0},
-	{ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
-	{ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // prediction hinted form, hint ignored
-
-	{AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0},
-	{ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
-	{ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
-	{ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0},
-	{ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0},
-	{ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0},
-	{ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0},
-	{ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0},
-	{ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0},
-	{ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
-	{ASWI, C_NONE, C_NONE, C_LOREG, 10, 4, 0, 0, 0},
-	{ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_TLS_LE, 103, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_TLS_IE, 104, 4, 0, 0, 0},
-	{AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0},
-	{AMOVW, C_LCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
-	{AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4},
-	{AADD, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0},
-	{AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0},
-	{AMVN, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0},
-	{ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0},
-	{AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0},
-	{AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0},
-	{AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0},
-	{ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0},
-	{AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
-	{AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
-	{AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
-	{AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0},
-	{ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0},
-	{ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0},
-	{AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0},
-	{AMULA, C_REG, C_REG, C_REGREG2, 17, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
-	{AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
-	{AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVBU, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
-	{AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
-	{AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVBS, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
-	{AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
-	{AMOVW, C_TLS_LE, C_NONE, C_REG, 101, 4, 0, LFROM, 0},
-	{AMOVW, C_TLS_IE, C_NONE, C_REG, 102, 8, 0, LFROM, 0},
-	{AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0},
-	{AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0},
-	{AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4},
-	{AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0},
-	{AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0},
-	{AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4},
-	{AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0},
-	{AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0},
-	{AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0},
-	{AMOVM, C_REGLIST, C_NONE, C_SOREG, 38, 4, 0, 0, 0},
-	{AMOVM, C_SOREG, C_NONE, C_REGLIST, 39, 4, 0, 0, 0},
-	{ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0},
-	{ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0},
-	{AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0},
-	{AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0},
-	{AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 0, 0},
-	{AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0},
-	{AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0},
-	{AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0},
-	{AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 12, REGSP, LFROM, 0},
-	{AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0},
-	{AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4},
-	{AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4},
-	{AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AADDF, C_FREG, C_REG, C_FREG, 54, 4, 0, 0, 0},
-	{AMOVF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0},
-	{AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0},
-	{AMOVW, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0},
-	{AMOVBU, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0},
-	{AMOVB, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0},
-	{AMOVBS, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
-	{AMOVBS, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
-	{AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
-	{AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
-	{AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
-	{AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
-	{AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
-	{AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
-	{AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
-	{AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
-	{AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
-	{AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
-	{AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
-	{AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
-	{AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
-	{AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
-	{AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
-	{AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
-	{AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
-	{AMOVHS, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
-	{AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
-	{AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
-	{AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
-	{AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
-	{AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
-	{AMOVB, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
-	{AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
-	{AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
-	{AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
-	{AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
-	{AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
-	{AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
-	{AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
-	{AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
-	{AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
-	{AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
-	{AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
-	{AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
-	{AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
-	{ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0},
-	{ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0},
-	{AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0},
-	{AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0},
-	{ACMPF, C_FREG, C_REG, C_NONE, 82, 8, 0, 0, 0},
-	{ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0},
-	{AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0},
-	{AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0},
-	{AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0},
-	{AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0},
-	{AMOVW, C_FREG, C_NONE, C_REG, 89, 4, 0, 0, 0},
-	{ATST, C_REG, C_NONE, C_NONE, 90, 4, 0, 0, 0},
-	{ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0},
-	{ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0},
-	{APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0},
-	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0},
-	{ACLZ, C_REG, C_NONE, C_REG, 97, 4, 0, 0, 0},
-	{AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0},
-	{AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0},
-	{obj.AUSEFIELD, C_ADDR, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0},
-	{obj.AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0},
-	{obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL
-	{obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL
-
-	{ADATABUNDLE, C_NONE, C_NONE, C_NONE, 100, 4, 0, 0, 0},
-	{ADATABUNDLEEND, C_NONE, C_NONE, C_NONE, 100, 0, 0, 0, 0},
-	{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
-}
-
-var pool struct {
-	start uint32
-	size  uint32
-	extra uint32
-}
-
-var oprange [ALAST & obj.AMask][]Optab
-
-var xcmp [C_GOK + 1][C_GOK + 1]bool
-
-var deferreturn *obj.LSym
-
-// Note about encoding: Prog.scond holds the condition encoding,
-// but XOR'ed with C_SCOND_XOR, so that C_SCOND_NONE == 0.
-// The code that shifts the value << 28 has the responsibility
-// for XORing with C_SCOND_XOR too.
-
-// asmoutnacl assembles the instruction p. It replaces asmout for NaCl.
-// It returns the total number of bytes put in out, and it can change
-// p->pc if extra padding is necessary.
-// In rare cases, asmoutnacl might split p into two instructions.
-// origPC is the PC for this Prog (no padding is taken into account).
-func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
-	size := int(o.size)
-
-	// instruction specific
-	switch p.As {
-	default:
-		if out != nil {
-			asmout(ctxt, p, o, out)
-		}
-
-	case ADATABUNDLE, // align to 16-byte boundary
-		ADATABUNDLEEND: // zero width instruction, just to align next instruction to 16-byte boundary
-		p.Pc = (p.Pc + 15) &^ 15
-
-		if out != nil {
-			asmout(ctxt, p, o, out)
-		}
-
-	case obj.AUNDEF,
-		APLD:
-		size = 4
-		if out != nil {
-			switch p.As {
-			case obj.AUNDEF:
-				out[0] = 0xe7fedef0 // NACL_INSTR_ARM_ABORT_NOW (UDF #0xEDE0)
-
-			case APLD:
-				out[0] = 0xe1a01001 // (MOVW R1, R1)
-			}
-		}
-
-	case AB, ABL:
-		if p.To.Type != obj.TYPE_MEM {
-			if out != nil {
-				asmout(ctxt, p, o, out)
-			}
-		} else {
-			if p.To.Offset != 0 || size != 4 || p.To.Reg > REG_R15 || p.To.Reg < REG_R0 {
-				ctxt.Diag("unsupported instruction: %v", p)
-			}
-			if p.Pc&15 == 12 {
-				p.Pc += 4
-			}
-			if out != nil {
-				out[0] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03c0013f | (uint32(p.To.Reg)&15)<<12 | (uint32(p.To.Reg)&15)<<16 // BIC $0xc000000f, Rx
-				if p.As == AB {
-					out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff10 | (uint32(p.To.Reg)&15)<<0 // BX Rx
-				} else { // ABL
-					out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff30 | (uint32(p.To.Reg)&15)<<0 // BLX Rx
-				}
-			}
-
-			size = 8
-		}
-
-		// align the last instruction (the actual BL) to the last instruction in a bundle
-		if p.As == ABL {
-			if deferreturn == nil {
-				deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
-			}
-			if p.To.Sym == deferreturn {
-				p.Pc = ((int64(origPC) + 15) &^ 15) + 16 - int64(size)
-			} else {
-				p.Pc += (16 - ((p.Pc + int64(size)) & 15)) & 15
-			}
-		}
-
-	case ALDREX,
-		ALDREXD,
-		AMOVB,
-		AMOVBS,
-		AMOVBU,
-		AMOVD,
-		AMOVF,
-		AMOVH,
-		AMOVHS,
-		AMOVHU,
-		AMOVM,
-		AMOVW,
-		ASTREX,
-		ASTREXD:
-		if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 && p.From.Reg == REG_R13 { // MOVW.W x(R13), PC
-			if out != nil {
-				asmout(ctxt, p, o, out)
-			}
-			if size == 4 {
-				if out != nil {
-					// Note: 5c and 5g reg.c know that DIV/MOD smashes R12
-					// so that this return instruction expansion is valid.
-					out[0] = out[0] &^ 0x3000                                         // change PC to R12
-					out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
-					out[2] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff1c // BX R12
-				}
-
-				size += 8
-				if (p.Pc+int64(size))&15 == 4 {
-					p.Pc += 4
-				}
-				break
-			} else {
-				// if the instruction used more than 4 bytes, then it must have used a very large
-				// offset to update R13, so we need to additionally mask R13.
-				if out != nil {
-					out[size/4-1] &^= 0x3000                                                 // change PC to R12
-					out[size/4] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03cdd103   // BIC $0xc0000000, R13
-					out[size/4+1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
-					out[size/4+2] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff1c // BX R12
-				}
-
-				// p->pc+size is only ok at 4 or 12 mod 16.
-				if (p.Pc+int64(size))%8 == 0 {
-					p.Pc += 4
-				}
-				size += 12
-				break
-			}
-		}
-
-		if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 {
-			ctxt.Diag("unsupported instruction (move to another register and use indirect jump instead): %v", p)
-		}
-
-		if p.To.Type == obj.TYPE_MEM && p.To.Reg == REG_R13 && (p.Scond&C_WBIT != 0) && size > 4 {
-			// function prolog with very large frame size: MOVW.W R14,-100004(R13)
-			// split it into two instructions:
-			// 	ADD $-100004, R13
-			// 	MOVW R14, 0(R13)
-			q := ctxt.NewProg()
-
-			p.Scond &^= C_WBIT
-			*q = *p
-			a := &p.To
-			var a2 *obj.Addr
-			if p.To.Type == obj.TYPE_MEM {
-				a2 = &q.To
-			} else {
-				a2 = &q.From
-			}
-			nocache(q)
-			nocache(p)
-
-			// insert q after p
-			q.Link = p.Link
-
-			p.Link = q
-			q.Pcond = nil
-
-			// make p into ADD $X, R13
-			p.As = AADD
-
-			p.From = *a
-			p.From.Reg = 0
-			p.From.Type = obj.TYPE_CONST
-			p.To = obj.Addr{}
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_R13
-
-			// make q into p but load/store from 0(R13)
-			q.Spadj = 0
-
-			*a2 = obj.Addr{}
-			a2.Type = obj.TYPE_MEM
-			a2.Reg = REG_R13
-			a2.Sym = nil
-			a2.Offset = 0
-			size = int(oplook(ctxt, p).size)
-			break
-		}
-
-		if (p.To.Type == obj.TYPE_MEM && p.To.Reg != REG_R9) || // MOVW Rx, X(Ry), y != 9
-			(p.From.Type == obj.TYPE_MEM && p.From.Reg != REG_R9) { // MOVW X(Rx), Ry, x != 9
-			var a *obj.Addr
-			if p.To.Type == obj.TYPE_MEM {
-				a = &p.To
-			} else {
-				a = &p.From
-			}
-			reg := int(a.Reg)
-			if size == 4 {
-				// if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
-				if reg == 0 {
-					if out != nil {
-						asmout(ctxt, p, o, out)
-					}
-				} else {
-					if out != nil {
-						out[0] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03c00103 | (uint32(reg)&15)<<16 | (uint32(reg)&15)<<12 // BIC $0xc0000000, Rx
-					}
-					if p.Pc&15 == 12 {
-						p.Pc += 4
-					}
-					size += 4
-					if out != nil {
-						asmout(ctxt, p, o, out[1:])
-					}
-				}
-
-				break
-			} else {
-				// if a load/store instruction takes more than 1 word to implement, then
-				// we need to separate the instruction into two:
-				// 1. explicitly load the address into R11.
-				// 2. load/store from R11.
-				// This won't handle .W/.P, so we should reject such code.
-				if p.Scond&(C_PBIT|C_WBIT) != 0 {
-					ctxt.Diag("unsupported instruction (.P/.W): %v", p)
-				}
-				q := ctxt.NewProg()
-				*q = *p
-				var a2 *obj.Addr
-				if p.To.Type == obj.TYPE_MEM {
-					a2 = &q.To
-				} else {
-					a2 = &q.From
-				}
-				nocache(q)
-				nocache(p)
-
-				// insert q after p
-				q.Link = p.Link
-
-				p.Link = q
-				q.Pcond = nil
-
-				// make p into MOVW $X(R), R11
-				p.As = AMOVW
-
-				p.From = *a
-				p.From.Type = obj.TYPE_ADDR
-				p.To = obj.Addr{}
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REG_R11
-
-				// make q into p but load/store from 0(R11)
-				*a2 = obj.Addr{}
-
-				a2.Type = obj.TYPE_MEM
-				a2.Reg = REG_R11
-				a2.Sym = nil
-				a2.Offset = 0
-				size = int(oplook(ctxt, p).size)
-				break
-			}
-		} else if out != nil {
-			asmout(ctxt, p, o, out)
-		}
-	}
-
-	// destination register specific
-	if p.To.Type == obj.TYPE_REG {
-		switch p.To.Reg {
-		case REG_R9:
-			ctxt.Diag("invalid instruction, cannot write to R9: %v", p)
-
-		case REG_R13:
-			if out != nil {
-				out[size/4] = 0xe3cdd103 // BIC $0xc0000000, R13
-			}
-			if (p.Pc+int64(size))&15 == 0 {
-				p.Pc += 4
-			}
-			size += 4
-		}
-	}
-
-	return size
-}
-
-func span5(ctxt *obj.Link, cursym *obj.LSym) {
-	var p *obj.Prog
-	var op *obj.Prog
-
-	p = cursym.Text
-	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
-		return
-	}
-
-	if oprange[AAND&obj.AMask] == nil {
-		buildop(ctxt)
-	}
-
-	ctxt.Cursym = cursym
-
-	ctxt.Autosize = int32(p.To.Offset + 4)
-	c := int32(0)
-
-	op = p
-	p = p.Link
-	var i int
-	var m int
-	var o *Optab
-	for ; p != nil || ctxt.Blitrl != nil; op, p = p, p.Link {
-		if p == nil {
-			if checkpool(ctxt, op, 0) {
-				p = op
-				continue
-			}
-
-			// can't happen: blitrl is not nil, but checkpool didn't flushpool
-			ctxt.Diag("internal inconsistency")
-
-			break
-		}
-
-		ctxt.Curp = p
-		p.Pc = int64(c)
-		o = oplook(ctxt, p)
-		if ctxt.Headtype != obj.Hnacl {
-			m = int(o.size)
-		} else {
-			m = asmoutnacl(ctxt, c, p, o, nil)
-			c = int32(p.Pc)     // asmoutnacl might change pc for alignment
-			o = oplook(ctxt, p) // asmoutnacl might change p in rare cases
-		}
-
-		if m%4 != 0 || p.Pc%4 != 0 {
-			ctxt.Diag("!pc invalid: %v size=%d", p, m)
-		}
-
-		// must check literal pool here in case p generates many instructions
-		if ctxt.Blitrl != nil {
-			i = m
-			if checkpool(ctxt, op, i) {
-				p = op
-				continue
-			}
-		}
-
-		if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP && p.As != obj.AUSEFIELD) {
-			ctxt.Diag("zero-width instruction\n%v", p)
-			continue
-		}
-
-		switch o.flag & (LFROM | LTO | LPOOL) {
-		case LFROM:
-			addpool(ctxt, p, &p.From)
-
-		case LTO:
-			addpool(ctxt, p, &p.To)
-
-		case LPOOL:
-			if p.Scond&C_SCOND == C_SCOND_NONE {
-				flushpool(ctxt, p, 0, 0)
-			}
-		}
-
-		if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE {
-			flushpool(ctxt, p, 0, 0)
-		}
-		c += int32(m)
-	}
-
-	cursym.Size = int64(c)
-
-	/*
-	 * if any procedure is large enough to
-	 * generate a large SBRA branch, then
-	 * generate extra passes putting branches
-	 * around jmps to fix. this is rare.
-	 */
-	times := 0
-
-	var bflag int
-	var opc int32
-	var out [6 + 3]uint32
-	for {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f span1\n", obj.Cputime())
-		}
-		bflag = 0
-		c = 0
-		times++
-		cursym.Text.Pc = 0 // force re-layout the code.
-		for p = cursym.Text; p != nil; p = p.Link {
-			ctxt.Curp = p
-			o = oplook(ctxt, p)
-			if int64(c) > p.Pc {
-				p.Pc = int64(c)
-			}
-
-			/* very large branches
-			if(o->type == 6 && p->pcond) {
-				otxt = p->pcond->pc - c;
-				if(otxt < 0)
-					otxt = -otxt;
-				if(otxt >= (1L<<17) - 10) {
-					q = emallocz(sizeof(Prog));
-					q->link = p->link;
-					p->link = q;
-					q->as = AB;
-					q->to.type = TYPE_BRANCH;
-					q->pcond = p->pcond;
-					p->pcond = q;
-					q = emallocz(sizeof(Prog));
-					q->link = p->link;
-					p->link = q;
-					q->as = AB;
-					q->to.type = TYPE_BRANCH;
-					q->pcond = q->link->link;
-					bflag = 1;
-				}
-			}
-			*/
-			opc = int32(p.Pc)
-
-			if ctxt.Headtype != obj.Hnacl {
-				m = int(o.size)
-			} else {
-				m = asmoutnacl(ctxt, c, p, o, nil)
-			}
-			if p.Pc != int64(opc) {
-				bflag = 1
-			}
-
-			//print("%v pc changed %d to %d in iter. %d\n", p, opc, (int32)p->pc, times);
-			c = int32(p.Pc + int64(m))
-
-			if m%4 != 0 || p.Pc%4 != 0 {
-				ctxt.Diag("pc invalid: %v size=%d", p, m)
-			}
-
-			if m/4 > len(out) {
-				ctxt.Diag("instruction size too large: %d > %d", m/4, len(out))
-			}
-			if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP && p.As != obj.AUSEFIELD) {
-				if p.As == obj.ATEXT {
-					ctxt.Autosize = int32(p.To.Offset + 4)
-					continue
-				}
-
-				ctxt.Diag("zero-width instruction\n%v", p)
-				continue
-			}
-		}
-
-		cursym.Size = int64(c)
-		if bflag == 0 {
-			break
-		}
-	}
-
-	if c%4 != 0 {
-		ctxt.Diag("sym->size=%d, invalid", c)
-	}
-
-	/*
-	 * lay out the code.  all the pc-relative code references,
-	 * even cross-function, are resolved now;
-	 * only data references need to be relocated.
-	 * with more work we could leave cross-function
-	 * code references to be relocated too, and then
-	 * perhaps we'd be able to parallelize the span loop above.
-	 */
-
-	p = cursym.Text
-	ctxt.Autosize = int32(p.To.Offset + 4)
-	cursym.Grow(cursym.Size)
-
-	bp := cursym.P
-	c = int32(p.Pc) // even p->link might need extra padding
-	var v int
-	for p = p.Link; p != nil; p = p.Link {
-		ctxt.Pc = p.Pc
-		ctxt.Curp = p
-		o = oplook(ctxt, p)
-		opc = int32(p.Pc)
-		if ctxt.Headtype != obj.Hnacl {
-			asmout(ctxt, p, o, out[:])
-			m = int(o.size)
-		} else {
-			m = asmoutnacl(ctxt, c, p, o, out[:])
-			if int64(opc) != p.Pc {
-				ctxt.Diag("asmoutnacl broken: pc changed (%d->%d) in last stage: %v", opc, int32(p.Pc), p)
-			}
-		}
-
-		if m%4 != 0 || p.Pc%4 != 0 {
-			ctxt.Diag("final stage: pc invalid: %v size=%d", p, m)
-		}
-
-		if int64(c) > p.Pc {
-			ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, c, p)
-		}
-		for int64(c) != p.Pc {
-			// emit 0xe1a00000 (MOVW R0, R0)
-			bp[0] = 0x00
-			bp = bp[1:]
-
-			bp[0] = 0x00
-			bp = bp[1:]
-			bp[0] = 0xa0
-			bp = bp[1:]
-			bp[0] = 0xe1
-			bp = bp[1:]
-			c += 4
-		}
-
-		for i = 0; i < m/4; i++ {
-			v = int(out[i])
-			bp[0] = byte(v)
-			bp = bp[1:]
-			bp[0] = byte(v >> 8)
-			bp = bp[1:]
-			bp[0] = byte(v >> 16)
-			bp = bp[1:]
-			bp[0] = byte(v >> 24)
-			bp = bp[1:]
-		}
-
-		c += int32(m)
-	}
-}
-
-/*
- * when the first reference to the literal pool threatens
- * to go out of range of a 12-bit PC-relative offset,
- * drop the pool now, and branch round it.
- * this happens only in extended basic blocks that exceed 4k.
- */
-func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) bool {
-	if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
-		return flushpool(ctxt, p, 1, 0)
-	} else if p.Link == nil {
-		return flushpool(ctxt, p, 2, 0)
-	}
-	return false
-}
-
-func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
-	if ctxt.Blitrl != nil {
-		if skip != 0 {
-			if false && skip == 1 {
-				fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
-			}
-			q := ctxt.NewProg()
-			q.As = AB
-			q.To.Type = obj.TYPE_BRANCH
-			q.Pcond = p.Link
-			q.Link = ctxt.Blitrl
-			q.Lineno = p.Lineno
-			ctxt.Blitrl = q
-		} else if force == 0 && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
-			return false
-		}
-		if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
-			// if pool is not multiple of 16 bytes, add an alignment marker
-			q := ctxt.NewProg()
-
-			q.As = ADATABUNDLEEND
-			ctxt.Elitrl.Link = q
-			ctxt.Elitrl = q
-		}
-
-		// The line number for constant pool entries doesn't really matter.
-		// We set it to the line number of the preceding instruction so that
-		// there are no deltas to encode in the pc-line tables.
-		for q := ctxt.Blitrl; q != nil; q = q.Link {
-			q.Lineno = p.Lineno
-		}
-
-		ctxt.Elitrl.Link = p.Link
-		p.Link = ctxt.Blitrl
-
-		ctxt.Blitrl = nil /* BUG: should refer back to values until out-of-range */
-		ctxt.Elitrl = nil
-		pool.size = 0
-		pool.start = 0
-		pool.extra = 0
-		return true
-	}
-
-	return false
-}
-
-func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
-	var t obj.Prog
-
-	c := aclass(ctxt, a)
-
-	t.Ctxt = ctxt
-	t.As = AWORD
-
-	switch c {
-	default:
-		t.To.Offset = a.Offset
-		t.To.Sym = a.Sym
-		t.To.Type = a.Type
-		t.To.Name = a.Name
-
-		if ctxt.Flag_shared && t.To.Sym != nil {
-			t.Rel = p
-		}
-
-	case C_SROREG,
-		C_LOREG,
-		C_ROREG,
-		C_FOREG,
-		C_SOREG,
-		C_HOREG,
-		C_FAUTO,
-		C_SAUTO,
-		C_LAUTO,
-		C_LACON:
-		t.To.Type = obj.TYPE_CONST
-		t.To.Offset = ctxt.Instoffset
-	}
-
-	if t.Rel == nil {
-		for q := ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
-			if q.Rel == nil && q.To == t.To {
-				p.Pcond = q
-				return
-			}
-		}
-	}
-
-	if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
-		// start a new data bundle
-		q := ctxt.NewProg()
-		q.As = ADATABUNDLE
-		q.Pc = int64(pool.size)
-		pool.size += 4
-		if ctxt.Blitrl == nil {
-			ctxt.Blitrl = q
-			pool.start = uint32(p.Pc)
-		} else {
-			ctxt.Elitrl.Link = q
-		}
-
-		ctxt.Elitrl = q
-	}
-
-	q := ctxt.NewProg()
-	*q = t
-	q.Pc = int64(pool.size)
-
-	if ctxt.Blitrl == nil {
-		ctxt.Blitrl = q
-		pool.start = uint32(p.Pc)
-	} else {
-		ctxt.Elitrl.Link = q
-	}
-	ctxt.Elitrl = q
-	pool.size += 4
-
-	p.Pcond = q
-}
-
-func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
-	ctxt.Instoffset = 0
-	aclass(ctxt, a)
-	return int32(ctxt.Instoffset)
-}
-
-func immrot(v uint32) int32 {
-	for i := 0; i < 16; i++ {
-		if v&^0xff == 0 {
-			return int32(uint32(int32(i)<<8) | v | 1<<25)
-		}
-		v = v<<2 | v>>30
-	}
-
-	return 0
-}
-
-func immaddr(v int32) int32 {
-	if v >= 0 && v <= 0xfff {
-		return v&0xfff | 1<<24 | 1<<23 /* pre indexing */ /* pre indexing, up */
-	}
-	if v >= -0xfff && v < 0 {
-		return -v&0xfff | 1<<24 /* pre indexing */
-	}
-	return 0
-}
-
-func immfloat(v int32) bool {
-	return v&0xC03 == 0 /* offset will fit in floating-point load/store */
-}
-
-func immhalf(v int32) bool {
-	if v >= 0 && v <= 0xff {
-		return v|1<<24|1<<23 != 0 /* pre indexing */ /* pre indexing, up */
-	}
-	if v >= -0xff && v < 0 {
-		return -v&0xff|1<<24 != 0 /* pre indexing */
-	}
-	return false
-}
-
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
-	switch a.Type {
-	case obj.TYPE_NONE:
-		return C_NONE
-
-	case obj.TYPE_REG:
-		ctxt.Instoffset = 0
-		if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
-			return C_REG
-		}
-		if REG_F0 <= a.Reg && a.Reg <= REG_F15 {
-			return C_FREG
-		}
-		if a.Reg == REG_FPSR || a.Reg == REG_FPCR {
-			return C_FCR
-		}
-		if a.Reg == REG_CPSR || a.Reg == REG_SPSR {
-			return C_PSR
-		}
-		return C_GOK
-
-	case obj.TYPE_REGREG:
-		return C_REGREG
-
-	case obj.TYPE_REGREG2:
-		return C_REGREG2
-
-	case obj.TYPE_REGLIST:
-		return C_REGLIST
-
-	case obj.TYPE_SHIFT:
-		return C_SHIFT
-
-	case obj.TYPE_MEM:
-		switch a.Name {
-		case obj.NAME_EXTERN,
-			obj.NAME_GOTREF,
-			obj.NAME_STATIC:
-			if a.Sym == nil || a.Sym.Name == "" {
-				fmt.Printf("null sym external\n")
-				return C_GOK
-			}
-
-			ctxt.Instoffset = 0 // s.b. unused but just in case
-			if a.Sym.Type == obj.STLSBSS {
-				if ctxt.Flag_shared {
-					return C_TLS_IE
-				} else {
-					return C_TLS_LE
-				}
-			}
-
-			return C_ADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if t := immaddr(int32(ctxt.Instoffset)); t != 0 {
-				if immhalf(int32(ctxt.Instoffset)) {
-					if immfloat(t) {
-						return C_HFAUTO
-					}
-					return C_HAUTO
-				}
-
-				if immfloat(t) {
-					return C_FAUTO
-				}
-				return C_SAUTO
-			}
-
-			return C_LAUTO
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
-			if t := immaddr(int32(ctxt.Instoffset)); t != 0 {
-				if immhalf(int32(ctxt.Instoffset)) {
-					if immfloat(t) {
-						return C_HFAUTO
-					}
-					return C_HAUTO
-				}
-
-				if immfloat(t) {
-					return C_FAUTO
-				}
-				return C_SAUTO
-			}
-
-			return C_LAUTO
-
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if t := immaddr(int32(ctxt.Instoffset)); t != 0 {
-				if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
-					if immfloat(t) {
-						return C_HFOREG
-					}
-					return C_HOREG
-				}
-
-				if immfloat(t) {
-					return C_FOREG /* n.b. that it will also satisfy immrot */
-				}
-				if immrot(uint32(ctxt.Instoffset)) != 0 {
-					return C_SROREG
-				}
-				if immhalf(int32(ctxt.Instoffset)) {
-					return C_HOREG
-				}
-				return C_SOREG
-			}
-
-			if immrot(uint32(ctxt.Instoffset)) != 0 {
-				return C_ROREG
-			}
-			return C_LOREG
-		}
-
-		return C_GOK
-
-	case obj.TYPE_FCONST:
-		if chipzero5(ctxt, a.Val.(float64)) >= 0 {
-			return C_ZFCON
-		}
-		if chipfloat5(ctxt, a.Val.(float64)) >= 0 {
-			return C_SFCON
-		}
-		return C_LFCON
-
-	case obj.TYPE_TEXTSIZE:
-		return C_TEXTSIZE
-
-	case obj.TYPE_CONST,
-		obj.TYPE_ADDR:
-		switch a.Name {
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if a.Reg != 0 {
-				return aconsize(ctxt)
-			}
-
-			if immrot(uint32(ctxt.Instoffset)) != 0 {
-				return C_RCON
-			}
-			if immrot(^uint32(ctxt.Instoffset)) != 0 {
-				return C_NCON
-			}
-			return C_LCON
-
-		case obj.NAME_EXTERN,
-			obj.NAME_GOTREF,
-			obj.NAME_STATIC:
-			s := a.Sym
-			if s == nil {
-				break
-			}
-			ctxt.Instoffset = 0 // s.b. unused but just in case
-			return C_LCONADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			return aconsize(ctxt)
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
-			return aconsize(ctxt)
-		}
-
-		return C_GOK
-
-	case obj.TYPE_BRANCH:
-		return C_SBRA
-	}
-
-	return C_GOK
-}
-
-func aconsize(ctxt *obj.Link) int {
-	if immrot(uint32(ctxt.Instoffset)) != 0 {
-		return C_RACON
-	}
-	if immrot(uint32(-ctxt.Instoffset)) != 0 {
-		return C_RACON
-	}
-	return C_LACON
-}
-
-func prasm(p *obj.Prog) {
-	fmt.Printf("%v\n", p)
-}
-
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
-	a1 := int(p.Optab)
-	if a1 != 0 {
-		return &optab[a1-1]
-	}
-	a1 = int(p.From.Class)
-	if a1 == 0 {
-		a1 = aclass(ctxt, &p.From) + 1
-		p.From.Class = int8(a1)
-	}
-
-	a1--
-	a3 := int(p.To.Class)
-	if a3 == 0 {
-		a3 = aclass(ctxt, &p.To) + 1
-		p.To.Class = int8(a3)
-	}
-
-	a3--
-	a2 := C_NONE
-	if p.Reg != 0 {
-		a2 = C_REG
-	}
-
-	if false { /*debug['O']*/
-		fmt.Printf("oplook %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
-		fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
-	}
-
-	ops := oprange[p.As&obj.AMask]
-	c1 := &xcmp[a1]
-	c3 := &xcmp[a3]
-	for i := range ops {
-		op := &ops[i]
-		if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] {
-			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
-			return op
-		}
-	}
-
-	ctxt.Diag("illegal combination %v; %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
-	ctxt.Diag("from %d %d to %d %d\n", p.From.Type, p.From.Name, p.To.Type, p.To.Name)
-	prasm(p)
-	if ops == nil {
-		ops = optab
-	}
-	return &ops[0]
-}
-
-func cmp(a int, b int) bool {
-	if a == b {
-		return true
-	}
-	switch a {
-	case C_LCON:
-		if b == C_RCON || b == C_NCON {
-			return true
-		}
-
-	case C_LACON:
-		if b == C_RACON {
-			return true
-		}
-
-	case C_LFCON:
-		if b == C_ZFCON || b == C_SFCON {
-			return true
-		}
-
-	case C_HFAUTO:
-		return b == C_HAUTO || b == C_FAUTO
-
-	case C_FAUTO, C_HAUTO:
-		return b == C_HFAUTO
-
-	case C_SAUTO:
-		return cmp(C_HFAUTO, b)
-
-	case C_LAUTO:
-		return cmp(C_SAUTO, b)
-
-	case C_HFOREG:
-		return b == C_HOREG || b == C_FOREG
-
-	case C_FOREG, C_HOREG:
-		return b == C_HFOREG
-
-	case C_SROREG:
-		return cmp(C_SOREG, b) || cmp(C_ROREG, b)
-
-	case C_SOREG, C_ROREG:
-		return b == C_SROREG || cmp(C_HFOREG, b)
-
-	case C_LOREG:
-		return cmp(C_SROREG, b)
-
-	case C_LBRA:
-		if b == C_SBRA {
-			return true
-		}
-
-	case C_HREG:
-		return cmp(C_SP, b) || cmp(C_PC, b)
-	}
-
-	return false
-}
-
-type ocmp []Optab
-
-func (x ocmp) Len() int {
-	return len(x)
-}
-
-func (x ocmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x ocmp) Less(i, j int) bool {
-	p1 := &x[i]
-	p2 := &x[j]
-	n := int(p1.as) - int(p2.as)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a1) - int(p2.a1)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a2) - int(p2.a2)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a3) - int(p2.a3)
-	if n != 0 {
-		return n < 0
-	}
-	return false
-}
-
-func opset(a, b0 obj.As) {
-	oprange[a&obj.AMask] = oprange[b0]
-}
-
-func buildop(ctxt *obj.Link) {
-	var n int
-
-	for i := 0; i < C_GOK; i++ {
-		for n = 0; n < C_GOK; n++ {
-			if cmp(n, i) {
-				xcmp[i][n] = true
-			}
-		}
-	}
-	for n = 0; optab[n].as != obj.AXXX; n++ {
-		if optab[n].flag&LPCREL != 0 {
-			if ctxt.Flag_shared {
-				optab[n].size += int8(optab[n].pcrelsiz)
-			} else {
-				optab[n].flag &^= LPCREL
-			}
-		}
-	}
-
-	sort.Sort(ocmp(optab[:n]))
-	for i := 0; i < n; i++ {
-		r := optab[i].as
-		r0 := r & obj.AMask
-		start := i
-		for optab[i].as == r {
-			i++
-		}
-		oprange[r0] = optab[start:i]
-		i--
-
-		switch r {
-		default:
-			ctxt.Diag("unknown op in build: %v", r)
-			log.Fatalf("bad code")
-
-		case AADD:
-			opset(AAND, r0)
-			opset(AEOR, r0)
-			opset(ASUB, r0)
-			opset(ARSB, r0)
-			opset(AADC, r0)
-			opset(ASBC, r0)
-			opset(ARSC, r0)
-			opset(AORR, r0)
-			opset(ABIC, r0)
-
-		case ACMP:
-			opset(ATEQ, r0)
-			opset(ACMN, r0)
-
-		case AMVN:
-			break
-
-		case ABEQ:
-			opset(ABNE, r0)
-			opset(ABCS, r0)
-			opset(ABHS, r0)
-			opset(ABCC, r0)
-			opset(ABLO, r0)
-			opset(ABMI, r0)
-			opset(ABPL, r0)
-			opset(ABVS, r0)
-			opset(ABVC, r0)
-			opset(ABHI, r0)
-			opset(ABLS, r0)
-			opset(ABGE, r0)
-			opset(ABLT, r0)
-			opset(ABGT, r0)
-			opset(ABLE, r0)
-
-		case ASLL:
-			opset(ASRL, r0)
-			opset(ASRA, r0)
-
-		case AMUL:
-			opset(AMULU, r0)
-
-		case ADIV:
-			opset(AMOD, r0)
-			opset(AMODU, r0)
-			opset(ADIVU, r0)
-
-		case AMOVW,
-			AMOVB,
-			AMOVBS,
-			AMOVBU,
-			AMOVH,
-			AMOVHS,
-			AMOVHU:
-			break
-
-		case ASWPW:
-			opset(ASWPBU, r0)
-
-		case AB,
-			ABL,
-			ABX,
-			ABXRET,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY,
-			ASWI,
-			AWORD,
-			AMOVM,
-			ARFE,
-			obj.ATEXT,
-			obj.AUSEFIELD,
-			obj.ATYPE:
-			break
-
-		case AADDF:
-			opset(AADDD, r0)
-			opset(ASUBF, r0)
-			opset(ASUBD, r0)
-			opset(AMULF, r0)
-			opset(AMULD, r0)
-			opset(ADIVF, r0)
-			opset(ADIVD, r0)
-			opset(ASQRTF, r0)
-			opset(ASQRTD, r0)
-			opset(AMOVFD, r0)
-			opset(AMOVDF, r0)
-			opset(AABSF, r0)
-			opset(AABSD, r0)
-			opset(ANEGF, r0)
-			opset(ANEGD, r0)
-
-		case ACMPF:
-			opset(ACMPD, r0)
-
-		case AMOVF:
-			opset(AMOVD, r0)
-
-		case AMOVFW:
-			opset(AMOVDW, r0)
-
-		case AMOVWF:
-			opset(AMOVWD, r0)
-
-		case AMULL:
-			opset(AMULAL, r0)
-			opset(AMULLU, r0)
-			opset(AMULALU, r0)
-
-		case AMULWT:
-			opset(AMULWB, r0)
-
-		case AMULAWT:
-			opset(AMULAWB, r0)
-
-		case AMULA,
-			ALDREX,
-			ASTREX,
-			ALDREXD,
-			ASTREXD,
-			ATST,
-			APLD,
-			obj.AUNDEF,
-			ACLZ,
-			obj.AFUNCDATA,
-			obj.APCDATA,
-			obj.ANOP,
-			ADATABUNDLE,
-			ADATABUNDLEEND:
-			break
-		}
-	}
-}
-
-func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
-	ctxt.Printp = p
-	o1 := uint32(0)
-	o2 := uint32(0)
-	o3 := uint32(0)
-	o4 := uint32(0)
-	o5 := uint32(0)
-	o6 := uint32(0)
-	ctxt.Armsize += int32(o.size)
-	if false { /*debug['P']*/
-		fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
-	}
-	switch o.type_ {
-	default:
-		ctxt.Diag("unknown asm %d", o.type_)
-		prasm(p)
-
-	case 0: /* pseudo ops */
-		if false { /*debug['G']*/
-			fmt.Printf("%x: %s: arm\n", uint32(p.Pc), p.From.Sym.Name)
-		}
-
-	case 1: /* op R,[R],R */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = 0
-		}
-		if p.As == AMOVB || p.As == AMOVH || p.As == AMOVW || p.As == AMVN {
-			r = 0
-		} else if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
-
-	case 2: /* movbu $I,[R],R */
-		aclass(ctxt, &p.From)
-
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-		o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = 0
-		}
-		if p.As == AMOVW || p.As == AMVN {
-			r = 0
-		} else if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
-
-	case 3: /* add R<<[IR],[R],R */
-		o1 = mov(ctxt, p)
-
-	case 4: /* MOVW $off(R), R -> add $off,[R],R */
-		aclass(ctxt, &p.From)
-		if ctxt.Instoffset < 0 {
-			o1 = oprrr(ctxt, ASUB, int(p.Scond))
-			o1 |= uint32(immrot(uint32(-ctxt.Instoffset)))
-		} else {
-			o1 = oprrr(ctxt, AADD, int(p.Scond))
-			o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
-		}
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 |= (uint32(r) & 15) << 16
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 5: /* bra s */
-		o1 = opbra(ctxt, p, p.As, int(p.Scond))
-
-		v := int32(-8)
-		if p.To.Sym != nil {
-			rel := obj.Addrel(ctxt.Cursym)
-			rel.Off = int32(ctxt.Pc)
-			rel.Siz = 4
-			rel.Sym = p.To.Sym
-			v += int32(p.To.Offset)
-			rel.Add = int64(o1) | (int64(v)>>2)&0xffffff
-			rel.Type = obj.R_CALLARM
-			break
-		}
-
-		if p.Pcond != nil {
-			v = int32((p.Pcond.Pc - ctxt.Pc) - 8)
-		}
-		o1 |= (uint32(v) >> 2) & 0xffffff
-
-	case 6: /* b ,O(R) -> add $O,R,PC */
-		aclass(ctxt, &p.To)
-
-		o1 = oprrr(ctxt, AADD, int(p.Scond))
-		o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
-		o1 |= (uint32(p.To.Reg) & 15) << 16
-		o1 |= (REGPC & 15) << 12
-
-	case 7: /* bl (R) -> blx R */
-		aclass(ctxt, &p.To)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("%v: doesn't support BL offset(REG) with non-zero offset %d", p, ctxt.Instoffset)
-		}
-		o1 = oprrr(ctxt, ABL, int(p.Scond))
-		o1 |= (uint32(p.To.Reg) & 15) << 0
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 0
-		rel.Type = obj.R_CALLIND
-
-	case 8: /* sll $c,[R],R -> mov (R<<$c),R */
-		aclass(ctxt, &p.From)
-
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 |= (uint32(r) & 15) << 0
-		o1 |= uint32((ctxt.Instoffset & 31) << 7)
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 9: /* sll R,[R],R -> mov (R<<R),R */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 |= (uint32(r) & 15) << 0
-		o1 |= (uint32(p.From.Reg)&15)<<8 | 1<<4
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 10: /* swi [$con] */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		if p.To.Type != obj.TYPE_NONE {
-			aclass(ctxt, &p.To)
-			o1 |= uint32(ctxt.Instoffset & 0xffffff)
-		}
-
-	case 11: /* word */
-		aclass(ctxt, &p.To)
-
-		o1 = uint32(ctxt.Instoffset)
-		if p.To.Sym != nil {
-			// This case happens with words generated
-			// in the PC stream as part of the literal pool.
-			rel := obj.Addrel(ctxt.Cursym)
-
-			rel.Off = int32(ctxt.Pc)
-			rel.Siz = 4
-			rel.Sym = p.To.Sym
-			rel.Add = p.To.Offset
-
-			if ctxt.Flag_shared {
-				if p.To.Name == obj.NAME_GOTREF {
-					rel.Type = obj.R_GOTPCREL
-				} else {
-					rel.Type = obj.R_PCREL
-				}
-				rel.Add += ctxt.Pc - p.Rel.Pc - 8
-			} else {
-				rel.Type = obj.R_ADDR
-			}
-			o1 = 0
-		}
-
-	case 12: /* movw $lcon, reg */
-		o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
-
-		if o.flag&LPCREL != 0 {
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | (uint32(p.To.Reg)&15)<<0 | (REGPC&15)<<16 | (uint32(p.To.Reg)&15)<<12
-		}
-
-	case 13: /* op $lcon, [R], R */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = oprrr(ctxt, p.As, int(p.Scond))
-		o2 |= REGTMP & 15
-		r := int(p.Reg)
-		if p.As == AMOVW || p.As == AMVN {
-			r = 0
-		} else if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o2 |= (uint32(r) & 15) << 16
-		if p.To.Type != obj.TYPE_NONE {
-			o2 |= (uint32(p.To.Reg) & 15) << 12
-		}
-
-	case 14: /* movb/movbu/movh/movhu R,R */
-		o1 = oprrr(ctxt, ASLL, int(p.Scond))
-
-		if p.As == AMOVBU || p.As == AMOVHU {
-			o2 = oprrr(ctxt, ASRL, int(p.Scond))
-		} else {
-			o2 = oprrr(ctxt, ASRA, int(p.Scond))
-		}
-
-		r := int(p.To.Reg)
-		o1 |= (uint32(p.From.Reg)&15)<<0 | (uint32(r)&15)<<12
-		o2 |= uint32(r)&15 | (uint32(r)&15)<<12
-		if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
-			o1 |= 24 << 7
-			o2 |= 24 << 7
-		} else {
-			o1 |= 16 << 7
-			o2 |= 16 << 7
-		}
-
-	case 15: /* mul r,[r,]r */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		if rt == r {
-			r = rf
-			rf = rt
-		}
-
-		if false {
-			if rt == r || rf == REGPC&15 || r == REGPC&15 || rt == REGPC&15 {
-				ctxt.Diag("bad registers in MUL")
-				prasm(p)
-			}
-		}
-
-		o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16
-
-	case 16: /* div r,[r,]r */
-		o1 = 0xf << 28
-
-		o2 = 0
-
-	case 17:
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		rt2 := int(p.To.Offset)
-		r := int(p.Reg)
-		o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
-
-	case 20: /* mov/movb/movbu R,O(R) */
-		aclass(ctxt, &p.To)
-
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = osr(ctxt, p.As, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
-
-	case 21: /* mov/movbu O(R),R -> lr */
-		aclass(ctxt, &p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = olr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
-		if p.As != AMOVW {
-			o1 |= 1 << 22
-		}
-
-	case 30: /* mov/movb/movbu R,L(R) */
-		o1 = omvl(ctxt, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = osrr(ctxt, int(p.From.Reg), REGTMP&15, r, int(p.Scond))
-		if p.As != AMOVW {
-			o2 |= 1 << 22
-		}
-
-	case 31: /* mov/movbu L(R),R -> lr[b] */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = olrr(ctxt, REGTMP&15, r, int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
-			o2 |= 1 << 22
-		}
-
-	case 34: /* mov $lacon,R */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-
-		o2 = oprrr(ctxt, AADD, int(p.Scond))
-		o2 |= REGTMP & 15
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 |= (uint32(r) & 15) << 16
-		if p.To.Type != obj.TYPE_NONE {
-			o2 |= (uint32(p.To.Reg) & 15) << 12
-		}
-
-	case 35: /* mov PSR,R */
-		o1 = 2<<23 | 0xf<<16 | 0<<0
-
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-		o1 |= (uint32(p.From.Reg) & 1) << 22
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 36: /* mov R,PSR */
-		o1 = 2<<23 | 0x29f<<12 | 0<<4
-
-		if p.Scond&C_FBIT != 0 {
-			o1 ^= 0x010 << 12
-		}
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-		o1 |= (uint32(p.To.Reg) & 1) << 22
-		o1 |= (uint32(p.From.Reg) & 15) << 0
-
-	case 37: /* mov $con,PSR */
-		aclass(ctxt, &p.From)
-
-		o1 = 2<<23 | 0x29f<<12 | 0<<4
-		if p.Scond&C_FBIT != 0 {
-			o1 ^= 0x010 << 12
-		}
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-		o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
-		o1 |= (uint32(p.To.Reg) & 1) << 22
-		o1 |= (uint32(p.From.Reg) & 15) << 0
-
-	case 38, 39:
-		switch o.type_ {
-		case 38: /* movm $con,oreg -> stm */
-			o1 = 0x4 << 25
-
-			o1 |= uint32(p.From.Offset & 0xffff)
-			o1 |= (uint32(p.To.Reg) & 15) << 16
-			aclass(ctxt, &p.To)
-
-		case 39: /* movm oreg,$con -> ldm */
-			o1 = 0x4<<25 | 1<<20
-
-			o1 |= uint32(p.To.Offset & 0xffff)
-			o1 |= (uint32(p.From.Reg) & 15) << 16
-			aclass(ctxt, &p.From)
-		}
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("offset must be zero in MOVM; %v", p)
-		}
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-		if p.Scond&C_PBIT != 0 {
-			o1 |= 1 << 24
-		}
-		if p.Scond&C_UBIT != 0 {
-			o1 |= 1 << 23
-		}
-		if p.Scond&C_SBIT != 0 {
-			o1 |= 1 << 22
-		}
-		if p.Scond&C_WBIT != 0 {
-			o1 |= 1 << 21
-		}
-
-	case 40: /* swp oreg,reg,reg */
-		aclass(ctxt, &p.From)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("offset must be zero in SWP")
-		}
-		o1 = 0x2<<23 | 0x9<<4
-		if p.As != ASWPW {
-			o1 |= 1 << 22
-		}
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		o1 |= (uint32(p.Reg) & 15) << 0
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 41: /* rfe -> movm.s.w.u 0(r13),[r15] */
-		o1 = 0xe8fd8000
-
-	case 50: /* floating point store */
-		v := regoff(ctxt, &p.To)
-
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = ofsr(ctxt, p.As, int(p.From.Reg), v, r, int(p.Scond), p)
-
-	case 51: /* floating point load */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = ofsr(ctxt, p.As, int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
-
-	case 52: /* floating point store, int32 offset UGLY */
-		o1 = omvl(ctxt, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
-		o3 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
-
-	case 53: /* floating point load, int32 offset UGLY */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
-		o3 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
-
-	case 54: /* floating point arith */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-			if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD || p.As == ANEGF || p.As == ANEGD {
-				r = 0
-			}
-		}
-
-		o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
-
-	case 56: /* move to FP[CS]R */
-		o1 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0xe<<24 | 1<<8 | 1<<4
-
-		o1 |= ((uint32(p.To.Reg)&1)+1)<<21 | (uint32(p.From.Reg)&15)<<12
-
-	case 57: /* move from FP[CS]R */
-		o1 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0xe<<24 | 1<<8 | 1<<4
-
-		o1 |= ((uint32(p.From.Reg)&1)+1)<<21 | (uint32(p.To.Reg)&15)<<12 | 1<<20
-
-	case 58: /* movbu R,R */
-		o1 = oprrr(ctxt, AAND, int(p.Scond))
-
-		o1 |= uint32(immrot(0xff))
-		rt := int(p.To.Reg)
-		r := int(p.From.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = 0
-		}
-		if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
-
-	case 59: /* movw/bu R<<I(R),R -> ldr indexed */
-		if p.From.Reg == 0 {
-			if p.As != AMOVW {
-				ctxt.Diag("byte MOV from shifter operand")
-			}
-			o1 = mov(ctxt, p)
-			break
-		}
-
-		if p.From.Offset&(1<<4) != 0 {
-			ctxt.Diag("bad shift in LDR")
-		}
-		o1 = olrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVBU {
-			o1 |= 1 << 22
-		}
-
-	case 60: /* movb R(R),R -> ldrsb indexed */
-		if p.From.Reg == 0 {
-			ctxt.Diag("byte MOV from shifter operand")
-			o1 = mov(ctxt, p)
-			break
-		}
-
-		if p.From.Offset&(^0xf) != 0 {
-			ctxt.Diag("bad shift in LDRSB")
-		}
-		o1 = olhrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
-		o1 ^= 1<<5 | 1<<6
-
-	case 61: /* movw/b/bu R,R<<[IR](R) -> str indexed */
-		if p.To.Reg == 0 {
-			ctxt.Diag("MOV to shifter operand")
-		}
-		o1 = osrr(ctxt, int(p.From.Reg), int(p.To.Offset), int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
-			o1 |= 1 << 22
-		}
-
-		/* reloc ops */
-	case 64: /* mov/movb/movbu R,addr */
-		o1 = omvl(ctxt, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = osr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond))
-		if o.flag&LPCREL != 0 {
-			o3 = o2
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
-		}
-
-	case 65: /* mov/movbu addr,R */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
-			o2 |= 1 << 22
-		}
-		if o.flag&LPCREL != 0 {
-			o3 = o2
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
-		}
-
-	case 101: /* movw tlsvar,R, local exec*/
-		if p.Scond&C_SCOND != C_SCOND_NONE {
-			ctxt.Diag("conditional tls")
-		}
-		o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
-
-	case 102: /* movw tlsvar,R, initial exec*/
-		if p.Scond&C_SCOND != C_SCOND_NONE {
-			ctxt.Diag("conditional tls")
-		}
-		o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
-		o2 = olrr(ctxt, int(p.To.Reg)&15, (REGPC & 15), int(p.To.Reg), int(p.Scond))
-
-	case 103: /* word tlsvar, local exec */
-		if p.To.Sym == nil {
-			ctxt.Diag("nil sym in tls %v", p)
-		}
-		if p.To.Offset != 0 {
-			ctxt.Diag("offset against tls var in %v", p)
-		}
-		// This case happens with words generated in the PC stream as part of
-		// the literal pool.
-		rel := obj.Addrel(ctxt.Cursym)
-
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.To.Sym
-		rel.Type = obj.R_TLS_LE
-		o1 = 0
-
-	case 104: /* word tlsvar, initial exec */
-		if p.To.Sym == nil {
-			ctxt.Diag("nil sym in tls %v", p)
-		}
-		if p.To.Offset != 0 {
-			ctxt.Diag("offset against tls var in %v", p)
-		}
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.To.Sym
-		rel.Type = obj.R_TLS_IE
-		rel.Add = ctxt.Pc - p.Rel.Pc - 8 - int64(rel.Siz)
-
-	case 68: /* floating point store -> ADDR */
-		o1 = omvl(ctxt, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
-		if o.flag&LPCREL != 0 {
-			o3 = o2
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
-		}
-
-	case 69: /* floating point load <- ADDR */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
-		if o.flag&LPCREL != 0 {
-			o3 = o2
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
-		}
-
-		/* ArmV4 ops: */
-	case 70: /* movh/movhu R,O(R) -> strh */
-		aclass(ctxt, &p.To)
-
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = oshr(ctxt, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
-
-	case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
-		aclass(ctxt, &p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = olhr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVB || p.As == AMOVBS {
-			o1 ^= 1<<5 | 1<<6
-		} else if p.As == AMOVH || p.As == AMOVHS {
-			o1 ^= (1 << 6)
-		}
-
-	case 72: /* movh/movhu R,L(R) -> strh */
-		o1 = omvl(ctxt, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = oshrr(ctxt, int(p.From.Reg), REGTMP&15, r, int(p.Scond))
-
-	case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = olhrr(ctxt, REGTMP&15, r, int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVB || p.As == AMOVBS {
-			o2 ^= 1<<5 | 1<<6
-		} else if p.As == AMOVH || p.As == AMOVHS {
-			o2 ^= (1 << 6)
-		}
-
-	case 74: /* bx $I */
-		ctxt.Diag("ABX $I")
-
-	case 75: /* bx O(R) */
-		aclass(ctxt, &p.To)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("non-zero offset in ABX")
-		}
-
-		/*
-			o1 = 	oprrr(ctxt, AADD, p->scond) | immrot(0) | ((REGPC&15)<<16) | ((REGLINK&15)<<12);	// mov PC, LR
-			o2 = (((p->scond&C_SCOND) ^ C_SCOND_XOR)<<28) | (0x12fff<<8) | (1<<4) | ((p->to.reg&15) << 0);		// BX R
-		*/
-		// p->to.reg may be REGLINK
-		o1 = oprrr(ctxt, AADD, int(p.Scond))
-
-		o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
-		o1 |= (uint32(p.To.Reg) & 15) << 16
-		o1 |= (REGTMP & 15) << 12
-		o2 = oprrr(ctxt, AADD, int(p.Scond)) | uint32(immrot(0)) | (REGPC&15)<<16 | (REGLINK&15)<<12 // mov PC, LR
-		o3 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x12fff<<8 | 1<<4 | REGTMP&15             // BX Rtmp
-
-	case 76: /* bx O(R) when returning from fn*/
-		ctxt.Diag("ABXRET")
-
-	case 77: /* ldrex oreg,reg */
-		aclass(ctxt, &p.From)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("offset must be zero in LDREX")
-		}
-		o1 = 0x19<<20 | 0xf9f
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 78: /* strex reg,oreg,reg */
-		aclass(ctxt, &p.From)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("offset must be zero in STREX")
-		}
-		o1 = 0x18<<20 | 0xf90
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		o1 |= (uint32(p.Reg) & 15) << 0
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 80: /* fmov zfcon,freg */
-		if p.As == AMOVD {
-			o1 = 0xeeb00b00 // VMOV imm 64
-			o2 = oprrr(ctxt, ASUBD, int(p.Scond))
-		} else {
-			o1 = 0x0eb00a00 // VMOV imm 32
-			o2 = oprrr(ctxt, ASUBF, int(p.Scond))
-		}
-
-		v := int32(0x70) // 1.0
-		r := (int(p.To.Reg) & 15) << 0
-
-		// movf $1.0, r
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-		o1 |= (uint32(r) & 15) << 12
-		o1 |= (uint32(v) & 0xf) << 0
-		o1 |= (uint32(v) & 0xf0) << 12
-
-		// subf r,r,r
-		o2 |= (uint32(r)&15)<<0 | (uint32(r)&15)<<16 | (uint32(r)&15)<<12
-
-	case 81: /* fmov sfcon,freg */
-		o1 = 0x0eb00a00 // VMOV imm 32
-		if p.As == AMOVD {
-			o1 = 0xeeb00b00 // VMOV imm 64
-		}
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		v := int32(chipfloat5(ctxt, p.From.Val.(float64)))
-		o1 |= (uint32(v) & 0xf) << 0
-		o1 |= (uint32(v) & 0xf0) << 12
-
-	case 82: /* fcmp freg,freg, */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.Reg)&15)<<12 | (uint32(p.From.Reg)&15)<<0
-		o2 = 0x0ef1fa10 // VMRS R15
-		o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 83: /* fcmp freg,, */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg)&15)<<12 | 1<<16
-		o2 = 0x0ef1fa10 // VMRS R15
-		o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 84: /* movfw freg,freg - truncate float-to-fix */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 0
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 85: /* movwf freg,freg - fix-to-float */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 0
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-		// macro for movfw freg,FTMP; movw FTMP,reg
-	case 86: /* movfw freg,reg - truncate float-to-fix */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 0
-		o1 |= (FREGTMP & 15) << 12
-		o2 = oprrr(ctxt, -AMOVFW, int(p.Scond))
-		o2 |= (FREGTMP & 15) << 16
-		o2 |= (uint32(p.To.Reg) & 15) << 12
-
-		// macro for movw reg,FTMP; movwf FTMP,freg
-	case 87: /* movwf reg,freg - fix-to-float */
-		o1 = oprrr(ctxt, -AMOVWF, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 12
-		o1 |= (FREGTMP & 15) << 16
-		o2 = oprrr(ctxt, p.As, int(p.Scond))
-		o2 |= (FREGTMP & 15) << 0
-		o2 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 88: /* movw reg,freg  */
-		o1 = oprrr(ctxt, -AMOVWF, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 12
-		o1 |= (uint32(p.To.Reg) & 15) << 16
-
-	case 89: /* movw freg,reg  */
-		o1 = oprrr(ctxt, -AMOVFW, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-
-	case 90: /* tst reg  */
-		o1 = oprrr(ctxt, -ACMP, int(p.Scond))
-
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-
-	case 91: /* ldrexd oreg,reg */
-		aclass(ctxt, &p.From)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("offset must be zero in LDREX")
-		}
-		o1 = 0x1b<<20 | 0xf9f
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 92: /* strexd reg,oreg,reg */
-		aclass(ctxt, &p.From)
-
-		if ctxt.Instoffset != 0 {
-			ctxt.Diag("offset must be zero in STREX")
-		}
-		o1 = 0x1a<<20 | 0xf90
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		o1 |= (uint32(p.Reg) & 15) << 0
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
-
-	case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
-		o1 = omvl(ctxt, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
-		if p.As == AMOVB || p.As == AMOVBS {
-			o2 ^= 1<<5 | 1<<6
-		} else if p.As == AMOVH || p.As == AMOVHS {
-			o2 ^= (1 << 6)
-		}
-		if o.flag&LPCREL != 0 {
-			o3 = o2
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
-		}
-
-	case 94: /* movh/movhu R,addr -> strh */
-		o1 = omvl(ctxt, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
-		if o.flag&LPCREL != 0 {
-			o3 = o2
-			o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
-		}
-
-	case 95: /* PLD off(reg) */
-		o1 = 0xf5d0f000
-
-		o1 |= (uint32(p.From.Reg) & 15) << 16
-		if p.From.Offset < 0 {
-			o1 &^= (1 << 23)
-			o1 |= uint32((-p.From.Offset) & 0xfff)
-		} else {
-			o1 |= uint32(p.From.Offset & 0xfff)
-		}
-
-	// This is supposed to be something that stops execution.
-	// It's not supposed to be reached, ever, but if it is, we'd
-	// like to be able to tell how we got there. Assemble as
-	// 0xf7fabcfd which is guaranteed to raise undefined instruction
-	// exception.
-	case 96: /* UNDEF */
-		o1 = 0xf7fabcfd
-
-	case 97: /* CLZ Rm, Rd */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= (uint32(p.From.Reg) & 15) << 0
-
-	case 98: /* MULW{T,B} Rs, Rm, Rd */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.To.Reg) & 15) << 16
-		o1 |= (uint32(p.From.Reg) & 15) << 8
-		o1 |= (uint32(p.Reg) & 15) << 0
-
-	case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */
-		o1 = oprrr(ctxt, p.As, int(p.Scond))
-
-		o1 |= (uint32(p.To.Reg) & 15) << 12
-		o1 |= (uint32(p.From.Reg) & 15) << 8
-		o1 |= (uint32(p.Reg) & 15) << 0
-		o1 |= uint32((p.To.Offset & 15) << 16)
-
-	// DATABUNDLE: BKPT $0x5be0, signify the start of NaCl data bundle;
-	// DATABUNDLEEND: zero width alignment marker
-	case 100:
-		if p.As == ADATABUNDLE {
-			o1 = 0xe125be70
-		}
-	}
-
-	out[0] = o1
-	out[1] = o2
-	out[2] = o3
-	out[3] = o4
-	out[4] = o5
-	out[5] = o6
-	return
-}
-
-func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
-	aclass(ctxt, &p.From)
-	o1 := oprrr(ctxt, p.As, int(p.Scond))
-	o1 |= uint32(p.From.Offset)
-	rt := int(p.To.Reg)
-	if p.To.Type == obj.TYPE_NONE {
-		rt = 0
-	}
-	r := int(p.Reg)
-	if p.As == AMOVW || p.As == AMVN {
-		r = 0
-	} else if r == 0 {
-		r = rt
-	}
-	o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
-	return o1
-}
-
-func oprrr(ctxt *obj.Link, a obj.As, sc int) uint32 {
-	o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if sc&C_SBIT != 0 {
-		o |= 1 << 20
-	}
-	if sc&(C_PBIT|C_WBIT) != 0 {
-		ctxt.Diag(".nil/.W on dp instruction")
-	}
-	switch a {
-	case AMULU, AMUL:
-		return o | 0x0<<21 | 0x9<<4
-	case AMULA:
-		return o | 0x1<<21 | 0x9<<4
-	case AMULLU:
-		return o | 0x4<<21 | 0x9<<4
-	case AMULL:
-		return o | 0x6<<21 | 0x9<<4
-	case AMULALU:
-		return o | 0x5<<21 | 0x9<<4
-	case AMULAL:
-		return o | 0x7<<21 | 0x9<<4
-	case AAND:
-		return o | 0x0<<21
-	case AEOR:
-		return o | 0x1<<21
-	case ASUB:
-		return o | 0x2<<21
-	case ARSB:
-		return o | 0x3<<21
-	case AADD:
-		return o | 0x4<<21
-	case AADC:
-		return o | 0x5<<21
-	case ASBC:
-		return o | 0x6<<21
-	case ARSC:
-		return o | 0x7<<21
-	case ATST:
-		return o | 0x8<<21 | 1<<20
-	case ATEQ:
-		return o | 0x9<<21 | 1<<20
-	case ACMP:
-		return o | 0xa<<21 | 1<<20
-	case ACMN:
-		return o | 0xb<<21 | 1<<20
-	case AORR:
-		return o | 0xc<<21
-
-	case AMOVB, AMOVH, AMOVW:
-		return o | 0xd<<21
-	case ABIC:
-		return o | 0xe<<21
-	case AMVN:
-		return o | 0xf<<21
-	case ASLL:
-		return o | 0xd<<21 | 0<<5
-	case ASRL:
-		return o | 0xd<<21 | 1<<5
-	case ASRA:
-		return o | 0xd<<21 | 2<<5
-	case ASWI:
-		return o | 0xf<<24
-
-	case AADDD:
-		return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 0<<4
-	case AADDF:
-		return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 0<<4
-	case ASUBD:
-		return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 4<<4
-	case ASUBF:
-		return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 4<<4
-	case AMULD:
-		return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0<<4
-	case AMULF:
-		return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0<<4
-	case ADIVD:
-		return o | 0xe<<24 | 0x8<<20 | 0xb<<8 | 0<<4
-	case ADIVF:
-		return o | 0xe<<24 | 0x8<<20 | 0xa<<8 | 0<<4
-	case ASQRTD:
-		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0xc<<4
-	case ASQRTF:
-		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0xc<<4
-	case AABSD:
-		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 0xc<<4
-	case AABSF:
-		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 0xc<<4
-	case ANEGD:
-		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0x4<<4
-	case ANEGF:
-		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0x4<<4
-	case ACMPD:
-		return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xb<<8 | 0xc<<4
-	case ACMPF:
-		return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xa<<8 | 0xc<<4
-
-	case AMOVF:
-		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 4<<4
-	case AMOVD:
-		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 4<<4
-
-	case AMOVDF:
-		return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 1<<8 // dtof
-	case AMOVFD:
-		return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 0<<8 // dtof
-
-	case AMOVWF:
-		if sc&C_UBIT == 0 {
-			o |= 1 << 7 /* signed */
-		}
-		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 0<<8 // toint, double
-
-	case AMOVWD:
-		if sc&C_UBIT == 0 {
-			o |= 1 << 7 /* signed */
-		}
-		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 1<<8 // toint, double
-
-	case AMOVFW:
-		if sc&C_UBIT == 0 {
-			o |= 1 << 16 /* signed */
-		}
-		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 0<<8 | 1<<7 // toint, double, trunc
-
-	case AMOVDW:
-		if sc&C_UBIT == 0 {
-			o |= 1 << 16 /* signed */
-		}
-		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 1<<8 | 1<<7 // toint, double, trunc
-
-	case -AMOVWF: // copy WtoF
-		return o | 0xe<<24 | 0x0<<20 | 0xb<<8 | 1<<4
-
-	case -AMOVFW: // copy FtoW
-		return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 1<<4
-
-	case -ACMP: // cmp imm
-		return o | 0x3<<24 | 0x5<<20
-
-		// CLZ doesn't support .nil
-	case ACLZ:
-		return o&(0xf<<28) | 0x16f<<16 | 0xf1<<4
-
-	case AMULWT:
-		return o&(0xf<<28) | 0x12<<20 | 0xe<<4
-
-	case AMULWB:
-		return o&(0xf<<28) | 0x12<<20 | 0xa<<4
-
-	case AMULAWT:
-		return o&(0xf<<28) | 0x12<<20 | 0xc<<4
-
-	case AMULAWB:
-		return o&(0xf<<28) | 0x12<<20 | 0x8<<4
-
-	case ABL: // BLX REG
-		return o&(0xf<<28) | 0x12fff3<<4
-	}
-
-	ctxt.Diag("bad rrr %d", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-func opbra(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
-	if sc&(C_SBIT|C_PBIT|C_WBIT) != 0 {
-		ctxt.Diag("%v: .nil/.nil/.W on bra instruction", p)
-	}
-	sc &= C_SCOND
-	sc ^= C_SCOND_XOR
-	if a == ABL || a == obj.ADUFFZERO || a == obj.ADUFFCOPY {
-		return uint32(sc)<<28 | 0x5<<25 | 0x1<<24
-	}
-	if sc != 0xe {
-		ctxt.Diag("%v: .COND on bcond instruction", p)
-	}
-	switch a {
-	case ABEQ:
-		return 0x0<<28 | 0x5<<25
-	case ABNE:
-		return 0x1<<28 | 0x5<<25
-	case ABCS:
-		return 0x2<<28 | 0x5<<25
-	case ABHS:
-		return 0x2<<28 | 0x5<<25
-	case ABCC:
-		return 0x3<<28 | 0x5<<25
-	case ABLO:
-		return 0x3<<28 | 0x5<<25
-	case ABMI:
-		return 0x4<<28 | 0x5<<25
-	case ABPL:
-		return 0x5<<28 | 0x5<<25
-	case ABVS:
-		return 0x6<<28 | 0x5<<25
-	case ABVC:
-		return 0x7<<28 | 0x5<<25
-	case ABHI:
-		return 0x8<<28 | 0x5<<25
-	case ABLS:
-		return 0x9<<28 | 0x5<<25
-	case ABGE:
-		return 0xa<<28 | 0x5<<25
-	case ABLT:
-		return 0xb<<28 | 0x5<<25
-	case ABGT:
-		return 0xc<<28 | 0x5<<25
-	case ABLE:
-		return 0xd<<28 | 0x5<<25
-	case AB:
-		return 0xe<<28 | 0x5<<25
-	}
-
-	ctxt.Diag("bad bra %v", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
-	if sc&C_SBIT != 0 {
-		ctxt.Diag(".nil on LDR/STR instruction")
-	}
-	o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if sc&C_PBIT == 0 {
-		o |= 1 << 24
-	}
-	if sc&C_UBIT == 0 {
-		o |= 1 << 23
-	}
-	if sc&C_WBIT != 0 {
-		o |= 1 << 21
-	}
-	o |= 1<<26 | 1<<20
-	if v < 0 {
-		if sc&C_UBIT != 0 {
-			ctxt.Diag(".U on neg offset")
-		}
-		v = -v
-		o ^= 1 << 23
-	}
-
-	if v >= 1<<12 || v < 0 {
-		ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
-	}
-	o |= uint32(v)
-	o |= (uint32(b) & 15) << 16
-	o |= (uint32(r) & 15) << 12
-	return o
-}
-
-func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
-	if sc&C_SBIT != 0 {
-		ctxt.Diag(".nil on LDRH/STRH instruction")
-	}
-	o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if sc&C_PBIT == 0 {
-		o |= 1 << 24
-	}
-	if sc&C_WBIT != 0 {
-		o |= 1 << 21
-	}
-	o |= 1<<23 | 1<<20 | 0xb<<4
-	if v < 0 {
-		v = -v
-		o ^= 1 << 23
-	}
-
-	if v >= 1<<8 || v < 0 {
-		ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
-	}
-	o |= uint32(v)&0xf | (uint32(v)>>4)<<8 | 1<<22
-	o |= (uint32(b) & 15) << 16
-	o |= (uint32(r) & 15) << 12
-	return o
-}
-
-func osr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int) uint32 {
-	o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
-	if a != AMOVW {
-		o |= 1 << 22
-	}
-	return o
-}
-
-func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
-	o := olhr(ctxt, v, b, r, sc) ^ (1 << 20)
-	return o
-}
-
-func osrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
-	return olr(ctxt, int32(i), b, r, sc) ^ (1<<25 | 1<<20)
-}
-
-func oshrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
-	return olhr(ctxt, int32(i), b, r, sc) ^ (1<<22 | 1<<20)
-}
-
-func olrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
-	return olr(ctxt, int32(i), b, r, sc) ^ (1 << 25)
-}
-
-func olhrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
-	return olhr(ctxt, int32(i), b, r, sc) ^ (1 << 22)
-}
-
-func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
-	if sc&C_SBIT != 0 {
-		ctxt.Diag(".nil on FLDR/FSTR instruction: %v", p)
-	}
-	o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
-	if sc&C_PBIT == 0 {
-		o |= 1 << 24
-	}
-	if sc&C_WBIT != 0 {
-		o |= 1 << 21
-	}
-	o |= 6<<25 | 1<<24 | 1<<23 | 10<<8
-	if v < 0 {
-		v = -v
-		o ^= 1 << 23
-	}
-
-	if v&3 != 0 {
-		ctxt.Diag("odd offset for floating point op: %d\n%v", v, p)
-	} else if v >= 1<<10 || v < 0 {
-		ctxt.Diag("literal span too large: %d\n%v", v, p)
-	}
-	o |= (uint32(v) >> 2) & 0xFF
-	o |= (uint32(b) & 15) << 16
-	o |= (uint32(r) & 15) << 12
-
-	switch a {
-	default:
-		ctxt.Diag("bad fst %v", a)
-		fallthrough
-
-	case AMOVD:
-		o |= 1 << 8
-		fallthrough
-
-	case AMOVF:
-		break
-	}
-
-	return o
-}
-
-func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
-	var o1 uint32
-	if p.Pcond == nil {
-		aclass(ctxt, a)
-		v := immrot(^uint32(ctxt.Instoffset))
-		if v == 0 {
-			ctxt.Diag("missing literal")
-			prasm(p)
-			return 0
-		}
-
-		o1 = oprrr(ctxt, AMVN, int(p.Scond)&C_SCOND)
-		o1 |= uint32(v)
-		o1 |= (uint32(dr) & 15) << 12
-	} else {
-		v := int32(p.Pcond.Pc - p.Pc - 8)
-		o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
-	}
-
-	return o1
-}
-
-func chipzero5(ctxt *obj.Link, e float64) int {
-	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
-	if obj.GOARM < 7 || e != 0 {
-		return -1
-	}
-	return 0
-}
-
-func chipfloat5(ctxt *obj.Link, e float64) int {
-	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
-	if obj.GOARM < 7 {
-		return -1
-	}
-
-	ei := math.Float64bits(e)
-	l := uint32(ei)
-	h := uint32(ei >> 32)
-
-	if l != 0 || h&0xffff != 0 {
-		return -1
-	}
-	h1 := h & 0x7fc00000
-	if h1 != 0x40000000 && h1 != 0x3fc00000 {
-		return -1
-	}
-	n := 0
-
-	// sign bit (a)
-	if h&0x80000000 != 0 {
-		n |= 1 << 7
-	}
-
-	// exp sign bit (b)
-	if h1 == 0x3fc00000 {
-		n |= 1 << 6
-	}
-
-	// rest of exp and mantissa (cd-efgh)
-	n |= int((h >> 16) & 0x3f)
-
-	//print("match %.8lux %.8lux %d\n", l, h, n);
-	return n
-}
-
-func nocache(p *obj.Prog) {
-	p.Optab = 0
-	p.From.Class = 0
-	if p.From3 != nil {
-		p.From3.Class = 0
-	}
-	p.To.Class = 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/list5.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/list5.go
deleted file mode 100644
index 1a901db..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/list5.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/list5.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/list5.go:1
-// Inferno utils/5c/list.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5c/list.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-func init() {
-	obj.RegisterRegister(obj.RBaseARM, MAXREG, Rconv)
-	obj.RegisterOpcode(obj.ABaseARM, Anames)
-}
-
-func Rconv(r int) string {
-	if r == 0 {
-		return "NONE"
-	}
-	if r == REGG {
-		// Special case.
-		return "g"
-	}
-	if REG_R0 <= r && r <= REG_R15 {
-		return fmt.Sprintf("R%d", r-REG_R0)
-	}
-	if REG_F0 <= r && r <= REG_F15 {
-		return fmt.Sprintf("F%d", r-REG_F0)
-	}
-
-	switch r {
-	case REG_FPSR:
-		return "FPSR"
-
-	case REG_FPCR:
-		return "FPCR"
-
-	case REG_CPSR:
-		return "CPSR"
-
-	case REG_SPSR:
-		return "SPSR"
-	}
-
-	return fmt.Sprintf("Rgok(%d)", r-obj.RBaseARM)
-}
-
-func DRconv(a int) string {
-	s := "C_??"
-	if a >= C_NONE && a <= C_NCLASS {
-		s = cnames5[a]
-	}
-	var fp string
-	fp += s
-	return fp
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/obj5.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/obj5.go
deleted file mode 100644
index 1287ce7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm/obj5.go
+++ /dev/null
@@ -1,1057 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/obj5.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm/obj5.go:1
-// Derived from Inferno utils/5c/swt.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5c/swt.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"log"
-	"math"
-)
-
-var progedit_tlsfallback *obj.LSym
-
-func progedit(ctxt *obj.Link, p *obj.Prog) {
-	p.From.Class = 0
-	p.To.Class = 0
-
-	// Rewrite B/BL to symbol as TYPE_BRANCH.
-	switch p.As {
-	case AB,
-		ABL,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
-		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
-			p.To.Type = obj.TYPE_BRANCH
-		}
-	}
-
-	// Replace TLS register fetches on older ARM processors.
-	switch p.As {
-	// Treat MRC 15, 0, <reg>, C13, C0, 3 specially.
-	case AMRC:
-		if p.To.Offset&0xffff0fff == 0xee1d0f70 {
-			// Because the instruction might be rewritten to a BL which returns in R0
-			// the register must be zero.
-			if p.To.Offset&0xf000 != 0 {
-				ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
-			}
-
-			if obj.GOARM < 7 {
-				// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
-				if progedit_tlsfallback == nil {
-					progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
-				}
-
-				// MOVW	LR, R11
-				p.As = AMOVW
-
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REGLINK
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REGTMP
-
-				// BL	runtime.read_tls_fallback(SB)
-				p = obj.Appendp(ctxt, p)
-
-				p.As = ABL
-				p.To.Type = obj.TYPE_BRANCH
-				p.To.Sym = progedit_tlsfallback
-				p.To.Offset = 0
-
-				// MOVW	R11, LR
-				p = obj.Appendp(ctxt, p)
-
-				p.As = AMOVW
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REGTMP
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REGLINK
-				break
-			}
-		}
-
-		// Otherwise, MRC/MCR instructions need no further treatment.
-		p.As = AWORD
-	}
-
-	// Rewrite float constants to values stored in memory.
-	switch p.As {
-	case AMOVF:
-		if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
-			f32 := float32(p.From.Val.(float64))
-			i32 := math.Float32bits(f32)
-			literal := fmt.Sprintf("$f32.%08x", i32)
-			s := obj.Linklookup(ctxt, literal, 0)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-	case AMOVD:
-		if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
-			i64 := math.Float64bits(p.From.Val.(float64))
-			literal := fmt.Sprintf("$f64.%016x", i64)
-			s := obj.Linklookup(ctxt, literal, 0)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-	}
-
-	if ctxt.Flag_dynlink {
-		rewriteToUseGot(ctxt, p)
-	}
-}
-
-// Rewrite p, if necessary, to access global data via the global offset table.
-func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
-	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
-		//     ADUFFxxx $offset
-		// becomes
-		//     MOVW runtime.duffxxx@GOT, R9
-		//     ADD $offset, R9
-		//     CALL (R9)
-		var sym *obj.LSym
-		if p.As == obj.ADUFFZERO {
-			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
-		} else {
-			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
-		}
-		offset := p.To.Offset
-		p.As = AMOVW
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		p.From.Sym = sym
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R9
-		p.To.Name = obj.NAME_NONE
-		p.To.Offset = 0
-		p.To.Sym = nil
-		p1 := obj.Appendp(ctxt, p)
-		p1.As = AADD
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = offset
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = REG_R9
-		p2 := obj.Appendp(ctxt, p1)
-		p2.As = obj.ACALL
-		p2.To.Type = obj.TYPE_MEM
-		p2.To.Reg = REG_R9
-		return
-	}
-
-	// We only care about global data: NAME_EXTERN means a global
-	// symbol in the Go sense, and p.Sym.Local is true for a few
-	// internally defined symbols.
-	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		// MOVW $sym, Rx becomes MOVW sym@GOT, Rx
-		// MOVW $sym+<off>, Rx becomes MOVW sym@GOT, Rx; ADD <off>, Rx
-		if p.As != AMOVW {
-			ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
-		}
-		if p.To.Type != obj.TYPE_REG {
-			ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		if p.From.Offset != 0 {
-			q := obj.Appendp(ctxt, p)
-			q.As = AADD
-			q.From.Type = obj.TYPE_CONST
-			q.From.Offset = p.From.Offset
-			q.To = p.To
-			p.From.Offset = 0
-		}
-	}
-	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	var source *obj.Addr
-	// MOVx sym, Ry becomes MOVW sym@GOT, R9; MOVx (R9), Ry
-	// MOVx Ry, sym becomes MOVW sym@GOT, R9; MOVx Ry, (R9)
-	// An addition may be inserted between the two MOVs if there is an offset.
-	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
-		}
-		source = &p.From
-	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-		source = &p.To
-	} else {
-		return
-	}
-	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
-		return
-	}
-	if source.Sym.Type == obj.STLSBSS {
-		return
-	}
-	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	p1 := obj.Appendp(ctxt, p)
-	p2 := obj.Appendp(ctxt, p1)
-
-	p1.As = AMOVW
-	p1.From.Type = obj.TYPE_MEM
-	p1.From.Sym = source.Sym
-	p1.From.Name = obj.NAME_GOTREF
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = REG_R9
-
-	p2.As = p.As
-	p2.From = p.From
-	p2.To = p.To
-	if p.From.Name == obj.NAME_EXTERN {
-		p2.From.Reg = REG_R9
-		p2.From.Name = obj.NAME_NONE
-		p2.From.Sym = nil
-	} else if p.To.Name == obj.NAME_EXTERN {
-		p2.To.Reg = REG_R9
-		p2.To.Name = obj.NAME_NONE
-		p2.To.Sym = nil
-	} else {
-		return
-	}
-	obj.Nopout(p)
-}
-
-// Prog.mark
-const (
-	FOLL  = 1 << 0
-	LABEL = 1 << 1
-	LEAF  = 1 << 2
-)
-
-func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
-	autosize := int32(0)
-
-	ctxt.Cursym = cursym
-
-	if cursym.Text == nil || cursym.Text.Link == nil {
-		return
-	}
-
-	softfloat(ctxt, cursym)
-
-	p := cursym.Text
-	autoffset := int32(p.To.Offset)
-	if autoffset < 0 {
-		autoffset = 0
-	}
-	cursym.Locals = autoffset
-	cursym.Args = p.To.Val.(int32)
-
-	/*
-	 * find leaf subroutines
-	 * strip NOPs
-	 * expand RET
-	 * expand BECOME pseudo
-	 */
-	var q1 *obj.Prog
-	var q *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		switch p.As {
-		case obj.ATEXT:
-			p.Mark |= LEAF
-
-		case obj.ARET:
-			break
-
-		case ADIV, ADIVU, AMOD, AMODU:
-			q = p
-			if ctxt.Sym_div == nil {
-				initdiv(ctxt)
-			}
-			cursym.Text.Mark &^= LEAF
-			continue
-
-		case obj.ANOP:
-			q1 = p.Link
-			q.Link = q1 /* q is non-nop */
-			if q1 != nil {
-				q1.Mark |= p.Mark
-			}
-			continue
-
-		case ABL,
-			ABX,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			cursym.Text.Mark &^= LEAF
-			fallthrough
-
-		case AB,
-			ABEQ,
-			ABNE,
-			ABCS,
-			ABHS,
-			ABCC,
-			ABLO,
-			ABMI,
-			ABPL,
-			ABVS,
-			ABVC,
-			ABHI,
-			ABLS,
-			ABGE,
-			ABLT,
-			ABGT,
-			ABLE:
-			q1 = p.Pcond
-			if q1 != nil {
-				for q1.As == obj.ANOP {
-					q1 = q1.Link
-					p.Pcond = q1
-				}
-			}
-		}
-
-		q = p
-	}
-
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var q2 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		o := p.As
-		switch o {
-		case obj.ATEXT:
-			autosize = int32(p.To.Offset + 4)
-			if autosize <= 4 {
-				if cursym.Text.Mark&LEAF != 0 {
-					p.To.Offset = -4
-					autosize = 0
-				}
-			}
-
-			if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("save suppressed in: %s\n", cursym.Name)
-				}
-
-				cursym.Text.Mark |= LEAF
-			}
-
-			if cursym.Text.Mark&LEAF != 0 {
-				cursym.Set(obj.AttrLeaf, true)
-				if autosize == 0 {
-					break
-				}
-			}
-
-			if p.From3.Offset&obj.NOSPLIT == 0 {
-				p = stacksplit(ctxt, p, autosize) // emit split check
-			}
-
-			// MOVW.W		R14,$-autosize(SP)
-			p = obj.Appendp(ctxt, p)
-
-			p.As = AMOVW
-			p.Scond |= C_WBIT
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = REGLINK
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = int64(-autosize)
-			p.To.Reg = REGSP
-			p.Spadj = autosize
-
-			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
-				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
-				//
-				//	MOVW g_panic(g), R1
-				//	CMP $0, R1
-				//	B.EQ end
-				//	MOVW panic_argp(R1), R2
-				//	ADD $(autosize+4), R13, R3
-				//	CMP R2, R3
-				//	B.NE end
-				//	ADD $4, R13, R4
-				//	MOVW R4, panic_argp(R1)
-				// end:
-				//	NOP
-				//
-				// The NOP is needed to give the jumps somewhere to land.
-				// It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.
-
-				p = obj.Appendp(ctxt, p)
-
-				p.As = AMOVW
-				p.From.Type = obj.TYPE_MEM
-				p.From.Reg = REGG
-				p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REG_R1
-
-				p = obj.Appendp(ctxt, p)
-				p.As = ACMP
-				p.From.Type = obj.TYPE_CONST
-				p.From.Offset = 0
-				p.Reg = REG_R1
-
-				p = obj.Appendp(ctxt, p)
-				p.As = ABEQ
-				p.To.Type = obj.TYPE_BRANCH
-				p1 = p
-
-				p = obj.Appendp(ctxt, p)
-				p.As = AMOVW
-				p.From.Type = obj.TYPE_MEM
-				p.From.Reg = REG_R1
-				p.From.Offset = 0 // Panic.argp
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REG_R2
-
-				p = obj.Appendp(ctxt, p)
-				p.As = AADD
-				p.From.Type = obj.TYPE_CONST
-				p.From.Offset = int64(autosize) + 4
-				p.Reg = REG_R13
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REG_R3
-
-				p = obj.Appendp(ctxt, p)
-				p.As = ACMP
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REG_R2
-				p.Reg = REG_R3
-
-				p = obj.Appendp(ctxt, p)
-				p.As = ABNE
-				p.To.Type = obj.TYPE_BRANCH
-				p2 = p
-
-				p = obj.Appendp(ctxt, p)
-				p.As = AADD
-				p.From.Type = obj.TYPE_CONST
-				p.From.Offset = 4
-				p.Reg = REG_R13
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REG_R4
-
-				p = obj.Appendp(ctxt, p)
-				p.As = AMOVW
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REG_R4
-				p.To.Type = obj.TYPE_MEM
-				p.To.Reg = REG_R1
-				p.To.Offset = 0 // Panic.argp
-
-				p = obj.Appendp(ctxt, p)
-
-				p.As = obj.ANOP
-				p1.Pcond = p
-				p2.Pcond = p
-			}
-
-		case obj.ARET:
-			nocache(p)
-			if cursym.Text.Mark&LEAF != 0 {
-				if autosize == 0 {
-					p.As = AB
-					p.From = obj.Addr{}
-					if p.To.Sym != nil { // retjmp
-						p.To.Type = obj.TYPE_BRANCH
-					} else {
-						p.To.Type = obj.TYPE_MEM
-						p.To.Offset = 0
-						p.To.Reg = REGLINK
-					}
-
-					break
-				}
-			}
-
-			p.As = AMOVW
-			p.Scond |= C_PBIT
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = int64(autosize)
-			p.From.Reg = REGSP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REGPC
-
-			// If there are instructions following
-			// this ARET, they come from a branch
-			// with the same stackframe, so no spadj.
-			if p.To.Sym != nil { // retjmp
-				p.To.Reg = REGLINK
-				q2 = obj.Appendp(ctxt, p)
-				q2.As = AB
-				q2.To.Type = obj.TYPE_BRANCH
-				q2.To.Sym = p.To.Sym
-				p.To.Sym = nil
-				p = q2
-			}
-
-		case AADD:
-			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
-				p.Spadj = int32(-p.From.Offset)
-			}
-
-		case ASUB:
-			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
-				p.Spadj = int32(p.From.Offset)
-			}
-
-		case ADIV, ADIVU, AMOD, AMODU:
-			if cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
-				ctxt.Diag("cannot divide in NOSPLIT function")
-			}
-			if ctxt.Debugdivmod != 0 {
-				break
-			}
-			if p.From.Type != obj.TYPE_REG {
-				break
-			}
-			if p.To.Type != obj.TYPE_REG {
-				break
-			}
-
-			// Make copy because we overwrite p below.
-			q1 := *p
-			if q1.Reg == REGTMP || q1.Reg == 0 && q1.To.Reg == REGTMP {
-				ctxt.Diag("div already using REGTMP: %v", p)
-			}
-
-			/* MOV m(g),REGTMP */
-			p.As = AMOVW
-			p.Lineno = q1.Lineno
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = REGG
-			p.From.Offset = 6 * 4 // offset of g.m
-			p.Reg = 0
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REGTMP
-
-			/* MOV a,m_divmod(REGTMP) */
-			p = obj.Appendp(ctxt, p)
-			p.As = AMOVW
-			p.Lineno = q1.Lineno
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = q1.From.Reg
-			p.To.Type = obj.TYPE_MEM
-			p.To.Reg = REGTMP
-			p.To.Offset = 8 * 4 // offset of m.divmod
-
-			/* MOV b, R8 */
-			p = obj.Appendp(ctxt, p)
-			p.As = AMOVW
-			p.Lineno = q1.Lineno
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = q1.Reg
-			if q1.Reg == 0 {
-				p.From.Reg = q1.To.Reg
-			}
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_R8
-			p.To.Offset = 0
-
-			/* CALL appropriate */
-			p = obj.Appendp(ctxt, p)
-			p.As = ABL
-			p.Lineno = q1.Lineno
-			p.To.Type = obj.TYPE_BRANCH
-			switch o {
-			case ADIV:
-				p.To.Sym = ctxt.Sym_div
-
-			case ADIVU:
-				p.To.Sym = ctxt.Sym_divu
-
-			case AMOD:
-				p.To.Sym = ctxt.Sym_mod
-
-			case AMODU:
-				p.To.Sym = ctxt.Sym_modu
-			}
-
-			/* MOV REGTMP, b */
-			p = obj.Appendp(ctxt, p)
-			p.As = AMOVW
-			p.Lineno = q1.Lineno
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = REGTMP
-			p.From.Offset = 0
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = q1.To.Reg
-
-		case AMOVW:
-			if (p.Scond&C_WBIT != 0) && p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP {
-				p.Spadj = int32(-p.To.Offset)
-			}
-			if (p.Scond&C_PBIT != 0) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP && p.To.Reg != REGPC {
-				p.Spadj = int32(-p.From.Offset)
-			}
-			if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
-				p.Spadj = int32(-p.From.Offset)
-			}
-		}
-	}
-}
-
-func isfloatreg(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15
-}
-
-func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
-	if obj.GOARM > 5 {
-		return
-	}
-
-	symsfloat := obj.Linklookup(ctxt, "_sfloat", 0)
-
-	wasfloat := 0
-	for p := cursym.Text; p != nil; p = p.Link {
-		if p.Pcond != nil {
-			p.Pcond.Mark |= LABEL
-		}
-	}
-	var next *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		switch p.As {
-		case AMOVW:
-			if isfloatreg(&p.To) || isfloatreg(&p.From) {
-				goto soft
-			}
-			goto notsoft
-
-		case AMOVWD,
-			AMOVWF,
-			AMOVDW,
-			AMOVFW,
-			AMOVFD,
-			AMOVDF,
-			AMOVF,
-			AMOVD,
-			ACMPF,
-			ACMPD,
-			AADDF,
-			AADDD,
-			ASUBF,
-			ASUBD,
-			AMULF,
-			AMULD,
-			ADIVF,
-			ADIVD,
-			ASQRTF,
-			ASQRTD,
-			AABSF,
-			AABSD,
-			ANEGF,
-			ANEGD:
-			goto soft
-
-		default:
-			goto notsoft
-		}
-
-	soft:
-		if wasfloat == 0 || (p.Mark&LABEL != 0) {
-			next = ctxt.NewProg()
-			*next = *p
-
-			// BL _sfloat(SB)
-			*p = obj.Prog{}
-			p.Ctxt = ctxt
-			p.Link = next
-			p.As = ABL
-			p.To.Type = obj.TYPE_BRANCH
-			p.To.Sym = symsfloat
-			p.Lineno = next.Lineno
-
-			p = next
-			wasfloat = 1
-		}
-
-		continue
-
-	notsoft:
-		wasfloat = 0
-	}
-}
-
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
-	// MOVW			g_stackguard(g), R1
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AMOVW
-	p.From.Type = obj.TYPE_MEM
-	p.From.Reg = REGG
-	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-	if ctxt.Cursym.CFunc() {
-		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-	}
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R1
-
-	if framesize <= obj.StackSmall {
-		// small stack: SP < stackguard
-		//	CMP	stackguard, SP
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.Reg = REGSP
-	} else if framesize <= obj.StackBig {
-		// large stack: SP-framesize < stackguard-StackSmall
-		//	MOVW $-framesize(SP), R2
-		//	CMP stackguard, R2
-		p = obj.Appendp(ctxt, p)
-
-		p.As = AMOVW
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Reg = REGSP
-		p.From.Offset = int64(-framesize)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.Reg = REG_R2
-	} else {
-		// Such a large stack we need to protect against wraparound
-		// if SP is close to zero.
-		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
-		// The +StackGuard on both sides is required to keep the left side positive:
-		// SP is allowed to be slightly below stackguard. See stack.h.
-		//	CMP $StackPreempt, R1
-		//	MOVW.NE $StackGuard(SP), R2
-		//	SUB.NE R1, R2
-		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
-		//	CMP.NE R3, R2
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ACMP
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
-		p.Reg = REG_R1
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVW
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Reg = REGSP
-		p.From.Offset = obj.StackGuard
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-		p.Scond = C_SCOND_NE
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ASUB
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-		p.Scond = C_SCOND_NE
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVW
-		p.From.Type = obj.TYPE_ADDR
-		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R3
-		p.Scond = C_SCOND_NE
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.Reg = REG_R2
-		p.Scond = C_SCOND_NE
-	}
-
-	// BLS call-to-morestack
-	bls := obj.Appendp(ctxt, p)
-	bls.As = ABLS
-	bls.To.Type = obj.TYPE_BRANCH
-
-	var last *obj.Prog
-	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
-	}
-
-	// Now we are at the end of the function, but logically
-	// we are still in function prologue. We need to fix the
-	// SP data and PCDATA.
-	spfix := obj.Appendp(ctxt, last)
-	spfix.As = obj.ANOP
-	spfix.Spadj = -framesize
-
-	pcdata := obj.Appendp(ctxt, spfix)
-	pcdata.Lineno = ctxt.Cursym.Text.Lineno
-	pcdata.Mode = ctxt.Cursym.Text.Mode
-	pcdata.As = obj.APCDATA
-	pcdata.From.Type = obj.TYPE_CONST
-	pcdata.From.Offset = obj.PCDATA_StackMapIndex
-	pcdata.To.Type = obj.TYPE_CONST
-	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry
-
-	// MOVW	LR, R3
-	movw := obj.Appendp(ctxt, pcdata)
-	movw.As = AMOVW
-	movw.From.Type = obj.TYPE_REG
-	movw.From.Reg = REGLINK
-	movw.To.Type = obj.TYPE_REG
-	movw.To.Reg = REG_R3
-
-	bls.Pcond = movw
-
-	// BL runtime.morestack
-	call := obj.Appendp(ctxt, movw)
-	call.As = obj.ACALL
-	call.To.Type = obj.TYPE_BRANCH
-	morestack := "runtime.morestack"
-	switch {
-	case ctxt.Cursym.CFunc():
-		morestack = "runtime.morestackc"
-	case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
-		morestack = "runtime.morestack_noctxt"
-	}
-	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
-
-	// B start
-	b := obj.Appendp(ctxt, call)
-	b.As = obj.AJMP
-	b.To.Type = obj.TYPE_BRANCH
-	b.Pcond = ctxt.Cursym.Text.Link
-	b.Spadj = +framesize
-
-	return bls
-}
-
-func initdiv(ctxt *obj.Link) {
-	if ctxt.Sym_div != nil {
-		return
-	}
-	ctxt.Sym_div = obj.Linklookup(ctxt, "_div", 0)
-	ctxt.Sym_divu = obj.Linklookup(ctxt, "_divu", 0)
-	ctxt.Sym_mod = obj.Linklookup(ctxt, "_mod", 0)
-	ctxt.Sym_modu = obj.Linklookup(ctxt, "_modu", 0)
-}
-
-func follow(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	firstp := ctxt.NewProg()
-	lastp := firstp
-	xfol(ctxt, s.Text, &lastp)
-	lastp.Link = nil
-	s.Text = firstp.Link
-}
-
-func relinv(a obj.As) obj.As {
-	switch a {
-	case ABEQ:
-		return ABNE
-	case ABNE:
-		return ABEQ
-	case ABCS:
-		return ABCC
-	case ABHS:
-		return ABLO
-	case ABCC:
-		return ABCS
-	case ABLO:
-		return ABHS
-	case ABMI:
-		return ABPL
-	case ABPL:
-		return ABMI
-	case ABVS:
-		return ABVC
-	case ABVC:
-		return ABVS
-	case ABHI:
-		return ABLS
-	case ABLS:
-		return ABHI
-	case ABGE:
-		return ABLT
-	case ABLT:
-		return ABGE
-	case ABGT:
-		return ABLE
-	case ABLE:
-		return ABGT
-	}
-
-	log.Fatalf("unknown relation: %s", Anames[a])
-	return 0
-}
-
-func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
-	var q *obj.Prog
-	var r *obj.Prog
-	var i int
-
-loop:
-	if p == nil {
-		return
-	}
-	a := p.As
-	if a == AB {
-		q = p.Pcond
-		if q != nil && q.As != obj.ATEXT {
-			p.Mark |= FOLL
-			p = q
-			if p.Mark&FOLL == 0 {
-				goto loop
-			}
-		}
-	}
-
-	if p.Mark&FOLL != 0 {
-		i = 0
-		q = p
-		for ; i < 4; i, q = i+1, q.Link {
-			if q == *last || q == nil {
-				break
-			}
-			a = q.As
-			if a == obj.ANOP {
-				i--
-				continue
-			}
-
-			if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
-				goto copy
-			}
-			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
-				continue
-			}
-			if a != ABEQ && a != ABNE {
-				continue
-			}
-
-		copy:
-			for {
-				r = ctxt.NewProg()
-				*r = *p
-				if r.Mark&FOLL == 0 {
-					fmt.Printf("can't happen 1\n")
-				}
-				r.Mark |= FOLL
-				if p != q {
-					p = p.Link
-					(*last).Link = r
-					*last = r
-					continue
-				}
-
-				(*last).Link = r
-				*last = r
-				if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
-					return
-				}
-				r.As = ABNE
-				if a == ABNE {
-					r.As = ABEQ
-				}
-				r.Pcond = p.Link
-				r.Link = p.Pcond
-				if r.Link.Mark&FOLL == 0 {
-					xfol(ctxt, r.Link, last)
-				}
-				if r.Pcond.Mark&FOLL == 0 {
-					fmt.Printf("can't happen 2\n")
-				}
-				return
-			}
-		}
-
-		a = AB
-		q = ctxt.NewProg()
-		q.As = a
-		q.Lineno = p.Lineno
-		q.To.Type = obj.TYPE_BRANCH
-		q.To.Offset = p.Pc
-		q.Pcond = p
-		p = q
-	}
-
-	p.Mark |= FOLL
-	(*last).Link = p
-	*last = p
-	if a == AB || (a == obj.ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
-		return
-	}
-
-	if p.Pcond != nil {
-		if a != ABL && a != ABX && p.Link != nil {
-			q = obj.Brchain(ctxt, p.Link)
-			if a != obj.ATEXT {
-				if q != nil && (q.Mark&FOLL != 0) {
-					p.As = relinv(a)
-					p.Link = p.Pcond
-					p.Pcond = q
-				}
-			}
-
-			xfol(ctxt, p.Link, last)
-			q = obj.Brchain(ctxt, p.Pcond)
-			if q == nil {
-				q = p.Pcond
-			}
-			if q.Mark&FOLL != 0 {
-				p.Pcond = q
-				return
-			}
-
-			p = q
-			goto loop
-		}
-	}
-
-	p = p.Link
-	goto loop
-}
-
-var unaryDst = map[obj.As]bool{
-	ASWI:  true,
-	AWORD: true,
-}
-
-var Linkarm = obj.LinkArch{
-	Arch:       sys.ArchARM,
-	Preprocess: preprocess,
-	Assemble:   span5,
-	Follow:     follow,
-	Progedit:   progedit,
-	UnaryDst:   unaryDst,
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/a.out.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/a.out.go
deleted file mode 100644
index fc1ab91..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/a.out.go
+++ /dev/null
@@ -1,722 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/a.out.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/a.out.go:1
-// cmd/7c/7.out.h  from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/src/cmd/7c/7.out.h
-//
-// 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// 	Portions Copyright © 1997-1999 Vita Nuova Limited
-// 	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// 	Portions Copyright © 2004,2006 Bruce Ellis
-// 	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// 	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// 	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import "bootstrap/cmd/internal/obj"
-
-const (
-	NSNAME = 8
-	NSYM   = 50
-	NREG   = 32 /* number of general registers */
-	NFREG  = 32 /* number of floating point registers */
-)
-
-// General purpose registers, kept in the low bits of Prog.Reg.
-const (
-	// integer
-	REG_R0 = obj.RBaseARM64 + iota
-	REG_R1
-	REG_R2
-	REG_R3
-	REG_R4
-	REG_R5
-	REG_R6
-	REG_R7
-	REG_R8
-	REG_R9
-	REG_R10
-	REG_R11
-	REG_R12
-	REG_R13
-	REG_R14
-	REG_R15
-	REG_R16
-	REG_R17
-	REG_R18
-	REG_R19
-	REG_R20
-	REG_R21
-	REG_R22
-	REG_R23
-	REG_R24
-	REG_R25
-	REG_R26
-	REG_R27
-	REG_R28
-	REG_R29
-	REG_R30
-	REG_R31
-
-	// scalar floating point
-	REG_F0
-	REG_F1
-	REG_F2
-	REG_F3
-	REG_F4
-	REG_F5
-	REG_F6
-	REG_F7
-	REG_F8
-	REG_F9
-	REG_F10
-	REG_F11
-	REG_F12
-	REG_F13
-	REG_F14
-	REG_F15
-	REG_F16
-	REG_F17
-	REG_F18
-	REG_F19
-	REG_F20
-	REG_F21
-	REG_F22
-	REG_F23
-	REG_F24
-	REG_F25
-	REG_F26
-	REG_F27
-	REG_F28
-	REG_F29
-	REG_F30
-	REG_F31
-
-	// SIMD
-	REG_V0
-	REG_V1
-	REG_V2
-	REG_V3
-	REG_V4
-	REG_V5
-	REG_V6
-	REG_V7
-	REG_V8
-	REG_V9
-	REG_V10
-	REG_V11
-	REG_V12
-	REG_V13
-	REG_V14
-	REG_V15
-	REG_V16
-	REG_V17
-	REG_V18
-	REG_V19
-	REG_V20
-	REG_V21
-	REG_V22
-	REG_V23
-	REG_V24
-	REG_V25
-	REG_V26
-	REG_V27
-	REG_V28
-	REG_V29
-	REG_V30
-	REG_V31
-
-	// The EQ in
-	// 	CSET	EQ, R0
-	// is encoded as TYPE_REG, even though it's not really a register.
-	COND_EQ
-	COND_NE
-	COND_HS
-	COND_LO
-	COND_MI
-	COND_PL
-	COND_VS
-	COND_VC
-	COND_HI
-	COND_LS
-	COND_GE
-	COND_LT
-	COND_GT
-	COND_LE
-	COND_AL
-	COND_NV
-
-	REG_RSP = REG_V31 + 32 // to differentiate ZR/SP, REG_RSP&0x1f = 31
-)
-
-// Not registers, but flags that can be combined with regular register
-// constants to indicate extended register conversion. When checking,
-// you should subtract obj.RBaseARM64 first. From this difference, bit 11
-// indicates extended register, bits 8-10 select the conversion mode.
-const REG_EXT = obj.RBaseARM64 + 1<<11
-
-const (
-	REG_UXTB = REG_EXT + iota<<8
-	REG_UXTH
-	REG_UXTW
-	REG_UXTX
-	REG_SXTB
-	REG_SXTH
-	REG_SXTW
-	REG_SXTX
-)
-
-// Special registers, after subtracting obj.RBaseARM64, bit 12 indicates
-// a special register and the low bits select the register.
-const (
-	REG_SPECIAL = obj.RBaseARM64 + 1<<12 + iota
-	REG_DAIF
-	REG_NZCV
-	REG_FPSR
-	REG_FPCR
-	REG_SPSR_EL1
-	REG_ELR_EL1
-	REG_SPSR_EL2
-	REG_ELR_EL2
-	REG_CurrentEL
-	REG_SP_EL0
-	REG_SPSel
-	REG_DAIFSet
-	REG_DAIFClr
-)
-
-// Register assignments:
-//
-// compiler allocates R0 up as temps
-// compiler allocates register variables R7-R25
-// compiler allocates external registers R26 down
-//
-// compiler allocates register variables F7-F26
-// compiler allocates external registers F26 down
-const (
-	REGMIN = REG_R7  // register variables allocated from here to REGMAX
-	REGRT1 = REG_R16 // ARM64 IP0, for external linker, runtime, duffzero and duffcopy
-	REGRT2 = REG_R17 // ARM64 IP1, for external linker, runtime, duffcopy
-	REGPR  = REG_R18 // ARM64 platform register, unused in the Go toolchain
-	REGMAX = REG_R25
-
-	REGCTXT = REG_R26 // environment for closures
-	REGTMP  = REG_R27 // reserved for liblink
-	REGG    = REG_R28 // G
-	REGFP   = REG_R29 // frame pointer, unused in the Go toolchain
-	REGLINK = REG_R30
-
-	// ARM64 uses R31 as both stack pointer and zero register,
-	// depending on the instruction. To differentiate RSP from ZR,
-	// we use a different numeric value for REGZERO and REGSP.
-	REGZERO = REG_R31
-	REGSP   = REG_RSP
-
-	FREGRET = REG_F0
-	FREGMIN = REG_F7  // first register variable
-	FREGMAX = REG_F26 // last register variable for 7g only
-	FREGEXT = REG_F26 // first external register
-)
-
-const (
-	BIG = 2048 - 8
-)
-
-const (
-	/* mark flags */
-	LABEL = 1 << iota
-	LEAF
-	FLOAT
-	BRANCH
-	LOAD
-	FCMP
-	SYNC
-	LIST
-	FOLL
-	NOSCHED
-)
-
-const (
-	C_NONE   = iota
-	C_REG    // R0..R30
-	C_RSP    // R0..R30, RSP
-	C_FREG   // F0..F31
-	C_VREG   // V0..V31
-	C_PAIR   // (Rn, Rm)
-	C_SHIFT  // Rn<<2
-	C_EXTREG // Rn.UXTB<<3
-	C_SPR    // REG_NZCV
-	C_COND   // EQ, NE, etc
-
-	C_ZCON     // $0 or ZR
-	C_ADDCON0  // 12-bit unsigned, unshifted
-	C_ADDCON   // 12-bit unsigned, shifted left by 0 or 12
-	C_MOVCON   // generated by a 16-bit constant, optionally inverted and/or shifted by multiple of 16
-	C_BITCON   // bitfield and logical immediate masks
-	C_ABCON0   // could be C_ADDCON0 or C_BITCON
-	C_ABCON    // could be C_ADDCON or C_BITCON
-	C_MBCON    // could be C_MOVCON or C_BITCON
-	C_LCON     // 32-bit constant
-	C_VCON     // 64-bit constant
-	C_FCON     // floating-point constant
-	C_VCONADDR // 64-bit memory address
-
-	C_AACON // ADDCON offset in auto constant $a(FP)
-	C_LACON // 32-bit offset in auto constant $a(FP)
-	C_AECON // ADDCON offset in extern constant $e(SB)
-
-	// TODO(aram): only one branch class should be enough
-	C_SBRA // for TYPE_BRANCH
-	C_LBRA
-
-	C_NPAUTO   // -512 <= x < 0, 0 mod 8
-	C_NSAUTO   // -256 <= x < 0
-	C_PSAUTO   // 0 to 255
-	C_PPAUTO   // 0 to 504, 0 mod 8
-	C_UAUTO4K  // 0 to 4095
-	C_UAUTO8K  // 0 to 8190, 0 mod 2
-	C_UAUTO16K // 0 to 16380, 0 mod 4
-	C_UAUTO32K // 0 to 32760, 0 mod 8
-	C_UAUTO64K // 0 to 65520, 0 mod 16
-	C_LAUTO    // any other 32-bit constant
-
-	C_SEXT1  // 0 to 4095, direct
-	C_SEXT2  // 0 to 8190
-	C_SEXT4  // 0 to 16380
-	C_SEXT8  // 0 to 32760
-	C_SEXT16 // 0 to 65520
-	C_LEXT
-
-	// TODO(aram): s/AUTO/INDIR/
-	C_ZOREG  // 0(R)
-	C_NPOREG // mirror NPAUTO, etc
-	C_NSOREG
-	C_PSOREG
-	C_PPOREG
-	C_UOREG4K
-	C_UOREG8K
-	C_UOREG16K
-	C_UOREG32K
-	C_UOREG64K
-	C_LOREG
-
-	C_ADDR // TODO(aram): explain difference from C_VCONADDR
-
-	// The GOT slot for a symbol in -dynlink mode.
-	C_GOTADDR
-
-	// TLS "var" in local exec mode: will become a constant offset from
-	// thread local base that is ultimately chosen by the program linker.
-	C_TLS_LE
-
-	// TLS "var" in initial exec mode: will become a memory address (chosen
-	// by the program linker) that the dynamic linker will fill with the
-	// offset from the thread local base.
-	C_TLS_IE
-
-	C_ROFF // register offset (including register extended)
-
-	C_GOK
-	C_TEXTSIZE
-	C_NCLASS // must be last
-)
-
-const (
-	C_XPRE  = 1 << 6 // match arm.C_WBIT, so Prog.String knows how to print it
-	C_XPOST = 1 << 5 // match arm.C_PBIT, so Prog.String knows how to print it
-)
-
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p arm64
-
-const (
-	AADC = obj.ABaseARM64 + obj.A_ARCHSPECIFIC + iota
-	AADCS
-	AADCSW
-	AADCW
-	AADD
-	AADDS
-	AADDSW
-	AADDW
-	AADR
-	AADRP
-	AAND
-	AANDS
-	AANDSW
-	AANDW
-	AASR
-	AASRW
-	AAT
-	ABFI
-	ABFIW
-	ABFM
-	ABFMW
-	ABFXIL
-	ABFXILW
-	ABIC
-	ABICS
-	ABICSW
-	ABICW
-	ABRK
-	ACBNZ
-	ACBNZW
-	ACBZ
-	ACBZW
-	ACCMN
-	ACCMNW
-	ACCMP
-	ACCMPW
-	ACINC
-	ACINCW
-	ACINV
-	ACINVW
-	ACLREX
-	ACLS
-	ACLSW
-	ACLZ
-	ACLZW
-	ACMN
-	ACMNW
-	ACMP
-	ACMPW
-	ACNEG
-	ACNEGW
-	ACRC32B
-	ACRC32CB
-	ACRC32CH
-	ACRC32CW
-	ACRC32CX
-	ACRC32H
-	ACRC32W
-	ACRC32X
-	ACSEL
-	ACSELW
-	ACSET
-	ACSETM
-	ACSETMW
-	ACSETW
-	ACSINC
-	ACSINCW
-	ACSINV
-	ACSINVW
-	ACSNEG
-	ACSNEGW
-	ADC
-	ADCPS1
-	ADCPS2
-	ADCPS3
-	ADMB
-	ADRPS
-	ADSB
-	AEON
-	AEONW
-	AEOR
-	AEORW
-	AERET
-	AEXTR
-	AEXTRW
-	AHINT
-	AHLT
-	AHVC
-	AIC
-	AISB
-	ALDAR
-	ALDARB
-	ALDARH
-	ALDARW
-	ALDAXP
-	ALDAXPW
-	ALDAXR
-	ALDAXRB
-	ALDAXRH
-	ALDAXRW
-	ALDP
-	ALDXR
-	ALDXRB
-	ALDXRH
-	ALDXRW
-	ALDXP
-	ALDXPW
-	ALSL
-	ALSLW
-	ALSR
-	ALSRW
-	AMADD
-	AMADDW
-	AMNEG
-	AMNEGW
-	AMOVK
-	AMOVKW
-	AMOVN
-	AMOVNW
-	AMOVZ
-	AMOVZW
-	AMRS
-	AMSR
-	AMSUB
-	AMSUBW
-	AMUL
-	AMULW
-	AMVN
-	AMVNW
-	ANEG
-	ANEGS
-	ANEGSW
-	ANEGW
-	ANGC
-	ANGCS
-	ANGCSW
-	ANGCW
-	AORN
-	AORNW
-	AORR
-	AORRW
-	APRFM
-	APRFUM
-	ARBIT
-	ARBITW
-	AREM
-	AREMW
-	AREV
-	AREV16
-	AREV16W
-	AREV32
-	AREVW
-	AROR
-	ARORW
-	ASBC
-	ASBCS
-	ASBCSW
-	ASBCW
-	ASBFIZ
-	ASBFIZW
-	ASBFM
-	ASBFMW
-	ASBFX
-	ASBFXW
-	ASDIV
-	ASDIVW
-	ASEV
-	ASEVL
-	ASMADDL
-	ASMC
-	ASMNEGL
-	ASMSUBL
-	ASMULH
-	ASMULL
-	ASTXR
-	ASTXRB
-	ASTXRH
-	ASTXP
-	ASTXPW
-	ASTXRW
-	ASTLP
-	ASTLPW
-	ASTLR
-	ASTLRB
-	ASTLRH
-	ASTLRW
-	ASTLXP
-	ASTLXPW
-	ASTLXR
-	ASTLXRB
-	ASTLXRH
-	ASTLXRW
-	ASTP
-	ASUB
-	ASUBS
-	ASUBSW
-	ASUBW
-	ASVC
-	ASXTB
-	ASXTBW
-	ASXTH
-	ASXTHW
-	ASXTW
-	ASYS
-	ASYSL
-	ATBNZ
-	ATBZ
-	ATLBI
-	ATST
-	ATSTW
-	AUBFIZ
-	AUBFIZW
-	AUBFM
-	AUBFMW
-	AUBFX
-	AUBFXW
-	AUDIV
-	AUDIVW
-	AUMADDL
-	AUMNEGL
-	AUMSUBL
-	AUMULH
-	AUMULL
-	AUREM
-	AUREMW
-	AUXTB
-	AUXTH
-	AUXTW
-	AUXTBW
-	AUXTHW
-	AWFE
-	AWFI
-	AYIELD
-	AMOVB
-	AMOVBU
-	AMOVH
-	AMOVHU
-	AMOVW
-	AMOVWU
-	AMOVD
-	AMOVNP
-	AMOVNPW
-	AMOVP
-	AMOVPD
-	AMOVPQ
-	AMOVPS
-	AMOVPSW
-	AMOVPW
-	ABEQ
-	ABNE
-	ABCS
-	ABHS
-	ABCC
-	ABLO
-	ABMI
-	ABPL
-	ABVS
-	ABVC
-	ABHI
-	ABLS
-	ABGE
-	ABLT
-	ABGT
-	ABLE
-	AFABSD
-	AFABSS
-	AFADDD
-	AFADDS
-	AFCCMPD
-	AFCCMPED
-	AFCCMPS
-	AFCCMPES
-	AFCMPD
-	AFCMPED
-	AFCMPES
-	AFCMPS
-	AFCVTSD
-	AFCVTDS
-	AFCVTZSD
-	AFCVTZSDW
-	AFCVTZSS
-	AFCVTZSSW
-	AFCVTZUD
-	AFCVTZUDW
-	AFCVTZUS
-	AFCVTZUSW
-	AFDIVD
-	AFDIVS
-	AFMOVD
-	AFMOVS
-	AFMULD
-	AFMULS
-	AFNEGD
-	AFNEGS
-	AFSQRTD
-	AFSQRTS
-	AFSUBD
-	AFSUBS
-	ASCVTFD
-	ASCVTFS
-	ASCVTFWD
-	ASCVTFWS
-	AUCVTFD
-	AUCVTFS
-	AUCVTFWD
-	AUCVTFWS
-	AWORD
-	ADWORD
-	AFCSELS
-	AFCSELD
-	AFMAXS
-	AFMINS
-	AFMAXD
-	AFMIND
-	AFMAXNMS
-	AFMAXNMD
-	AFNMULS
-	AFNMULD
-	AFRINTNS
-	AFRINTND
-	AFRINTPS
-	AFRINTPD
-	AFRINTMS
-	AFRINTMD
-	AFRINTZS
-	AFRINTZD
-	AFRINTAS
-	AFRINTAD
-	AFRINTXS
-	AFRINTXD
-	AFRINTIS
-	AFRINTID
-	AFMADDS
-	AFMADDD
-	AFMSUBS
-	AFMSUBD
-	AFNMADDS
-	AFNMADDD
-	AFNMSUBS
-	AFNMSUBD
-	AFMINNMS
-	AFMINNMD
-	AFCVTDH
-	AFCVTHS
-	AFCVTHD
-	AFCVTSH
-	AAESD
-	AAESE
-	AAESIMC
-	AAESMC
-	ASHA1C
-	ASHA1H
-	ASHA1M
-	ASHA1P
-	ASHA1SU0
-	ASHA1SU1
-	ASHA256H
-	ASHA256H2
-	ASHA256SU0
-	ASHA256SU1
-	ALAST
-	AB  = obj.AJMP
-	ABL = obj.ACALL
-)
-
-const (
-	// shift types
-	SHIFT_LL = 0 << 22
-	SHIFT_LR = 1 << 22
-	SHIFT_AR = 2 << 22
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/anames.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/anames.go
deleted file mode 100644
index 907aaaf..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/anames.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/anames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/anames.go:1
-// Generated by stringer -i a.out.go -o anames.go -p arm64
-// Do not edit.
-
-package arm64
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-	obj.A_ARCHSPECIFIC: "ADC",
-	"ADCS",
-	"ADCSW",
-	"ADCW",
-	"ADD",
-	"ADDS",
-	"ADDSW",
-	"ADDW",
-	"ADR",
-	"ADRP",
-	"AND",
-	"ANDS",
-	"ANDSW",
-	"ANDW",
-	"ASR",
-	"ASRW",
-	"AT",
-	"BFI",
-	"BFIW",
-	"BFM",
-	"BFMW",
-	"BFXIL",
-	"BFXILW",
-	"BIC",
-	"BICS",
-	"BICSW",
-	"BICW",
-	"BRK",
-	"CBNZ",
-	"CBNZW",
-	"CBZ",
-	"CBZW",
-	"CCMN",
-	"CCMNW",
-	"CCMP",
-	"CCMPW",
-	"CINC",
-	"CINCW",
-	"CINV",
-	"CINVW",
-	"CLREX",
-	"CLS",
-	"CLSW",
-	"CLZ",
-	"CLZW",
-	"CMN",
-	"CMNW",
-	"CMP",
-	"CMPW",
-	"CNEG",
-	"CNEGW",
-	"CRC32B",
-	"CRC32CB",
-	"CRC32CH",
-	"CRC32CW",
-	"CRC32CX",
-	"CRC32H",
-	"CRC32W",
-	"CRC32X",
-	"CSEL",
-	"CSELW",
-	"CSET",
-	"CSETM",
-	"CSETMW",
-	"CSETW",
-	"CSINC",
-	"CSINCW",
-	"CSINV",
-	"CSINVW",
-	"CSNEG",
-	"CSNEGW",
-	"DC",
-	"DCPS1",
-	"DCPS2",
-	"DCPS3",
-	"DMB",
-	"DRPS",
-	"DSB",
-	"EON",
-	"EONW",
-	"EOR",
-	"EORW",
-	"ERET",
-	"EXTR",
-	"EXTRW",
-	"HINT",
-	"HLT",
-	"HVC",
-	"IC",
-	"ISB",
-	"LDAR",
-	"LDARB",
-	"LDARH",
-	"LDARW",
-	"LDAXP",
-	"LDAXPW",
-	"LDAXR",
-	"LDAXRB",
-	"LDAXRH",
-	"LDAXRW",
-	"LDP",
-	"LDXR",
-	"LDXRB",
-	"LDXRH",
-	"LDXRW",
-	"LDXP",
-	"LDXPW",
-	"LSL",
-	"LSLW",
-	"LSR",
-	"LSRW",
-	"MADD",
-	"MADDW",
-	"MNEG",
-	"MNEGW",
-	"MOVK",
-	"MOVKW",
-	"MOVN",
-	"MOVNW",
-	"MOVZ",
-	"MOVZW",
-	"MRS",
-	"MSR",
-	"MSUB",
-	"MSUBW",
-	"MUL",
-	"MULW",
-	"MVN",
-	"MVNW",
-	"NEG",
-	"NEGS",
-	"NEGSW",
-	"NEGW",
-	"NGC",
-	"NGCS",
-	"NGCSW",
-	"NGCW",
-	"ORN",
-	"ORNW",
-	"ORR",
-	"ORRW",
-	"PRFM",
-	"PRFUM",
-	"RBIT",
-	"RBITW",
-	"REM",
-	"REMW",
-	"REV",
-	"REV16",
-	"REV16W",
-	"REV32",
-	"REVW",
-	"ROR",
-	"RORW",
-	"SBC",
-	"SBCS",
-	"SBCSW",
-	"SBCW",
-	"SBFIZ",
-	"SBFIZW",
-	"SBFM",
-	"SBFMW",
-	"SBFX",
-	"SBFXW",
-	"SDIV",
-	"SDIVW",
-	"SEV",
-	"SEVL",
-	"SMADDL",
-	"SMC",
-	"SMNEGL",
-	"SMSUBL",
-	"SMULH",
-	"SMULL",
-	"STXR",
-	"STXRB",
-	"STXRH",
-	"STXP",
-	"STXPW",
-	"STXRW",
-	"STLP",
-	"STLPW",
-	"STLR",
-	"STLRB",
-	"STLRH",
-	"STLRW",
-	"STLXP",
-	"STLXPW",
-	"STLXR",
-	"STLXRB",
-	"STLXRH",
-	"STLXRW",
-	"STP",
-	"SUB",
-	"SUBS",
-	"SUBSW",
-	"SUBW",
-	"SVC",
-	"SXTB",
-	"SXTBW",
-	"SXTH",
-	"SXTHW",
-	"SXTW",
-	"SYS",
-	"SYSL",
-	"TBNZ",
-	"TBZ",
-	"TLBI",
-	"TST",
-	"TSTW",
-	"UBFIZ",
-	"UBFIZW",
-	"UBFM",
-	"UBFMW",
-	"UBFX",
-	"UBFXW",
-	"UDIV",
-	"UDIVW",
-	"UMADDL",
-	"UMNEGL",
-	"UMSUBL",
-	"UMULH",
-	"UMULL",
-	"UREM",
-	"UREMW",
-	"UXTB",
-	"UXTH",
-	"UXTW",
-	"UXTBW",
-	"UXTHW",
-	"WFE",
-	"WFI",
-	"YIELD",
-	"MOVB",
-	"MOVBU",
-	"MOVH",
-	"MOVHU",
-	"MOVW",
-	"MOVWU",
-	"MOVD",
-	"MOVNP",
-	"MOVNPW",
-	"MOVP",
-	"MOVPD",
-	"MOVPQ",
-	"MOVPS",
-	"MOVPSW",
-	"MOVPW",
-	"BEQ",
-	"BNE",
-	"BCS",
-	"BHS",
-	"BCC",
-	"BLO",
-	"BMI",
-	"BPL",
-	"BVS",
-	"BVC",
-	"BHI",
-	"BLS",
-	"BGE",
-	"BLT",
-	"BGT",
-	"BLE",
-	"FABSD",
-	"FABSS",
-	"FADDD",
-	"FADDS",
-	"FCCMPD",
-	"FCCMPED",
-	"FCCMPS",
-	"FCCMPES",
-	"FCMPD",
-	"FCMPED",
-	"FCMPES",
-	"FCMPS",
-	"FCVTSD",
-	"FCVTDS",
-	"FCVTZSD",
-	"FCVTZSDW",
-	"FCVTZSS",
-	"FCVTZSSW",
-	"FCVTZUD",
-	"FCVTZUDW",
-	"FCVTZUS",
-	"FCVTZUSW",
-	"FDIVD",
-	"FDIVS",
-	"FMOVD",
-	"FMOVS",
-	"FMULD",
-	"FMULS",
-	"FNEGD",
-	"FNEGS",
-	"FSQRTD",
-	"FSQRTS",
-	"FSUBD",
-	"FSUBS",
-	"SCVTFD",
-	"SCVTFS",
-	"SCVTFWD",
-	"SCVTFWS",
-	"UCVTFD",
-	"UCVTFS",
-	"UCVTFWD",
-	"UCVTFWS",
-	"WORD",
-	"DWORD",
-	"FCSELS",
-	"FCSELD",
-	"FMAXS",
-	"FMINS",
-	"FMAXD",
-	"FMIND",
-	"FMAXNMS",
-	"FMAXNMD",
-	"FNMULS",
-	"FNMULD",
-	"FRINTNS",
-	"FRINTND",
-	"FRINTPS",
-	"FRINTPD",
-	"FRINTMS",
-	"FRINTMD",
-	"FRINTZS",
-	"FRINTZD",
-	"FRINTAS",
-	"FRINTAD",
-	"FRINTXS",
-	"FRINTXD",
-	"FRINTIS",
-	"FRINTID",
-	"FMADDS",
-	"FMADDD",
-	"FMSUBS",
-	"FMSUBD",
-	"FNMADDS",
-	"FNMADDD",
-	"FNMSUBS",
-	"FNMSUBD",
-	"FMINNMS",
-	"FMINNMD",
-	"FCVTDH",
-	"FCVTHS",
-	"FCVTHD",
-	"FCVTSH",
-	"AESD",
-	"AESE",
-	"AESIMC",
-	"AESMC",
-	"SHA1C",
-	"SHA1H",
-	"SHA1M",
-	"SHA1P",
-	"SHA1SU0",
-	"SHA1SU1",
-	"SHA256H",
-	"SHA256H2",
-	"SHA256SU0",
-	"SHA256SU1",
-	"LAST",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/anames7.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/anames7.go
deleted file mode 100644
index c5134bb..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/anames7.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/anames7.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/anames7.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-var cnames7 = []string{
-	"NONE",
-	"REG",
-	"RSP",
-	"FREG",
-	"VREG",
-	"PAIR",
-	"SHIFT",
-	"EXTREG",
-	"SPR",
-	"COND",
-	"ZCON",
-	"ADDCON0",
-	"ADDCON",
-	"MOVCON",
-	"BITCON",
-	"ABCON0",
-	"ABCON",
-	"MBCON",
-	"LCON",
-	"VCON",
-	"FCON",
-	"VCONADDR",
-	"AACON",
-	"LACON",
-	"AECON",
-	"SBRA",
-	"LBRA",
-	"NPAUTO",
-	"NSAUTO",
-	"PSAUTO",
-	"PPAUTO",
-	"UAUTO4K",
-	"UAUTO8K",
-	"UAUTO16K",
-	"UAUTO32K",
-	"UAUTO64K",
-	"LAUTO",
-	"SEXT1",
-	"SEXT2",
-	"SEXT4",
-	"SEXT8",
-	"SEXT16",
-	"LEXT",
-	"ZOREG",
-	"NPOREG",
-	"NSOREG",
-	"PSOREG",
-	"PPOREG",
-	"UOREG4K",
-	"UOREG8K",
-	"UOREG16K",
-	"UOREG32K",
-	"UOREG64K",
-	"LOREG",
-	"ADDR",
-	"GOTADDR",
-	"TLS_LE",
-	"TLS_IE",
-	"ROFF",
-	"GOK",
-	"TEXTSIZE",
-	"NCLASS",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/asm7.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/asm7.go
deleted file mode 100644
index b2c9401..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/asm7.go
+++ /dev/null
@@ -1,4376 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/asm7.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/asm7.go:1
-// cmd/7l/asm.c, cmd/7l/asmout.c, cmd/7l/optab.c, cmd/7l/span.c, cmd/ld/sub.c, cmd/ld/mod.c, from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/
-//
-// 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// 	Portions Copyright © 1997-1999 Vita Nuova Limited
-// 	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// 	Portions Copyright © 2004,2006 Bruce Ellis
-// 	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// 	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// 	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"log"
-	"math"
-	"sort"
-)
-
-const (
-	funcAlign = 16
-)
-
-const (
-	REGFROM = 1
-)
-
-type Optab struct {
-	as    obj.As
-	a1    uint8
-	a2    uint8
-	a3    uint8
-	type_ int8
-	size  int8
-	param int16
-	flag  int8
-	scond uint16
-}
-
-var oprange [ALAST & obj.AMask][]Optab
-
-var xcmp [C_NCLASS][C_NCLASS]bool
-
-const (
-	S32     = 0 << 31
-	S64     = 1 << 31
-	Sbit    = 1 << 29
-	LSL0_32 = 2 << 13
-	LSL0_64 = 3 << 13
-)
-
-func OPDP2(x uint32) uint32 {
-	return 0<<30 | 0<<29 | 0xd6<<21 | x<<10
-}
-
-func OPDP3(sf uint32, op54 uint32, op31 uint32, o0 uint32) uint32 {
-	return sf<<31 | op54<<29 | 0x1B<<24 | op31<<21 | o0<<15
-}
-
-func OPBcc(x uint32) uint32 {
-	return 0x2A<<25 | 0<<24 | 0<<4 | x&15
-}
-
-func OPBLR(x uint32) uint32 {
-	/* x=0, JMP; 1, CALL; 2, RET */
-	return 0x6B<<25 | 0<<23 | x<<21 | 0x1F<<16 | 0<<10
-}
-
-func SYSOP(l uint32, op0 uint32, op1 uint32, crn uint32, crm uint32, op2 uint32, rt uint32) uint32 {
-	return 0x354<<22 | l<<21 | op0<<19 | op1<<16 | crn&15<<12 | crm&15<<8 | op2<<5 | rt
-}
-
-func SYSHINT(x uint32) uint32 {
-	return SYSOP(0, 0, 3, 2, 0, x, 0x1F)
-}
-
-func LDSTR12U(sz uint32, v uint32, opc uint32) uint32 {
-	return sz<<30 | 7<<27 | v<<26 | 1<<24 | opc<<22
-}
-
-func LDSTR9S(sz uint32, v uint32, opc uint32) uint32 {
-	return sz<<30 | 7<<27 | v<<26 | 0<<24 | opc<<22
-}
-
-func LD2STR(o uint32) uint32 {
-	return o &^ (3 << 22)
-}
-
-func LDSTX(sz uint32, o2 uint32, l uint32, o1 uint32, o0 uint32) uint32 {
-	return sz<<30 | 0x8<<24 | o2<<23 | l<<22 | o1<<21 | o0<<15
-}
-
-func FPCMP(m uint32, s uint32, type_ uint32, op uint32, op2 uint32) uint32 {
-	return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | op<<14 | 8<<10 | op2
-}
-
-func FPCCMP(m uint32, s uint32, type_ uint32, op uint32) uint32 {
-	return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | 1<<10 | op<<4
-}
-
-func FPOP1S(m uint32, s uint32, type_ uint32, op uint32) uint32 {
-	return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | op<<15 | 0x10<<10
-}
-
-func FPOP2S(m uint32, s uint32, type_ uint32, op uint32) uint32 {
-	return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | op<<12 | 2<<10
-}
-
-func FPCVTI(sf uint32, s uint32, type_ uint32, rmode uint32, op uint32) uint32 {
-	return sf<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | rmode<<19 | op<<16 | 0<<10
-}
-
-func ADR(p uint32, o uint32, rt uint32) uint32 {
-	return p<<31 | (o&3)<<29 | 0x10<<24 | ((o>>2)&0x7FFFF)<<5 | rt&31
-}
-
-func OPBIT(x uint32) uint32 {
-	return 1<<30 | 0<<29 | 0xD6<<21 | 0<<16 | x<<10
-}
-
-const (
-	LFROM = 1 << 0
-	LTO   = 1 << 1
-)
-
-var optab = []Optab{
-	/* struct Optab:
-	OPCODE, from, prog->reg, to, type,size,param,flag,scond */
-	{obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0},
-
-	/* arithmetic operations */
-	{AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{AADC, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{AADC, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{ANEG, C_REG, C_NONE, C_REG, 25, 4, 0, 0, 0},
-	{ANEG, C_NONE, C_NONE, C_REG, 25, 4, 0, 0, 0},
-	{ANGC, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0},
-	{ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0},
-	{AADD, C_ADDCON, C_RSP, C_RSP, 2, 4, 0, 0, 0},
-	{AADD, C_ADDCON, C_NONE, C_RSP, 2, 4, 0, 0, 0},
-	{ACMP, C_ADDCON, C_RSP, C_NONE, 2, 4, 0, 0, 0},
-	{AADD, C_MOVCON, C_RSP, C_RSP, 62, 8, 0, 0, 0},
-	{AADD, C_MOVCON, C_NONE, C_RSP, 62, 8, 0, 0, 0},
-	{ACMP, C_MOVCON, C_RSP, C_NONE, 62, 8, 0, 0, 0},
-	{AADD, C_BITCON, C_RSP, C_RSP, 62, 8, 0, 0, 0},
-	{AADD, C_BITCON, C_NONE, C_RSP, 62, 8, 0, 0, 0},
-	{ACMP, C_BITCON, C_RSP, C_NONE, 62, 8, 0, 0, 0},
-	{AADD, C_VCON, C_RSP, C_RSP, 13, 8, 0, LFROM, 0},
-	{AADD, C_VCON, C_NONE, C_RSP, 13, 8, 0, LFROM, 0},
-	{ACMP, C_VCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0},
-	{AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0},
-	{AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
-	{AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
-	{ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0},
-	{ANEG, C_SHIFT, C_NONE, C_REG, 26, 4, 0, 0, 0},
-	{AADD, C_REG, C_RSP, C_RSP, 27, 4, 0, 0, 0},
-	{AADD, C_REG, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{ACMP, C_REG, C_RSP, C_NONE, 27, 4, 0, 0, 0},
-	{AADD, C_EXTREG, C_RSP, C_RSP, 27, 4, 0, 0, 0},
-	{AADD, C_EXTREG, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{AMVN, C_EXTREG, C_NONE, C_RSP, 27, 4, 0, 0, 0},
-	{ACMP, C_EXTREG, C_RSP, C_NONE, 27, 4, 0, 0, 0},
-	{AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-
-	/* logical operations */
-	{AAND, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{AAND, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{ABIC, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{ABIC, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{AAND, C_BITCON, C_REG, C_REG, 53, 4, 0, 0, 0},
-	{AAND, C_BITCON, C_NONE, C_REG, 53, 4, 0, 0, 0},
-	{ABIC, C_BITCON, C_REG, C_REG, 53, 4, 0, 0, 0},
-	{ABIC, C_BITCON, C_NONE, C_REG, 53, 4, 0, 0, 0},
-	{AAND, C_MOVCON, C_REG, C_REG, 62, 8, 0, 0, 0},
-	{AAND, C_MOVCON, C_NONE, C_REG, 62, 8, 0, 0, 0},
-	{ABIC, C_MOVCON, C_REG, C_REG, 62, 8, 0, 0, 0},
-	{ABIC, C_MOVCON, C_NONE, C_REG, 62, 8, 0, 0, 0},
-	{AAND, C_VCON, C_REG, C_REG, 28, 8, 0, LFROM, 0},
-	{AAND, C_VCON, C_NONE, C_REG, 28, 8, 0, LFROM, 0},
-	{ABIC, C_VCON, C_REG, C_REG, 28, 8, 0, LFROM, 0},
-	{ABIC, C_VCON, C_NONE, C_REG, 28, 8, 0, LFROM, 0},
-	{AAND, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0},
-	{AAND, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
-	{ABIC, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0},
-	{ABIC, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
-	{AMOVD, C_RSP, C_NONE, C_RSP, 24, 4, 0, 0, 0},
-	{AMVN, C_REG, C_NONE, C_REG, 24, 4, 0, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_REG, 45, 4, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_REG, 45, 4, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_REG, 45, 4, 0, 0, 0}, /* also MOVHU */
-	{AMOVW, C_REG, C_NONE, C_REG, 45, 4, 0, 0, 0}, /* also MOVWU */
-	/* TODO: MVN C_SHIFT */
-
-	/* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */
-	{AMOVW, C_MOVCON, C_NONE, C_REG, 32, 4, 0, 0, 0},
-	{AMOVD, C_MOVCON, C_NONE, C_REG, 32, 4, 0, 0, 0},
-
-	// TODO: these don't work properly.
-	// { AMOVW,		C_ADDCON,	C_NONE,	C_REG,		2, 4, 0 , 0},
-	// { AMOVD,		C_ADDCON,	C_NONE,	C_REG,		2, 4, 0 , 0},
-	{AMOVW, C_BITCON, C_NONE, C_REG, 32, 4, 0, 0, 0},
-	{AMOVD, C_BITCON, C_NONE, C_REG, 32, 4, 0, 0, 0},
-
-	{AMOVK, C_VCON, C_NONE, C_REG, 33, 4, 0, 0, 0},
-	{AMOVD, C_AACON, C_NONE, C_REG, 4, 4, REGFROM, 0, 0},
-	{ASDIV, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
-	{ASDIV, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
-	{AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
-	{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
-	{AB, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
-	{ABL, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0},
-	{ABL, C_REG, C_NONE, C_REG, 6, 4, 0, 0, 0},
-	{ABL, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
-	{obj.ARET, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0},
-	{obj.ARET, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
-	{AADRP, C_SBRA, C_NONE, C_REG, 60, 4, 0, 0, 0},
-	{AADR, C_SBRA, C_NONE, C_REG, 61, 4, 0, 0, 0},
-	{ABFM, C_VCON, C_REG, C_REG, 42, 4, 0, 0, 0},
-	{ABFI, C_VCON, C_REG, C_REG, 43, 4, 0, 0, 0},
-	{AEXTR, C_VCON, C_REG, C_REG, 44, 4, 0, 0, 0},
-	{ASXTB, C_REG, C_NONE, C_REG, 45, 4, 0, 0, 0},
-	{ACLS, C_REG, C_NONE, C_REG, 46, 4, 0, 0, 0},
-	{ABEQ, C_NONE, C_NONE, C_SBRA, 7, 4, 0, 0, 0},
-	{ALSL, C_VCON, C_REG, C_REG, 8, 4, 0, 0, 0},
-	{ALSL, C_VCON, C_NONE, C_REG, 8, 4, 0, 0, 0},
-	{ALSL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0},
-	{ALSL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0},
-	{ASVC, C_NONE, C_NONE, C_VCON, 10, 4, 0, 0, 0},
-	{ASVC, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
-	{ADWORD, C_NONE, C_NONE, C_VCON, 11, 8, 0, 0, 0},
-	{ADWORD, C_NONE, C_NONE, C_LEXT, 11, 8, 0, 0, 0},
-	{ADWORD, C_NONE, C_NONE, C_ADDR, 11, 8, 0, 0, 0},
-	{ADWORD, C_NONE, C_NONE, C_LACON, 11, 8, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_LCON, 14, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_LEXT, 14, 4, 0, 0, 0},
-	{AWORD, C_NONE, C_NONE, C_ADDR, 14, 4, 0, 0, 0},
-	{AMOVW, C_VCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
-	{AMOVW, C_VCONADDR, C_NONE, C_REG, 68, 8, 0, 0, 0},
-	{AMOVD, C_VCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
-	{AMOVD, C_VCONADDR, C_NONE, C_REG, 68, 8, 0, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AMOVB, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
-	{AMOVBU, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
-	{AMOVH, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
-	{AMOVW, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
-	{AMOVD, C_ADDR, C_NONE, C_REG, 65, 12, 0, 0, 0},
-	{AMOVD, C_GOTADDR, C_NONE, C_REG, 71, 8, 0, 0, 0},
-	{AMOVD, C_TLS_LE, C_NONE, C_REG, 69, 4, 0, 0, 0},
-	{AMOVD, C_TLS_IE, C_NONE, C_REG, 70, 8, 0, 0, 0},
-	{AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
-	{AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0},
-	{AMADD, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
-	{AREM, C_REG, C_REG, C_REG, 16, 8, 0, 0, 0},
-	{AREM, C_REG, C_NONE, C_REG, 16, 8, 0, 0, 0},
-	{ACSEL, C_COND, C_REG, C_REG, 18, 4, 0, 0, 0}, /* from3 optional */
-	{ACSET, C_COND, C_NONE, C_REG, 18, 4, 0, 0, 0},
-	{ACCMN, C_COND, C_REG, C_VCON, 19, 4, 0, 0, 0}, /* from3 either C_REG or C_VCON */
-
-	/* scaled 12-bit unsigned displacement store */
-	{AMOVB, C_REG, C_NONE, C_UAUTO4K, 20, 4, REGSP, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_UOREG4K, 20, 4, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_UAUTO4K, 20, 4, REGSP, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_UOREG4K, 20, 4, 0, 0, 0},
-
-	{AMOVH, C_REG, C_NONE, C_UAUTO8K, 20, 4, REGSP, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_UOREG8K, 20, 4, 0, 0, 0},
-
-	{AMOVW, C_REG, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0},
-
-	/* unscaled 9-bit signed displacement store */
-	{AMOVB, C_REG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-
-	{AMOVH, C_REG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-
-	{AMOVD, C_REG, C_NONE, C_UAUTO32K, 20, 4, REGSP, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_UOREG32K, 20, 4, 0, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-
-	/* short displacement load */
-	{AMOVB, C_UAUTO4K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVB, C_NSAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVB, C_ZOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVB, C_UOREG4K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVB, C_NSOREG, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-
-	{AMOVBU, C_UAUTO4K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVBU, C_NSAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVBU, C_ZOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVBU, C_UOREG4K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVBU, C_NSOREG, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-
-	{AMOVH, C_UAUTO8K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVH, C_NSAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVH, C_ZOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVH, C_UOREG8K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVH, C_NSOREG, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-
-	{AMOVW, C_UAUTO16K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVW, C_NSAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVW, C_ZOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVW, C_UOREG16K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVW, C_NSOREG, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-
-	{AMOVD, C_UAUTO32K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVD, C_NSAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVD, C_ZOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
-	{AMOVD, C_UOREG32K, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-	{AMOVD, C_NSOREG, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
-
-	/* long displacement store */
-	{AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, 0, 0},
-	{AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, 0, 0},
-	{AMOVH, C_REG, C_NONE, C_LOREG, 30, 8, 0, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, 0, 0},
-	{AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_LOREG, 30, 8, 0, 0, 0},
-
-	/* long displacement load */
-	{AMOVB, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, 0, 0},
-	{AMOVB, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVB, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, 0, 0},
-	{AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVH, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, 0, 0},
-	{AMOVH, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVH, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, 0, 0},
-	{AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVD, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, 0, 0},
-	{AMOVD, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-	{AMOVD, C_LOREG, C_NONE, C_REG, 31, 8, 0, 0, 0},
-
-	/* load long effective stack address (load int32 offset and add) */
-	{AMOVD, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0},
-
-	/* pre/post-indexed load (unscaled, signed 9-bit offset) */
-	{AMOVD, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST},
-	{AMOVW, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST},
-	{AMOVH, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST},
-	{AMOVB, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST},
-	{AMOVBU, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST},
-	{AFMOVS, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST},
-	{AFMOVD, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST},
-	{AMOVD, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE},
-	{AMOVW, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE},
-	{AMOVH, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE},
-	{AMOVB, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE},
-	{AMOVBU, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE},
-	{AFMOVS, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE},
-	{AFMOVD, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE},
-
-	/* pre/post-indexed store (unscaled, signed 9-bit offset) */
-	{AMOVD, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVW, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVH, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVB, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVBU, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AFMOVS, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AFMOVD, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST},
-	{AMOVD, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVW, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVH, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVB, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AMOVBU, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AFMOVS, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-	{AFMOVD, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE},
-
-	/* pre/post-indexed load/store register pair
-	   (unscaled, signed 10-bit quad-aligned offset) */
-	{ALDP, C_LOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE},
-	{ALDP, C_LOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST},
-	{ASTP, C_PAIR, C_NONE, C_LOREG, 67, 4, 0, 0, C_XPRE},
-	{ASTP, C_PAIR, C_NONE, C_LOREG, 67, 4, 0, 0, C_XPOST},
-
-	/* special */
-	{AMOVD, C_SPR, C_NONE, C_REG, 35, 4, 0, 0, 0},
-	{AMRS, C_SPR, C_NONE, C_REG, 35, 4, 0, 0, 0},
-	{AMOVD, C_REG, C_NONE, C_SPR, 36, 4, 0, 0, 0},
-	{AMSR, C_REG, C_NONE, C_SPR, 36, 4, 0, 0, 0},
-	{AMOVD, C_VCON, C_NONE, C_SPR, 37, 4, 0, 0, 0},
-	{AMSR, C_VCON, C_NONE, C_SPR, 37, 4, 0, 0, 0},
-	{AERET, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_UAUTO32K, 20, 4, REGSP, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_UOREG32K, 20, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0},
-	{AFMOVS, C_UAUTO16K, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVS, C_NSAUTO, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVS, C_ZOREG, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVS, C_UOREG16K, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVS, C_NSOREG, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVD, C_UAUTO32K, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVD, C_NSAUTO, C_NONE, C_FREG, 21, 4, REGSP, 0, 0},
-	{AFMOVD, C_ZOREG, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVD, C_UOREG32K, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVD, C_NSOREG, C_NONE, C_FREG, 21, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AFMOVS, C_FREG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AFMOVD, C_FREG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
-	{AFMOVD, C_FREG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
-	{AFMOVS, C_LAUTO, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0},
-	{AFMOVS, C_LOREG, C_NONE, C_FREG, 31, 8, 0, LFROM, 0},
-	{AFMOVD, C_LAUTO, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0},
-	{AFMOVD, C_LOREG, C_NONE, C_FREG, 31, 8, 0, LFROM, 0},
-	{AFMOVS, C_FREG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AFMOVS, C_ADDR, C_NONE, C_FREG, 65, 12, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_ADDR, 64, 12, 0, 0, 0},
-	{AFMOVD, C_ADDR, C_NONE, C_FREG, 65, 12, 0, 0, 0},
-	{AFADDS, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFADDS, C_FREG, C_FREG, C_FREG, 54, 4, 0, 0, 0},
-	{AFADDS, C_FCON, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFADDS, C_FCON, C_FREG, C_FREG, 54, 4, 0, 0, 0},
-	{AFMOVS, C_FCON, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFMOVD, C_FCON, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
-	{AFCVTZSD, C_FREG, C_NONE, C_REG, 29, 4, 0, 0, 0},
-	{ASCVTFD, C_REG, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AFMOVS, C_REG, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AFMOVS, C_FREG, C_NONE, C_REG, 29, 4, 0, 0, 0},
-	{AFMOVD, C_REG, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{AFMOVD, C_FREG, C_NONE, C_REG, 29, 4, 0, 0, 0},
-	{AFCMPS, C_FREG, C_FREG, C_NONE, 56, 4, 0, 0, 0},
-	{AFCMPS, C_FCON, C_FREG, C_NONE, 56, 4, 0, 0, 0},
-	{AFCCMPS, C_COND, C_REG, C_VCON, 57, 4, 0, 0, 0},
-	{AFCSELD, C_COND, C_REG, C_FREG, 18, 4, 0, 0, 0},
-	{AFCVTSD, C_FREG, C_NONE, C_FREG, 29, 4, 0, 0, 0},
-	{ACLREX, C_NONE, C_NONE, C_VCON, 38, 4, 0, 0, 0},
-	{ACLREX, C_NONE, C_NONE, C_NONE, 38, 4, 0, 0, 0},
-	{ACBZ, C_REG, C_NONE, C_SBRA, 39, 4, 0, 0, 0},
-	{ATBZ, C_VCON, C_REG, C_SBRA, 40, 4, 0, 0, 0},
-	{ASYS, C_VCON, C_NONE, C_NONE, 50, 4, 0, 0, 0},
-	{ASYS, C_VCON, C_REG, C_NONE, 50, 4, 0, 0, 0},
-	{ASYSL, C_VCON, C_NONE, C_REG, 50, 4, 0, 0, 0},
-	{ADMB, C_VCON, C_NONE, C_NONE, 51, 4, 0, 0, 0},
-	{AHINT, C_VCON, C_NONE, C_NONE, 52, 4, 0, 0, 0},
-	{ALDAR, C_ZOREG, C_NONE, C_REG, 58, 4, 0, 0, 0},
-	{ALDXR, C_ZOREG, C_NONE, C_REG, 58, 4, 0, 0, 0},
-	{ALDAXR, C_ZOREG, C_NONE, C_REG, 58, 4, 0, 0, 0},
-	{ALDXP, C_ZOREG, C_REG, C_REG, 58, 4, 0, 0, 0},
-	{ASTLR, C_REG, C_NONE, C_ZOREG, 59, 4, 0, 0, 0},  // to3=C_NONE
-	{ASTXR, C_REG, C_NONE, C_ZOREG, 59, 4, 0, 0, 0},  // to3=C_REG
-	{ASTLXR, C_REG, C_NONE, C_ZOREG, 59, 4, 0, 0, 0}, // to3=C_REG
-
-	//	{ ASTXP,		C_REG, C_NONE,	C_ZOREG,		59, 4, 0 , 0}, // TODO(aram):
-
-	{AAESD, C_VREG, C_NONE, C_VREG, 29, 4, 0, 0, 0},
-	{ASHA1C, C_VREG, C_REG, C_VREG, 1, 4, 0, 0, 0},
-
-	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0},
-	{obj.AUSEFIELD, C_ADDR, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.APCDATA, C_VCON, C_NONE, C_VCON, 0, 0, 0, 0, 0},
-	{obj.AFUNCDATA, C_VCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0},
-	{obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
-	{obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as AB/ABL
-	{obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as AB/ABL
-
-	{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
-}
-
-/*
- * valid pstate field values, and value to use in instruction
- */
-var pstatefield = []struct {
-	a uint32
-	b uint32
-}{
-	{REG_SPSel, 0<<16 | 4<<12 | 5<<5},
-	{REG_DAIFSet, 3<<16 | 4<<12 | 6<<5},
-	{REG_DAIFClr, 3<<16 | 4<<12 | 7<<5},
-}
-
-var pool struct {
-	start uint32
-	size  uint32
-}
-
-func prasm(p *obj.Prog) {
-	fmt.Printf("%v\n", p)
-}
-
-func span7(ctxt *obj.Link, cursym *obj.LSym) {
-	p := cursym.Text
-	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
-		return
-	}
-	ctxt.Cursym = cursym
-	ctxt.Autosize = int32(p.To.Offset&0xffffffff) + 8
-
-	if oprange[AAND&obj.AMask] == nil {
-		buildop(ctxt)
-	}
-
-	bflag := 1
-	c := int64(0)
-	p.Pc = c
-	var m int
-	var o *Optab
-	for p = p.Link; p != nil; p = p.Link {
-		ctxt.Curp = p
-		if p.As == ADWORD && (c&7) != 0 {
-			c += 4
-		}
-		p.Pc = c
-		o = oplook(ctxt, p)
-		m = int(o.size)
-		if m == 0 {
-			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
-				ctxt.Diag("zero-width instruction\n%v", p)
-			}
-			continue
-		}
-
-		switch o.flag & (LFROM | LTO) {
-		case LFROM:
-			addpool(ctxt, p, &p.From)
-
-		case LTO:
-			addpool(ctxt, p, &p.To)
-			break
-		}
-
-		if p.As == AB || p.As == obj.ARET || p.As == AERET { /* TODO: other unconditional operations */
-			checkpool(ctxt, p, 0)
-		}
-		c += int64(m)
-		if ctxt.Blitrl != nil {
-			checkpool(ctxt, p, 1)
-		}
-	}
-
-	cursym.Size = c
-
-	/*
-	 * if any procedure is large enough to
-	 * generate a large SBRA branch, then
-	 * generate extra passes putting branches
-	 * around jmps to fix. this is rare.
-	 */
-	for bflag != 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f span1\n", obj.Cputime())
-		}
-		bflag = 0
-		c = 0
-		for p = cursym.Text.Link; p != nil; p = p.Link {
-			if p.As == ADWORD && (c&7) != 0 {
-				c += 4
-			}
-			p.Pc = c
-			o = oplook(ctxt, p)
-
-			/* very large branches */
-			if (o.type_ == 7 || o.type_ == 39) && p.Pcond != nil { // 7: BEQ and like, 39: CBZ and like
-				otxt := p.Pcond.Pc - c
-				if otxt <= -(1<<18)+10 || otxt >= (1<<18)-10 {
-					q := ctxt.NewProg()
-					q.Link = p.Link
-					p.Link = q
-					q.As = AB
-					q.To.Type = obj.TYPE_BRANCH
-					q.Pcond = p.Pcond
-					p.Pcond = q
-					q = ctxt.NewProg()
-					q.Link = p.Link
-					p.Link = q
-					q.As = AB
-					q.To.Type = obj.TYPE_BRANCH
-					q.Pcond = q.Link.Link
-					bflag = 1
-				}
-			}
-			m = int(o.size)
-
-			if m == 0 {
-				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
-					ctxt.Diag("zero-width instruction\n%v", p)
-				}
-				continue
-			}
-
-			c += int64(m)
-		}
-	}
-
-	c += -c & (funcAlign - 1)
-	cursym.Size = c
-
-	/*
-	 * lay out the code, emitting code and data relocations.
-	 */
-	cursym.Grow(cursym.Size)
-	bp := cursym.P
-	psz := int32(0)
-	var i int
-	var out [6]uint32
-	for p := cursym.Text.Link; p != nil; p = p.Link {
-		ctxt.Pc = p.Pc
-		ctxt.Curp = p
-		o = oplook(ctxt, p)
-
-		// need to align DWORDs on an 8-byte boundary. The ISA doesn't
-		// require it, but the various 64-bit loads we generate assume it.
-		if o.as == ADWORD && psz%8 != 0 {
-			bp[0], bp[1], bp[2], bp[3] = 0, 0, 0, 0
-			bp = bp[4:]
-			psz += 4
-		}
-
-		if int(o.size) > 4*len(out) {
-			log.Fatalf("out array in span7 is too small, need at least %d for %v", o.size/4, p)
-		}
-		asmout(ctxt, p, o, out[:])
-		for i = 0; i < int(o.size/4); i++ {
-			ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
-			bp = bp[4:]
-			psz += 4
-		}
-	}
-}
-
-/*
- * when the first reference to the literal pool threatens
- * to go out of range of a 1Mb PC-relative offset,
- * drop the pool now and branch around it.
- */
-func checkpool(ctxt *obj.Link, p *obj.Prog, skip int) {
-	if pool.size >= 0xffff0 || !ispcdisp(int32(p.Pc+4+int64(pool.size)-int64(pool.start)+8)) {
-		flushpool(ctxt, p, skip)
-	} else if p.Link == nil {
-		flushpool(ctxt, p, 2)
-	}
-}
-
-func flushpool(ctxt *obj.Link, p *obj.Prog, skip int) {
-	if ctxt.Blitrl != nil {
-		if skip != 0 {
-			if ctxt.Debugvlog != 0 && skip == 1 {
-				fmt.Printf("note: flush literal pool at %#x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
-			}
-			q := ctxt.NewProg()
-			q.As = AB
-			q.To.Type = obj.TYPE_BRANCH
-			q.Pcond = p.Link
-			q.Link = ctxt.Blitrl
-			q.Lineno = p.Lineno
-			ctxt.Blitrl = q
-		} else if p.Pc+int64(pool.size)-int64(pool.start) < maxPCDisp {
-			return
-		}
-
-		// The line number for constant pool entries doesn't really matter.
-		// We set it to the line number of the preceding instruction so that
-		// there are no deltas to encode in the pc-line tables.
-		for q := ctxt.Blitrl; q != nil; q = q.Link {
-			q.Lineno = p.Lineno
-		}
-
-		ctxt.Elitrl.Link = p.Link
-		p.Link = ctxt.Blitrl
-
-		ctxt.Blitrl = nil /* BUG: should refer back to values until out-of-range */
-		ctxt.Elitrl = nil
-		pool.size = 0
-		pool.start = 0
-	}
-}
-
-/*
- * TODO: hash
- */
-func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
-	c := aclass(ctxt, a)
-	lit := ctxt.Instoffset
-	t := *ctxt.NewProg()
-	t.As = AWORD
-	sz := 4
-
-	// MOVD foo(SB), R is actually
-	//	MOVD addr, REGTMP
-	//	MOVD REGTMP, R
-	// where addr is the address of the DWORD containing the address of foo.
-	if p.As == AMOVD || c == C_ADDR || c == C_VCON || int64(lit) != int64(int32(lit)) || uint64(lit) != uint64(uint32(lit)) {
-		// conservative: don't know if we want signed or unsigned extension.
-		// in case of ambiguity, store 64-bit
-		t.As = ADWORD
-		sz = 8
-	}
-
-	switch c {
-	// TODO(aram): remove.
-	default:
-		if a.Name != obj.NAME_EXTERN {
-			fmt.Printf("addpool: %v in %v shouldn't go to default case\n", DRconv(c), p)
-		}
-
-		t.To.Offset = a.Offset
-		t.To.Sym = a.Sym
-		t.To.Type = a.Type
-		t.To.Name = a.Name
-
-	/* This is here because MOV uint12<<12, R is disabled in optab.
-	Because of this, we need to load the constant from memory. */
-	case C_ADDCON:
-		fallthrough
-
-	case C_PSAUTO,
-		C_PPAUTO,
-		C_UAUTO4K,
-		C_UAUTO8K,
-		C_UAUTO16K,
-		C_UAUTO32K,
-		C_UAUTO64K,
-		C_NSAUTO,
-		C_NPAUTO,
-		C_LAUTO,
-		C_PPOREG,
-		C_PSOREG,
-		C_UOREG4K,
-		C_UOREG8K,
-		C_UOREG16K,
-		C_UOREG32K,
-		C_UOREG64K,
-		C_NSOREG,
-		C_NPOREG,
-		C_LOREG,
-		C_LACON,
-		C_LCON,
-		C_VCON:
-		if a.Name == obj.NAME_EXTERN {
-			fmt.Printf("addpool: %v in %v needs reloc\n", DRconv(c), p)
-		}
-
-		t.To.Type = obj.TYPE_CONST
-		t.To.Offset = lit
-		break
-	}
-
-	for q := ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
-		if q.To == t.To {
-			p.Pcond = q
-			return
-		}
-	}
-
-	q := ctxt.NewProg()
-	*q = t
-	q.Pc = int64(pool.size)
-	if ctxt.Blitrl == nil {
-		ctxt.Blitrl = q
-		pool.start = uint32(p.Pc)
-	} else {
-		ctxt.Elitrl.Link = q
-	}
-	ctxt.Elitrl = q
-	pool.size = -pool.size & (funcAlign - 1)
-	pool.size += uint32(sz)
-	p.Pcond = q
-}
-
-func regoff(ctxt *obj.Link, a *obj.Addr) uint32 {
-	ctxt.Instoffset = 0
-	aclass(ctxt, a)
-	return uint32(ctxt.Instoffset)
-}
-
-// Maximum PC-relative displacement.
-// The actual limit is ±2²⁰, but we are conservative
-// to avoid needing to recompute the literal pool flush points
-// as span-dependent jumps are enlarged.
-const maxPCDisp = 512 * 1024
-
-// ispcdisp reports whether v is a valid PC-relative displacement.
-func ispcdisp(v int32) bool {
-	return -maxPCDisp < v && v < maxPCDisp && v&3 == 0
-}
-
-func isaddcon(v int64) bool {
-	/* uimm12, or uimm12 shifted left by 12 */
-	if v < 0 {
-		return false
-	}
-	if (v & 0xFFF) == 0 {
-		v >>= 12
-	}
-	return v <= 0xFFF
-}
-
-// isbitcon reports whether a constant can be encoded into a logical instruction.
-// bitcon has a binary form of repetition of a bit sequence of length 2, 4, 8, 16, 32, or 64,
-// which itself is a rotate (w.r.t. the length of the unit) of a sequence of ones.
-// special cases: 0 and -1 are not bitcon.
-// this function needs to run against virtually all the constants, so it needs to be fast.
-// for this reason, bitcon testing and bitcon encoding are separate functions.
-func isbitcon(x uint64) bool {
-	if x == 1<<64-1 || x == 0 {
-		return false
-	}
-	// determine the period and sign-extend a unit to 64 bits
-	switch {
-	case x != x>>32|x<<32:
-		// period is 64
-		// nothing to do
-	case x != x>>16|x<<48:
-		// period is 32
-		x = uint64(int64(int32(x)))
-	case x != x>>8|x<<56:
-		// period is 16
-		x = uint64(int64(int16(x)))
-	case x != x>>4|x<<60:
-		// period is 8
-		x = uint64(int64(int8(x)))
-	default:
-		// period is 4 or 2, always true
-		// 0001, 0010, 0100, 1000 -- 0001 rotate
-		// 0011, 0110, 1100, 1001 -- 0011 rotate
-		// 0111, 1011, 1101, 1110 -- 0111 rotate
-		// 0101, 1010             -- 01   rotate, repeat
-		return true
-	}
-	return sequenceOfOnes(x) || sequenceOfOnes(^x)
-}
-
-// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros
-func sequenceOfOnes(x uint64) bool {
-	y := x & -x // lowest set bit of x. x is good iff x+y is a power of 2
-	y += x
-	return (y-1)&y == 0
-}
-
-// bitconEncode returns the encoding of a bitcon used in logical instructions
-// x is known to be a bitcon
-// a bitcon is a sequence of n ones at low bits (i.e. 1<<n-1), right rotated
-// by R bits, and repeated with period of 64, 32, 16, 8, 4, or 2.
-// it is encoded in logical instructions with 3 bitfields
-// N (1 bit) : R (6 bits) : S (6 bits), where
-// N=1           -- period=64
-// N=0, S=0xxxxx -- period=32
-// N=0, S=10xxxx -- period=16
-// N=0, S=110xxx -- period=8
-// N=0, S=1110xx -- period=4
-// N=0, S=11110x -- period=2
-// R is the shift amount, low bits of S = n-1
-func bitconEncode(x uint64, mode int) uint32 {
-	var period uint32
-	// determine the period and sign-extend a unit to 64 bits
-	switch {
-	case x != x>>32|x<<32:
-		period = 64
-	case x != x>>16|x<<48:
-		period = 32
-		x = uint64(int64(int32(x)))
-	case x != x>>8|x<<56:
-		period = 16
-		x = uint64(int64(int16(x)))
-	case x != x>>4|x<<60:
-		period = 8
-		x = uint64(int64(int8(x)))
-	case x != x>>2|x<<62:
-		period = 4
-		x = uint64(int64(x<<60) >> 60)
-	default:
-		period = 2
-		x = uint64(int64(x<<62) >> 62)
-	}
-	neg := false
-	if int64(x) < 0 {
-		x = ^x
-		neg = true
-	}
-	y := x & -x // lowest set bit of x.
-	s := log2(y)
-	n := log2(x+y) - s // x (or ^x) is a sequence of n ones left shifted by s bits
-	if neg {
-		// ^x is a sequence of n ones left shifted by s bits
-		// adjust n, s for x
-		s = n + s
-		n = period - n
-	}
-
-	N := uint32(0)
-	if mode == 64 && period == 64 {
-		N = 1
-	}
-	R := (period - s) & (period - 1) & uint32(mode-1) // shift amount of right rotate
-	S := (n - 1) | 63&^(period<<1-1)                  // low bits = #ones - 1, high bits encodes period
-	return N<<22 | R<<16 | S<<10
-}
-
-func log2(x uint64) uint32 {
-	if x == 0 {
-		panic("log2 of 0")
-	}
-	n := uint32(0)
-	if x >= 1<<32 {
-		x >>= 32
-		n += 32
-	}
-	if x >= 1<<16 {
-		x >>= 16
-		n += 16
-	}
-	if x >= 1<<8 {
-		x >>= 8
-		n += 8
-	}
-	if x >= 1<<4 {
-		x >>= 4
-		n += 4
-	}
-	if x >= 1<<2 {
-		x >>= 2
-		n += 2
-	}
-	if x >= 1<<1 {
-		x >>= 1
-		n += 1
-	}
-	return n
-}
-
-func autoclass(l int64) int {
-	if l < 0 {
-		if l >= -256 {
-			return C_NSAUTO
-		}
-		if l >= -512 && (l&7) == 0 {
-			return C_NPAUTO
-		}
-		return C_LAUTO
-	}
-
-	if l <= 255 {
-		return C_PSAUTO
-	}
-	if l <= 504 && (l&7) == 0 {
-		return C_PPAUTO
-	}
-	if l <= 4095 {
-		return C_UAUTO4K
-	}
-	if l <= 8190 && (l&1) == 0 {
-		return C_UAUTO8K
-	}
-	if l <= 16380 && (l&3) == 0 {
-		return C_UAUTO16K
-	}
-	if l <= 32760 && (l&7) == 0 {
-		return C_UAUTO32K
-	}
-	if l <= 65520 && (l&0xF) == 0 {
-		return C_UAUTO64K
-	}
-	return C_LAUTO
-}
-
-func oregclass(l int64) int {
-	if l == 0 {
-		return C_ZOREG
-	}
-	return autoclass(l) - C_NPAUTO + C_NPOREG
-}
-
-/*
- * given an offset v and a class c (see above)
- * return the offset value to use in the instruction,
- * scaled if necessary
- */
-func offsetshift(ctxt *obj.Link, v int64, c int) int64 {
-	s := 0
-	if c >= C_SEXT1 && c <= C_SEXT16 {
-		s = c - C_SEXT1
-	} else if c >= C_UAUTO4K && c <= C_UAUTO64K {
-		s = c - C_UAUTO4K
-	} else if c >= C_UOREG4K && c <= C_UOREG64K {
-		s = c - C_UOREG4K
-	}
-	vs := v >> uint(s)
-	if vs<<uint(s) != v {
-		ctxt.Diag("odd offset: %d\n%v", v, ctxt.Curp)
-	}
-	return vs
-}
-
-/*
- * if v contains a single 16-bit value aligned
- * on a 16-bit field, and thus suitable for movk/movn,
- * return the field index 0 to 3; otherwise return -1
- */
-func movcon(v int64) int {
-	for s := 0; s < 64; s += 16 {
-		if (uint64(v) &^ (uint64(0xFFFF) << uint(s))) == 0 {
-			return s / 16
-		}
-	}
-	return -1
-}
-
-func rclass(r int16) int {
-	switch {
-	case REG_R0 <= r && r <= REG_R30: // not 31
-		return C_REG
-	case r == REGZERO:
-		return C_ZCON
-	case REG_F0 <= r && r <= REG_F31:
-		return C_FREG
-	case REG_V0 <= r && r <= REG_V31:
-		return C_VREG
-	case COND_EQ <= r && r <= COND_NV:
-		return C_COND
-	case r == REGSP:
-		return C_RSP
-	case r&REG_EXT != 0:
-		return C_EXTREG
-	case r >= REG_SPECIAL:
-		return C_SPR
-	}
-	return C_GOK
-}
-
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
-	switch a.Type {
-	case obj.TYPE_NONE:
-		return C_NONE
-
-	case obj.TYPE_REG:
-		return rclass(a.Reg)
-
-	case obj.TYPE_REGREG:
-		return C_PAIR
-
-	case obj.TYPE_SHIFT:
-		return C_SHIFT
-
-	case obj.TYPE_MEM:
-		switch a.Name {
-		case obj.NAME_EXTERN, obj.NAME_STATIC:
-			if a.Sym == nil {
-				break
-			}
-			ctxt.Instoffset = a.Offset
-			if a.Sym != nil { // use relocation
-				if a.Sym.Type == obj.STLSBSS {
-					if ctxt.Flag_shared {
-						return C_TLS_IE
-					} else {
-						return C_TLS_LE
-					}
-				}
-				return C_ADDR
-			}
-			return C_LEXT
-
-		case obj.NAME_GOTREF:
-			return C_GOTADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			return autoclass(ctxt.Instoffset)
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
-			return autoclass(ctxt.Instoffset)
-
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			return oregclass(ctxt.Instoffset)
-		}
-		return C_GOK
-
-	case obj.TYPE_FCONST:
-		return C_FCON
-
-	case obj.TYPE_TEXTSIZE:
-		return C_TEXTSIZE
-
-	case obj.TYPE_CONST, obj.TYPE_ADDR:
-		switch a.Name {
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if a.Reg != 0 && a.Reg != REGZERO {
-				goto aconsize
-			}
-			v := ctxt.Instoffset
-			if v == 0 {
-				return C_ZCON
-			}
-			if isaddcon(v) {
-				if v <= 0xFFF {
-					if isbitcon(uint64(v)) {
-						return C_ABCON0
-					}
-					return C_ADDCON0
-				}
-				if isbitcon(uint64(v)) {
-					return C_ABCON
-				}
-				return C_ADDCON
-			}
-
-			t := movcon(v)
-			if t >= 0 {
-				if isbitcon(uint64(v)) {
-					return C_MBCON
-				}
-				return C_MOVCON
-			}
-
-			t = movcon(^v)
-			if t >= 0 {
-				if isbitcon(uint64(v)) {
-					return C_MBCON
-				}
-				return C_MOVCON
-			}
-
-			if isbitcon(uint64(v)) {
-				return C_BITCON
-			}
-
-			if uint64(v) == uint64(uint32(v)) || v == int64(int32(v)) {
-				return C_LCON
-			}
-			return C_VCON
-
-		case obj.NAME_EXTERN, obj.NAME_STATIC:
-			if a.Sym == nil {
-				break
-			}
-			if a.Sym.Type == obj.STLSBSS {
-				ctxt.Diag("taking address of TLS variable is not supported")
-			}
-			ctxt.Instoffset = a.Offset
-			return C_VCONADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			goto aconsize
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
-			goto aconsize
-		}
-		return C_GOK
-
-	aconsize:
-		if isaddcon(ctxt.Instoffset) {
-			return C_AACON
-		}
-		return C_LACON
-
-	case obj.TYPE_BRANCH:
-		return C_SBRA
-	}
-
-	return C_GOK
-}
-
-func oclass(a *obj.Addr) int {
-	return int(a.Class) - 1
-}
-
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
-	a1 := int(p.Optab)
-	if a1 != 0 {
-		return &optab[a1-1]
-	}
-	a1 = int(p.From.Class)
-	if a1 == 0 {
-		a1 = aclass(ctxt, &p.From) + 1
-		p.From.Class = int8(a1)
-	}
-
-	a1--
-	a3 := int(p.To.Class)
-	if a3 == 0 {
-		a3 = aclass(ctxt, &p.To) + 1
-		p.To.Class = int8(a3)
-	}
-
-	a3--
-	a2 := C_NONE
-	if p.Reg != 0 {
-		a2 = rclass(p.Reg)
-	}
-
-	if false {
-		fmt.Printf("oplook %v %d %d %d\n", p.As, a1, a2, a3)
-		fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
-	}
-
-	ops := oprange[p.As&obj.AMask]
-	c1 := &xcmp[a1]
-	c2 := &xcmp[a2]
-	c3 := &xcmp[a3]
-	c4 := &xcmp[p.Scond>>5]
-	for i := range ops {
-		op := &ops[i]
-		if (int(op.a2) == a2 || c2[op.a2]) && c4[op.scond>>5] && c1[op.a1] && c3[op.a3] {
-			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
-			return op
-		}
-	}
-
-	ctxt.Diag("illegal combination %v %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
-	prasm(p)
-	if ops == nil {
-		ops = optab
-	}
-	return &ops[0]
-}
-
-func cmp(a int, b int) bool {
-	if a == b {
-		return true
-	}
-	switch a {
-	case C_RSP:
-		if b == C_REG {
-			return true
-		}
-
-	case C_REG:
-		if b == C_ZCON {
-			return true
-		}
-
-	case C_ADDCON0:
-		if b == C_ZCON || b == C_ABCON0 {
-			return true
-		}
-
-	case C_ADDCON:
-		if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON {
-			return true
-		}
-
-	case C_BITCON:
-		if b == C_ABCON0 || b == C_ABCON || b == C_MBCON {
-			return true
-		}
-
-	case C_MOVCON:
-		if b == C_MBCON || b == C_ZCON || b == C_ADDCON0 {
-			return true
-		}
-
-	case C_LCON:
-		if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON {
-			return true
-		}
-
-	case C_VCON:
-		return cmp(C_LCON, b)
-
-	case C_LACON:
-		if b == C_AACON {
-			return true
-		}
-
-	case C_SEXT2:
-		if b == C_SEXT1 {
-			return true
-		}
-
-	case C_SEXT4:
-		if b == C_SEXT1 || b == C_SEXT2 {
-			return true
-		}
-
-	case C_SEXT8:
-		if b >= C_SEXT1 && b <= C_SEXT4 {
-			return true
-		}
-
-	case C_SEXT16:
-		if b >= C_SEXT1 && b <= C_SEXT8 {
-			return true
-		}
-
-	case C_LEXT:
-		if b >= C_SEXT1 && b <= C_SEXT16 {
-			return true
-		}
-
-	case C_PPAUTO:
-		if b == C_PSAUTO {
-			return true
-		}
-
-	case C_UAUTO4K:
-		if b == C_PSAUTO || b == C_PPAUTO {
-			return true
-		}
-
-	case C_UAUTO8K:
-		return cmp(C_UAUTO4K, b)
-
-	case C_UAUTO16K:
-		return cmp(C_UAUTO8K, b)
-
-	case C_UAUTO32K:
-		return cmp(C_UAUTO16K, b)
-
-	case C_UAUTO64K:
-		return cmp(C_UAUTO32K, b)
-
-	case C_NPAUTO:
-		return cmp(C_NSAUTO, b)
-
-	case C_LAUTO:
-		return cmp(C_NPAUTO, b) || cmp(C_UAUTO64K, b)
-
-	case C_PSOREG:
-		if b == C_ZOREG {
-			return true
-		}
-
-	case C_PPOREG:
-		if b == C_ZOREG || b == C_PSOREG {
-			return true
-		}
-
-	case C_UOREG4K:
-		if b == C_ZOREG || b == C_PSAUTO || b == C_PSOREG || b == C_PPAUTO || b == C_PPOREG {
-			return true
-		}
-
-	case C_UOREG8K:
-		return cmp(C_UOREG4K, b)
-
-	case C_UOREG16K:
-		return cmp(C_UOREG8K, b)
-
-	case C_UOREG32K:
-		return cmp(C_UOREG16K, b)
-
-	case C_UOREG64K:
-		return cmp(C_UOREG32K, b)
-
-	case C_NPOREG:
-		return cmp(C_NSOREG, b)
-
-	case C_LOREG:
-		return cmp(C_NPOREG, b) || cmp(C_UOREG64K, b)
-
-	case C_LBRA:
-		if b == C_SBRA {
-			return true
-		}
-	}
-
-	return false
-}
-
-type ocmp []Optab
-
-func (x ocmp) Len() int {
-	return len(x)
-}
-
-func (x ocmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x ocmp) Less(i, j int) bool {
-	p1 := &x[i]
-	p2 := &x[j]
-	if p1.as != p2.as {
-		return p1.as < p2.as
-	}
-	if p1.a1 != p2.a1 {
-		return p1.a1 < p2.a1
-	}
-	if p1.a2 != p2.a2 {
-		return p1.a2 < p2.a2
-	}
-	if p1.a3 != p2.a3 {
-		return p1.a3 < p2.a3
-	}
-	if p1.scond != p2.scond {
-		return p1.scond < p2.scond
-	}
-	return false
-}
-
-func oprangeset(a obj.As, t []Optab) {
-	oprange[a&obj.AMask] = t
-}
-
-func buildop(ctxt *obj.Link) {
-	var n int
-	for i := 0; i < C_GOK; i++ {
-		for n = 0; n < C_GOK; n++ {
-			if cmp(n, i) {
-				xcmp[i][n] = true
-			}
-		}
-	}
-	for n = 0; optab[n].as != obj.AXXX; n++ {
-	}
-	sort.Sort(ocmp(optab[:n]))
-	for i := 0; i < n; i++ {
-		r := optab[i].as
-		start := i
-		for optab[i].as == r {
-			i++
-		}
-		t := optab[start:i]
-		i--
-		oprangeset(r, t)
-		switch r {
-		default:
-			ctxt.Diag("unknown op in build: %v", r)
-			log.Fatalf("bad code")
-
-		case AADD:
-			oprangeset(AADDS, t)
-			oprangeset(ASUB, t)
-			oprangeset(ASUBS, t)
-			oprangeset(AADDW, t)
-			oprangeset(AADDSW, t)
-			oprangeset(ASUBW, t)
-			oprangeset(ASUBSW, t)
-
-		case AAND: /* logical immediate, logical shifted register */
-			oprangeset(AANDS, t)
-
-			oprangeset(AANDSW, t)
-			oprangeset(AANDW, t)
-			oprangeset(AEOR, t)
-			oprangeset(AEORW, t)
-			oprangeset(AORR, t)
-			oprangeset(AORRW, t)
-
-		case ABIC: /* only logical shifted register */
-			oprangeset(ABICS, t)
-
-			oprangeset(ABICSW, t)
-			oprangeset(ABICW, t)
-			oprangeset(AEON, t)
-			oprangeset(AEONW, t)
-			oprangeset(AORN, t)
-			oprangeset(AORNW, t)
-
-		case ANEG:
-			oprangeset(ANEGS, t)
-			oprangeset(ANEGSW, t)
-			oprangeset(ANEGW, t)
-
-		case AADC: /* rn=Rd */
-			oprangeset(AADCW, t)
-
-			oprangeset(AADCS, t)
-			oprangeset(AADCSW, t)
-			oprangeset(ASBC, t)
-			oprangeset(ASBCW, t)
-			oprangeset(ASBCS, t)
-			oprangeset(ASBCSW, t)
-
-		case ANGC: /* rn=REGZERO */
-			oprangeset(ANGCW, t)
-
-			oprangeset(ANGCS, t)
-			oprangeset(ANGCSW, t)
-
-		case ACMP:
-			oprangeset(ACMPW, t)
-			oprangeset(ACMN, t)
-			oprangeset(ACMNW, t)
-
-		case ATST:
-			oprangeset(ATSTW, t)
-
-			/* register/register, and shifted */
-		case AMVN:
-			oprangeset(AMVNW, t)
-
-		case AMOVK:
-			oprangeset(AMOVKW, t)
-			oprangeset(AMOVN, t)
-			oprangeset(AMOVNW, t)
-			oprangeset(AMOVZ, t)
-			oprangeset(AMOVZW, t)
-
-		case ABEQ:
-			oprangeset(ABNE, t)
-			oprangeset(ABCS, t)
-			oprangeset(ABHS, t)
-			oprangeset(ABCC, t)
-			oprangeset(ABLO, t)
-			oprangeset(ABMI, t)
-			oprangeset(ABPL, t)
-			oprangeset(ABVS, t)
-			oprangeset(ABVC, t)
-			oprangeset(ABHI, t)
-			oprangeset(ABLS, t)
-			oprangeset(ABGE, t)
-			oprangeset(ABLT, t)
-			oprangeset(ABGT, t)
-			oprangeset(ABLE, t)
-
-		case ALSL:
-			oprangeset(ALSLW, t)
-			oprangeset(ALSR, t)
-			oprangeset(ALSRW, t)
-			oprangeset(AASR, t)
-			oprangeset(AASRW, t)
-			oprangeset(AROR, t)
-			oprangeset(ARORW, t)
-
-		case ACLS:
-			oprangeset(ACLSW, t)
-			oprangeset(ACLZ, t)
-			oprangeset(ACLZW, t)
-			oprangeset(ARBIT, t)
-			oprangeset(ARBITW, t)
-			oprangeset(AREV, t)
-			oprangeset(AREVW, t)
-			oprangeset(AREV16, t)
-			oprangeset(AREV16W, t)
-			oprangeset(AREV32, t)
-
-		case ASDIV:
-			oprangeset(ASDIVW, t)
-			oprangeset(AUDIV, t)
-			oprangeset(AUDIVW, t)
-			oprangeset(ACRC32B, t)
-			oprangeset(ACRC32CB, t)
-			oprangeset(ACRC32CH, t)
-			oprangeset(ACRC32CW, t)
-			oprangeset(ACRC32CX, t)
-			oprangeset(ACRC32H, t)
-			oprangeset(ACRC32W, t)
-			oprangeset(ACRC32X, t)
-
-		case AMADD:
-			oprangeset(AMADDW, t)
-			oprangeset(AMSUB, t)
-			oprangeset(AMSUBW, t)
-			oprangeset(ASMADDL, t)
-			oprangeset(ASMSUBL, t)
-			oprangeset(AUMADDL, t)
-			oprangeset(AUMSUBL, t)
-
-		case AREM:
-			oprangeset(AREMW, t)
-			oprangeset(AUREM, t)
-			oprangeset(AUREMW, t)
-
-		case AMUL:
-			oprangeset(AMULW, t)
-			oprangeset(AMNEG, t)
-			oprangeset(AMNEGW, t)
-			oprangeset(ASMNEGL, t)
-			oprangeset(ASMULL, t)
-			oprangeset(ASMULH, t)
-			oprangeset(AUMNEGL, t)
-			oprangeset(AUMULH, t)
-			oprangeset(AUMULL, t)
-
-		case AMOVB:
-			oprangeset(AMOVBU, t)
-
-		case AMOVH:
-			oprangeset(AMOVHU, t)
-
-		case AMOVW:
-			oprangeset(AMOVWU, t)
-
-		case ABFM:
-			oprangeset(ABFMW, t)
-			oprangeset(ASBFM, t)
-			oprangeset(ASBFMW, t)
-			oprangeset(AUBFM, t)
-			oprangeset(AUBFMW, t)
-
-		case ABFI:
-			oprangeset(ABFIW, t)
-			oprangeset(ABFXIL, t)
-			oprangeset(ABFXILW, t)
-			oprangeset(ASBFIZ, t)
-			oprangeset(ASBFIZW, t)
-			oprangeset(ASBFX, t)
-			oprangeset(ASBFXW, t)
-			oprangeset(AUBFIZ, t)
-			oprangeset(AUBFIZW, t)
-			oprangeset(AUBFX, t)
-			oprangeset(AUBFXW, t)
-
-		case AEXTR:
-			oprangeset(AEXTRW, t)
-
-		case ASXTB:
-			oprangeset(ASXTBW, t)
-			oprangeset(ASXTH, t)
-			oprangeset(ASXTHW, t)
-			oprangeset(ASXTW, t)
-			oprangeset(AUXTB, t)
-			oprangeset(AUXTH, t)
-			oprangeset(AUXTW, t)
-			oprangeset(AUXTBW, t)
-			oprangeset(AUXTHW, t)
-
-		case ACCMN:
-			oprangeset(ACCMNW, t)
-			oprangeset(ACCMP, t)
-			oprangeset(ACCMPW, t)
-
-		case ACSEL:
-			oprangeset(ACSELW, t)
-			oprangeset(ACSINC, t)
-			oprangeset(ACSINCW, t)
-			oprangeset(ACSINV, t)
-			oprangeset(ACSINVW, t)
-			oprangeset(ACSNEG, t)
-			oprangeset(ACSNEGW, t)
-
-			// aliases Rm=Rn, !cond
-			oprangeset(ACINC, t)
-
-			oprangeset(ACINCW, t)
-			oprangeset(ACINV, t)
-			oprangeset(ACINVW, t)
-			oprangeset(ACNEG, t)
-			oprangeset(ACNEGW, t)
-
-			// aliases, Rm=Rn=REGZERO, !cond
-		case ACSET:
-			oprangeset(ACSETW, t)
-
-			oprangeset(ACSETM, t)
-			oprangeset(ACSETMW, t)
-
-		case AMOVD,
-			AMOVBU,
-			AB,
-			ABL,
-			AWORD,
-			ADWORD,
-			obj.ARET,
-			obj.ATEXT,
-			ASTP,
-			ALDP:
-			break
-
-		case AERET:
-			oprangeset(AWFE, t)
-			oprangeset(AWFI, t)
-			oprangeset(AYIELD, t)
-			oprangeset(ASEV, t)
-			oprangeset(ASEVL, t)
-			oprangeset(ADRPS, t)
-
-		case ACBZ:
-			oprangeset(ACBZW, t)
-			oprangeset(ACBNZ, t)
-			oprangeset(ACBNZW, t)
-
-		case ATBZ:
-			oprangeset(ATBNZ, t)
-
-		case AADR, AADRP:
-			break
-
-		case ACLREX:
-			break
-
-		case ASVC:
-			oprangeset(AHLT, t)
-			oprangeset(AHVC, t)
-			oprangeset(ASMC, t)
-			oprangeset(ABRK, t)
-			oprangeset(ADCPS1, t)
-			oprangeset(ADCPS2, t)
-			oprangeset(ADCPS3, t)
-
-		case AFADDS:
-			oprangeset(AFADDD, t)
-			oprangeset(AFSUBS, t)
-			oprangeset(AFSUBD, t)
-			oprangeset(AFMULS, t)
-			oprangeset(AFMULD, t)
-			oprangeset(AFNMULS, t)
-			oprangeset(AFNMULD, t)
-			oprangeset(AFDIVS, t)
-			oprangeset(AFMAXD, t)
-			oprangeset(AFMAXS, t)
-			oprangeset(AFMIND, t)
-			oprangeset(AFMINS, t)
-			oprangeset(AFMAXNMD, t)
-			oprangeset(AFMAXNMS, t)
-			oprangeset(AFMINNMD, t)
-			oprangeset(AFMINNMS, t)
-			oprangeset(AFDIVD, t)
-
-		case AFCVTSD:
-			oprangeset(AFCVTDS, t)
-			oprangeset(AFABSD, t)
-			oprangeset(AFABSS, t)
-			oprangeset(AFNEGD, t)
-			oprangeset(AFNEGS, t)
-			oprangeset(AFSQRTD, t)
-			oprangeset(AFSQRTS, t)
-			oprangeset(AFRINTNS, t)
-			oprangeset(AFRINTND, t)
-			oprangeset(AFRINTPS, t)
-			oprangeset(AFRINTPD, t)
-			oprangeset(AFRINTMS, t)
-			oprangeset(AFRINTMD, t)
-			oprangeset(AFRINTZS, t)
-			oprangeset(AFRINTZD, t)
-			oprangeset(AFRINTAS, t)
-			oprangeset(AFRINTAD, t)
-			oprangeset(AFRINTXS, t)
-			oprangeset(AFRINTXD, t)
-			oprangeset(AFRINTIS, t)
-			oprangeset(AFRINTID, t)
-			oprangeset(AFCVTDH, t)
-			oprangeset(AFCVTHS, t)
-			oprangeset(AFCVTHD, t)
-			oprangeset(AFCVTSH, t)
-
-		case AFCMPS:
-			oprangeset(AFCMPD, t)
-			oprangeset(AFCMPES, t)
-			oprangeset(AFCMPED, t)
-
-		case AFCCMPS:
-			oprangeset(AFCCMPD, t)
-			oprangeset(AFCCMPES, t)
-			oprangeset(AFCCMPED, t)
-
-		case AFCSELD:
-			oprangeset(AFCSELS, t)
-
-		case AFMOVS, AFMOVD:
-			break
-
-		case AFCVTZSD:
-			oprangeset(AFCVTZSDW, t)
-			oprangeset(AFCVTZSS, t)
-			oprangeset(AFCVTZSSW, t)
-			oprangeset(AFCVTZUD, t)
-			oprangeset(AFCVTZUDW, t)
-			oprangeset(AFCVTZUS, t)
-			oprangeset(AFCVTZUSW, t)
-
-		case ASCVTFD:
-			oprangeset(ASCVTFS, t)
-			oprangeset(ASCVTFWD, t)
-			oprangeset(ASCVTFWS, t)
-			oprangeset(AUCVTFD, t)
-			oprangeset(AUCVTFS, t)
-			oprangeset(AUCVTFWD, t)
-			oprangeset(AUCVTFWS, t)
-
-		case ASYS:
-			oprangeset(AAT, t)
-			oprangeset(ADC, t)
-			oprangeset(AIC, t)
-			oprangeset(ATLBI, t)
-
-		case ASYSL, AHINT:
-			break
-
-		case ADMB:
-			oprangeset(ADSB, t)
-			oprangeset(AISB, t)
-
-		case AMRS, AMSR:
-			break
-
-		case ALDAR:
-			oprangeset(ALDARW, t)
-			fallthrough
-
-		case ALDXR:
-			oprangeset(ALDXRB, t)
-			oprangeset(ALDXRH, t)
-			oprangeset(ALDXRW, t)
-
-		case ALDAXR:
-			oprangeset(ALDAXRB, t)
-			oprangeset(ALDAXRH, t)
-			oprangeset(ALDAXRW, t)
-
-		case ALDXP:
-			oprangeset(ALDXPW, t)
-
-		case ASTLR:
-			oprangeset(ASTLRW, t)
-
-		case ASTXR:
-			oprangeset(ASTXRB, t)
-			oprangeset(ASTXRH, t)
-			oprangeset(ASTXRW, t)
-
-		case ASTLXR:
-			oprangeset(ASTLXRB, t)
-			oprangeset(ASTLXRH, t)
-			oprangeset(ASTLXRW, t)
-
-		case ASTXP:
-			oprangeset(ASTXPW, t)
-
-		case AAESD:
-			oprangeset(AAESE, t)
-			oprangeset(AAESMC, t)
-			oprangeset(AAESIMC, t)
-			oprangeset(ASHA1H, t)
-			oprangeset(ASHA1SU1, t)
-			oprangeset(ASHA256SU0, t)
-
-		case ASHA1C:
-			oprangeset(ASHA1P, t)
-			oprangeset(ASHA1M, t)
-			oprangeset(ASHA1SU0, t)
-			oprangeset(ASHA256H, t)
-			oprangeset(ASHA256H2, t)
-			oprangeset(ASHA256SU1, t)
-
-		case obj.ANOP,
-			obj.AUNDEF,
-			obj.AUSEFIELD,
-			obj.AFUNCDATA,
-			obj.APCDATA,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			break
-		}
-	}
-}
-
-func chipfloat7(ctxt *obj.Link, e float64) int {
-	ei := math.Float64bits(e)
-	l := uint32(int32(ei))
-	h := uint32(int32(ei >> 32))
-
-	if l != 0 || h&0xffff != 0 {
-		return -1
-	}
-	h1 := h & 0x7fc00000
-	if h1 != 0x40000000 && h1 != 0x3fc00000 {
-		return -1
-	}
-	n := 0
-
-	// sign bit (a)
-	if h&0x80000000 != 0 {
-		n |= 1 << 7
-	}
-
-	// exp sign bit (b)
-	if h1 == 0x3fc00000 {
-		n |= 1 << 6
-	}
-
-	// rest of exp and mantissa (cd-efgh)
-	n |= int((h >> 16) & 0x3f)
-
-	//print("match %.8lux %.8lux %d\n", l, h, n);
-	return n
-}
-
-/* form offset parameter to SYS; special register number */
-func SYSARG5(op0 int, op1 int, Cn int, Cm int, op2 int) int {
-	return op0<<19 | op1<<16 | Cn<<12 | Cm<<8 | op2<<5
-}
-
-func SYSARG4(op1 int, Cn int, Cm int, op2 int) int {
-	return SYSARG5(0, op1, Cn, Cm, op2)
-}
-
-func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
-	o1 := uint32(0)
-	o2 := uint32(0)
-	o3 := uint32(0)
-	o4 := uint32(0)
-	o5 := uint32(0)
-	if false { /*debug['P']*/
-		fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
-	}
-	switch o.type_ {
-	default:
-		ctxt.Diag("unknown asm %d", o.type_)
-		prasm(p)
-
-	case 0: /* pseudo ops */
-		break
-
-	case 1: /* op Rm,[Rn],Rd; default Rn=Rd -> op Rm<<0,[Rn,]Rd (shifted register) */
-		o1 = oprrr(ctxt, p.As)
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 2: /* add/sub $(uimm12|uimm24)[,R],R; cmp $(uimm12|uimm24),R */
-		o1 = opirr(ctxt, p.As)
-
-		rt := int(p.To.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			if (o1 & Sbit) == 0 {
-				ctxt.Diag("ineffective ZR destination\n%v", p)
-			}
-			rt = REGZERO
-		}
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		v := int32(regoff(ctxt, &p.From))
-		o1 = oaddi(ctxt, int32(o1), v, r, rt)
-
-	case 3: /* op R<<n[,R],R (shifted register) */
-		o1 = oprrr(ctxt, p.As)
-
-		o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
-		rt := int(p.To.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		r := int(p.Reg)
-		if p.As == AMVN || p.As == AMVNW {
-			r = REGZERO
-		} else if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 4: /* mov $addcon, R; mov $recon, R; mov $racon, R */
-		o1 = opirr(ctxt, p.As)
-
-		rt := int(p.To.Reg)
-		r := int(o.param)
-		if r == 0 {
-			r = REGZERO
-		} else if r == REGFROM {
-			r = int(p.From.Reg)
-		}
-		if r == 0 {
-			r = REGSP
-		}
-		v := int32(regoff(ctxt, &p.From))
-		if (v & 0xFFF000) != 0 {
-			v >>= 12
-			o1 |= 1 << 22 /* shift, by 12 */
-		}
-
-		o1 |= ((uint32(v) & 0xFFF) << 10) | (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 5: /* b s; bl s */
-		o1 = opbra(ctxt, p.As)
-
-		if p.To.Sym == nil {
-			o1 |= uint32(brdist(ctxt, p, 0, 26, 2))
-			break
-		}
-
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.To.Sym
-		rel.Add = p.To.Offset
-		rel.Type = obj.R_CALLARM64
-
-	case 6: /* b ,O(R); bl ,O(R) */
-		o1 = opbrr(ctxt, p.As)
-
-		o1 |= uint32(p.To.Reg&31) << 5
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 0
-		rel.Type = obj.R_CALLIND
-
-	case 7: /* beq s */
-		o1 = opbra(ctxt, p.As)
-
-		o1 |= uint32(brdist(ctxt, p, 0, 19, 2) << 5)
-
-	case 8: /* lsl $c,[R],R -> ubfm $(W-1)-c,$(-c MOD (W-1)),Rn,Rd */
-		rt := int(p.To.Reg)
-
-		rf := int(p.Reg)
-		if rf == 0 {
-			rf = rt
-		}
-		v := int32(p.From.Offset)
-		switch p.As {
-		case AASR:
-			o1 = opbfm(ctxt, ASBFM, int(v), 63, rf, rt)
-
-		case AASRW:
-			o1 = opbfm(ctxt, ASBFMW, int(v), 31, rf, rt)
-
-		case ALSL:
-			o1 = opbfm(ctxt, AUBFM, int((64-v)&63), int(63-v), rf, rt)
-
-		case ALSLW:
-			o1 = opbfm(ctxt, AUBFMW, int((32-v)&31), int(31-v), rf, rt)
-
-		case ALSR:
-			o1 = opbfm(ctxt, AUBFM, int(v), 63, rf, rt)
-
-		case ALSRW:
-			o1 = opbfm(ctxt, AUBFMW, int(v), 31, rf, rt)
-
-		case AROR:
-			o1 = opextr(ctxt, AEXTR, v, rf, rf, rt)
-
-		case ARORW:
-			o1 = opextr(ctxt, AEXTRW, v, rf, rf, rt)
-
-		default:
-			ctxt.Diag("bad shift $con\n%v", ctxt.Curp)
-			break
-		}
-
-	case 9: /* lsl Rm,[Rn],Rd -> lslv Rm, Rn, Rd */
-		o1 = oprrr(ctxt, p.As)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 |= (uint32(p.From.Reg&31) << 16) | (uint32(r&31) << 5) | uint32(p.To.Reg&31)
-
-	case 10: /* brk/hvc/.../svc [$con] */
-		o1 = opimm(ctxt, p.As)
-
-		if p.To.Type != obj.TYPE_NONE {
-			o1 |= uint32((p.To.Offset & 0xffff) << 5)
-		}
-
-	case 11: /* dword */
-		aclass(ctxt, &p.To)
-
-		o1 = uint32(ctxt.Instoffset)
-		o2 = uint32(ctxt.Instoffset >> 32)
-		if p.To.Sym != nil {
-			rel := obj.Addrel(ctxt.Cursym)
-			rel.Off = int32(ctxt.Pc)
-			rel.Siz = 8
-			rel.Sym = p.To.Sym
-			rel.Add = p.To.Offset
-			rel.Type = obj.R_ADDR
-			o1, o2 = 0, 0
-		}
-
-	case 12: /* movT $vcon, reg */
-		o1 = omovlit(ctxt, p.As, p, &p.From, int(p.To.Reg))
-
-	case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */
-		o1 = omovlit(ctxt, AMOVD, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		rt := int(p.To.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) {
-			o2 = opxrrr(ctxt, p.As)
-			o2 |= REGTMP & 31 << 16
-			o2 |= LSL0_64
-		} else {
-			o2 = oprrr(ctxt, p.As)
-			o2 |= REGTMP & 31 << 16 /* shift is 0 */
-		}
-
-		o2 |= uint32(r&31) << 5
-		o2 |= uint32(rt & 31)
-
-	case 14: /* word */
-		if aclass(ctxt, &p.To) == C_ADDR {
-			ctxt.Diag("address constant needs DWORD\n%v", p)
-		}
-		o1 = uint32(ctxt.Instoffset)
-		if p.To.Sym != nil {
-			// This case happens with words generated
-			// in the PC stream as part of the literal pool.
-			rel := obj.Addrel(ctxt.Cursym)
-
-			rel.Off = int32(ctxt.Pc)
-			rel.Siz = 4
-			rel.Sym = p.To.Sym
-			rel.Add = p.To.Offset
-			rel.Type = obj.R_ADDR
-			o1 = 0
-		}
-
-	case 15: /* mul/mneg/umulh/umull r,[r,]r; madd/msub Rm,Rn,Ra,Rd */
-		o1 = oprrr(ctxt, p.As)
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		var r int
-		var ra int
-		if p.From3Type() == obj.TYPE_REG {
-			r = int(p.From3.Reg)
-			ra = int(p.Reg)
-			if ra == 0 {
-				ra = REGZERO
-			}
-		} else {
-			r = int(p.Reg)
-			if r == 0 {
-				r = rt
-			}
-			ra = REGZERO
-		}
-
-		o1 |= (uint32(rf&31) << 16) | (uint32(ra&31) << 10) | (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 16: /* XremY R[,R],R -> XdivY; XmsubY */
-		o1 = oprrr(ctxt, p.As)
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | REGTMP&31
-		o2 = oprrr(ctxt, AMSUBW)
-		o2 |= o1 & (1 << 31) /* same size */
-		o2 |= (uint32(rf&31) << 16) | (uint32(r&31) << 10) | (REGTMP & 31 << 5) | uint32(rt&31)
-
-	case 17: /* op Rm,[Rn],Rd; default Rn=ZR */
-		o1 = oprrr(ctxt, p.As)
-
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		if r == 0 {
-			r = REGZERO
-		}
-		o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 18: /* csel cond,Rn,Rm,Rd; cinc/cinv/cneg cond,Rn,Rd; cset cond,Rd */
-		o1 = oprrr(ctxt, p.As)
-
-		cond := int(p.From.Reg)
-		r := int(p.Reg)
-		var rf int
-		if r != 0 {
-			if p.From3Type() == obj.TYPE_NONE {
-				/* CINC/CINV/CNEG */
-				rf = r
-
-				cond ^= 1
-			} else {
-				rf = int(p.From3.Reg) /* CSEL */
-			}
-		} else {
-			/* CSET */
-			if p.From3Type() != obj.TYPE_NONE {
-				ctxt.Diag("invalid combination\n%v", p)
-			}
-			rf = REGZERO
-			r = rf
-			cond ^= 1
-		}
-
-		rt := int(p.To.Reg)
-		o1 |= (uint32(rf&31) << 16) | (uint32(cond&31) << 12) | (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 19: /* CCMN cond, (Rm|uimm5),Rn, uimm4 -> ccmn Rn,Rm,uimm4,cond */
-		nzcv := int(p.To.Offset)
-
-		cond := int(p.From.Reg)
-		var rf int
-		if p.From3.Type == obj.TYPE_REG {
-			o1 = oprrr(ctxt, p.As)
-			rf = int(p.From3.Reg) /* Rm */
-		} else {
-			o1 = opirr(ctxt, p.As)
-			rf = int(p.From3.Offset & 0x1F)
-		}
-
-		o1 |= (uint32(rf&31) << 16) | (uint32(cond) << 12) | (uint32(p.Reg&31) << 5) | uint32(nzcv)
-
-	case 20: /* movT R,O(R) -> strT */
-		v := int32(regoff(ctxt, &p.To))
-		sz := int32(1 << uint(movesize(p.As)))
-
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		if v < 0 || v%sz != 0 { /* unscaled 9-bit signed */
-			o1 = olsr9s(ctxt, int32(opstr9(ctxt, p.As)), v, r, int(p.From.Reg))
-		} else {
-			v = int32(offsetshift(ctxt, int64(v), int(o.a3)))
-			o1 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), v, r, int(p.From.Reg))
-		}
-
-	case 21: /* movT O(R),R -> ldrT */
-		v := int32(regoff(ctxt, &p.From))
-		sz := int32(1 << uint(movesize(p.As)))
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		if v < 0 || v%sz != 0 { /* unscaled 9-bit signed */
-			o1 = olsr9s(ctxt, int32(opldr9(ctxt, p.As)), v, r, int(p.To.Reg))
-		} else {
-			v = int32(offsetshift(ctxt, int64(v), int(o.a1)))
-			//print("offset=%lld v=%ld a1=%d\n", instoffset, v, o->a1);
-			o1 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), v, r, int(p.To.Reg))
-		}
-
-	case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */
-		v := int32(p.From.Offset)
-
-		if v < -256 || v > 255 {
-			ctxt.Diag("offset out of range\n%v", p)
-		}
-		o1 = opldrpp(ctxt, p.As)
-		if o.scond == C_XPOST {
-			o1 |= 1 << 10
-		} else {
-			o1 |= 3 << 10
-		}
-		o1 |= ((uint32(v) & 0x1FF) << 12) | (uint32(p.From.Reg&31) << 5) | uint32(p.To.Reg&31)
-
-	case 23: /* movT R,(R)O!; movT O(R)!, R -> strT */
-		v := int32(p.To.Offset)
-
-		if v < -256 || v > 255 {
-			ctxt.Diag("offset out of range\n%v", p)
-		}
-		o1 = LD2STR(opldrpp(ctxt, p.As))
-		if o.scond == C_XPOST {
-			o1 |= 1 << 10
-		} else {
-			o1 |= 3 << 10
-		}
-		o1 |= ((uint32(v) & 0x1FF) << 12) | (uint32(p.To.Reg&31) << 5) | uint32(p.From.Reg&31)
-
-	case 24: /* mov/mvn Rs,Rd -> add $0,Rs,Rd or orr Rs,ZR,Rd */
-		rf := int(p.From.Reg)
-		rt := int(p.To.Reg)
-		s := rf == REGSP || rt == REGSP
-		if p.As == AMVN || p.As == AMVNW {
-			if s {
-				ctxt.Diag("illegal SP reference\n%v", p)
-			}
-			o1 = oprrr(ctxt, p.As)
-			o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
-		} else if s {
-			o1 = opirr(ctxt, p.As)
-			o1 |= (uint32(rf&31) << 5) | uint32(rt&31)
-		} else {
-			o1 = oprrr(ctxt, p.As)
-			o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
-		}
-
-	case 25: /* negX Rs, Rd -> subX Rs<<0, ZR, Rd */
-		o1 = oprrr(ctxt, p.As)
-
-		rf := int(p.From.Reg)
-		if rf == C_NONE {
-			rf = int(p.To.Reg)
-		}
-		rt := int(p.To.Reg)
-		o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
-
-	case 26: /* negX Rm<<s, Rd -> subX Rm<<s, ZR, Rd */
-		o1 = oprrr(ctxt, p.As)
-
-		o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
-		rt := int(p.To.Reg)
-		o1 |= (REGZERO & 31 << 5) | uint32(rt&31)
-
-	case 27: /* op Rm<<n[,Rn],Rd (extended register) */
-		o1 = opxrrr(ctxt, p.As)
-
-		if (p.From.Reg-obj.RBaseARM64)&REG_EXT != 0 {
-			ctxt.Diag("extended register not implemented\n%v", p)
-			// o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
-		} else {
-			o1 |= uint32(p.From.Reg&31) << 16
-		}
-		rt := int(p.To.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 28: /* logop $vcon, [R], R (64 bit literal) */
-		o1 = omovlit(ctxt, AMOVD, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o2 = oprrr(ctxt, p.As)
-		o2 |= REGTMP & 31 << 16 /* shift is 0 */
-		o2 |= uint32(r&31) << 5
-		o2 |= uint32(p.To.Reg & 31)
-
-	case 29: /* op Rn, Rd */
-		fc := aclass(ctxt, &p.From)
-		tc := aclass(ctxt, &p.To)
-		if (p.As == AFMOVD || p.As == AFMOVS) && (fc == C_REG || fc == C_ZCON || tc == C_REG || tc == C_ZCON) {
-			// FMOV Rx, Fy or FMOV Fy, Rx
-			o1 = FPCVTI(0, 0, 0, 0, 6)
-			if p.As == AFMOVD {
-				o1 |= 1<<31 | 1<<22 // 64-bit
-			}
-			if fc == C_REG || fc == C_ZCON {
-				o1 |= 1 << 16 // FMOV Rx, Fy
-			}
-		} else {
-			o1 = oprrr(ctxt, p.As)
-		}
-		o1 |= uint32(p.From.Reg&31)<<5 | uint32(p.To.Reg&31)
-
-	case 30: /* movT R,L(R) -> strT */
-		s := movesize(o.as)
-
-		if s < 0 {
-			ctxt.Diag("unexpected long move, op %v tab %v\n%v", p.As, o.as, p)
-		}
-		v := int32(regoff(ctxt, &p.To))
-		if v < 0 {
-			ctxt.Diag("negative large offset\n%v", p)
-		}
-		if (v & ((1 << uint(s)) - 1)) != 0 {
-			ctxt.Diag("misaligned offset\n%v", p)
-		}
-		hi := v - (v & (0xFFF << uint(s)))
-		if (hi & 0xFFF) != 0 {
-			ctxt.Diag("internal: miscalculated offset %d [%d]\n%v", v, s, p)
-		}
-
-		//fprint(2, "v=%ld (%#lux) s=%d hi=%ld (%#lux) v'=%ld (%#lux)\n", v, v, s, hi, hi, ((v-hi)>>s)&0xFFF, ((v-hi)>>s)&0xFFF);
-		r := int(p.To.Reg)
-
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = oaddi(ctxt, int32(opirr(ctxt, AADD)), hi, r, REGTMP)
-		o2 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg))
-
-	case 31: /* movT L(R), R -> ldrT */
-		s := movesize(o.as)
-
-		if s < 0 {
-			ctxt.Diag("unexpected long move, op %v tab %v\n%v", p.As, o.as, p)
-		}
-		v := int32(regoff(ctxt, &p.From))
-		if v < 0 {
-			ctxt.Diag("negative large offset\n%v", p)
-		}
-		if (v & ((1 << uint(s)) - 1)) != 0 {
-			ctxt.Diag("misaligned offset\n%v", p)
-		}
-		hi := v - (v & (0xFFF << uint(s)))
-		if (hi & 0xFFF) != 0 {
-			ctxt.Diag("internal: miscalculated offset %d [%d]\n%v", v, s, p)
-		}
-
-		//fprint(2, "v=%ld (%#lux) s=%d hi=%ld (%#lux) v'=%ld (%#lux)\n", v, v, s, hi, hi, ((v-hi)>>s)&0xFFF, ((v-hi)>>s)&0xFFF);
-		r := int(p.From.Reg)
-
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = oaddi(ctxt, int32(opirr(ctxt, AADD)), hi, r, REGTMP)
-		o2 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
-
-	case 32: /* mov $con, R -> movz/movn */
-		o1 = omovconst(ctxt, p.As, p, &p.From, int(p.To.Reg))
-
-	case 33: /* movk $uimm16 << pos */
-		o1 = opirr(ctxt, p.As)
-
-		d := p.From.Offset
-		if (d >> 16) != 0 {
-			ctxt.Diag("requires uimm16\n%v", p)
-		}
-		s := 0
-		if p.From3Type() != obj.TYPE_NONE {
-			if p.From3.Type != obj.TYPE_CONST {
-				ctxt.Diag("missing bit position\n%v", p)
-			}
-			s = int(p.From3.Offset / 16)
-			if (s*16&0xF) != 0 || s >= 4 || (o1&S64) == 0 && s >= 2 {
-				ctxt.Diag("illegal bit position\n%v", p)
-			}
-		}
-
-		rt := int(p.To.Reg)
-		o1 |= uint32(((d & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
-
-	case 34: /* mov $lacon,R */
-		o1 = omovlit(ctxt, AMOVD, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		o2 = opxrrr(ctxt, AADD)
-		o2 |= REGTMP & 31 << 16
-		o2 |= LSL0_64
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 |= uint32(r&31) << 5
-		o2 |= uint32(p.To.Reg & 31)
-
-	case 35: /* mov SPR,R -> mrs */
-		o1 = oprrr(ctxt, AMRS)
-
-		v := int32(p.From.Offset)
-		if (o1 & uint32(v&^(3<<19))) != 0 {
-			ctxt.Diag("MRS register value overlap\n%v", p)
-		}
-		o1 |= uint32(v)
-		o1 |= uint32(p.To.Reg & 31)
-
-	case 36: /* mov R,SPR */
-		o1 = oprrr(ctxt, AMSR)
-
-		v := int32(p.To.Offset)
-		if (o1 & uint32(v&^(3<<19))) != 0 {
-			ctxt.Diag("MSR register value overlap\n%v", p)
-		}
-		o1 |= uint32(v)
-		o1 |= uint32(p.From.Reg & 31)
-
-	case 37: /* mov $con,PSTATEfield -> MSR [immediate] */
-		if (uint64(p.From.Offset) &^ uint64(0xF)) != 0 {
-			ctxt.Diag("illegal immediate for PSTATE field\n%v", p)
-		}
-		o1 = opirr(ctxt, AMSR)
-		o1 |= uint32((p.From.Offset & 0xF) << 8) /* Crm */
-		v := int32(0)
-		for i := 0; i < len(pstatefield); i++ {
-			if int64(pstatefield[i].a) == p.To.Offset {
-				v = int32(pstatefield[i].b)
-				break
-			}
-		}
-
-		if v == 0 {
-			ctxt.Diag("illegal PSTATE field for immediate move\n%v", p)
-		}
-		o1 |= uint32(v)
-
-	case 38: /* clrex [$imm] */
-		o1 = opimm(ctxt, p.As)
-
-		if p.To.Type == obj.TYPE_NONE {
-			o1 |= 0xF << 8
-		} else {
-			o1 |= uint32((p.To.Offset & 0xF) << 8)
-		}
-
-	case 39: /* cbz R, rel */
-		o1 = opirr(ctxt, p.As)
-
-		o1 |= uint32(p.From.Reg & 31)
-		o1 |= uint32(brdist(ctxt, p, 0, 19, 2) << 5)
-
-	case 40: /* tbz */
-		o1 = opirr(ctxt, p.As)
-
-		v := int32(p.From.Offset)
-		if v < 0 || v > 63 {
-			ctxt.Diag("illegal bit number\n%v", p)
-		}
-		o1 |= ((uint32(v) & 0x20) << (31 - 5)) | ((uint32(v) & 0x1F) << 19)
-		o1 |= uint32(brdist(ctxt, p, 0, 14, 2) << 5)
-		o1 |= uint32(p.Reg)
-
-	case 41: /* eret, nop, others with no operands */
-		o1 = op0(ctxt, p.As)
-
-	case 42: /* bfm R,r,s,R */
-		o1 = opbfm(ctxt, p.As, int(p.From.Offset), int(p.From3.Offset), int(p.Reg), int(p.To.Reg))
-
-	case 43: /* bfm aliases */
-		r := int(p.From.Offset)
-
-		s := int(p.From3.Offset)
-		rf := int(p.Reg)
-		rt := int(p.To.Reg)
-		if rf == 0 {
-			rf = rt
-		}
-		switch p.As {
-		case ABFI:
-			o1 = opbfm(ctxt, ABFM, 64-r, s-1, rf, rt)
-
-		case ABFIW:
-			o1 = opbfm(ctxt, ABFMW, 32-r, s-1, rf, rt)
-
-		case ABFXIL:
-			o1 = opbfm(ctxt, ABFM, r, r+s-1, rf, rt)
-
-		case ABFXILW:
-			o1 = opbfm(ctxt, ABFMW, r, r+s-1, rf, rt)
-
-		case ASBFIZ:
-			o1 = opbfm(ctxt, ASBFM, 64-r, s-1, rf, rt)
-
-		case ASBFIZW:
-			o1 = opbfm(ctxt, ASBFMW, 32-r, s-1, rf, rt)
-
-		case ASBFX:
-			o1 = opbfm(ctxt, ASBFM, r, r+s-1, rf, rt)
-
-		case ASBFXW:
-			o1 = opbfm(ctxt, ASBFMW, r, r+s-1, rf, rt)
-
-		case AUBFIZ:
-			o1 = opbfm(ctxt, AUBFM, 64-r, s-1, rf, rt)
-
-		case AUBFIZW:
-			o1 = opbfm(ctxt, AUBFMW, 32-r, s-1, rf, rt)
-
-		case AUBFX:
-			o1 = opbfm(ctxt, AUBFM, r, r+s-1, rf, rt)
-
-		case AUBFXW:
-			o1 = opbfm(ctxt, AUBFMW, r, r+s-1, rf, rt)
-
-		default:
-			ctxt.Diag("bad bfm alias\n%v", ctxt.Curp)
-			break
-		}
-
-	case 44: /* extr $b, Rn, Rm, Rd */
-		o1 = opextr(ctxt, p.As, int32(p.From.Offset), int(p.From3.Reg), int(p.Reg), int(p.To.Reg))
-
-	case 45: /* sxt/uxt[bhw] R,R; movT R,R -> sxtT R,R */
-		rf := int(p.From.Reg)
-
-		rt := int(p.To.Reg)
-		as := p.As
-		if rf == REGZERO {
-			as = AMOVWU /* clearer in disassembly */
-		}
-		switch as {
-		case AMOVB, ASXTB:
-			o1 = opbfm(ctxt, ASBFM, 0, 7, rf, rt)
-
-		case AMOVH, ASXTH:
-			o1 = opbfm(ctxt, ASBFM, 0, 15, rf, rt)
-
-		case AMOVW, ASXTW:
-			o1 = opbfm(ctxt, ASBFM, 0, 31, rf, rt)
-
-		case AMOVBU, AUXTB:
-			o1 = opbfm(ctxt, AUBFM, 0, 7, rf, rt)
-
-		case AMOVHU, AUXTH:
-			o1 = opbfm(ctxt, AUBFM, 0, 15, rf, rt)
-
-		case AMOVWU:
-			o1 = oprrr(ctxt, as) | (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
-
-		case AUXTW:
-			o1 = opbfm(ctxt, AUBFM, 0, 31, rf, rt)
-
-		case ASXTBW:
-			o1 = opbfm(ctxt, ASBFMW, 0, 7, rf, rt)
-
-		case ASXTHW:
-			o1 = opbfm(ctxt, ASBFMW, 0, 15, rf, rt)
-
-		case AUXTBW:
-			o1 = opbfm(ctxt, AUBFMW, 0, 7, rf, rt)
-
-		case AUXTHW:
-			o1 = opbfm(ctxt, AUBFMW, 0, 15, rf, rt)
-
-		default:
-			ctxt.Diag("bad sxt %v", as)
-			break
-		}
-
-	case 46: /* cls */
-		o1 = opbit(ctxt, p.As)
-
-		o1 |= uint32(p.From.Reg&31) << 5
-		o1 |= uint32(p.To.Reg & 31)
-
-	case 47: /* movT R,V(R) -> strT (huge offset) */
-		o1 = omovlit(ctxt, AMOVW, p, &p.To, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = olsxrr(ctxt, p.As, REGTMP, r, int(p.From.Reg))
-
-	case 48: /* movT V(R), R -> ldrT (huge offset) */
-		o1 = omovlit(ctxt, AMOVW, p, &p.From, REGTMP)
-
-		if o1 == 0 {
-			break
-		}
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o2 = olsxrr(ctxt, p.As, REGTMP, r, int(p.To.Reg))
-
-	case 50: /* sys/sysl */
-		o1 = opirr(ctxt, p.As)
-
-		if (p.From.Offset &^ int64(SYSARG4(0x7, 0xF, 0xF, 0x7))) != 0 {
-			ctxt.Diag("illegal SYS argument\n%v", p)
-		}
-		o1 |= uint32(p.From.Offset)
-		if p.To.Type == obj.TYPE_REG {
-			o1 |= uint32(p.To.Reg & 31)
-		} else if p.Reg != 0 {
-			o1 |= uint32(p.Reg & 31)
-		} else {
-			o1 |= 0x1F
-		}
-
-	case 51: /* dmb */
-		o1 = opirr(ctxt, p.As)
-
-		if p.From.Type == obj.TYPE_CONST {
-			o1 |= uint32((p.From.Offset & 0xF) << 8)
-		}
-
-	case 52: /* hint */
-		o1 = opirr(ctxt, p.As)
-
-		o1 |= uint32((p.From.Offset & 0x7F) << 5)
-
-	case 53: /* and/or/eor/bic/... $bitcon, Rn, Rd */
-		a := p.As
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		mode := 64
-		v := uint64(p.From.Offset)
-		switch p.As {
-		case AANDW, AORRW, AEORW, AANDSW:
-			mode = 32
-		case ABIC, AORN, AEON, ABICS:
-			v = ^v
-		case ABICW, AORNW, AEONW, ABICSW:
-			v = ^v
-			mode = 32
-		}
-		o1 = opirr(ctxt, a)
-		o1 |= bitconEncode(v, mode) | uint32(r&31)<<5 | uint32(rt&31)
-
-	case 54: /* floating point arith */
-		o1 = oprrr(ctxt, p.As)
-
-		var rf int
-		if p.From.Type == obj.TYPE_CONST {
-			rf = chipfloat7(ctxt, p.From.Val.(float64))
-			if rf < 0 || true {
-				ctxt.Diag("invalid floating-point immediate\n%v", p)
-				rf = 0
-			}
-
-			rf |= (1 << 3)
-		} else {
-			rf = int(p.From.Reg)
-		}
-		rt := int(p.To.Reg)
-		r := int(p.Reg)
-		if (o1&(0x1F<<24)) == (0x1E<<24) && (o1&(1<<11)) == 0 { /* monadic */
-			r = rf
-			rf = 0
-		} else if r == 0 {
-			r = rt
-		}
-		o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
-
-	case 56: /* floating point compare */
-		o1 = oprrr(ctxt, p.As)
-
-		var rf int
-		if p.From.Type == obj.TYPE_CONST {
-			o1 |= 8 /* zero */
-			rf = 0
-		} else {
-			rf = int(p.From.Reg)
-		}
-		rt := int(p.Reg)
-		o1 |= uint32(rf&31)<<16 | uint32(rt&31)<<5
-
-	case 57: /* floating point conditional compare */
-		o1 = oprrr(ctxt, p.As)
-
-		cond := int(p.From.Reg)
-		nzcv := int(p.To.Offset)
-		if nzcv&^0xF != 0 {
-			ctxt.Diag("implausible condition\n%v", p)
-		}
-		rf := int(p.Reg)
-		if p.From3 == nil || p.From3.Reg < REG_F0 || p.From3.Reg > REG_F31 {
-			ctxt.Diag("illegal FCCMP\n%v", p)
-			break
-		}
-		rt := int(p.From3.Reg)
-		o1 |= uint32(rf&31)<<16 | uint32(cond)<<12 | uint32(rt&31)<<5 | uint32(nzcv)
-
-	case 58: /* ldar/ldxr/ldaxr */
-		o1 = opload(ctxt, p.As)
-
-		o1 |= 0x1F << 16
-		o1 |= uint32(p.From.Reg) << 5
-		if p.Reg != 0 {
-			o1 |= uint32(p.Reg) << 10
-		} else {
-			o1 |= 0x1F << 10
-		}
-		o1 |= uint32(p.To.Reg & 31)
-
-	case 59: /* stxr/stlxr */
-		o1 = opstore(ctxt, p.As)
-
-		if p.RegTo2 != obj.REG_NONE {
-			o1 |= uint32(p.RegTo2&31) << 16
-		} else {
-			o1 |= 0x1F << 16
-		}
-
-		// TODO(aram): add support for STXP
-		o1 |= uint32(p.To.Reg&31) << 5
-
-		o1 |= uint32(p.From.Reg & 31)
-
-	case 60: /* adrp label,r */
-		d := brdist(ctxt, p, 12, 21, 0)
-
-		o1 = ADR(1, uint32(d), uint32(p.To.Reg))
-
-	case 61: /* adr label, r */
-		d := brdist(ctxt, p, 0, 21, 0)
-
-		o1 = ADR(0, uint32(d), uint32(p.To.Reg))
-
-	case 62: /* op $movcon, [R], R -> mov $movcon, REGTMP + op REGTMP, [R], R */
-		if p.Reg == REGTMP {
-			ctxt.Diag("cannot use REGTMP as source: %v\n", p)
-		}
-		o1 = omovconst(ctxt, AMOVD, p, &p.From, REGTMP)
-
-		rt := int(p.To.Reg)
-		if p.To.Type == obj.TYPE_NONE {
-			rt = REGZERO
-		}
-		r := int(p.Reg)
-		if r == 0 {
-			r = rt
-		}
-		if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) {
-			o2 = opxrrr(ctxt, p.As)
-			o2 |= REGTMP & 31 << 16
-			o2 |= LSL0_64
-		} else {
-			o2 = oprrr(ctxt, p.As)
-			o2 |= REGTMP & 31 << 16 /* shift is 0 */
-		}
-		o2 |= uint32(r&31) << 5
-		o2 |= uint32(rt & 31)
-
-		/* reloc ops */
-	case 64: /* movT R,addr -> adrp + add + movT R, (REGTMP) */
-		o1 = ADR(1, 0, REGTMP)
-		o2 = opirr(ctxt, AADD) | REGTMP&31<<5 | REGTMP&31
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.To.Sym
-		rel.Add = p.To.Offset
-		rel.Type = obj.R_ADDRARM64
-		o3 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), 0, REGTMP, int(p.From.Reg))
-
-	case 65: /* movT addr,R -> adrp + add + movT (REGTMP), R */
-		o1 = ADR(1, 0, REGTMP)
-		o2 = opirr(ctxt, AADD) | REGTMP&31<<5 | REGTMP&31
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Add = p.From.Offset
-		rel.Type = obj.R_ADDRARM64
-		o3 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), 0, REGTMP, int(p.To.Reg))
-
-	case 66: /* ldp O(R)!, (r1, r2); ldp (R)O!, (r1, r2) */
-		v := int32(p.From.Offset)
-
-		if v < -512 || v > 504 {
-			ctxt.Diag("offset out of range\n%v", p)
-		}
-		if o.scond == C_XPOST {
-			o1 |= 1 << 23
-		} else {
-			o1 |= 3 << 23
-		}
-		o1 |= 1 << 22
-		o1 |= uint32(int64(2<<30|5<<27|((uint32(v)/8)&0x7f)<<15) | p.To.Offset<<10 | int64(uint32(p.From.Reg&31)<<5) | int64(p.To.Reg&31))
-
-	case 67: /* stp (r1, r2), O(R)!; stp (r1, r2), (R)O! */
-		v := int32(p.To.Offset)
-
-		if v < -512 || v > 504 {
-			ctxt.Diag("offset out of range\n%v", p)
-		}
-		if o.scond == C_XPOST {
-			o1 |= 1 << 23
-		} else {
-			o1 |= 3 << 23
-		}
-		o1 |= uint32(int64(2<<30|5<<27|((uint32(v)/8)&0x7f)<<15) | p.From.Offset<<10 | int64(uint32(p.To.Reg&31)<<5) | int64(p.From.Reg&31))
-
-	case 68: /* movT $vconaddr(SB), reg -> adrp + add + reloc */
-		if p.As == AMOVW {
-			ctxt.Diag("invalid load of 32-bit address: %v", p)
-		}
-		o1 = ADR(1, 0, uint32(p.To.Reg))
-		o2 = opirr(ctxt, AADD) | uint32(p.To.Reg&31)<<5 | uint32(p.To.Reg&31)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Add = p.From.Offset
-		rel.Type = obj.R_ADDRARM64
-
-	case 69: /* LE model movd $tlsvar, reg -> movz reg, 0 + reloc */
-		o1 = opirr(ctxt, AMOVZ)
-		o1 |= uint32(p.To.Reg & 31)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Type = obj.R_ARM64_TLS_LE
-		if p.From.Offset != 0 {
-			ctxt.Diag("invalid offset on MOVW $tlsvar")
-		}
-
-	case 70: /* IE model movd $tlsvar, reg -> adrp REGTMP, 0; ldr reg, [REGTMP, #0] + relocs */
-		o1 = ADR(1, 0, REGTMP)
-		o2 = olsr12u(ctxt, int32(opldr12(ctxt, AMOVD)), 0, REGTMP, int(p.To.Reg))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Add = 0
-		rel.Type = obj.R_ARM64_TLS_IE
-		if p.From.Offset != 0 {
-			ctxt.Diag("invalid offset on MOVW $tlsvar")
-		}
-
-	case 71: /* movd sym@GOT, reg -> adrp REGTMP, #0; ldr reg, [REGTMP, #0] + relocs */
-		o1 = ADR(1, 0, REGTMP)
-		o2 = olsr12u(ctxt, int32(opldr12(ctxt, AMOVD)), 0, REGTMP, int(p.To.Reg))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Add = 0
-		rel.Type = obj.R_ARM64_GOTPCREL
-
-	// This is supposed to be something that stops execution.
-	// It's not supposed to be reached, ever, but if it is, we'd
-	// like to be able to tell how we got there. Assemble as
-	// 0xbea71700, which is guaranteed to raise an undefined
-	// instruction exception.
-	case 90:
-		o1 = 0xbea71700
-
-		break
-	}
-
-	out[0] = o1
-	out[1] = o2
-	out[2] = o3
-	out[3] = o4
-	out[4] = o5
-	return
-}
-
-/*
- * basic Rm op Rn -> Rd (using shifted register with 0)
- * also op Rn -> Rt
- * also Rm*Rn op Ra -> Rd
- */
-func oprrr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AADC:
-		return S64 | 0<<30 | 0<<29 | 0xd0<<21 | 0<<10
-
-	case AADCW:
-		return S32 | 0<<30 | 0<<29 | 0xd0<<21 | 0<<10
-
-	case AADCS:
-		return S64 | 0<<30 | 1<<29 | 0xd0<<21 | 0<<10
-
-	case AADCSW:
-		return S32 | 0<<30 | 1<<29 | 0xd0<<21 | 0<<10
-
-	case ANGC, ASBC:
-		return S64 | 1<<30 | 0<<29 | 0xd0<<21 | 0<<10
-
-	case ANGCS, ASBCS:
-		return S64 | 1<<30 | 1<<29 | 0xd0<<21 | 0<<10
-
-	case ANGCW, ASBCW:
-		return S32 | 1<<30 | 0<<29 | 0xd0<<21 | 0<<10
-
-	case ANGCSW, ASBCSW:
-		return S32 | 1<<30 | 1<<29 | 0xd0<<21 | 0<<10
-
-	case AADD:
-		return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case AADDW:
-		return S32 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case ACMN, AADDS:
-		return S64 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case ACMNW, AADDSW:
-		return S32 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case ASUB:
-		return S64 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case ASUBW:
-		return S32 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case ACMP, ASUBS:
-		return S64 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case ACMPW, ASUBSW:
-		return S32 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10
-
-	case AAND:
-		return S64 | 0<<29 | 0xA<<24
-
-	case AANDW:
-		return S32 | 0<<29 | 0xA<<24
-
-	case AMOVD, AORR:
-		return S64 | 1<<29 | 0xA<<24
-
-		//	case AMOVW:
-	case AMOVWU, AORRW:
-		return S32 | 1<<29 | 0xA<<24
-
-	case AEOR:
-		return S64 | 2<<29 | 0xA<<24
-
-	case AEORW:
-		return S32 | 2<<29 | 0xA<<24
-
-	case AANDS:
-		return S64 | 3<<29 | 0xA<<24
-
-	case AANDSW:
-		return S32 | 3<<29 | 0xA<<24
-
-	case ABIC:
-		return S64 | 0<<29 | 0xA<<24 | 1<<21
-
-	case ABICW:
-		return S32 | 0<<29 | 0xA<<24 | 1<<21
-
-	case ABICS:
-		return S64 | 3<<29 | 0xA<<24 | 1<<21
-
-	case ABICSW:
-		return S32 | 3<<29 | 0xA<<24 | 1<<21
-
-	case AEON:
-		return S64 | 2<<29 | 0xA<<24 | 1<<21
-
-	case AEONW:
-		return S32 | 2<<29 | 0xA<<24 | 1<<21
-
-	case AMVN, AORN:
-		return S64 | 1<<29 | 0xA<<24 | 1<<21
-
-	case AMVNW, AORNW:
-		return S32 | 1<<29 | 0xA<<24 | 1<<21
-
-	case AASR:
-		return S64 | OPDP2(10) /* also ASRV */
-
-	case AASRW:
-		return S32 | OPDP2(10)
-
-	case ALSL:
-		return S64 | OPDP2(8)
-
-	case ALSLW:
-		return S32 | OPDP2(8)
-
-	case ALSR:
-		return S64 | OPDP2(9)
-
-	case ALSRW:
-		return S32 | OPDP2(9)
-
-	case AROR:
-		return S64 | OPDP2(11)
-
-	case ARORW:
-		return S32 | OPDP2(11)
-
-	case ACCMN:
-		return S64 | 0<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4 /* cond<<12 | nzcv<<0 */
-
-	case ACCMNW:
-		return S32 | 0<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4
-
-	case ACCMP:
-		return S64 | 1<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4 /* imm5<<16 | cond<<12 | nzcv<<0 */
-
-	case ACCMPW:
-		return S32 | 1<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4
-
-	case ACRC32B:
-		return S32 | OPDP2(16)
-
-	case ACRC32H:
-		return S32 | OPDP2(17)
-
-	case ACRC32W:
-		return S32 | OPDP2(18)
-
-	case ACRC32X:
-		return S64 | OPDP2(19)
-
-	case ACRC32CB:
-		return S32 | OPDP2(20)
-
-	case ACRC32CH:
-		return S32 | OPDP2(21)
-
-	case ACRC32CW:
-		return S32 | OPDP2(22)
-
-	case ACRC32CX:
-		return S64 | OPDP2(23)
-
-	case ACSEL:
-		return S64 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10
-
-	case ACSELW:
-		return S32 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10
-
-	case ACSET:
-		return S64 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10
-
-	case ACSETW:
-		return S32 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10
-
-	case ACSETM:
-		return S64 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10
-
-	case ACSETMW:
-		return S32 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10
-
-	case ACINC, ACSINC:
-		return S64 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10
-
-	case ACINCW, ACSINCW:
-		return S32 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10
-
-	case ACINV, ACSINV:
-		return S64 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10
-
-	case ACINVW, ACSINVW:
-		return S32 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10
-
-	case ACNEG, ACSNEG:
-		return S64 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10
-
-	case ACNEGW, ACSNEGW:
-		return S32 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10
-
-	case AMUL, AMADD:
-		return S64 | 0<<29 | 0x1B<<24 | 0<<21 | 0<<15
-
-	case AMULW, AMADDW:
-		return S32 | 0<<29 | 0x1B<<24 | 0<<21 | 0<<15
-
-	case AMNEG, AMSUB:
-		return S64 | 0<<29 | 0x1B<<24 | 0<<21 | 1<<15
-
-	case AMNEGW, AMSUBW:
-		return S32 | 0<<29 | 0x1B<<24 | 0<<21 | 1<<15
-
-	case AMRS:
-		return SYSOP(1, 2, 0, 0, 0, 0, 0)
-
-	case AMSR:
-		return SYSOP(0, 2, 0, 0, 0, 0, 0)
-
-	case ANEG:
-		return S64 | 1<<30 | 0<<29 | 0xB<<24 | 0<<21
-
-	case ANEGW:
-		return S32 | 1<<30 | 0<<29 | 0xB<<24 | 0<<21
-
-	case ANEGS:
-		return S64 | 1<<30 | 1<<29 | 0xB<<24 | 0<<21
-
-	case ANEGSW:
-		return S32 | 1<<30 | 1<<29 | 0xB<<24 | 0<<21
-
-	case AREM, ASDIV:
-		return S64 | OPDP2(3)
-
-	case AREMW, ASDIVW:
-		return S32 | OPDP2(3)
-
-	case ASMULL, ASMADDL:
-		return OPDP3(1, 0, 1, 0)
-
-	case ASMNEGL, ASMSUBL:
-		return OPDP3(1, 0, 1, 1)
-
-	case ASMULH:
-		return OPDP3(1, 0, 2, 0)
-
-	case AUMULL, AUMADDL:
-		return OPDP3(1, 0, 5, 0)
-
-	case AUMNEGL, AUMSUBL:
-		return OPDP3(1, 0, 5, 1)
-
-	case AUMULH:
-		return OPDP3(1, 0, 6, 0)
-
-	case AUREM, AUDIV:
-		return S64 | OPDP2(2)
-
-	case AUREMW, AUDIVW:
-		return S32 | OPDP2(2)
-
-	case AAESE:
-		return 0x4E<<24 | 2<<20 | 8<<16 | 4<<12 | 2<<10
-
-	case AAESD:
-		return 0x4E<<24 | 2<<20 | 8<<16 | 5<<12 | 2<<10
-
-	case AAESMC:
-		return 0x4E<<24 | 2<<20 | 8<<16 | 6<<12 | 2<<10
-
-	case AAESIMC:
-		return 0x4E<<24 | 2<<20 | 8<<16 | 7<<12 | 2<<10
-
-	case ASHA1C:
-		return 0x5E<<24 | 0<<12
-
-	case ASHA1P:
-		return 0x5E<<24 | 1<<12
-
-	case ASHA1M:
-		return 0x5E<<24 | 2<<12
-
-	case ASHA1SU0:
-		return 0x5E<<24 | 3<<12
-
-	case ASHA256H:
-		return 0x5E<<24 | 4<<12
-
-	case ASHA256H2:
-		return 0x5E<<24 | 5<<12
-
-	case ASHA256SU1:
-		return 0x5E<<24 | 6<<12
-
-	case ASHA1H:
-		return 0x5E<<24 | 2<<20 | 8<<16 | 0<<12 | 2<<10
-
-	case ASHA1SU1:
-		return 0x5E<<24 | 2<<20 | 8<<16 | 1<<12 | 2<<10
-
-	case ASHA256SU0:
-		return 0x5E<<24 | 2<<20 | 8<<16 | 2<<12 | 2<<10
-
-	case AFCVTZSD:
-		return FPCVTI(1, 0, 1, 3, 0)
-
-	case AFCVTZSDW:
-		return FPCVTI(0, 0, 1, 3, 0)
-
-	case AFCVTZSS:
-		return FPCVTI(1, 0, 0, 3, 0)
-
-	case AFCVTZSSW:
-		return FPCVTI(0, 0, 0, 3, 0)
-
-	case AFCVTZUD:
-		return FPCVTI(1, 0, 1, 3, 1)
-
-	case AFCVTZUDW:
-		return FPCVTI(0, 0, 1, 3, 1)
-
-	case AFCVTZUS:
-		return FPCVTI(1, 0, 0, 3, 1)
-
-	case AFCVTZUSW:
-		return FPCVTI(0, 0, 0, 3, 1)
-
-	case ASCVTFD:
-		return FPCVTI(1, 0, 1, 0, 2)
-
-	case ASCVTFS:
-		return FPCVTI(1, 0, 0, 0, 2)
-
-	case ASCVTFWD:
-		return FPCVTI(0, 0, 1, 0, 2)
-
-	case ASCVTFWS:
-		return FPCVTI(0, 0, 0, 0, 2)
-
-	case AUCVTFD:
-		return FPCVTI(1, 0, 1, 0, 3)
-
-	case AUCVTFS:
-		return FPCVTI(1, 0, 0, 0, 3)
-
-	case AUCVTFWD:
-		return FPCVTI(0, 0, 1, 0, 3)
-
-	case AUCVTFWS:
-		return FPCVTI(0, 0, 0, 0, 3)
-
-	case AFADDS:
-		return FPOP2S(0, 0, 0, 2)
-
-	case AFADDD:
-		return FPOP2S(0, 0, 1, 2)
-
-	case AFSUBS:
-		return FPOP2S(0, 0, 0, 3)
-
-	case AFSUBD:
-		return FPOP2S(0, 0, 1, 3)
-
-	case AFMULS:
-		return FPOP2S(0, 0, 0, 0)
-
-	case AFMULD:
-		return FPOP2S(0, 0, 1, 0)
-
-	case AFDIVS:
-		return FPOP2S(0, 0, 0, 1)
-
-	case AFDIVD:
-		return FPOP2S(0, 0, 1, 1)
-
-	case AFMAXS:
-		return FPOP2S(0, 0, 0, 4)
-
-	case AFMINS:
-		return FPOP2S(0, 0, 0, 5)
-
-	case AFMAXD:
-		return FPOP2S(0, 0, 1, 4)
-
-	case AFMIND:
-		return FPOP2S(0, 0, 1, 5)
-
-	case AFMAXNMS:
-		return FPOP2S(0, 0, 0, 6)
-
-	case AFMAXNMD:
-		return FPOP2S(0, 0, 1, 6)
-
-	case AFMINNMS:
-		return FPOP2S(0, 0, 0, 7)
-
-	case AFMINNMD:
-		return FPOP2S(0, 0, 1, 7)
-
-	case AFNMULS:
-		return FPOP2S(0, 0, 0, 8)
-
-	case AFNMULD:
-		return FPOP2S(0, 0, 1, 8)
-
-	case AFCMPS:
-		return FPCMP(0, 0, 0, 0, 0)
-
-	case AFCMPD:
-		return FPCMP(0, 0, 1, 0, 0)
-
-	case AFCMPES:
-		return FPCMP(0, 0, 0, 0, 16)
-
-	case AFCMPED:
-		return FPCMP(0, 0, 1, 0, 16)
-
-	case AFCCMPS:
-		return FPCCMP(0, 0, 0, 0)
-
-	case AFCCMPD:
-		return FPCCMP(0, 0, 1, 0)
-
-	case AFCCMPES:
-		return FPCCMP(0, 0, 0, 1)
-
-	case AFCCMPED:
-		return FPCCMP(0, 0, 1, 1)
-
-	case AFCSELS:
-		return 0x1E<<24 | 0<<22 | 1<<21 | 3<<10
-
-	case AFCSELD:
-		return 0x1E<<24 | 1<<22 | 1<<21 | 3<<10
-
-	case AFMOVS:
-		return FPOP1S(0, 0, 0, 0)
-
-	case AFABSS:
-		return FPOP1S(0, 0, 0, 1)
-
-	case AFNEGS:
-		return FPOP1S(0, 0, 0, 2)
-
-	case AFSQRTS:
-		return FPOP1S(0, 0, 0, 3)
-
-	case AFCVTSD:
-		return FPOP1S(0, 0, 0, 5)
-
-	case AFCVTSH:
-		return FPOP1S(0, 0, 0, 7)
-
-	case AFRINTNS:
-		return FPOP1S(0, 0, 0, 8)
-
-	case AFRINTPS:
-		return FPOP1S(0, 0, 0, 9)
-
-	case AFRINTMS:
-		return FPOP1S(0, 0, 0, 10)
-
-	case AFRINTZS:
-		return FPOP1S(0, 0, 0, 11)
-
-	case AFRINTAS:
-		return FPOP1S(0, 0, 0, 12)
-
-	case AFRINTXS:
-		return FPOP1S(0, 0, 0, 14)
-
-	case AFRINTIS:
-		return FPOP1S(0, 0, 0, 15)
-
-	case AFMOVD:
-		return FPOP1S(0, 0, 1, 0)
-
-	case AFABSD:
-		return FPOP1S(0, 0, 1, 1)
-
-	case AFNEGD:
-		return FPOP1S(0, 0, 1, 2)
-
-	case AFSQRTD:
-		return FPOP1S(0, 0, 1, 3)
-
-	case AFCVTDS:
-		return FPOP1S(0, 0, 1, 4)
-
-	case AFCVTDH:
-		return FPOP1S(0, 0, 1, 7)
-
-	case AFRINTND:
-		return FPOP1S(0, 0, 1, 8)
-
-	case AFRINTPD:
-		return FPOP1S(0, 0, 1, 9)
-
-	case AFRINTMD:
-		return FPOP1S(0, 0, 1, 10)
-
-	case AFRINTZD:
-		return FPOP1S(0, 0, 1, 11)
-
-	case AFRINTAD:
-		return FPOP1S(0, 0, 1, 12)
-
-	case AFRINTXD:
-		return FPOP1S(0, 0, 1, 14)
-
-	case AFRINTID:
-		return FPOP1S(0, 0, 1, 15)
-
-	case AFCVTHS:
-		return FPOP1S(0, 0, 3, 4)
-
-	case AFCVTHD:
-		return FPOP1S(0, 0, 3, 5)
-	}
-
-	ctxt.Diag("bad rrr %d %v", a, a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-/*
- * imm -> Rd
- * imm op Rn -> Rd
- */
-func opirr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	/* op $addcon, Rn, Rd */
-	case AMOVD, AADD:
-		return S64 | 0<<30 | 0<<29 | 0x11<<24
-
-	case ACMN, AADDS:
-		return S64 | 0<<30 | 1<<29 | 0x11<<24
-
-	case AMOVW, AADDW:
-		return S32 | 0<<30 | 0<<29 | 0x11<<24
-
-	case ACMNW, AADDSW:
-		return S32 | 0<<30 | 1<<29 | 0x11<<24
-
-	case ASUB:
-		return S64 | 1<<30 | 0<<29 | 0x11<<24
-
-	case ACMP, ASUBS:
-		return S64 | 1<<30 | 1<<29 | 0x11<<24
-
-	case ASUBW:
-		return S32 | 1<<30 | 0<<29 | 0x11<<24
-
-	case ACMPW, ASUBSW:
-		return S32 | 1<<30 | 1<<29 | 0x11<<24
-
-		/* op $imm(SB), Rd; op label, Rd */
-	case AADR:
-		return 0<<31 | 0x10<<24
-
-	case AADRP:
-		return 1<<31 | 0x10<<24
-
-		/* op $bimm, Rn, Rd */
-	case AAND, ABIC:
-		return S64 | 0<<29 | 0x24<<23
-
-	case AANDW, ABICW:
-		return S32 | 0<<29 | 0x24<<23 | 0<<22
-
-	case AORR, AORN:
-		return S64 | 1<<29 | 0x24<<23
-
-	case AORRW, AORNW:
-		return S32 | 1<<29 | 0x24<<23 | 0<<22
-
-	case AEOR, AEON:
-		return S64 | 2<<29 | 0x24<<23
-
-	case AEORW, AEONW:
-		return S32 | 2<<29 | 0x24<<23 | 0<<22
-
-	case AANDS, ABICS:
-		return S64 | 3<<29 | 0x24<<23
-
-	case AANDSW, ABICSW:
-		return S32 | 3<<29 | 0x24<<23 | 0<<22
-
-	case AASR:
-		return S64 | 0<<29 | 0x26<<23 /* alias of SBFM */
-
-	case AASRW:
-		return S32 | 0<<29 | 0x26<<23 | 0<<22
-
-		/* op $width, $lsb, Rn, Rd */
-	case ABFI:
-		return S64 | 2<<29 | 0x26<<23 | 1<<22
-		/* alias of BFM */
-
-	case ABFIW:
-		return S32 | 2<<29 | 0x26<<23 | 0<<22
-
-		/* op $imms, $immr, Rn, Rd */
-	case ABFM:
-		return S64 | 1<<29 | 0x26<<23 | 1<<22
-
-	case ABFMW:
-		return S32 | 1<<29 | 0x26<<23 | 0<<22
-
-	case ASBFM:
-		return S64 | 0<<29 | 0x26<<23 | 1<<22
-
-	case ASBFMW:
-		return S32 | 0<<29 | 0x26<<23 | 0<<22
-
-	case AUBFM:
-		return S64 | 2<<29 | 0x26<<23 | 1<<22
-
-	case AUBFMW:
-		return S32 | 2<<29 | 0x26<<23 | 0<<22
-
-	case ABFXIL:
-		return S64 | 1<<29 | 0x26<<23 | 1<<22 /* alias of BFM */
-
-	case ABFXILW:
-		return S32 | 1<<29 | 0x26<<23 | 0<<22
-
-	case AEXTR:
-		return S64 | 0<<29 | 0x27<<23 | 1<<22 | 0<<21
-
-	case AEXTRW:
-		return S32 | 0<<29 | 0x27<<23 | 0<<22 | 0<<21
-
-	case ACBNZ:
-		return S64 | 0x1A<<25 | 1<<24
-
-	case ACBNZW:
-		return S32 | 0x1A<<25 | 1<<24
-
-	case ACBZ:
-		return S64 | 0x1A<<25 | 0<<24
-
-	case ACBZW:
-		return S32 | 0x1A<<25 | 0<<24
-
-	case ACCMN:
-		return S64 | 0<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4 /* imm5<<16 | cond<<12 | nzcv<<0 */
-
-	case ACCMNW:
-		return S32 | 0<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4
-
-	case ACCMP:
-		return S64 | 1<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4 /* imm5<<16 | cond<<12 | nzcv<<0 */
-
-	case ACCMPW:
-		return S32 | 1<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4
-
-	case AMOVK:
-		return S64 | 3<<29 | 0x25<<23
-
-	case AMOVKW:
-		return S32 | 3<<29 | 0x25<<23
-
-	case AMOVN:
-		return S64 | 0<<29 | 0x25<<23
-
-	case AMOVNW:
-		return S32 | 0<<29 | 0x25<<23
-
-	case AMOVZ:
-		return S64 | 2<<29 | 0x25<<23
-
-	case AMOVZW:
-		return S32 | 2<<29 | 0x25<<23
-
-	case AMSR:
-		return SYSOP(0, 0, 0, 4, 0, 0, 0x1F) /* MSR (immediate) */
-
-	case AAT,
-		ADC,
-		AIC,
-		ATLBI,
-		ASYS:
-		return SYSOP(0, 1, 0, 0, 0, 0, 0)
-
-	case ASYSL:
-		return SYSOP(1, 1, 0, 0, 0, 0, 0)
-
-	case ATBZ:
-		return 0x36 << 24
-
-	case ATBNZ:
-		return 0x37 << 24
-
-	case ADSB:
-		return SYSOP(0, 0, 3, 3, 0, 4, 0x1F)
-
-	case ADMB:
-		return SYSOP(0, 0, 3, 3, 0, 5, 0x1F)
-
-	case AISB:
-		return SYSOP(0, 0, 3, 3, 0, 6, 0x1F)
-
-	case AHINT:
-		return SYSOP(0, 0, 3, 2, 0, 0, 0x1F)
-	}
-
-	ctxt.Diag("bad irr %v", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-func opbit(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ACLS:
-		return S64 | OPBIT(5)
-
-	case ACLSW:
-		return S32 | OPBIT(5)
-
-	case ACLZ:
-		return S64 | OPBIT(4)
-
-	case ACLZW:
-		return S32 | OPBIT(4)
-
-	case ARBIT:
-		return S64 | OPBIT(0)
-
-	case ARBITW:
-		return S32 | OPBIT(0)
-
-	case AREV:
-		return S64 | OPBIT(3)
-
-	case AREVW:
-		return S32 | OPBIT(2)
-
-	case AREV16:
-		return S64 | OPBIT(1)
-
-	case AREV16W:
-		return S32 | OPBIT(1)
-
-	case AREV32:
-		return S64 | OPBIT(2)
-
-	default:
-		ctxt.Diag("bad bit op\n%v", ctxt.Curp)
-		return 0
-	}
-}
-
-/*
- * add/subtract extended register
- */
-func opxrrr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AADD:
-		return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_64
-
-	case AADDW:
-		return S32 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_32
-
-	case ACMN, AADDS:
-		return S64 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_64
-
-	case ACMNW, AADDSW:
-		return S32 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_32
-
-	case ASUB:
-		return S64 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_64
-
-	case ASUBW:
-		return S32 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_32
-
-	case ACMP, ASUBS:
-		return S64 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_64
-
-	case ACMPW, ASUBSW:
-		return S32 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_32
-	}
-
-	ctxt.Diag("bad opxrrr %v\n%v", a, ctxt.Curp)
-	return 0
-}
-
-func opimm(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ASVC:
-		return 0xD4<<24 | 0<<21 | 1 /* imm16<<5 */
-
-	case AHVC:
-		return 0xD4<<24 | 0<<21 | 2
-
-	case ASMC:
-		return 0xD4<<24 | 0<<21 | 3
-
-	case ABRK:
-		return 0xD4<<24 | 1<<21 | 0
-
-	case AHLT:
-		return 0xD4<<24 | 2<<21 | 0
-
-	case ADCPS1:
-		return 0xD4<<24 | 5<<21 | 1
-
-	case ADCPS2:
-		return 0xD4<<24 | 5<<21 | 2
-
-	case ADCPS3:
-		return 0xD4<<24 | 5<<21 | 3
-
-	case ACLREX:
-		return SYSOP(0, 0, 3, 3, 0, 2, 0x1F)
-	}
-
-	ctxt.Diag("bad imm %v", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-func brdist(ctxt *obj.Link, p *obj.Prog, preshift int, flen int, shift int) int64 {
-	v := int64(0)
-	t := int64(0)
-	if p.Pcond != nil {
-		v = (p.Pcond.Pc >> uint(preshift)) - (ctxt.Pc >> uint(preshift))
-		if (v & ((1 << uint(shift)) - 1)) != 0 {
-			ctxt.Diag("misaligned label\n%v", p)
-		}
-		v >>= uint(shift)
-		t = int64(1) << uint(flen-1)
-		if v < -t || v >= t {
-			ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, ctxt.Blitrl, p, p.Pcond)
-			panic("branch too far")
-		}
-	}
-
-	return v & ((t << 1) - 1)
-}
-
-/*
- * pc-relative branches
- */
-func opbra(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ABEQ:
-		return OPBcc(0x0)
-
-	case ABNE:
-		return OPBcc(0x1)
-
-	case ABCS:
-		return OPBcc(0x2)
-
-	case ABHS:
-		return OPBcc(0x2)
-
-	case ABCC:
-		return OPBcc(0x3)
-
-	case ABLO:
-		return OPBcc(0x3)
-
-	case ABMI:
-		return OPBcc(0x4)
-
-	case ABPL:
-		return OPBcc(0x5)
-
-	case ABVS:
-		return OPBcc(0x6)
-
-	case ABVC:
-		return OPBcc(0x7)
-
-	case ABHI:
-		return OPBcc(0x8)
-
-	case ABLS:
-		return OPBcc(0x9)
-
-	case ABGE:
-		return OPBcc(0xa)
-
-	case ABLT:
-		return OPBcc(0xb)
-
-	case ABGT:
-		return OPBcc(0xc)
-
-	case ABLE:
-		return OPBcc(0xd) /* imm19<<5 | cond */
-
-	case AB:
-		return 0<<31 | 5<<26 /* imm26 */
-
-	case obj.ADUFFZERO, obj.ADUFFCOPY, ABL:
-		return 1<<31 | 5<<26
-	}
-
-	ctxt.Diag("bad bra %v", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-func opbrr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ABL:
-		return OPBLR(1) /* BLR */
-
-	case AB:
-		return OPBLR(0) /* BR */
-
-	case obj.ARET:
-		return OPBLR(2) /* RET */
-	}
-
-	ctxt.Diag("bad brr %v", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-func op0(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ADRPS:
-		return 0x6B<<25 | 5<<21 | 0x1F<<16 | 0x1F<<5
-
-	case AERET:
-		return 0x6B<<25 | 4<<21 | 0x1F<<16 | 0<<10 | 0x1F<<5
-
-	// case ANOP:
-	// 	return SYSHINT(0)
-
-	case AYIELD:
-		return SYSHINT(1)
-
-	case AWFE:
-		return SYSHINT(2)
-
-	case AWFI:
-		return SYSHINT(3)
-
-	case ASEV:
-		return SYSHINT(4)
-
-	case ASEVL:
-		return SYSHINT(5)
-	}
-
-	ctxt.Diag("bad op0 %v", a)
-	prasm(ctxt.Curp)
-	return 0
-}
-
-/*
- * register offset
- */
-func opload(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ALDAR:
-		return LDSTX(3, 1, 1, 0, 1) | 0x1F<<10
-
-	case ALDARW:
-		return LDSTX(2, 1, 1, 0, 1) | 0x1F<<10
-
-	case ALDARB:
-		return LDSTX(0, 1, 1, 0, 1) | 0x1F<<10
-
-	case ALDARH:
-		return LDSTX(1, 1, 1, 0, 1) | 0x1F<<10
-
-	case ALDAXP:
-		return LDSTX(3, 0, 1, 1, 1)
-
-	case ALDAXPW:
-		return LDSTX(2, 0, 1, 1, 1)
-
-	case ALDAXR:
-		return LDSTX(3, 0, 1, 0, 1) | 0x1F<<10
-
-	case ALDAXRW:
-		return LDSTX(2, 0, 1, 0, 1) | 0x1F<<10
-
-	case ALDAXRB:
-		return LDSTX(0, 0, 1, 0, 1) | 0x1F<<10
-
-	case ALDAXRH:
-		return LDSTX(1, 0, 1, 0, 1) | 0x1F<<10
-
-	case ALDXR:
-		return LDSTX(3, 0, 1, 0, 0) | 0x1F<<10
-
-	case ALDXRB:
-		return LDSTX(0, 0, 1, 0, 0) | 0x1F<<10
-
-	case ALDXRH:
-		return LDSTX(1, 0, 1, 0, 0) | 0x1F<<10
-
-	case ALDXRW:
-		return LDSTX(2, 0, 1, 0, 0) | 0x1F<<10
-
-	case ALDXP:
-		return LDSTX(3, 0, 1, 1, 0)
-
-	case ALDXPW:
-		return LDSTX(2, 0, 1, 1, 0)
-
-	case AMOVNP:
-		return S64 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22
-
-	case AMOVNPW:
-		return S32 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22
-	}
-
-	ctxt.Diag("bad opload %v\n%v", a, ctxt.Curp)
-	return 0
-}
-
-func opstore(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ASTLR:
-		return LDSTX(3, 1, 0, 0, 1) | 0x1F<<10
-
-	case ASTLRB:
-		return LDSTX(0, 1, 0, 0, 1) | 0x1F<<10
-
-	case ASTLRH:
-		return LDSTX(1, 1, 0, 0, 1) | 0x1F<<10
-
-	case ASTLP:
-		return LDSTX(3, 0, 0, 1, 1)
-
-	case ASTLPW:
-		return LDSTX(2, 0, 0, 1, 1)
-
-	case ASTLRW:
-		return LDSTX(2, 1, 0, 0, 1) | 0x1F<<10
-
-	case ASTLXP:
-		return LDSTX(3, 0, 0, 1, 1)
-
-	case ASTLXPW:
-		return LDSTX(2, 0, 0, 1, 1)
-
-	case ASTLXR:
-		return LDSTX(3, 0, 0, 0, 1) | 0x1F<<10
-
-	case ASTLXRB:
-		return LDSTX(0, 0, 0, 0, 1) | 0x1F<<10
-
-	case ASTLXRH:
-		return LDSTX(1, 0, 0, 0, 1) | 0x1F<<10
-
-	case ASTLXRW:
-		return LDSTX(2, 0, 0, 0, 1) | 0x1F<<10
-
-	case ASTXR:
-		return LDSTX(3, 0, 0, 0, 0) | 0x1F<<10
-
-	case ASTXRB:
-		return LDSTX(0, 0, 0, 0, 0) | 0x1F<<10
-
-	case ASTXRH:
-		return LDSTX(1, 0, 0, 0, 0) | 0x1F<<10
-
-	case ASTXP:
-		return LDSTX(3, 0, 0, 1, 0)
-
-	case ASTXPW:
-		return LDSTX(2, 0, 0, 1, 0)
-
-	case ASTXRW:
-		return LDSTX(2, 0, 0, 0, 0) | 0x1F<<10
-
-	case AMOVNP:
-		return S64 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22
-
-	case AMOVNPW:
-		return S32 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22
-	}
-
-	ctxt.Diag("bad opstore %v\n%v", a, ctxt.Curp)
-	return 0
-}
-
-/*
- * load/store register (unsigned immediate) C3.3.13
- *	these produce 64-bit values (when there's an option)
- */
-func olsr12u(ctxt *obj.Link, o int32, v int32, b int, r int) uint32 {
-	if v < 0 || v >= (1<<12) {
-		ctxt.Diag("offset out of range: %d\n%v", v, ctxt.Curp)
-	}
-	o |= (v & 0xFFF) << 10
-	o |= int32(b&31) << 5
-	o |= int32(r & 31)
-	return uint32(o)
-}
-
-func opldr12(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVD:
-		return LDSTR12U(3, 0, 1) /* imm12<<10 | Rn<<5 | Rt */
-
-	case AMOVW:
-		return LDSTR12U(2, 0, 2)
-
-	case AMOVWU:
-		return LDSTR12U(2, 0, 1)
-
-	case AMOVH:
-		return LDSTR12U(1, 0, 2)
-
-	case AMOVHU:
-		return LDSTR12U(1, 0, 1)
-
-	case AMOVB:
-		return LDSTR12U(0, 0, 2)
-
-	case AMOVBU:
-		return LDSTR12U(0, 0, 1)
-
-	case AFMOVS:
-		return LDSTR12U(2, 1, 1)
-
-	case AFMOVD:
-		return LDSTR12U(3, 1, 1)
-	}
-
-	ctxt.Diag("bad opldr12 %v\n%v", a, ctxt.Curp)
-	return 0
-}
-
-func opstr12(ctxt *obj.Link, a obj.As) uint32 {
-	return LD2STR(opldr12(ctxt, a))
-}
-
-/*
- * load/store register (unscaled immediate) C3.3.12
- */
-func olsr9s(ctxt *obj.Link, o int32, v int32, b int, r int) uint32 {
-	if v < -256 || v > 255 {
-		ctxt.Diag("offset out of range: %d\n%v", v, ctxt.Curp)
-	}
-	o |= (v & 0x1FF) << 12
-	o |= int32(b&31) << 5
-	o |= int32(r & 31)
-	return uint32(o)
-}
-
-func opldr9(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVD:
-		return LDSTR9S(3, 0, 1) /* simm9<<12 | Rn<<5 | Rt */
-
-	case AMOVW:
-		return LDSTR9S(2, 0, 2)
-
-	case AMOVWU:
-		return LDSTR9S(2, 0, 1)
-
-	case AMOVH:
-		return LDSTR9S(1, 0, 2)
-
-	case AMOVHU:
-		return LDSTR9S(1, 0, 1)
-
-	case AMOVB:
-		return LDSTR9S(0, 0, 2)
-
-	case AMOVBU:
-		return LDSTR9S(0, 0, 1)
-
-	case AFMOVS:
-		return LDSTR9S(2, 1, 1)
-
-	case AFMOVD:
-		return LDSTR9S(3, 1, 1)
-	}
-
-	ctxt.Diag("bad opldr9 %v\n%v", a, ctxt.Curp)
-	return 0
-}
-
-func opstr9(ctxt *obj.Link, a obj.As) uint32 {
-	return LD2STR(opldr9(ctxt, a))
-}
-
-func opldrpp(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVD:
-		return 3<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 /* simm9<<12 | Rn<<5 | Rt */
-
-	case AMOVW:
-		return 2<<30 | 7<<27 | 0<<26 | 0<<24 | 2<<22
-
-	case AMOVWU:
-		return 2<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22
-
-	case AMOVH:
-		return 1<<30 | 7<<27 | 0<<26 | 0<<24 | 2<<22
-
-	case AMOVHU:
-		return 1<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22
-
-	case AMOVB:
-		return 0<<30 | 7<<27 | 0<<26 | 0<<24 | 2<<22
-
-	case AMOVBU:
-		return 0<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22
-	}
-
-	ctxt.Diag("bad opldr %v\n%v", a, ctxt.Curp)
-	return 0
-}
-
-/*
- * load/store register (extended register)
- */
-func olsxrr(ctxt *obj.Link, as obj.As, rt int, r1 int, r2 int) uint32 {
-	ctxt.Diag("need load/store extended register\n%v", ctxt.Curp)
-	return 0xffffffff
-}
-
-func oaddi(ctxt *obj.Link, o1 int32, v int32, r int, rt int) uint32 {
-	if (v & 0xFFF000) != 0 {
-		if v&0xFFF != 0 {
-			ctxt.Diag("%v misuses oaddi", ctxt.Curp)
-		}
-		v >>= 12
-		o1 |= 1 << 22
-	}
-
-	o1 |= ((v & 0xFFF) << 10) | (int32(r&31) << 5) | int32(rt&31)
-	return uint32(o1)
-}
-
-/*
- * load a literal value into dr
- */
-func omovlit(ctxt *obj.Link, as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 {
-	var o1 int32
-	if p.Pcond == nil { /* not in literal pool */
-		aclass(ctxt, a)
-		ctxt.Logf("omovlit add %d (%#x)\n", ctxt.Instoffset, uint64(ctxt.Instoffset))
-
-		/* TODO: could be clever, and use general constant builder */
-		o1 = int32(opirr(ctxt, AADD))
-
-		v := int32(ctxt.Instoffset)
-		if v != 0 && (v&0xFFF) == 0 {
-			v >>= 12
-			o1 |= 1 << 22 /* shift, by 12 */
-		}
-
-		o1 |= ((v & 0xFFF) << 10) | (REGZERO & 31 << 5) | int32(dr&31)
-	} else {
-		fp := 0
-		w := 0 /* default: 32 bit, unsigned */
-		switch as {
-		case AFMOVS:
-			fp = 1
-
-		case AFMOVD:
-			fp = 1
-			w = 1 /* 64 bit simd&fp */
-
-		case AMOVD:
-			if p.Pcond.As == ADWORD {
-				w = 1 /* 64 bit */
-			} else if p.Pcond.To.Offset < 0 {
-				w = 2 /* sign extend */
-			}
-
-		case AMOVB, AMOVH, AMOVW:
-			w = 2 /* 32 bit, sign-extended to 64 */
-			break
-		}
-
-		v := int32(brdist(ctxt, p, 0, 19, 2))
-		o1 = (int32(w) << 30) | (int32(fp) << 26) | (3 << 27)
-		o1 |= (v & 0x7FFFF) << 5
-		o1 |= int32(dr & 31)
-	}
-
-	return uint32(o1)
-}
-
-// load a constant (MOVCON or BITCON) in a into rt
-func omovconst(ctxt *obj.Link, as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint32) {
-	if c := oclass(a); c == C_BITCON || c == C_ABCON || c == C_ABCON0 {
-		// or $bitcon, REGZERO, rt
-		mode := 64
-		var as1 obj.As
-		switch as {
-		case AMOVW:
-			as1 = AORRW
-			mode = 32
-		case AMOVD:
-			as1 = AORR
-		}
-		o1 = opirr(ctxt, as1)
-		o1 |= bitconEncode(uint64(a.Offset), mode) | uint32(REGZERO&31)<<5 | uint32(rt&31)
-		return o1
-	}
-
-	r := 32
-	if as == AMOVD {
-		r = 64
-	}
-	d := a.Offset
-	s := movcon(d)
-	if s < 0 || s >= r {
-		d = ^d
-		s = movcon(d)
-		if s < 0 || s >= r {
-			ctxt.Diag("impossible move wide: %#x\n%v", uint64(a.Offset), p)
-		}
-		if as == AMOVD {
-			o1 = opirr(ctxt, AMOVN)
-		} else {
-			o1 = opirr(ctxt, AMOVNW)
-		}
-	} else {
-		if as == AMOVD {
-			o1 = opirr(ctxt, AMOVZ)
-		} else {
-			o1 = opirr(ctxt, AMOVZW)
-		}
-	}
-	o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
-	return o1
-}
-
-func opbfm(ctxt *obj.Link, a obj.As, r int, s int, rf int, rt int) uint32 {
-	var c uint32
-	o := opirr(ctxt, a)
-	if (o & (1 << 31)) == 0 {
-		c = 32
-	} else {
-		c = 64
-	}
-	if r < 0 || uint32(r) >= c {
-		ctxt.Diag("illegal bit number\n%v", ctxt.Curp)
-	}
-	o |= (uint32(r) & 0x3F) << 16
-	if s < 0 || uint32(s) >= c {
-		ctxt.Diag("illegal bit number\n%v", ctxt.Curp)
-	}
-	o |= (uint32(s) & 0x3F) << 10
-	o |= (uint32(rf&31) << 5) | uint32(rt&31)
-	return o
-}
-
-func opextr(ctxt *obj.Link, a obj.As, v int32, rn int, rm int, rt int) uint32 {
-	var c uint32
-	o := opirr(ctxt, a)
-	if (o & (1 << 31)) != 0 {
-		c = 63
-	} else {
-		c = 31
-	}
-	if v < 0 || uint32(v) > c {
-		ctxt.Diag("illegal bit number\n%v", ctxt.Curp)
-	}
-	o |= uint32(v) << 10
-	o |= uint32(rn&31) << 5
-	o |= uint32(rm&31) << 16
-	o |= uint32(rt & 31)
-	return o
-}
-
-/*
- * size in log2(bytes)
- */
-func movesize(a obj.As) int {
-	switch a {
-	case AMOVD:
-		return 3
-
-	case AMOVW, AMOVWU:
-		return 2
-
-	case AMOVH, AMOVHU:
-		return 1
-
-	case AMOVB, AMOVBU:
-		return 0
-
-	case AFMOVS:
-		return 2
-
-	case AFMOVD:
-		return 3
-
-	default:
-		return -1
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/asm_test.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/asm_test.go
deleted file mode 100644
index cfdbc2e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/asm_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/asm_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/asm_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arm64
-
-import (
-	"bytes"
-	"fmt"
-	"internal/testenv"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"testing"
-)
-
-// TestLarge generates a very large file to verify that a large
-// program builds successfully and, in particular, that too-far
-// conditional branches are fixed.
-func TestLarge(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skip in short mode")
-	}
-	testenv.MustHaveGoBuild(t)
-
-	dir, err := ioutil.TempDir("", "testlarge")
-	if err != nil {
-		t.Fatalf("could not create directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	// generate a very large function
-	buf := bytes.NewBuffer(make([]byte, 0, 7000000))
-	gen(buf)
-
-	tmpfile := filepath.Join(dir, "x.s")
-	err = ioutil.WriteFile(tmpfile, buf.Bytes(), 0644)
-	if err != nil {
-		t.Fatalf("can't write output: %v\n", err)
-	}
-
-	// build generated file
-	cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), tmpfile)
-	cmd.Env = []string{"GOARCH=arm64", "GOOS=linux"}
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Errorf("Build failed: %v, output: %s", err, out)
-	}
-}
-
-// gen generates a very large program, with a very far conditional branch.
-func gen(buf *bytes.Buffer) {
-	fmt.Fprintln(buf, "TEXT f(SB),0,$0-0")
-	fmt.Fprintln(buf, "CBZ R0, label")
-	fmt.Fprintln(buf, "BEQ label")
-	for i := 0; i < 1<<19; i++ {
-		fmt.Fprintln(buf, "MOVD R0, R1")
-	}
-	fmt.Fprintln(buf, "label:")
-	fmt.Fprintln(buf, "RET")
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/list7.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/list7.go
deleted file mode 100644
index 06cee6a..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/list7.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/list7.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/list7.go:1
-// cmd/7l/list.c and cmd/7l/sub.c from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/
-//
-// 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// 	Portions Copyright © 1997-1999 Vita Nuova Limited
-// 	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// 	Portions Copyright © 2004,2006 Bruce Ellis
-// 	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// 	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// 	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-var strcond = [16]string{
-	"EQ",
-	"NE",
-	"HS",
-	"LO",
-	"MI",
-	"PL",
-	"VS",
-	"VC",
-	"HI",
-	"LS",
-	"GE",
-	"LT",
-	"GT",
-	"LE",
-	"AL",
-	"NV",
-}
-
-func init() {
-	obj.RegisterRegister(obj.RBaseARM64, REG_SPECIAL+1024, Rconv)
-	obj.RegisterOpcode(obj.ABaseARM64, Anames)
-}
-
-func Rconv(r int) string {
-	if r == REGG {
-		return "g"
-	}
-	switch {
-	case REG_R0 <= r && r <= REG_R30:
-		return fmt.Sprintf("R%d", r-REG_R0)
-	case r == REG_R31:
-		return "ZR"
-	case REG_F0 <= r && r <= REG_F31:
-		return fmt.Sprintf("F%d", r-REG_F0)
-	case REG_V0 <= r && r <= REG_V31:
-		return fmt.Sprintf("V%d", r-REG_V0)
-	case COND_EQ <= r && r <= COND_NV:
-		return strcond[r-COND_EQ]
-	case r == REGSP:
-		return "RSP"
-	case r == REG_DAIF:
-		return "DAIF"
-	case r == REG_NZCV:
-		return "NZCV"
-	case r == REG_FPSR:
-		return "FPSR"
-	case r == REG_FPCR:
-		return "FPCR"
-	case r == REG_SPSR_EL1:
-		return "SPSR_EL1"
-	case r == REG_ELR_EL1:
-		return "ELR_EL1"
-	case r == REG_SPSR_EL2:
-		return "SPSR_EL2"
-	case r == REG_ELR_EL2:
-		return "ELR_EL2"
-	case r == REG_CurrentEL:
-		return "CurrentEL"
-	case r == REG_SP_EL0:
-		return "SP_EL0"
-	case r == REG_SPSel:
-		return "SPSel"
-	case r == REG_DAIFSet:
-		return "DAIFSet"
-	case r == REG_DAIFClr:
-		return "DAIFClr"
-	}
-	return fmt.Sprintf("badreg(%d)", r)
-}
-
-func DRconv(a int) string {
-	if a >= C_NONE && a <= C_NCLASS {
-		return cnames7[a]
-	}
-	return "C_??"
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/obj7.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/obj7.go
deleted file mode 100644
index cf68577..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/arm64/obj7.go
+++ /dev/null
@@ -1,1007 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/obj7.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/arm64/obj7.go:1
-// cmd/7l/noop.c, cmd/7l/obj.c, cmd/ld/pass.c from Vita Nuova.
-// https://code.google.com/p/ken-cc/source/browse/
-//
-// 	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// 	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-// 	Portions Copyright © 1997-1999 Vita Nuova Limited
-// 	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-// 	Portions Copyright © 2004,2006 Bruce Ellis
-// 	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-// 	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-// 	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"log"
-	"math"
-)
-
-var complements = []obj.As{
-	AADD:  ASUB,
-	AADDW: ASUBW,
-	ASUB:  AADD,
-	ASUBW: AADDW,
-	ACMP:  ACMN,
-	ACMPW: ACMNW,
-	ACMN:  ACMP,
-	ACMNW: ACMPW,
-}
-
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
-	// MOV	g_stackguard(g), R1
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AMOVD
-	p.From.Type = obj.TYPE_MEM
-	p.From.Reg = REGG
-	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-	if ctxt.Cursym.CFunc() {
-		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-	}
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R1
-
-	q := (*obj.Prog)(nil)
-	if framesize <= obj.StackSmall {
-		// small stack: SP < stackguard
-		//	MOV	SP, R2
-		//	CMP	stackguard, R2
-		p = obj.Appendp(ctxt, p)
-
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.Reg = REG_R2
-	} else if framesize <= obj.StackBig {
-		// large stack: SP-framesize < stackguard-StackSmall
-		//	SUB	$framesize, SP, R2
-		//	CMP	stackguard, R2
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ASUB
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(framesize)
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.Reg = REG_R2
-	} else {
-		// The stack is so large that we need to protect against
-		// wraparound if SP is close to zero.
-		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
-		// The +StackGuard on both sides is required to keep the left side positive:
-		// SP is allowed to be slightly below stackguard. See stack.h.
-		//	CMP	$StackPreempt, R1
-		//	BEQ	label_of_call_to_morestack
-		//	ADD	$StackGuard, SP, R2
-		//	SUB	R1, R2
-		//	MOV	$(framesize+(StackGuard-StackSmall)), R3
-		//	CMP	R3, R2
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ACMP
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = obj.StackPreempt
-		p.Reg = REG_R1
-
-		p = obj.Appendp(ctxt, p)
-		q = p
-		p.As = ABEQ
-		p.To.Type = obj.TYPE_BRANCH
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AADD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = obj.StackGuard
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ASUB
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R3
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.Reg = REG_R2
-	}
-
-	// BLS	do-morestack
-	bls := obj.Appendp(ctxt, p)
-	bls.As = ABLS
-	bls.To.Type = obj.TYPE_BRANCH
-
-	var last *obj.Prog
-	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
-	}
-
-	// Now we are at the end of the function, but logically
-	// we are still in function prologue. We need to fix the
-	// SP data and PCDATA.
-	spfix := obj.Appendp(ctxt, last)
-	spfix.As = obj.ANOP
-	spfix.Spadj = -framesize
-
-	pcdata := obj.Appendp(ctxt, spfix)
-	pcdata.Lineno = ctxt.Cursym.Text.Lineno
-	pcdata.Mode = ctxt.Cursym.Text.Mode
-	pcdata.As = obj.APCDATA
-	pcdata.From.Type = obj.TYPE_CONST
-	pcdata.From.Offset = obj.PCDATA_StackMapIndex
-	pcdata.To.Type = obj.TYPE_CONST
-	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry
-
-	// MOV	LR, R3
-	movlr := obj.Appendp(ctxt, pcdata)
-	movlr.As = AMOVD
-	movlr.From.Type = obj.TYPE_REG
-	movlr.From.Reg = REGLINK
-	movlr.To.Type = obj.TYPE_REG
-	movlr.To.Reg = REG_R3
-	if q != nil {
-		q.Pcond = movlr
-	}
-	bls.Pcond = movlr
-
-	debug := movlr
-	if false {
-		debug = obj.Appendp(ctxt, debug)
-		debug.As = AMOVD
-		debug.From.Type = obj.TYPE_CONST
-		debug.From.Offset = int64(framesize)
-		debug.To.Type = obj.TYPE_REG
-		debug.To.Reg = REGTMP
-	}
-
-	// BL	runtime.morestack(SB)
-	call := obj.Appendp(ctxt, debug)
-	call.As = ABL
-	call.To.Type = obj.TYPE_BRANCH
-	morestack := "runtime.morestack"
-	switch {
-	case ctxt.Cursym.CFunc():
-		morestack = "runtime.morestackc"
-	case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
-		morestack = "runtime.morestack_noctxt"
-	}
-	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
-
-	// B	start
-	jmp := obj.Appendp(ctxt, call)
-	jmp.As = AB
-	jmp.To.Type = obj.TYPE_BRANCH
-	jmp.Pcond = ctxt.Cursym.Text.Link
-	jmp.Spadj = +framesize
-
-	// placeholder for bls's jump target
-	// p = obj.Appendp(ctxt, p)
-	// p.As = obj.ANOP
-
-	return bls
-}
-
-func progedit(ctxt *obj.Link, p *obj.Prog) {
-	p.From.Class = 0
-	p.To.Class = 0
-
-	// $0 results in C_ZCON, which matches both C_REG and various
-	// C_xCON. However, the C_REG cases in asmout don't expect a
-	// constant, so they will use the register fields and assemble
-	// an R0. To prevent that, rewrite $0 as ZR.
-	if p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REGZERO
-	}
-	if p.To.Type == obj.TYPE_CONST && p.To.Offset == 0 {
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REGZERO
-	}
-
-	// Rewrite BR/BL to symbol as TYPE_BRANCH.
-	switch p.As {
-	case AB,
-		ABL,
-		obj.ARET,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
-		if p.To.Sym != nil {
-			p.To.Type = obj.TYPE_BRANCH
-		}
-		break
-	}
-
-	// Rewrite float constants to values stored in memory.
-	switch p.As {
-	case AFMOVS:
-		if p.From.Type == obj.TYPE_FCONST {
-			f32 := float32(p.From.Val.(float64))
-			i32 := math.Float32bits(f32)
-			if i32 == 0 {
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REGZERO
-				break
-			}
-			literal := fmt.Sprintf("$f32.%08x", i32)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 4
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-	case AFMOVD:
-		if p.From.Type == obj.TYPE_FCONST {
-			i64 := math.Float64bits(p.From.Val.(float64))
-			if i64 == 0 {
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REGZERO
-				break
-			}
-			literal := fmt.Sprintf("$f64.%016x", i64)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 8
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-		break
-	}
-
-	// Rewrite negative immediates as positive immediates with
-	// complementary instruction.
-	switch p.As {
-	case AADD, ASUB, ACMP, ACMN:
-		if p.From.Type == obj.TYPE_CONST && p.From.Offset < 0 && p.From.Offset != -1<<63 {
-			p.From.Offset = -p.From.Offset
-			p.As = complements[p.As]
-		}
-	case AADDW, ASUBW, ACMPW, ACMNW:
-		if p.From.Type == obj.TYPE_CONST && p.From.Offset < 0 && int32(p.From.Offset) != -1<<31 {
-			p.From.Offset = -p.From.Offset
-			p.As = complements[p.As]
-		}
-	}
-
-	// For 32-bit logical instruction with constant,
-	// rewrite the high 32-bit to be a repetition of
-	// the low 32-bit, so that the BITCON test can be
-	// shared for both 32-bit and 64-bit. 32-bit ops
-	// will zero the high 32-bit of the destination
-	// register anyway.
-	switch p.As {
-	case AANDW, AORRW, AEORW, AANDSW:
-		if p.From.Type == obj.TYPE_CONST {
-			v := p.From.Offset & 0xffffffff
-			p.From.Offset = v | v<<32
-		}
-	}
-
-	if ctxt.Flag_dynlink {
-		rewriteToUseGot(ctxt, p)
-	}
-}
-
-// Rewrite p, if necessary, to access global data via the global offset table.
-func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
-	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
-		//     ADUFFxxx $offset
-		// becomes
-		//     MOVD runtime.duffxxx@GOT, REGTMP
-		//     ADD $offset, REGTMP
-		//     CALL REGTMP
-		var sym *obj.LSym
-		if p.As == obj.ADUFFZERO {
-			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
-		} else {
-			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
-		}
-		offset := p.To.Offset
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		p.From.Sym = sym
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REGTMP
-		p.To.Name = obj.NAME_NONE
-		p.To.Offset = 0
-		p.To.Sym = nil
-		p1 := obj.Appendp(ctxt, p)
-		p1.As = AADD
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = offset
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = REGTMP
-		p2 := obj.Appendp(ctxt, p1)
-		p2.As = obj.ACALL
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = REGTMP
-	}
-
-	// We only care about global data: NAME_EXTERN means a global
-	// symbol in the Go sense, and p.Sym.Local is true for a few
-	// internally defined symbols.
-	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		// MOVD $sym, Rx becomes MOVD sym@GOT, Rx
-		// MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx
-		if p.As != AMOVD {
-			ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
-		}
-		if p.To.Type != obj.TYPE_REG {
-			ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		if p.From.Offset != 0 {
-			q := obj.Appendp(ctxt, p)
-			q.As = AADD
-			q.From.Type = obj.TYPE_CONST
-			q.From.Offset = p.From.Offset
-			q.To = p.To
-			p.From.Offset = 0
-		}
-	}
-	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	var source *obj.Addr
-	// MOVx sym, Ry becomes MOVD sym@GOT, REGTMP; MOVx (REGTMP), Ry
-	// MOVx Ry, sym becomes MOVD sym@GOT, REGTMP; MOVD Ry, (REGTMP)
-	// An addition may be inserted between the two MOVs if there is an offset.
-	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
-		}
-		source = &p.From
-	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-		source = &p.To
-	} else {
-		return
-	}
-	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
-		return
-	}
-	if source.Sym.Type == obj.STLSBSS {
-		return
-	}
-	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	p1 := obj.Appendp(ctxt, p)
-	p2 := obj.Appendp(ctxt, p1)
-	p1.As = AMOVD
-	p1.From.Type = obj.TYPE_MEM
-	p1.From.Sym = source.Sym
-	p1.From.Name = obj.NAME_GOTREF
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = REGTMP
-
-	p2.As = p.As
-	p2.From = p.From
-	p2.To = p.To
-	if p.From.Name == obj.NAME_EXTERN {
-		p2.From.Reg = REGTMP
-		p2.From.Name = obj.NAME_NONE
-		p2.From.Sym = nil
-	} else if p.To.Name == obj.NAME_EXTERN {
-		p2.To.Reg = REGTMP
-		p2.To.Name = obj.NAME_NONE
-		p2.To.Sym = nil
-	} else {
-		return
-	}
-	obj.Nopout(p)
-}
-
-func follow(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	firstp := ctxt.NewProg()
-	lastp := firstp
-	xfol(ctxt, s.Text, &lastp)
-	lastp.Link = nil
-	s.Text = firstp.Link
-}
-
-func relinv(a obj.As) obj.As {
-	switch a {
-	case ABEQ:
-		return ABNE
-	case ABNE:
-		return ABEQ
-	case ABCS:
-		return ABCC
-	case ABHS:
-		return ABLO
-	case ABCC:
-		return ABCS
-	case ABLO:
-		return ABHS
-	case ABMI:
-		return ABPL
-	case ABPL:
-		return ABMI
-	case ABVS:
-		return ABVC
-	case ABVC:
-		return ABVS
-	case ABHI:
-		return ABLS
-	case ABLS:
-		return ABHI
-	case ABGE:
-		return ABLT
-	case ABLT:
-		return ABGE
-	case ABGT:
-		return ABLE
-	case ABLE:
-		return ABGT
-	case ACBZ:
-		return ACBNZ
-	case ACBNZ:
-		return ACBZ
-	case ACBZW:
-		return ACBNZW
-	case ACBNZW:
-		return ACBZW
-	}
-
-	log.Fatalf("unknown relation: %s", Anames[a-obj.ABaseARM64])
-	return 0
-}
-
-func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
-	var q *obj.Prog
-	var r *obj.Prog
-	var i int
-
-loop:
-	if p == nil {
-		return
-	}
-	a := p.As
-	if a == AB {
-		q = p.Pcond
-		if q != nil {
-			p.Mark |= FOLL
-			p = q
-			if !(p.Mark&FOLL != 0) {
-				goto loop
-			}
-		}
-	}
-
-	if p.Mark&FOLL != 0 {
-		i = 0
-		q = p
-		for ; i < 4; i, q = i+1, q.Link {
-			if q == *last || q == nil {
-				break
-			}
-			a = q.As
-			if a == obj.ANOP {
-				i--
-				continue
-			}
-
-			if a == AB || a == obj.ARET || a == AERET {
-				goto copy
-			}
-			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
-				continue
-			}
-			if a != ABEQ && a != ABNE {
-				continue
-			}
-
-		copy:
-			for {
-				r = ctxt.NewProg()
-				*r = *p
-				if !(r.Mark&FOLL != 0) {
-					fmt.Printf("can't happen 1\n")
-				}
-				r.Mark |= FOLL
-				if p != q {
-					p = p.Link
-					(*last).Link = r
-					*last = r
-					continue
-				}
-
-				(*last).Link = r
-				*last = r
-				if a == AB || a == obj.ARET || a == AERET {
-					return
-				}
-				if a == ABNE {
-					r.As = ABEQ
-				} else {
-					r.As = ABNE
-				}
-				r.Pcond = p.Link
-				r.Link = p.Pcond
-				if !(r.Link.Mark&FOLL != 0) {
-					xfol(ctxt, r.Link, last)
-				}
-				if !(r.Pcond.Mark&FOLL != 0) {
-					fmt.Printf("can't happen 2\n")
-				}
-				return
-			}
-		}
-
-		a = AB
-		q = ctxt.NewProg()
-		q.As = a
-		q.Lineno = p.Lineno
-		q.To.Type = obj.TYPE_BRANCH
-		q.To.Offset = p.Pc
-		q.Pcond = p
-		p = q
-	}
-
-	p.Mark |= FOLL
-	(*last).Link = p
-	*last = p
-	if a == AB || a == obj.ARET || a == AERET {
-		return
-	}
-	if p.Pcond != nil {
-		if a != ABL && p.Link != nil {
-			q = obj.Brchain(ctxt, p.Link)
-			if a != obj.ATEXT {
-				if q != nil && (q.Mark&FOLL != 0) {
-					p.As = relinv(a)
-					p.Link = p.Pcond
-					p.Pcond = q
-				}
-			}
-
-			xfol(ctxt, p.Link, last)
-			q = obj.Brchain(ctxt, p.Pcond)
-			if q == nil {
-				q = p.Pcond
-			}
-			if q.Mark&FOLL != 0 {
-				p.Pcond = q
-				return
-			}
-
-			p = q
-			goto loop
-		}
-	}
-
-	p = p.Link
-	goto loop
-}
-
-func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
-	ctxt.Cursym = cursym
-
-	if cursym.Text == nil || cursym.Text.Link == nil {
-		return
-	}
-
-	p := cursym.Text
-	textstksiz := p.To.Offset
-	aoffset := int32(textstksiz)
-
-	cursym.Args = p.To.Val.(int32)
-	cursym.Locals = int32(textstksiz)
-
-	/*
-	 * find leaf subroutines
-	 * strip NOPs
-	 * expand RET
-	 */
-	q := (*obj.Prog)(nil)
-	var q1 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		switch p.As {
-		case obj.ATEXT:
-			p.Mark |= LEAF
-
-		case obj.ARET:
-			break
-
-		case obj.ANOP:
-			q1 = p.Link
-			q.Link = q1 /* q is non-nop */
-			q1.Mark |= p.Mark
-			continue
-
-		case ABL,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			cursym.Text.Mark &^= LEAF
-			fallthrough
-
-		case ACBNZ,
-			ACBZ,
-			ACBNZW,
-			ACBZW,
-			ATBZ,
-			ATBNZ,
-			AB,
-			ABEQ,
-			ABNE,
-			ABCS,
-			ABHS,
-			ABCC,
-			ABLO,
-			ABMI,
-			ABPL,
-			ABVS,
-			ABVC,
-			ABHI,
-			ABLS,
-			ABGE,
-			ABLT,
-			ABGT,
-			ABLE,
-			AADR, /* strange */
-			AADRP:
-			q1 = p.Pcond
-
-			if q1 != nil {
-				for q1.As == obj.ANOP {
-					q1 = q1.Link
-					p.Pcond = q1
-				}
-			}
-
-			break
-		}
-
-		q = p
-	}
-
-	var q2 *obj.Prog
-	var retjmp *obj.LSym
-	for p := cursym.Text; p != nil; p = p.Link {
-		o := p.As
-		switch o {
-		case obj.ATEXT:
-			cursym.Text = p
-			if textstksiz < 0 {
-				ctxt.Autosize = 0
-			} else {
-				ctxt.Autosize = int32(textstksiz + 8)
-			}
-			if (cursym.Text.Mark&LEAF != 0) && ctxt.Autosize <= 8 {
-				ctxt.Autosize = 0
-			} else if ctxt.Autosize&(16-1) != 0 {
-				// The frame includes an LR.
-				// If the frame size is 8, it's only an LR,
-				// so there's no potential for breaking references to
-				// local variables by growing the frame size,
-				// because there are no local variables.
-				// But otherwise, if there is a non-empty locals section,
-				// the author of the code is responsible for making sure
-				// that the frame size is 8 mod 16.
-				if ctxt.Autosize == 8 {
-					ctxt.Autosize += 8
-					cursym.Locals += 8
-				} else {
-					ctxt.Diag("%v: unaligned frame size %d - must be 8 mod 16 (or 0)", p, ctxt.Autosize-8)
-				}
-			}
-			p.To.Offset = int64(ctxt.Autosize) - 8
-			if ctxt.Autosize == 0 && !(cursym.Text.Mark&LEAF != 0) {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("save suppressed in: %s\n", cursym.Text.From.Sym.Name)
-				}
-				cursym.Text.Mark |= LEAF
-			}
-
-			if !(p.From3.Offset&obj.NOSPLIT != 0) {
-				p = stacksplit(ctxt, p, ctxt.Autosize) // emit split check
-			}
-
-			aoffset = ctxt.Autosize
-			if aoffset > 0xF0 {
-				aoffset = 0xF0
-			}
-			if cursym.Text.Mark&LEAF != 0 {
-				cursym.Set(obj.AttrLeaf, true)
-				if ctxt.Autosize == 0 {
-					break
-				}
-			}
-
-			// Frame is non-empty. Make sure to save link register, even if
-			// it is a leaf function, so that traceback works.
-			q = p
-			if ctxt.Autosize > aoffset {
-				// Frame size is too large for a MOVD.W instruction.
-				// Store link register before decrementing SP, so if a signal comes
-				// during the execution of the function prologue, the traceback
-				// code will not see a half-updated stack frame.
-				q = obj.Appendp(ctxt, q)
-				q.Lineno = p.Lineno
-				q.As = ASUB
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(ctxt.Autosize)
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGTMP
-
-				q = obj.Appendp(ctxt, q)
-				q.Lineno = p.Lineno
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REGLINK
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REGTMP
-
-				q1 = obj.Appendp(ctxt, q)
-				q1.Lineno = p.Lineno
-				q1.As = AMOVD
-				q1.From.Type = obj.TYPE_REG
-				q1.From.Reg = REGTMP
-				q1.To.Type = obj.TYPE_REG
-				q1.To.Reg = REGSP
-				q1.Spadj = ctxt.Autosize
-			} else {
-				// small frame, update SP and save LR in a single MOVD.W instruction
-				q1 = obj.Appendp(ctxt, q)
-				q1.As = AMOVD
-				q1.Lineno = p.Lineno
-				q1.From.Type = obj.TYPE_REG
-				q1.From.Reg = REGLINK
-				q1.To.Type = obj.TYPE_MEM
-				q1.Scond = C_XPRE
-				q1.To.Offset = int64(-aoffset)
-				q1.To.Reg = REGSP
-				q1.Spadj = aoffset
-			}
-
-			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
-				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
-				//
-				//	MOV g_panic(g), R1
-				//	CMP ZR, R1
-				//	BEQ end
-				//	MOV panic_argp(R1), R2
-				//	ADD $(autosize+8), RSP, R3
-				//	CMP R2, R3
-				//	BNE end
-				//	ADD $8, RSP, R4
-				//	MOVD R4, panic_argp(R1)
-				// end:
-				//	NOP
-				//
-				// The NOP is needed to give the jumps somewhere to land.
-				// It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes.
-				q = q1
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REGG
-				q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R1
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ACMP
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REGZERO
-				q.Reg = REG_R1
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABEQ
-				q.To.Type = obj.TYPE_BRANCH
-				q1 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REG_R1
-				q.From.Offset = 0 // Panic.argp
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R2
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(ctxt.Autosize) + 8
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R3
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ACMP
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R2
-				q.Reg = REG_R3
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABNE
-				q.To.Type = obj.TYPE_BRANCH
-				q2 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = 8
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R4
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R4
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REG_R1
-				q.To.Offset = 0 // Panic.argp
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = obj.ANOP
-				q1.Pcond = q
-				q2.Pcond = q
-			}
-
-		case obj.ARET:
-			nocache(p)
-			if p.From.Type == obj.TYPE_CONST {
-				ctxt.Diag("using BECOME (%v) is not supported!", p)
-				break
-			}
-
-			retjmp = p.To.Sym
-			p.To = obj.Addr{}
-			if cursym.Text.Mark&LEAF != 0 {
-				if ctxt.Autosize != 0 {
-					p.As = AADD
-					p.From.Type = obj.TYPE_CONST
-					p.From.Offset = int64(ctxt.Autosize)
-					p.To.Type = obj.TYPE_REG
-					p.To.Reg = REGSP
-					p.Spadj = -ctxt.Autosize
-				}
-			} else {
-				/* want write-back pre-indexed SP+autosize -> SP, loading REGLINK*/
-				aoffset = ctxt.Autosize
-
-				if aoffset > 0xF0 {
-					aoffset = 0xF0
-				}
-				p.As = AMOVD
-				p.From.Type = obj.TYPE_MEM
-				p.Scond = C_XPOST
-				p.From.Offset = int64(aoffset)
-				p.From.Reg = REGSP
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REGLINK
-				p.Spadj = -aoffset
-				if ctxt.Autosize > aoffset {
-					q = ctxt.NewProg()
-					q.As = AADD
-					q.From.Type = obj.TYPE_CONST
-					q.From.Offset = int64(ctxt.Autosize) - int64(aoffset)
-					q.To.Type = obj.TYPE_REG
-					q.To.Reg = REGSP
-					q.Link = p.Link
-					q.Spadj = int32(-q.From.Offset)
-					q.Lineno = p.Lineno
-					p.Link = q
-					p = q
-				}
-			}
-
-			if p.As != obj.ARET {
-				q = ctxt.NewProg()
-				q.Lineno = p.Lineno
-				q.Link = p.Link
-				p.Link = q
-				p = q
-			}
-
-			if retjmp != nil { // retjmp
-				p.As = AB
-				p.To.Type = obj.TYPE_BRANCH
-				p.To.Sym = retjmp
-				p.Spadj = +ctxt.Autosize
-				break
-			}
-
-			p.As = obj.ARET
-			p.To.Type = obj.TYPE_MEM
-			p.To.Offset = 0
-			p.To.Reg = REGLINK
-			p.Spadj = +ctxt.Autosize
-
-		case AADD, ASUB:
-			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
-				if p.As == AADD {
-					p.Spadj = int32(-p.From.Offset)
-				} else {
-					p.Spadj = int32(+p.From.Offset)
-				}
-			}
-			break
-		}
-	}
-}
-
-func nocache(p *obj.Prog) {
-	p.Optab = 0
-	p.From.Class = 0
-	p.To.Class = 0
-}
-
-var unaryDst = map[obj.As]bool{
-	AWORD:  true,
-	ADWORD: true,
-	ABL:    true,
-	AB:     true,
-	ASVC:   true,
-}
-
-var Linkarm64 = obj.LinkArch{
-	Arch:       sys.ArchARM64,
-	Preprocess: preprocess,
-	Assemble:   span7,
-	Follow:     follow,
-	Progedit:   progedit,
-	UnaryDst:   unaryDst,
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/data.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/data.go
deleted file mode 100644
index e82b718..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/data.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/data.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/data.go:1
-// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package obj
-
-import (
-	"log"
-	"math"
-)
-
-// Grow increases the length of s.P to lsiz.
-func (s *LSym) Grow(lsiz int64) {
-	siz := int(lsiz)
-	if int64(siz) != lsiz {
-		log.Fatalf("LSym.Grow size %d too long", lsiz)
-	}
-	if len(s.P) >= siz {
-		return
-	}
-	// TODO(dfc) append cap-len at once, rather than
-	// one byte at a time.
-	for cap(s.P) < siz {
-		s.P = append(s.P[:cap(s.P)], 0)
-	}
-	s.P = s.P[:siz]
-}
-
-// GrowCap increases the capacity of s.P to c.
-func (s *LSym) GrowCap(c int64) {
-	if int64(cap(s.P)) >= c {
-		return
-	}
-	if s.P == nil {
-		s.P = make([]byte, 0, c)
-		return
-	}
-	b := make([]byte, len(s.P), c)
-	copy(b, s.P)
-	s.P = b
-}
-
-// prepwrite prepares to write data of size siz into s at offset off.
-func (s *LSym) prepwrite(ctxt *Link, off int64, siz int) {
-	if off < 0 || siz < 0 || off >= 1<<30 {
-		log.Fatalf("prepwrite: bad off=%d siz=%d", off, siz)
-	}
-	if s.Type == SBSS || s.Type == STLSBSS {
-		ctxt.Diag("cannot supply data for BSS var")
-	}
-	l := off + int64(siz)
-	s.Grow(l)
-	if l > s.Size {
-		s.Size = l
-	}
-}
-
-// WriteFloat32 writes f into s at offset off.
-func (s *LSym) WriteFloat32(ctxt *Link, off int64, f float32) {
-	s.prepwrite(ctxt, off, 4)
-	ctxt.Arch.ByteOrder.PutUint32(s.P[off:], math.Float32bits(f))
-}
-
-// WriteFloat64 writes f into s at offset off.
-func (s *LSym) WriteFloat64(ctxt *Link, off int64, f float64) {
-	s.prepwrite(ctxt, off, 8)
-	ctxt.Arch.ByteOrder.PutUint64(s.P[off:], math.Float64bits(f))
-}
-
-// WriteInt writes an integer i of size siz into s at offset off.
-func (s *LSym) WriteInt(ctxt *Link, off int64, siz int, i int64) {
-	s.prepwrite(ctxt, off, siz)
-	switch siz {
-	default:
-		ctxt.Diag("WriteInt: bad integer size: %d", siz)
-	case 1:
-		s.P[off] = byte(i)
-	case 2:
-		ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(i))
-	case 4:
-		ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(i))
-	case 8:
-		ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(i))
-	}
-}
-
-// WriteAddr writes an address of size siz into s at offset off.
-// rsym and roff specify the relocation for the address.
-func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) {
-	if siz != ctxt.Arch.PtrSize {
-		ctxt.Diag("WriteAddr: bad address size %d in %s", siz, s.Name)
-	}
-	s.prepwrite(ctxt, off, siz)
-	r := Addrel(s)
-	r.Off = int32(off)
-	if int64(r.Off) != off {
-		ctxt.Diag("WriteAddr: off overflow %d in %s", off, s.Name)
-	}
-	r.Siz = uint8(siz)
-	r.Sym = rsym
-	r.Type = R_ADDR
-	r.Add = roff
-}
-
-// WriteOff writes a 4 byte offset to rsym+roff into s at offset off.
-// After linking the 4 bytes stored at s+off will be
-// rsym+roff-(start of section that s is in).
-func (s *LSym) WriteOff(ctxt *Link, off int64, rsym *LSym, roff int64) {
-	s.prepwrite(ctxt, off, 4)
-	r := Addrel(s)
-	r.Off = int32(off)
-	if int64(r.Off) != off {
-		ctxt.Diag("WriteOff: off overflow %d in %s", off, s.Name)
-	}
-	r.Siz = 4
-	r.Sym = rsym
-	r.Type = R_ADDROFF
-	r.Add = roff
-}
-
-// WriteWeakOff writes a weak 4 byte offset to rsym+roff into s at offset off.
-// After linking the 4 bytes stored at s+off will be
-// rsym+roff-(start of section that s is in).
-func (s *LSym) WriteWeakOff(ctxt *Link, off int64, rsym *LSym, roff int64) {
-	s.prepwrite(ctxt, off, 4)
-	r := Addrel(s)
-	r.Off = int32(off)
-	if int64(r.Off) != off {
-		ctxt.Diag("WriteOff: off overflow %d in %s", off, s.Name)
-	}
-	r.Siz = 4
-	r.Sym = rsym
-	r.Type = R_WEAKADDROFF
-	r.Add = roff
-}
-
-// WriteString writes a string of size siz into s at offset off.
-func (s *LSym) WriteString(ctxt *Link, off int64, siz int, str string) {
-	if siz < len(str) {
-		ctxt.Diag("WriteString: bad string size: %d < %d", siz, len(str))
-	}
-	s.prepwrite(ctxt, off, siz)
-	copy(s.P[off:off+int64(siz)], str)
-}
-
-// WriteBytes writes a slice of bytes into s at offset off.
-func (s *LSym) WriteBytes(ctxt *Link, off int64, b []byte) int64 {
-	s.prepwrite(ctxt, off, len(b))
-	copy(s.P[off:], b)
-	return off + int64(len(b))
-}
-
-func Addrel(s *LSym) *Reloc {
-	s.R = append(s.R, Reloc{})
-	return &s.R[len(s.R)-1]
-}
-
-func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
-	if s.Type == 0 {
-		s.Type = SDATA
-	}
-	if s.Size < off+wid {
-		s.Size = off + wid
-		s.Grow(s.Size)
-	}
-
-	switch wid {
-	case 1:
-		s.P[off] = uint8(v)
-	case 2:
-		ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(v))
-	case 4:
-		ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v))
-	case 8:
-		ctxt.Arch.ByteOrder.PutUint64(s.P[off:], v)
-	}
-
-	return off + wid
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/flag.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/flag.go
deleted file mode 100644
index 1ce514b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/flag.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/flag.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/flag.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"strconv"
-)
-
-func Flagfn2(string, string, func(string, string)) { panic("flag") }
-
-func Flagcount(name, usage string, val *int) {
-	flag.Var((*count)(val), name, usage)
-}
-
-func Flagint32(name, usage string, val *int32) {
-	flag.Var((*int32Value)(val), name, usage)
-}
-
-func Flagint64(name, usage string, val *int64) {
-	flag.Int64Var(val, name, *val, usage)
-}
-
-func Flagstr(name, usage string, val *string) {
-	flag.StringVar(val, name, *val, usage)
-}
-
-func Flagfn0(name, usage string, f func()) {
-	flag.Var(fn0(f), name, usage)
-}
-
-func Flagfn1(name, usage string, f func(string)) {
-	flag.Var(fn1(f), name, usage)
-}
-
-func Flagprint(fd int) {
-	if fd == 1 {
-		flag.CommandLine.SetOutput(os.Stdout)
-	}
-	flag.PrintDefaults()
-}
-
-func Flagparse(usage func()) {
-	flag.Usage = usage
-	flag.Parse()
-}
-
-// count is a flag.Value that is like a flag.Bool and a flag.Int.
-// If used as -name, it increments the count, but -name=x sets the count.
-// Used for verbose flag -v.
-type count int
-
-func (c *count) String() string {
-	return fmt.Sprint(int(*c))
-}
-
-func (c *count) Set(s string) error {
-	switch s {
-	case "true":
-		*c++
-	case "false":
-		*c = 0
-	default:
-		n, err := strconv.Atoi(s)
-		if err != nil {
-			return fmt.Errorf("invalid count %q", s)
-		}
-		*c = count(n)
-	}
-	return nil
-}
-
-func (c *count) IsBoolFlag() bool {
-	return true
-}
-
-type int32Value int32
-
-func (i *int32Value) Set(s string) error {
-	v, err := strconv.ParseInt(s, 0, 64)
-	*i = int32Value(v)
-	return err
-}
-
-func (i *int32Value) Get() interface{} { return int32(*i) }
-
-func (i *int32Value) String() string { return fmt.Sprint(*i) }
-
-type fn0 func()
-
-func (f fn0) Set(s string) error {
-	f()
-	return nil
-}
-
-func (f fn0) Get() interface{} { return nil }
-
-func (f fn0) String() string { return "" }
-
-func (f fn0) IsBoolFlag() bool {
-	return true
-}
-
-type fn1 func(string)
-
-func (f fn1) Set(s string) error {
-	f(s)
-	return nil
-}
-
-func (f fn1) String() string { return "" }
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/funcdata.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/funcdata.go
deleted file mode 100644
index 2ce1bf1..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/funcdata.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/funcdata.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/funcdata.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-// This file defines the IDs for PCDATA and FUNCDATA instructions
-// in Go binaries. It is included by assembly sources, so it must
-// be written using #defines.
-//
-// The Go compiler also #includes this file, for now.
-//
-// symtab.go also contains a copy of these constants.
-
-// Pseudo-assembly statements.
-
-// GO_ARGS, GO_RESULTS_INITIALIZED, and NO_LOCAL_POINTERS are macros
-// that communicate to the runtime information about the location and liveness
-// of pointers in an assembly function's arguments, results, and stack frame.
-// This communication is only required in assembly functions that make calls
-// to other functions that might be preempted or grow the stack.
-// NOSPLIT functions that make no calls do not need to use these macros.
-
-// GO_ARGS indicates that the Go prototype for this assembly function
-// defines the pointer map for the function's arguments.
-// GO_ARGS should be the first instruction in a function that uses it.
-// It can be omitted if there are no arguments at all.
-// GO_ARGS is inserted implicitly by the linker for any function
-// that also has a Go prototype and therefore is usually not necessary
-// to write explicitly.
-
-// GO_RESULTS_INITIALIZED indicates that the assembly function
-// has initialized the stack space for its results and that those results
-// should be considered live for the remainder of the function.
-
-// NO_LOCAL_POINTERS indicates that the assembly function stores
-// no pointers to heap objects in its local stack variables.
-
-// ArgsSizeUnknown is set in Func.argsize to mark all functions
-// whose argument size is unknown (C vararg functions, and
-// assembly code without an explicit specification).
-// This value is generated by the compiler, assembler, or linker.
-const (
-	PCDATA_StackMapIndex       = 0
-	FUNCDATA_ArgsPointerMaps   = 0
-	FUNCDATA_LocalsPointerMaps = 1
-	ArgsSizeUnknown            = -0x80000000
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/go.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/go.go
deleted file mode 100644
index d30f196..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/go.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/go.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/go.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import (
-	"fmt"
-	"os"
-	"strings"
-)
-
-// go-specific code shared across loaders (5l, 6l, 8l).
-
-var (
-	framepointer_enabled     int
-	Fieldtrack_enabled       int
-	Preemptibleloops_enabled int
-)
-
-// Toolchain experiments.
-// These are controlled by the GOEXPERIMENT environment
-// variable recorded when the toolchain is built.
-// This list is also known to cmd/gc.
-var exper = []struct {
-	name string
-	val  *int
-}{
-	{"fieldtrack", &Fieldtrack_enabled},
-	{"framepointer", &framepointer_enabled},
-	{"preemptibleloops", &Preemptibleloops_enabled},
-}
-
-func addexp(s string) {
-	// Could do general integer parsing here, but the runtime copy doesn't yet.
-	v := 1
-	name := s
-	if len(name) > 2 && name[:2] == "no" {
-		v = 0
-		name = name[2:]
-	}
-	for i := 0; i < len(exper); i++ {
-		if exper[i].name == name {
-			if exper[i].val != nil {
-				*exper[i].val = v
-			}
-			return
-		}
-	}
-
-	fmt.Printf("unknown experiment %s\n", s)
-	os.Exit(2)
-}
-
-func init() {
-	framepointer_enabled = 1 // default
-	for _, f := range strings.Split(goexperiment, ",") {
-		if f != "" {
-			addexp(f)
-		}
-	}
-}
-
-func Framepointer_enabled(goos, goarch string) bool {
-	return framepointer_enabled != 0 && goarch == "amd64" && goos != "nacl"
-}
-
-func Nopout(p *Prog) {
-	p.As = ANOP
-	p.Scond = 0
-	p.From = Addr{}
-	p.From3 = nil
-	p.Reg = 0
-	p.To = Addr{}
-}
-
-func Expstring() string {
-	buf := "X"
-	for i := range exper {
-		if *exper[i].val != 0 {
-			buf += "," + exper[i].name
-		}
-	}
-	if buf == "X" {
-		buf += ",none"
-	}
-	return "X:" + buf[2:]
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ld.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ld.go
deleted file mode 100644
index 7cc0390..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ld.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ld.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ld.go:1
-// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package obj
-
-/*
- * add library to library list.
- *	srcref: src file referring to package
- *	objref: object file referring to package
- *	file: object file, e.g., /home/rsc/go/pkg/container/vector.a
- *	pkg: package import path, e.g. container/vector
- */
-
-const (
-	LOG = 5
-)
-
-func mkfwd(sym *LSym) {
-	var dwn [LOG]int32
-	var cnt [LOG]int32
-	var lst [LOG]*Prog
-
-	for i := 0; i < LOG; i++ {
-		if i == 0 {
-			cnt[i] = 1
-		} else {
-			cnt[i] = LOG * cnt[i-1]
-		}
-		dwn[i] = 1
-		lst[i] = nil
-	}
-
-	i := 0
-	for p := sym.Text; p != nil && p.Link != nil; p = p.Link {
-		i--
-		if i < 0 {
-			i = LOG - 1
-		}
-		p.Forwd = nil
-		dwn[i]--
-		if dwn[i] <= 0 {
-			dwn[i] = cnt[i]
-			if lst[i] != nil {
-				lst[i].Forwd = p
-			}
-			lst[i] = p
-		}
-	}
-}
-
-func Copyp(ctxt *Link, q *Prog) *Prog {
-	p := ctxt.NewProg()
-	*p = *q
-	return p
-}
-
-func Appendp(ctxt *Link, q *Prog) *Prog {
-	p := ctxt.NewProg()
-	p.Link = q.Link
-	q.Link = p
-	p.Lineno = q.Lineno
-	p.Mode = q.Mode
-	return p
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/line.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/line.go
deleted file mode 100644
index 62bb0fa..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/line.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/line.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/line.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import (
-	"fmt"
-	"path/filepath"
-	"sort"
-	"strings"
-)
-
-// A LineHist records the history of the file input stack, which maps the virtual line number,
-// an incrementing count of lines processed in any input file and typically named lineno,
-// to a stack of file:line pairs showing the path of inclusions that led to that position.
-// The first line directive (//line in Go, #line in assembly) is treated as pushing
-// a new entry on the stack, so that errors can report both the actual and translated
-// line number.
-//
-// In typical use, the virtual lineno begins at 1, and file line numbers also begin at 1,
-// but the only requirements placed upon the numbers by this code are:
-//	- calls to Push, Update, and Pop must be monotonically increasing in lineno
-//	- except as specified by those methods, virtual and file line number increase
-//	  together, so that given (only) calls Push(10, "x.go", 1) and Pop(15),
-//	  virtual line 12 corresponds to x.go line 3.
-type LineHist struct {
-	Top               *LineStack  // current top of stack
-	Ranges            []LineRange // ranges for lookup
-	Dir               string      // directory to qualify relative paths
-	TrimPathPrefix    string      // remove leading TrimPath from recorded file names
-	PrintFilenameOnly bool        // ignore path when pretty-printing a line; internal use only
-	GOROOT            string      // current GOROOT
-}
-
-// A LineStack is an entry in the recorded line history.
-// Although the history at any given line number is a stack,
-// the record for all lines processed forms a tree, with common
-// stack prefixes acting as parents.
-type LineStack struct {
-	Parent    *LineStack // parent in inclusion stack
-	Lineno    int        // virtual line number where this entry takes effect
-	File      string     // file name used to open source file, for error messages
-	AbsFile   string     // absolute file name, for pcln tables
-	FileLine  int        // line number in file at Lineno
-	Directive bool
-	Sym       *LSym // for linkgetline - TODO(rsc): remove
-}
-
-func (stk *LineStack) fileLineAt(lineno int) int {
-	return stk.FileLine + lineno - stk.Lineno
-}
-
-// The span of valid linenos in the recorded line history can be broken
-// into a set of ranges, each with a particular stack.
-// A LineRange records one such range.
-type LineRange struct {
-	Start int        // starting lineno
-	Stack *LineStack // top of stack for this range
-}
-
-// startRange starts a new range with the given top of stack.
-func (h *LineHist) startRange(lineno int, top *LineStack) {
-	h.Top = top
-	h.Ranges = append(h.Ranges, LineRange{top.Lineno, top})
-}
-
-// setFile sets stk.File = file and also derives stk.AbsFile.
-func (h *LineHist) setFile(stk *LineStack, file string) {
-	// Note: The exclusion of stk.Directive may be wrong but matches what we've done before.
-	// The check for < avoids putting a path prefix on "<autogenerated>".
-	abs := file
-	if h.Dir != "" && !filepath.IsAbs(file) && !strings.HasPrefix(file, "<") && !stk.Directive {
-		abs = filepath.Join(h.Dir, file)
-	}
-
-	// Remove leading TrimPathPrefix, or else rewrite $GOROOT to literal $GOROOT.
-	if h.TrimPathPrefix != "" && hasPathPrefix(abs, h.TrimPathPrefix) {
-		if abs == h.TrimPathPrefix {
-			abs = ""
-		} else {
-			abs = abs[len(h.TrimPathPrefix)+1:]
-		}
-	} else if hasPathPrefix(abs, h.GOROOT) {
-		abs = "$GOROOT" + abs[len(h.GOROOT):]
-	}
-	if abs == "" {
-		abs = "??"
-	}
-	abs = filepath.Clean(abs)
-	stk.AbsFile = abs
-
-	if file == "" {
-		file = "??"
-	}
-	stk.File = file
-}
-
-// Does s have t as a path prefix?
-// That is, does s == t or does s begin with t followed by a slash?
-// For portability, we allow ASCII case folding, so that hasPathPrefix("a/b/c", "A/B") is true.
-// Similarly, we allow slash folding, so that hasPathPrefix("a/b/c", "a\\b") is true.
-// We do not allow full Unicode case folding, for fear of causing more confusion
-// or harm than good. (For an example of the kinds of things that can go wrong,
-// see http://article.gmane.org/gmane.linux.kernel/1853266.)
-func hasPathPrefix(s string, t string) bool {
-	if len(t) > len(s) {
-		return false
-	}
-	var i int
-	for i = 0; i < len(t); i++ {
-		cs := int(s[i])
-		ct := int(t[i])
-		if 'A' <= cs && cs <= 'Z' {
-			cs += 'a' - 'A'
-		}
-		if 'A' <= ct && ct <= 'Z' {
-			ct += 'a' - 'A'
-		}
-		if cs == '\\' {
-			cs = '/'
-		}
-		if ct == '\\' {
-			ct = '/'
-		}
-		if cs != ct {
-			return false
-		}
-	}
-	return i >= len(s) || s[i] == '/' || s[i] == '\\'
-}
-
-// Push records that at that lineno a new file with the given name was pushed onto the input stack.
-func (h *LineHist) Push(lineno int, file string) {
-	stk := &LineStack{
-		Parent:   h.Top,
-		Lineno:   lineno,
-		FileLine: 1,
-	}
-	h.setFile(stk, file)
-	h.startRange(lineno, stk)
-}
-
-// Pop records that at lineno the current file was popped from the input stack.
-func (h *LineHist) Pop(lineno int) {
-	top := h.Top
-	if top == nil {
-		return
-	}
-	if top.Directive && top.Parent != nil { // pop #line level too
-		top = top.Parent
-	}
-	next := top.Parent
-	if next == nil {
-		h.Top = nil
-		h.Ranges = append(h.Ranges, LineRange{lineno, nil})
-		return
-	}
-
-	// Popping included file. Update parent offset to account for
-	// the virtual line number range taken by the included file.
-	// Cannot modify the LineStack directly, or else lookups
-	// for the earlier line numbers will get the wrong answers,
-	// so make a new one.
-	stk := new(LineStack)
-	*stk = *next
-	stk.Lineno = lineno
-	stk.FileLine = next.fileLineAt(top.Lineno)
-	h.startRange(lineno, stk)
-}
-
-// Update records that at lineno the file name and line number were changed using
-// a line directive (//line in Go, #line in assembly).
-func (h *LineHist) Update(lineno int, file string, line int) {
-	top := h.Top
-	if top == nil {
-		return // shouldn't happen
-	}
-	var stk *LineStack
-	if top.Directive {
-		// Update existing entry, except make copy to avoid changing earlier history.
-		stk = new(LineStack)
-		*stk = *top
-	} else {
-		// Push new entry.
-		stk = &LineStack{
-			Parent:    top,
-			Directive: true,
-		}
-	}
-	stk.Lineno = lineno
-	if stk.File != file {
-		h.setFile(stk, file) // only retain string if needed
-	}
-	stk.FileLine = line
-	h.startRange(lineno, stk)
-}
-
-// AddImport adds a package to the list of imported packages.
-func (ctxt *Link) AddImport(pkg string) {
-	ctxt.Imports = append(ctxt.Imports, pkg)
-}
-
-// At returns the input stack in effect at lineno.
-func (h *LineHist) At(lineno int) *LineStack {
-	i := sort.Search(len(h.Ranges), func(i int) bool {
-		return h.Ranges[i].Start > lineno
-	})
-	// Found first entry beyond lineno.
-	if i == 0 {
-		return nil
-	}
-	return h.Ranges[i-1].Stack
-}
-
-// LineString returns a string giving the file and line number
-// corresponding to lineno, for use in error messages.
-func (h *LineHist) LineString(lineno int) string {
-	stk := h.At(lineno)
-	if stk == nil {
-		return "<unknown line number>"
-	}
-
-	filename := stk.File
-	if h.PrintFilenameOnly {
-		filename = filepath.Base(filename)
-	}
-	text := fmt.Sprintf("%s:%d", filename, stk.fileLineAt(lineno))
-	if stk.Directive && stk.Parent != nil {
-		stk = stk.Parent
-		filename = stk.File
-		if h.PrintFilenameOnly {
-			filename = filepath.Base(filename)
-		}
-		text += fmt.Sprintf("[%s:%d]", filename, stk.fileLineAt(lineno))
-	}
-	const showFullStack = false // was used by old C compilers
-	if showFullStack {
-		for stk.Parent != nil {
-			lineno = stk.Lineno - 1
-			stk = stk.Parent
-			text += fmt.Sprintf(" %s:%d", filename, stk.fileLineAt(lineno))
-			if stk.Directive && stk.Parent != nil {
-				stk = stk.Parent
-				text += fmt.Sprintf("[%s:%d]", filename, stk.fileLineAt(lineno))
-			}
-		}
-	}
-	return text
-}
-
-// FileLine returns the file name and line number
-// at the top of the stack for the given lineno.
-func (h *LineHist) FileLine(lineno int) (file string, line int) {
-	stk := h.At(lineno)
-	if stk == nil {
-		return "??", 0
-	}
-	return stk.File, stk.fileLineAt(lineno)
-}
-
-// AbsFileLine returns the absolute file name and line number
-// at the top of the stack for the given lineno.
-func (h *LineHist) AbsFileLine(lineno int) (file string, line int) {
-	stk := h.At(lineno)
-	if stk == nil {
-		return "??", 0
-	}
-	return stk.AbsFile, stk.fileLineAt(lineno)
-}
-
-// This is a simplified copy of linklinefmt above.
-// It doesn't allow printing the full stack, and it returns the file name and line number separately.
-// TODO: Unify with linklinefmt somehow.
-func linkgetline(ctxt *Link, lineno int32) (f *LSym, l int32) {
-	stk := ctxt.LineHist.At(int(lineno))
-	if stk == nil || stk.AbsFile == "" {
-		return Linklookup(ctxt, "??", HistVersion), 0
-	}
-	if stk.Sym == nil {
-		stk.Sym = Linklookup(ctxt, stk.AbsFile, HistVersion)
-	}
-	return stk.Sym, int32(stk.fileLineAt(int(lineno)))
-}
-
-func Linkprfile(ctxt *Link, line int) {
-	fmt.Printf("%s ", ctxt.LineHist.LineString(line))
-}
-
-func fieldtrack(ctxt *Link, cursym *LSym) {
-	p := cursym.Text
-	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
-		return
-	}
-	ctxt.Cursym = cursym
-
-	for ; p != nil; p = p.Link {
-		if p.As == AUSEFIELD {
-			r := Addrel(ctxt.Cursym)
-			r.Off = 0
-			r.Siz = 0
-			r.Sym = p.From.Sym
-			r.Type = R_USEFIELD
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/line_test.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/line_test.go
deleted file mode 100644
index 4d0f634..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/line_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/line_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/line_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import (
-	"fmt"
-	"testing"
-)
-
-func TestLineHist(t *testing.T) {
-	ctxt := new(Link)
-	ctxt.Hash = make(map[SymVer]*LSym)
-
-	ctxt.LineHist.Push(1, "a.c")
-	ctxt.LineHist.Push(3, "a.h")
-	ctxt.LineHist.Pop(5)
-	ctxt.LineHist.Update(7, "linedir", 2)
-	ctxt.LineHist.Pop(9)
-	ctxt.LineHist.Push(11, "b.c")
-	ctxt.LineHist.Pop(13)
-
-	var expect = []string{
-		0:  "??:0",
-		1:  "a.c:1",
-		2:  "a.c:2",
-		3:  "a.h:1",
-		4:  "a.h:2",
-		5:  "a.c:3",
-		6:  "a.c:4",
-		7:  "linedir:2",
-		8:  "linedir:3",
-		9:  "??:0",
-		10: "??:0",
-		11: "b.c:1",
-		12: "b.c:2",
-		13: "??:0",
-		14: "??:0",
-	}
-
-	for i, want := range expect {
-		f, l := linkgetline(ctxt, int32(i))
-		have := fmt.Sprintf("%s:%d", f.Name, l)
-		if have != want {
-			t.Errorf("linkgetline(%d) = %q, want %q", i, have, want)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/link.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/link.go
deleted file mode 100644
index ed0b358..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/link.go
+++ /dev/null
@@ -1,981 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/link.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/link.go:1
-// Derived from Inferno utils/6l/l.h and related files.
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package obj
-
-import (
-	"bufio"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-)
-
-// An Addr is an argument to an instruction.
-// The general forms and their encodings are:
-//
-//	sym±offset(symkind)(reg)(index*scale)
-//		Memory reference at address &sym(symkind) + offset + reg + index*scale.
-//		Any of sym(symkind), ±offset, (reg), (index*scale), and *scale can be omitted.
-//		If (reg) and *scale are both omitted, the resulting expression (index) is parsed as (reg).
-//		To force a parsing as index*scale, write (index*1).
-//		Encoding:
-//			type = TYPE_MEM
-//			name = symkind (NAME_AUTO, ...) or 0 (NAME_NONE)
-//			sym = sym
-//			offset = ±offset
-//			reg = reg (REG_*)
-//			index = index (REG_*)
-//			scale = scale (1, 2, 4, 8)
-//
-//	$<mem>
-//		Effective address of memory reference <mem>, defined above.
-//		Encoding: same as memory reference, but type = TYPE_ADDR.
-//
-//	$<±integer value>
-//		This is a special case of $<mem>, in which only ±offset is present.
-//		It has a separate type for easy recognition.
-//		Encoding:
-//			type = TYPE_CONST
-//			offset = ±integer value
-//
-//	*<mem>
-//		Indirect reference through memory reference <mem>, defined above.
-//		Only used on x86 for CALL/JMP *sym(SB), which calls/jumps to a function
-//		pointer stored in the data word sym(SB), not a function named sym(SB).
-//		Encoding: same as above, but type = TYPE_INDIR.
-//
-//	$*$<mem>
-//		No longer used.
-//		On machines with actual SB registers, $*$<mem> forced the
-//		instruction encoding to use a full 32-bit constant, never a
-//		reference relative to SB.
-//
-//	$<floating point literal>
-//		Floating point constant value.
-//		Encoding:
-//			type = TYPE_FCONST
-//			val = floating point value
-//
-//	$<string literal, up to 8 chars>
-//		String literal value (raw bytes used for DATA instruction).
-//		Encoding:
-//			type = TYPE_SCONST
-//			val = string
-//
-//	<register name>
-//		Any register: integer, floating point, control, segment, and so on.
-//		If looking for specific register kind, must check type and reg value range.
-//		Encoding:
-//			type = TYPE_REG
-//			reg = reg (REG_*)
-//
-//	x(PC)
-//		Encoding:
-//			type = TYPE_BRANCH
-//			val = Prog* reference OR ELSE offset = target pc (branch takes priority)
-//
-//	$±x-±y
-//		Final argument to TEXT, specifying local frame size x and argument size y.
-//		In this form, x and y are integer literals only, not arbitrary expressions.
-//		This avoids parsing ambiguities due to the use of - as a separator.
-//		The ± are optional.
-//		If the final argument to TEXT omits the -±y, the encoding should still
-//		use TYPE_TEXTSIZE (not TYPE_CONST), with u.argsize = ArgsSizeUnknown.
-//		Encoding:
-//			type = TYPE_TEXTSIZE
-//			offset = x
-//			val = int32(y)
-//
-//	reg<<shift, reg>>shift, reg->shift, reg@>shift
-//		Shifted register value, for ARM and ARM64.
-//		In this form, reg must be a register and shift can be a register or an integer constant.
-//		Encoding:
-//			type = TYPE_SHIFT
-//		On ARM:
-//			offset = (reg&15) | shifttype<<5 | count
-//			shifttype = 0, 1, 2, 3 for <<, >>, ->, @>
-//			count = (reg&15)<<8 | 1<<4 for a register shift count, (n&31)<<7 for an integer constant.
-//		On ARM64:
-//			offset = (reg&31)<<16 | shifttype<<22 | (count&63)<<10
-//			shifttype = 0, 1, 2 for <<, >>, ->
-//
-//	(reg, reg)
-//		A destination register pair. When used as the last argument of an instruction,
-//		this form makes clear that both registers are destinations.
-//		Encoding:
-//			type = TYPE_REGREG
-//			reg = first register
-//			offset = second register
-//
-//	[reg, reg, reg-reg]
-//		Register list for ARM.
-//		Encoding:
-//			type = TYPE_REGLIST
-//			offset = bit mask of registers in list; R0 is low bit.
-//
-//	reg, reg
-//		Register pair for ARM.
-//		TYPE_REGREG2
-//
-//	(reg+reg)
-//		Register pair for PPC64.
-//		Encoding:
-//			type = TYPE_MEM
-//			reg = first register
-//			index = second register
-//			scale = 1
-//
-type Addr struct {
-	Reg    int16
-	Index  int16
-	Scale  int16 // Sometimes holds a register.
-	Type   AddrType
-	Name   int8
-	Class  int8
-	Offset int64
-	Sym    *LSym
-
-	// argument value:
-	//	for TYPE_SCONST, a string
-	//	for TYPE_FCONST, a float64
-	//	for TYPE_BRANCH, a *Prog (optional)
-	//	for TYPE_TEXTSIZE, an int32 (optional)
-	Val interface{}
-
-	Node interface{} // for use by compiler
-}
-
-type AddrType uint8
-
-const (
-	NAME_NONE = 0 + iota
-	NAME_EXTERN
-	NAME_STATIC
-	NAME_AUTO
-	NAME_PARAM
-	// A reference to name@GOT(SB) is a reference to the entry in the global offset
-	// table for 'name'.
-	NAME_GOTREF
-)
-
-const (
-	TYPE_NONE AddrType = 0
-
-	TYPE_BRANCH AddrType = 5 + iota
-	TYPE_TEXTSIZE
-	TYPE_MEM
-	TYPE_CONST
-	TYPE_FCONST
-	TYPE_SCONST
-	TYPE_REG
-	TYPE_ADDR
-	TYPE_SHIFT
-	TYPE_REGREG
-	TYPE_REGREG2
-	TYPE_INDIR
-	TYPE_REGLIST
-)
-
-// Prog describes a single machine instruction.
-//
-// The general instruction form is:
-//
-//	As.Scond From, Reg, From3, To, RegTo2
-//
-// where As is an opcode and the others are arguments:
-// From, Reg, From3 are sources, and To, RegTo2 are destinations.
-// Usually, not all arguments are present.
-// For example, MOVL R1, R2 encodes using only As=MOVL, From=R1, To=R2.
-// The Scond field holds additional condition bits for systems (like arm)
-// that have generalized conditional execution.
-//
-// Jump instructions use the Pcond field to point to the target instruction,
-// which must be in the same linked list as the jump instruction.
-//
-// The Progs for a given function are arranged in a list linked through the Link field.
-//
-// Each Prog is charged to a specific source line in the debug information,
-// specified by Lineno, an index into the line history (see LineHist).
-// Every Prog has a Ctxt field that defines various context, including the current LineHist.
-// Progs should be allocated using ctxt.NewProg(), not new(Prog).
-//
-// The other fields not yet mentioned are for use by the back ends and should
-// be left zeroed by creators of Prog lists.
-type Prog struct {
-	Ctxt   *Link       // linker context
-	Link   *Prog       // next Prog in linked list
-	From   Addr        // first source operand
-	From3  *Addr       // third source operand (second is Reg below)
-	To     Addr        // destination operand (second is RegTo2 below)
-	Pcond  *Prog       // target of conditional jump
-	Opt    interface{} // available to optimization passes to hold per-Prog state
-	Forwd  *Prog       // for x86 back end
-	Rel    *Prog       // for x86, arm back ends
-	Pc     int64       // for back ends or assembler: virtual or actual program counter, depending on phase
-	Lineno int32       // line number of this instruction
-	Spadj  int32       // effect of instruction on stack pointer (increment or decrement amount)
-	As     As          // assembler opcode
-	Reg    int16       // 2nd source operand
-	RegTo2 int16       // 2nd destination operand
-	Mark   uint16      // bitmask of arch-specific items
-	Optab  uint16      // arch-specific opcode index
-	Scond  uint8       // condition bits for conditional instruction (e.g., on ARM)
-	Back   uint8       // for x86 back end: backwards branch state
-	Ft     uint8       // for x86 back end: type index of Prog.From
-	Tt     uint8       // for x86 back end: type index of Prog.To
-	Isize  uint8       // for x86 back end: size of the instruction in bytes
-	Mode   int8        // for x86 back end: 32- or 64-bit mode
-}
-
-// From3Type returns From3.Type, or TYPE_NONE when From3 is nil.
-func (p *Prog) From3Type() AddrType {
-	if p.From3 == nil {
-		return TYPE_NONE
-	}
-	return p.From3.Type
-}
-
-// From3Offset returns From3.Offset, or 0 when From3 is nil.
-func (p *Prog) From3Offset() int64 {
-	if p.From3 == nil {
-		return 0
-	}
-	return p.From3.Offset
-}
-
-// An As denotes an assembler opcode.
-// There are some portable opcodes, declared here in package obj,
-// that are common to all architectures.
-// However, the majority of opcodes are arch-specific
-// and are declared in their respective architecture's subpackage.
-type As int16
-
-// These are the portable opcodes.
-const (
-	AXXX As = iota
-	ACALL
-	ADUFFCOPY
-	ADUFFZERO
-	AEND
-	AFUNCDATA
-	AJMP
-	ANOP
-	APCDATA
-	ARET
-	ATEXT
-	ATYPE
-	AUNDEF
-	AUSEFIELD
-	AVARDEF
-	AVARKILL
-	AVARLIVE
-	A_ARCHSPECIFIC
-)
-
-// Each architecture is allotted a distinct subspace of opcode values
-// for declaring its arch-specific opcodes.
-// Within this subspace, the first arch-specific opcode should be
-// at offset A_ARCHSPECIFIC.
-//
-// Subspaces are aligned to a power of two so opcodes can be masked
-// with AMask and used as compact array indices.
-const (
-	ABase386 = (1 + iota) << 10
-	ABaseARM
-	ABaseAMD64
-	ABasePPC64
-	ABaseARM64
-	ABaseMIPS
-	ABaseS390X
-
-	AllowedOpCodes = 1 << 10            // The number of opcodes available for any given architecture.
-	AMask          = AllowedOpCodes - 1 // AND with this to use the opcode as an array index.
-)
-
-// An LSym is the sort of symbol that is written to an object file.
-type LSym struct {
-	Name    string
-	Type    SymKind
-	Version int16
-	Attribute
-
-	RefIdx int // Index of this symbol in the symbol reference list.
-	Args   int32
-	Locals int32
-	Size   int64
-	Gotype *LSym
-	Autom  *Auto
-	Text   *Prog
-	Pcln   *Pcln
-	P      []byte
-	R      []Reloc
-}
-
-// Attribute is a set of symbol attributes.
-type Attribute int16
-
-const (
-	AttrDuplicateOK Attribute = 1 << iota
-	AttrCFunc
-	AttrNoSplit
-	AttrLeaf
-	AttrSeenGlobl
-	AttrOnList
-
-	// MakeTypelink means that the type should have an entry in the typelink table.
-	AttrMakeTypelink
-
-	// ReflectMethod means the function may call reflect.Type.Method or
-	// reflect.Type.MethodByName. Matching is imprecise (as reflect.Type
-	// can be used through a custom interface), so ReflectMethod may be
-	// set in some cases when the reflect package is not called.
-	//
-	// Used by the linker to determine what methods can be pruned.
-	AttrReflectMethod
-
-	// Local means make the symbol local even when compiling Go code to reference Go
-	// symbols in other shared libraries, as in this mode symbols are global by
-	// default. "local" here means in the sense of the dynamic linker, i.e. not
-	// visible outside of the module (shared library or executable) that contains its
-	// definition. (When not compiling to support Go shared libraries, all symbols are
-	// local in this sense unless there is a cgo_export_* directive).
-	AttrLocal
-)
-
-func (a Attribute) DuplicateOK() bool   { return a&AttrDuplicateOK != 0 }
-func (a Attribute) MakeTypelink() bool  { return a&AttrMakeTypelink != 0 }
-func (a Attribute) CFunc() bool         { return a&AttrCFunc != 0 }
-func (a Attribute) NoSplit() bool       { return a&AttrNoSplit != 0 }
-func (a Attribute) Leaf() bool          { return a&AttrLeaf != 0 }
-func (a Attribute) SeenGlobl() bool     { return a&AttrSeenGlobl != 0 }
-func (a Attribute) OnList() bool        { return a&AttrOnList != 0 }
-func (a Attribute) ReflectMethod() bool { return a&AttrReflectMethod != 0 }
-func (a Attribute) Local() bool         { return a&AttrLocal != 0 }
-
-func (a *Attribute) Set(flag Attribute, value bool) {
-	if value {
-		*a |= flag
-	} else {
-		*a &^= flag
-	}
-}
-
-// The compiler needs LSym to satisfy fmt.Stringer, because it stores
-// an LSym in ssa.ExternSymbol.
-func (s *LSym) String() string {
-	return s.Name
-}
-
-type Pcln struct {
-	Pcsp        Pcdata
-	Pcfile      Pcdata
-	Pcline      Pcdata
-	Pcdata      []Pcdata
-	Funcdata    []*LSym
-	Funcdataoff []int64
-	File        []*LSym
-	Lastfile    *LSym
-	Lastindex   int
-}
-
-// A SymKind describes the kind of memory represented by a symbol.
-type SymKind int16
-
-// Defined SymKind values.
-//
-// TODO(rsc): Give idiomatic Go names.
-// TODO(rsc): Reduce the number of symbol types in the object files.
-//go:generate stringer -type=SymKind
-const (
-	Sxxx SymKind = iota
-	STEXT
-	SELFRXSECT
-
-	// Read-only sections.
-	STYPE
-	SSTRING
-	SGOSTRING
-	SGOFUNC
-	SGCBITS
-	SRODATA
-	SFUNCTAB
-
-	SELFROSECT
-	SMACHOPLT
-
-	// Read-only sections with relocations.
-	//
-	// Types STYPE-SFUNCTAB above are written to the .rodata section by default.
-	// When linking a shared object, some conceptually "read only" types need to
-	// be written to by relocations and putting them in a section called
-	// ".rodata" interacts poorly with the system linkers. The GNU linkers
-	// support this situation by arranging for sections of the name
-	// ".data.rel.ro.XXX" to be mprotected read only by the dynamic linker after
-	// relocations have been applied, so when the Go linker is creating a shared
-	// object it checks all objects of the above types and bumps any object that
-	// has a relocation to it to the corresponding type below, which are then
-	// written to sections with appropriate magic names.
-	STYPERELRO
-	SSTRINGRELRO
-	SGOSTRINGRELRO
-	SGOFUNCRELRO
-	SGCBITSRELRO
-	SRODATARELRO
-	SFUNCTABRELRO
-
-	// Part of .data.rel.ro if it exists, otherwise part of .rodata.
-	STYPELINK
-	SITABLINK
-	SSYMTAB
-	SPCLNTAB
-
-	// Writable sections.
-	SELFSECT
-	SMACHO
-	SMACHOGOT
-	SWINDOWS
-	SELFGOT
-	SNOPTRDATA
-	SINITARR
-	SDATA
-	SBSS
-	SNOPTRBSS
-	STLSBSS
-	SXREF
-	SMACHOSYMSTR
-	SMACHOSYMTAB
-	SMACHOINDIRECTPLT
-	SMACHOINDIRECTGOT
-	SFILE
-	SFILEPATH
-	SCONST
-	SDYNIMPORT
-	SHOSTOBJ
-	SDWARFSECT
-	SDWARFINFO
-	SSUB       = SymKind(1 << 8)
-	SMASK      = SymKind(SSUB - 1)
-	SHIDDEN    = SymKind(1 << 9)
-	SCONTAINER = SymKind(1 << 10) // has a sub-symbol
-)
-
-// ReadOnly are the symbol kinds that form read-only sections. In some
-// cases, if they will require relocations, they are transformed into
-// rel-ro sections using RelROMap.
-var ReadOnly = []SymKind{
-	STYPE,
-	SSTRING,
-	SGOSTRING,
-	SGOFUNC,
-	SGCBITS,
-	SRODATA,
-	SFUNCTAB,
-}
-
-// RelROMap describes the transformation of read-only symbols to rel-ro
-// symbols.
-var RelROMap = map[SymKind]SymKind{
-	STYPE:     STYPERELRO,
-	SSTRING:   SSTRINGRELRO,
-	SGOSTRING: SGOSTRINGRELRO,
-	SGOFUNC:   SGOFUNCRELRO,
-	SGCBITS:   SGCBITSRELRO,
-	SRODATA:   SRODATARELRO,
-	SFUNCTAB:  SFUNCTABRELRO,
-}
-
-type Reloc struct {
-	Off  int32
-	Siz  uint8
-	Type RelocType
-	Add  int64
-	Sym  *LSym
-}
-
-type RelocType int32
-
-//go:generate stringer -type=RelocType
-const (
-	R_ADDR RelocType = 1 + iota
-	// R_ADDRPOWER relocates a pair of "D-form" instructions (instructions with 16-bit
-	// immediates in the low half of the instruction word), usually addis followed by
-	// another add or a load, inserting the "high adjusted" 16 bits of the address of
-	// the referenced symbol into the immediate field of the first instruction and the
-	// low 16 bits into that of the second instruction.
-	R_ADDRPOWER
-	// R_ADDRARM64 relocates an adrp, add pair to compute the address of the
-	// referenced symbol.
-	R_ADDRARM64
-	// R_ADDRMIPS (only used on mips/mips64) resolves to the low 16 bits of an external
-	// address, by encoding it into the instruction.
-	R_ADDRMIPS
-	// R_ADDROFF resolves to a 32-bit offset from the beginning of the section
-	// holding the data being relocated to the referenced symbol.
-	R_ADDROFF
-	// R_WEAKADDROFF resolves just like R_ADDROFF but is a weak relocation.
-	// A weak relocation does not make the symbol it refers to reachable,
-	// and is only honored by the linker if the symbol is in some other way
-	// reachable.
-	R_WEAKADDROFF
-	R_SIZE
-	R_CALL
-	R_CALLARM
-	R_CALLARM64
-	R_CALLIND
-	R_CALLPOWER
-	// R_CALLMIPS (only used on mips64) resolves to non-PC-relative target address
-	// of a CALL (JAL) instruction, by encoding the address into the instruction.
-	R_CALLMIPS
-	R_CONST
-	R_PCREL
-	// R_TLS_LE, used on 386, amd64, and ARM, resolves to the offset of the
-	// thread-local symbol from the thread local base and is used to implement the
-	// "local exec" model for tls access (r.Sym is not set on intel platforms but is
-	// set to a TLS symbol -- runtime.tlsg -- in the linker when externally linking).
-	R_TLS_LE
-	// R_TLS_IE, used on 386, amd64, and ARM, resolves to the PC-relative offset to a GOT
-	// slot containing the offset of the thread-local symbol from the thread local
-	// base and is used to implement the "initial exec" model for tls access (r.Sym
-	// is not set on intel platforms but is set to a TLS symbol -- runtime.tlsg -- in
-	// the linker when externally linking).
-	R_TLS_IE
-	R_GOTOFF
-	R_PLT0
-	R_PLT1
-	R_PLT2
-	R_USEFIELD
-	// R_USETYPE resolves to an *rtype, but no relocation is created. The
-	// linker uses this as a signal that the pointed-to type information
-	// should be linked into the final binary, even if there are no other
-	// direct references. (This is used for types reachable by reflection.)
-	R_USETYPE
-	// R_METHODOFF resolves to a 32-bit offset from the beginning of the section
-	// holding the data being relocated to the referenced symbol.
-	// It is a variant of R_ADDROFF used when linking from the uncommonType of a
-	// *rtype, and may be set to zero by the linker if it determines the method
-	// text is unreachable by the linked program.
-	R_METHODOFF
-	R_POWER_TOC
-	R_GOTPCREL
-	// R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address
-	// of a JMP instruction, by encoding the address into the instruction.
-	// The stack nosplit check ignores this since it is not a function call.
-	R_JMPMIPS
-	// R_DWARFREF resolves to the offset of the symbol from its section.
-	R_DWARFREF
-
-	// Platform dependent relocations. Architectures with fixed width instructions
-	// have the inherent issue that a 32-bit (or 64-bit!) displacement cannot be
-	// stuffed into a 32-bit instruction, so an address needs to be spread across
-	// several instructions, and in turn this requires a sequence of relocations, each
-	// updating a part of an instruction. This leads to relocation codes that are
-	// inherently processor specific.
-
-	// Arm64.
-
-	// Set a MOV[NZ] immediate field to bits [15:0] of the offset from the thread
-	// local base to the thread local variable defined by the referenced (thread
-	// local) symbol. Error if the offset does not fit into 16 bits.
-	R_ARM64_TLS_LE
-
-	// Relocates an ADRP; LD64 instruction sequence to load the offset between
-	// the thread local base and the thread local variable defined by the
-	// referenced (thread local) symbol from the GOT.
-	R_ARM64_TLS_IE
-
-	// R_ARM64_GOTPCREL relocates an adrp, ld64 pair to compute the address of the GOT
-	// slot of the referenced symbol.
-	R_ARM64_GOTPCREL
-
-	// PPC64.
-
-	// R_POWER_TLS_LE is used to implement the "local exec" model for tls
-	// access. It resolves to the offset of the thread-local symbol from the
-	// thread pointer (R13) and inserts this value into the low 16 bits of an
-	// instruction word.
-	R_POWER_TLS_LE
-
-	// R_POWER_TLS_IE is used to implement the "initial exec" model for tls access. It
-	// relocates a D-form, DS-form instruction sequence like R_ADDRPOWER_DS. It
-	// inserts the offset of the GOT slot for the thread-local symbol from the TOC (the
-	// GOT slot is filled by the dynamic linker with the offset of the thread-local
-	// symbol from the thread pointer (R13)).
-	R_POWER_TLS_IE
-
-	// R_POWER_TLS marks an X-form instruction such as "MOVD 0(R13)(R31*1), g" as
-	// accessing a particular thread-local symbol. It does not affect code generation
-	// but is used by the system linker when relaxing "initial exec" model code to
-	// "local exec" model code.
-	R_POWER_TLS
-
-	// R_ADDRPOWER_DS is similar to R_ADDRPOWER above, but assumes the second
-	// instruction is a "DS-form" instruction, which has an immediate field occupying
-	// bits [15:2] of the instruction word. Bits [15:2] of the address of the
-	// relocated symbol are inserted into this field; it is an error if the last two
-	// bits of the address are not 0.
-	R_ADDRPOWER_DS
-
-	// R_ADDRPOWER_GOT relocates a D-form, DS-form instruction sequence like
-	// R_ADDRPOWER_DS but inserts the offset of the GOT slot for the referenced symbol
-	// from the TOC rather than the symbol's address.
-	R_ADDRPOWER_GOT
-
-	// R_ADDRPOWER_PCREL relocates two D-form instructions like R_ADDRPOWER, but
-	// inserts the displacement from the place being relocated to the address of
-	// the relocated symbol instead of just its address.
-	R_ADDRPOWER_PCREL
-
-	// R_ADDRPOWER_TOCREL relocates two D-form instructions like R_ADDRPOWER, but
-	// inserts the offset from the TOC to the address of the relocated symbol
-	// rather than the symbol's address.
-	R_ADDRPOWER_TOCREL
-
-	// R_ADDRPOWER_TOCREL_DS relocates a D-form, DS-form instruction sequence like
-	// R_ADDRPOWER_DS but inserts the offset from the TOC to the address of the
-	// relocated symbol rather than the symbol's address.
-	R_ADDRPOWER_TOCREL_DS
-
-	// R_PCRELDBL relocates s390x 2-byte aligned PC-relative addresses.
-	// TODO(mundaym): remove once variants can be serialized - see issue 14218.
-	R_PCRELDBL
-
-	// R_ADDRMIPSU (only used on mips/mips64) resolves to the sign-adjusted "upper" 16
-	// bits (bit 16-31) of an external address, by encoding it into the instruction.
-	R_ADDRMIPSU
-	// R_ADDRMIPSTLS (only used on mips64) resolves to the low 16 bits of a TLS
-	// address (offset from thread pointer), by encoding it into the instruction.
-	R_ADDRMIPSTLS
-)
-
-// IsDirectJump returns whether r is a relocation for a direct jump.
-// A direct jump is a CALL or JMP instruction that takes the target address
-// as immediate. The address is embedded into the instruction, possibly
-// with limited width.
-// An indirect jump is a CALL or JMP instruction that takes the target address
-// in register or memory.
-func (r RelocType) IsDirectJump() bool {
-	switch r {
-	case R_CALL, R_CALLARM, R_CALLARM64, R_CALLPOWER, R_CALLMIPS, R_JMPMIPS:
-		return true
-	}
-	return false
-}
-
-type Auto struct {
-	Asym    *LSym
-	Link    *Auto
-	Aoffset int32
-	Name    int16
-	Gotype  *LSym
-}
-
-// Auto.name
-const (
-	A_AUTO = 1 + iota
-	A_PARAM
-)
-
-type Pcdata struct {
-	P []byte
-}
-
-// symbol version, incremented each time a file is loaded.
-// version==1 is reserved for savehist.
-const (
-	HistVersion = 1
-)
-
-// Link holds the context for writing object code from a compiler
-// to be linker input or for reading that input into the linker.
-type Link struct {
-	Headtype      HeadType
-	Arch          *LinkArch
-	Debugasm      int32
-	Debugvlog     int32
-	Debugdivmod   int32
-	Debugpcln     int32
-	Flag_shared   bool
-	Flag_dynlink  bool
-	Flag_optimize bool
-	Bso           *bufio.Writer
-	Pathname      string
-	Hash          map[SymVer]*LSym
-	LineHist      LineHist
-	Imports       []string
-	Plists        []*Plist
-	Sym_div       *LSym
-	Sym_divu      *LSym
-	Sym_mod       *LSym
-	Sym_modu      *LSym
-	Plan9privates *LSym
-	Curp          *Prog
-	Printp        *Prog
-	Blitrl        *Prog
-	Elitrl        *Prog
-	Rexflag       int
-	Vexflag       int
-	Rep           int
-	Repn          int
-	Lock          int
-	Asmode        int
-	AsmBuf        AsmBuf // instruction buffer for x86
-	Instoffset    int64
-	Autosize      int32
-	Armsize       int32
-	Pc            int64
-	DiagFunc      func(string, ...interface{})
-	Mode          int
-	Cursym        *LSym
-	Version       int
-	Errors        int
-
-	Framepointer_enabled bool
-
-	// state for writing objects
-	Text []*LSym
-	Data []*LSym
-
-	// Cache of Progs
-	allocIdx int
-	progs    [10000]Prog
-}
-
-func (ctxt *Link) Diag(format string, args ...interface{}) {
-	ctxt.Errors++
-	ctxt.DiagFunc(format, args...)
-}
-
-func (ctxt *Link) Logf(format string, args ...interface{}) {
-	fmt.Fprintf(ctxt.Bso, format, args...)
-	ctxt.Bso.Flush()
-}
-
-// The smallest possible offset from the hardware stack pointer to a local
-// variable on the stack. Architectures that use a link register save its value
-// on the stack in the function prologue and so always have a pointer between
-// the hardware stack pointer and the local variable area.
-func (ctxt *Link) FixedFrameSize() int64 {
-	switch ctxt.Arch.Family {
-	case sys.AMD64, sys.I386:
-		return 0
-	case sys.PPC64:
-		// PIC code on ppc64le requires 32 bytes of stack, and it's easier to
-		// just use that much stack always on ppc64x.
-		return int64(4 * ctxt.Arch.PtrSize)
-	default:
-		return int64(ctxt.Arch.PtrSize)
-	}
-}
-
-type SymVer struct {
-	Name    string
-	Version int // TODO: make int16 to match LSym.Version?
-}
-
-// LinkArch is the definition of a single architecture.
-type LinkArch struct {
-	*sys.Arch
-	Preprocess func(*Link, *LSym)
-	Assemble   func(*Link, *LSym)
-	Follow     func(*Link, *LSym)
-	Progedit   func(*Link, *Prog)
-	UnaryDst   map[As]bool // Instruction takes one operand, a destination.
-}
-
-// HeadType is the executable header type.
-type HeadType uint8
-
-const (
-	Hunknown HeadType = iota
-	Hdarwin
-	Hdragonfly
-	Hfreebsd
-	Hlinux
-	Hnacl
-	Hnetbsd
-	Hopenbsd
-	Hplan9
-	Hsolaris
-	Hwindows
-	Hwindowsgui
-)
-
-func (h *HeadType) Set(s string) error {
-	switch s {
-	case "darwin":
-		*h = Hdarwin
-	case "dragonfly":
-		*h = Hdragonfly
-	case "freebsd":
-		*h = Hfreebsd
-	case "linux", "android":
-		*h = Hlinux
-	case "nacl":
-		*h = Hnacl
-	case "netbsd":
-		*h = Hnetbsd
-	case "openbsd":
-		*h = Hopenbsd
-	case "plan9":
-		*h = Hplan9
-	case "solaris":
-		*h = Hsolaris
-	case "windows":
-		*h = Hwindows
-	case "windowsgui":
-		*h = Hwindowsgui
-	default:
-		return fmt.Errorf("invalid headtype: %q", s)
-	}
-	return nil
-}
-
-func (h *HeadType) String() string {
-	switch *h {
-	case Hdarwin:
-		return "darwin"
-	case Hdragonfly:
-		return "dragonfly"
-	case Hfreebsd:
-		return "freebsd"
-	case Hlinux:
-		return "linux"
-	case Hnacl:
-		return "nacl"
-	case Hnetbsd:
-		return "netbsd"
-	case Hopenbsd:
-		return "openbsd"
-	case Hplan9:
-		return "plan9"
-	case Hsolaris:
-		return "solaris"
-	case Hwindows:
-		return "windows"
-	case Hwindowsgui:
-		return "windowsgui"
-	}
-	return fmt.Sprintf("HeadType(%d)", *h)
-}
-
-// AsmBuf is a simple buffer to assemble variable-length x86 instructions into.
-type AsmBuf struct {
-	buf [100]byte
-	off int
-}
-
-// Put1 appends one byte to the end of the buffer.
-func (a *AsmBuf) Put1(x byte) {
-	a.buf[a.off] = x
-	a.off++
-}
-
-// Put2 appends two bytes to the end of the buffer.
-func (a *AsmBuf) Put2(x, y byte) {
-	a.buf[a.off+0] = x
-	a.buf[a.off+1] = y
-	a.off += 2
-}
-
-// Put3 appends three bytes to the end of the buffer.
-func (a *AsmBuf) Put3(x, y, z byte) {
-	a.buf[a.off+0] = x
-	a.buf[a.off+1] = y
-	a.buf[a.off+2] = z
-	a.off += 3
-}
-
-// Put4 appends four bytes to the end of the buffer.
-func (a *AsmBuf) Put4(x, y, z, w byte) {
-	a.buf[a.off+0] = x
-	a.buf[a.off+1] = y
-	a.buf[a.off+2] = z
-	a.buf[a.off+3] = w
-	a.off += 4
-}
-
-// PutInt16 writes v into the buffer using little-endian encoding.
-func (a *AsmBuf) PutInt16(v int16) {
-	a.buf[a.off+0] = byte(v)
-	a.buf[a.off+1] = byte(v >> 8)
-	a.off += 2
-}
-
-// PutInt32 writes v into the buffer using little-endian encoding.
-func (a *AsmBuf) PutInt32(v int32) {
-	a.buf[a.off+0] = byte(v)
-	a.buf[a.off+1] = byte(v >> 8)
-	a.buf[a.off+2] = byte(v >> 16)
-	a.buf[a.off+3] = byte(v >> 24)
-	a.off += 4
-}
-
-// PutInt64 writes v into the buffer using little-endian encoding.
-func (a *AsmBuf) PutInt64(v int64) {
-	a.buf[a.off+0] = byte(v)
-	a.buf[a.off+1] = byte(v >> 8)
-	a.buf[a.off+2] = byte(v >> 16)
-	a.buf[a.off+3] = byte(v >> 24)
-	a.buf[a.off+4] = byte(v >> 32)
-	a.buf[a.off+5] = byte(v >> 40)
-	a.buf[a.off+6] = byte(v >> 48)
-	a.buf[a.off+7] = byte(v >> 56)
-	a.off += 8
-}
-
-// Put copies b into the buffer.
-func (a *AsmBuf) Put(b []byte) {
-	copy(a.buf[a.off:], b)
-	a.off += len(b)
-}
-
-// Insert inserts b at offset i.
-func (a *AsmBuf) Insert(i int, b byte) {
-	a.off++
-	copy(a.buf[i+1:a.off], a.buf[i:a.off-1])
-	a.buf[i] = b
-}
-
-// Last returns the byte at the end of the buffer.
-func (a *AsmBuf) Last() byte { return a.buf[a.off-1] }
-
-// Len returns the length of the buffer.
-func (a *AsmBuf) Len() int { return a.off }
-
-// Bytes returns the contents of the buffer.
-func (a *AsmBuf) Bytes() []byte { return a.buf[:a.off] }
-
-// Reset empties the buffer.
-func (a *AsmBuf) Reset() { a.off = 0 }
-
-// Peek returns the byte at offset i.
-func (a *AsmBuf) Peek(i int) byte { return a.buf[i] }
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/a.out.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/a.out.go
deleted file mode 100644
index 77cc497..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/a.out.go
+++ /dev/null
@@ -1,396 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/a.out.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/a.out.go:1
-// cmd/9c/9.out.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-import "bootstrap/cmd/internal/obj"
-
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p mips
-
-/*
- * mips 64
- */
-const (
-	NSNAME = 8
-	NSYM   = 50
-	NREG   = 32 /* number of general registers */
-	NFREG  = 32 /* number of floating point registers */
-)
-
-const (
-	REG_R0 = obj.RBaseMIPS + iota
-	REG_R1
-	REG_R2
-	REG_R3
-	REG_R4
-	REG_R5
-	REG_R6
-	REG_R7
-	REG_R8
-	REG_R9
-	REG_R10
-	REG_R11
-	REG_R12
-	REG_R13
-	REG_R14
-	REG_R15
-	REG_R16
-	REG_R17
-	REG_R18
-	REG_R19
-	REG_R20
-	REG_R21
-	REG_R22
-	REG_R23
-	REG_R24
-	REG_R25
-	REG_R26
-	REG_R27
-	REG_R28
-	REG_R29
-	REG_R30
-	REG_R31
-
-	REG_F0
-	REG_F1
-	REG_F2
-	REG_F3
-	REG_F4
-	REG_F5
-	REG_F6
-	REG_F7
-	REG_F8
-	REG_F9
-	REG_F10
-	REG_F11
-	REG_F12
-	REG_F13
-	REG_F14
-	REG_F15
-	REG_F16
-	REG_F17
-	REG_F18
-	REG_F19
-	REG_F20
-	REG_F21
-	REG_F22
-	REG_F23
-	REG_F24
-	REG_F25
-	REG_F26
-	REG_F27
-	REG_F28
-	REG_F29
-	REG_F30
-	REG_F31
-
-	REG_HI
-	REG_LO
-
-	// co-processor 0 control registers
-	REG_M0
-	REG_M1
-	REG_M2
-	REG_M3
-	REG_M4
-	REG_M5
-	REG_M6
-	REG_M7
-	REG_M8
-	REG_M9
-	REG_M10
-	REG_M11
-	REG_M12
-	REG_M13
-	REG_M14
-	REG_M15
-	REG_M16
-	REG_M17
-	REG_M18
-	REG_M19
-	REG_M20
-	REG_M21
-	REG_M22
-	REG_M23
-	REG_M24
-	REG_M25
-	REG_M26
-	REG_M27
-	REG_M28
-	REG_M29
-	REG_M30
-	REG_M31
-
-	// FPU control registers
-	REG_FCR0
-	REG_FCR1
-	REG_FCR2
-	REG_FCR3
-	REG_FCR4
-	REG_FCR5
-	REG_FCR6
-	REG_FCR7
-	REG_FCR8
-	REG_FCR9
-	REG_FCR10
-	REG_FCR11
-	REG_FCR12
-	REG_FCR13
-	REG_FCR14
-	REG_FCR15
-	REG_FCR16
-	REG_FCR17
-	REG_FCR18
-	REG_FCR19
-	REG_FCR20
-	REG_FCR21
-	REG_FCR22
-	REG_FCR23
-	REG_FCR24
-	REG_FCR25
-	REG_FCR26
-	REG_FCR27
-	REG_FCR28
-	REG_FCR29
-	REG_FCR30
-	REG_FCR31
-
-	REG_LAST = REG_FCR31 // the last defined register
-
-	REG_SPECIAL = REG_M0
-
-	REGZERO = REG_R0 /* set to zero */
-	REGSP   = REG_R29
-	REGSB   = REG_R28
-	REGLINK = REG_R31
-	REGRET  = REG_R1
-	REGARG  = -1      /* -1 disables passing the first argument in register */
-	REGRT1  = REG_R1  /* reserved for runtime, duffzero and duffcopy */
-	REGRT2  = REG_R2  /* reserved for runtime, duffcopy */
-	REGCTXT = REG_R22 /* context for closures */
-	REGG    = REG_R30 /* G */
-	REGTMP  = REG_R23 /* used by the linker */
-	FREGRET = REG_F0
-)
-
-const (
-	BIG = 32766
-)
-
-const (
-	/* mark flags */
-	FOLL    = 1 << 0
-	LABEL   = 1 << 1
-	LEAF    = 1 << 2
-	SYNC    = 1 << 3
-	BRANCH  = 1 << 4
-	LOAD    = 1 << 5
-	FCMP    = 1 << 6
-	NOSCHED = 1 << 7
-
-	NSCHED = 20
-)
-
-const (
-	Mips32 = 32
-	Mips64 = 64
-)
-
-const (
-	C_NONE = iota
-	C_REG
-	C_FREG
-	C_FCREG
-	C_MREG /* special processor register */
-	C_HI
-	C_LO
-	C_ZCON
-	C_SCON /* 16 bit signed */
-	C_UCON /* 32 bit signed, low 16 bits 0 */
-	C_ADD0CON
-	C_AND0CON
-	C_ADDCON /* -0x8000 <= v < 0 */
-	C_ANDCON /* 0 < v <= 0xFFFF */
-	C_LCON   /* other 32 */
-	C_DCON   /* other 64 (could subdivide further) */
-	C_SACON  /* $n(REG) where n <= int16 */
-	C_SECON
-	C_LACON /* $n(REG) where int16 < n <= int32 */
-	C_LECON
-	C_DACON /* $n(REG) where int32 < n */
-	C_STCON /* $tlsvar */
-	C_SBRA
-	C_LBRA
-	C_SAUTO
-	C_LAUTO
-	C_SEXT
-	C_LEXT
-	C_ZOREG
-	C_SOREG
-	C_LOREG
-	C_GOK
-	C_ADDR
-	C_TLS
-	C_TEXTSIZE
-
-	C_NCLASS /* must be the last */
-)
-
-const (
-	AABSD = obj.ABaseMIPS + obj.A_ARCHSPECIFIC + iota
-	AABSF
-	AABSW
-	AADD
-	AADDD
-	AADDF
-	AADDU
-	AADDW
-	AAND
-	ABEQ
-	ABFPF
-	ABFPT
-	ABGEZ
-	ABGEZAL
-	ABGTZ
-	ABLEZ
-	ABLTZ
-	ABLTZAL
-	ABNE
-	ABREAK
-	ACLO
-	ACLZ
-	ACMOVF
-	ACMOVN
-	ACMOVT
-	ACMOVZ
-	ACMPEQD
-	ACMPEQF
-	ACMPGED
-	ACMPGEF
-	ACMPGTD
-	ACMPGTF
-	ADIV
-	ADIVD
-	ADIVF
-	ADIVU
-	ADIVW
-	AGOK
-	ALL
-	ALUI
-	AMOVB
-	AMOVBU
-	AMOVD
-	AMOVDF
-	AMOVDW
-	AMOVF
-	AMOVFD
-	AMOVFW
-	AMOVH
-	AMOVHU
-	AMOVW
-	AMOVWD
-	AMOVWF
-	AMOVWL
-	AMOVWR
-	AMUL
-	AMULD
-	AMULF
-	AMULU
-	AMULW
-	ANEGD
-	ANEGF
-	ANEGW
-	ANOR
-	AOR
-	AREM
-	AREMU
-	ARFE
-	ASC
-	ASGT
-	ASGTU
-	ASLL
-	ASQRTD
-	ASQRTF
-	ASRA
-	ASRL
-	ASUB
-	ASUBD
-	ASUBF
-	ASUBU
-	ASUBW
-	ASYNC
-	ASYSCALL
-	ATEQ
-	ATLBP
-	ATLBR
-	ATLBWI
-	ATLBWR
-	ATNE
-	AWORD
-	AXOR
-
-	/* 64-bit */
-	AMOVV
-	AMOVVL
-	AMOVVR
-	ASLLV
-	ASRAV
-	ASRLV
-	ADIVV
-	ADIVVU
-	AREMV
-	AREMVU
-	AMULV
-	AMULVU
-	AADDV
-	AADDVU
-	ASUBV
-	ASUBVU
-
-	/* 64-bit FP */
-	ATRUNCFV
-	ATRUNCDV
-	ATRUNCFW
-	ATRUNCDW
-	AMOVWU
-	AMOVFV
-	AMOVDV
-	AMOVVF
-	AMOVVD
-
-	ALAST
-
-	// aliases
-	AJMP = obj.AJMP
-	AJAL = obj.ACALL
-	ARET = obj.ARET
-)
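// A short sketch of the iota-based register numbering used in the constant
// blocks above: each architecture's registers occupy a contiguous block above
// a shared base, so the hardware register number is recovered by subtracting
// that base. regBase here is a stand-in value, not the real obj.RBaseMIPS.
package main

import "fmt"

const regBase = 1000 // assumed placeholder for obj.RBaseMIPS

const (
	regR0 = regBase + iota
	regR1
	regR2
	regR3
)

// hwNum maps an internal register constant back to its hardware number.
func hwNum(r int) int { return r - regBase }

func main() {
	fmt.Println(hwNum(regR2), hwNum(regR3)) // 2 3
}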
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/anames.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/anames.go
deleted file mode 100644
index 4811ddc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/anames.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/anames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/anames.go:1
-// Generated by stringer -i a.out.go -o anames.go -p mips
-// Do not edit.
-
-package mips
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-	obj.A_ARCHSPECIFIC: "ABSD",
-	"ABSF",
-	"ABSW",
-	"ADD",
-	"ADDD",
-	"ADDF",
-	"ADDU",
-	"ADDW",
-	"AND",
-	"BEQ",
-	"BFPF",
-	"BFPT",
-	"BGEZ",
-	"BGEZAL",
-	"BGTZ",
-	"BLEZ",
-	"BLTZ",
-	"BLTZAL",
-	"BNE",
-	"BREAK",
-	"CLO",
-	"CLZ",
-	"CMOVF",
-	"CMOVN",
-	"CMOVT",
-	"CMOVZ",
-	"CMPEQD",
-	"CMPEQF",
-	"CMPGED",
-	"CMPGEF",
-	"CMPGTD",
-	"CMPGTF",
-	"DIV",
-	"DIVD",
-	"DIVF",
-	"DIVU",
-	"DIVW",
-	"GOK",
-	"LL",
-	"LUI",
-	"MOVB",
-	"MOVBU",
-	"MOVD",
-	"MOVDF",
-	"MOVDW",
-	"MOVF",
-	"MOVFD",
-	"MOVFW",
-	"MOVH",
-	"MOVHU",
-	"MOVW",
-	"MOVWD",
-	"MOVWF",
-	"MOVWL",
-	"MOVWR",
-	"MUL",
-	"MULD",
-	"MULF",
-	"MULU",
-	"MULW",
-	"NEGD",
-	"NEGF",
-	"NEGW",
-	"NOR",
-	"OR",
-	"REM",
-	"REMU",
-	"RFE",
-	"SC",
-	"SGT",
-	"SGTU",
-	"SLL",
-	"SQRTD",
-	"SQRTF",
-	"SRA",
-	"SRL",
-	"SUB",
-	"SUBD",
-	"SUBF",
-	"SUBU",
-	"SUBW",
-	"SYNC",
-	"SYSCALL",
-	"TEQ",
-	"TLBP",
-	"TLBR",
-	"TLBWI",
-	"TLBWR",
-	"TNE",
-	"WORD",
-	"XOR",
-	"MOVV",
-	"MOVVL",
-	"MOVVR",
-	"SLLV",
-	"SRAV",
-	"SRLV",
-	"DIVV",
-	"DIVVU",
-	"REMV",
-	"REMVU",
-	"MULV",
-	"MULVU",
-	"ADDV",
-	"ADDVU",
-	"SUBV",
-	"SUBVU",
-	"TRUNCFV",
-	"TRUNCDV",
-	"TRUNCFW",
-	"TRUNCDW",
-	"MOVWU",
-	"MOVFV",
-	"MOVDV",
-	"MOVVF",
-	"MOVVD",
-	"LAST",
-}
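// A small sketch of the keyed slice-literal trick used by Anames above:
// giving the first element an explicit index (4 here stands in for
// obj.A_ARCHSPECIFIC) makes the remaining elements fill successive indices,
// so the string table lines up with the opcode constants.
package main

import "fmt"

func main() {
	names := []string{4: "ABSD", "ABSF", "ABSW"}
	fmt.Println(len(names))                   // 7
	fmt.Println(names[4], names[5], names[6]) // ABSD ABSF ABSW
}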
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/anames0.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/anames0.go
deleted file mode 100644
index 04035db..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/anames0.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/anames0.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/anames0.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mips
-
-var cnames0 = []string{
-	"NONE",
-	"REG",
-	"FREG",
-	"FCREG",
-	"MREG",
-	"HI",
-	"LO",
-	"ZCON",
-	"SCON",
-	"UCON",
-	"ADD0CON",
-	"AND0CON",
-	"ADDCON",
-	"ANDCON",
-	"LCON",
-	"DCON",
-	"SACON",
-	"SECON",
-	"LACON",
-	"LECON",
-	"DACON",
-	"STCON",
-	"SBRA",
-	"LBRA",
-	"SAUTO",
-	"LAUTO",
-	"SEXT",
-	"LEXT",
-	"ZOREG",
-	"SOREG",
-	"LOREG",
-	"GOK",
-	"ADDR",
-	"TLS",
-	"TEXTSIZE",
-	"NCLASS",
-}
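// A sketch of the parallel const/string-table pattern used by the C_* operand
// classes and cnames0 above: the iota order of the constants must match the
// order of the strings, so class i prints as cnames[i]. The names here are
// shortened stand-ins for the real C_* constants.
package main

import "fmt"

const (
	cNone = iota
	cReg
	cFreg
	cNclass // must be the last, mirroring C_NCLASS
)

var cnames = []string{"NONE", "REG", "FREG", "NCLASS"}

// className is analogous in spirit to a DRconv-style lookup.
func className(c int) string {
	if c >= 0 && c < len(cnames) {
		return "C_" + cnames[c]
	}
	return fmt.Sprintf("C_?%d", c)
}

func main() {
	fmt.Println(className(cFreg)) // C_FREG
}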
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/asm0.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/asm0.go
deleted file mode 100644
index 5cede5f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/asm0.go
+++ /dev/null
@@ -1,1929 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/asm0.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/asm0.go:1
-// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"log"
-	"sort"
-)
-
-// Instruction layout.
-
-const (
-	mips64FuncAlign = 8
-)
-
-const (
-	r0iszero = 1
-)
-
-type Optab struct {
-	as    obj.As
-	a1    uint8
-	a2    uint8
-	a3    uint8
-	type_ int8
-	size  int8
-	param int16
-	mode  int
-}
-
-var optab = []Optab{
-	{obj.ATEXT, C_LEXT, C_NONE, C_TEXTSIZE, 0, 0, 0, Mips64},
-	{obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0},
-
-	{AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0},
-	{AMOVV, C_REG, C_NONE, C_REG, 1, 4, 0, Mips64},
-	{AMOVB, C_REG, C_NONE, C_REG, 12, 8, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_REG, 13, 4, 0, 0},
-	{AMOVWU, C_REG, C_NONE, C_REG, 14, 8, 0, Mips64},
-
-	{ASUB, C_REG, C_REG, C_REG, 2, 4, 0, 0},
-	{ASUBV, C_REG, C_REG, C_REG, 2, 4, 0, Mips64},
-	{AADD, C_REG, C_REG, C_REG, 2, 4, 0, 0},
-	{AADDV, C_REG, C_REG, C_REG, 2, 4, 0, Mips64},
-	{AAND, C_REG, C_REG, C_REG, 2, 4, 0, 0},
-	{ASUB, C_REG, C_NONE, C_REG, 2, 4, 0, 0},
-	{ASUBV, C_REG, C_NONE, C_REG, 2, 4, 0, Mips64},
-	{AADD, C_REG, C_NONE, C_REG, 2, 4, 0, 0},
-	{AADDV, C_REG, C_NONE, C_REG, 2, 4, 0, Mips64},
-	{AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0},
-	{ACMOVN, C_REG, C_REG, C_REG, 2, 4, 0, 0},
-
-	{ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0},
-	{ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0},
-	{ASLLV, C_REG, C_NONE, C_REG, 9, 4, 0, Mips64},
-	{ASLLV, C_REG, C_REG, C_REG, 9, 4, 0, Mips64},
-	{ACLO, C_REG, C_NONE, C_REG, 9, 4, 0, 0},
-
-	{AADDF, C_FREG, C_NONE, C_FREG, 32, 4, 0, 0},
-	{AADDF, C_FREG, C_REG, C_FREG, 32, 4, 0, 0},
-	{ACMPEQF, C_FREG, C_REG, C_NONE, 32, 4, 0, 0},
-	{AABSF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0},
-	{AMOVVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0},
-	{AMOVD, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0},
-
-	{AMOVW, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVWU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVV, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVB, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVBU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVWL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVVL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64},
-	{AMOVW, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0},
-	{AMOVWU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, Mips64},
-	{AMOVV, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, Mips64},
-	{AMOVB, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0},
-	{AMOVBU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0},
-	{AMOVWL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0},
-	{AMOVVL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, Mips64},
-	{AMOVW, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0},
-	{AMOVWU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, Mips64},
-	{AMOVV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, Mips64},
-	{AMOVB, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0},
-	{AMOVBU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0},
-	{AMOVWL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0},
-	{AMOVVL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, Mips64},
-	{ASC, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0},
-
-	{AMOVW, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVWU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVV, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVB, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVBU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVWL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVVL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, Mips64},
-	{AMOVW, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0},
-	{AMOVWU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, Mips64},
-	{AMOVV, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, Mips64},
-	{AMOVB, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0},
-	{AMOVBU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0},
-	{AMOVWL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0},
-	{AMOVVL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, Mips64},
-	{AMOVW, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0},
-	{AMOVWU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, Mips64},
-	{AMOVV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, Mips64},
-	{AMOVB, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0},
-	{AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0},
-	{AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0},
-	{AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, Mips64},
-	{ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0},
-
-	{AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, Mips64},
-	{AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, Mips64},
-	{AMOVV, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, Mips64},
-	{AMOVB, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, Mips64},
-	{AMOVBU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, Mips64},
-	{AMOVW, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0},
-	{AMOVWU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, Mips64},
-	{AMOVV, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, Mips64},
-	{AMOVB, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0},
-	{AMOVBU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0},
-	{AMOVW, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0},
-	{AMOVWU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, Mips64},
-	{AMOVV, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, Mips64},
-	{AMOVB, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0},
-	{AMOVBU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0},
-	{ASC, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0},
-	{AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, Mips32},
-	{AMOVW, C_REG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-	{AMOVWU, C_REG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-	{AMOVV, C_REG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-	{AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, Mips32},
-	{AMOVB, C_REG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-	{AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, Mips32},
-	{AMOVBU, C_REG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-	{AMOVW, C_REG, C_NONE, C_TLS, 53, 8, 0, 0},
-	{AMOVWU, C_REG, C_NONE, C_TLS, 53, 8, 0, Mips64},
-	{AMOVV, C_REG, C_NONE, C_TLS, 53, 8, 0, Mips64},
-	{AMOVB, C_REG, C_NONE, C_TLS, 53, 8, 0, 0},
-	{AMOVBU, C_REG, C_NONE, C_TLS, 53, 8, 0, 0},
-
-	{AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, Mips64},
-	{AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, Mips64},
-	{AMOVV, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, Mips64},
-	{AMOVB, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, Mips64},
-	{AMOVBU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, Mips64},
-	{AMOVW, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0},
-	{AMOVWU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, Mips64},
-	{AMOVV, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, Mips64},
-	{AMOVB, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0},
-	{AMOVBU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0},
-	{AMOVW, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0},
-	{AMOVWU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, Mips64},
-	{AMOVV, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, Mips64},
-	{AMOVB, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0},
-	{AMOVBU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0},
-	{AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, Mips32},
-	{AMOVW, C_ADDR, C_NONE, C_REG, 51, 12, 0, Mips64},
-	{AMOVWU, C_ADDR, C_NONE, C_REG, 51, 12, 0, Mips64},
-	{AMOVV, C_ADDR, C_NONE, C_REG, 51, 12, 0, Mips64},
-	{AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, Mips32},
-	{AMOVB, C_ADDR, C_NONE, C_REG, 51, 12, 0, Mips64},
-	{AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, Mips32},
-	{AMOVBU, C_ADDR, C_NONE, C_REG, 51, 12, 0, Mips64},
-	{AMOVW, C_TLS, C_NONE, C_REG, 54, 8, 0, 0},
-	{AMOVWU, C_TLS, C_NONE, C_REG, 54, 8, 0, Mips64},
-	{AMOVV, C_TLS, C_NONE, C_REG, 54, 8, 0, Mips64},
-	{AMOVB, C_TLS, C_NONE, C_REG, 54, 8, 0, 0},
-	{AMOVBU, C_TLS, C_NONE, C_REG, 54, 8, 0, 0},
-
-	{AMOVW, C_SECON, C_NONE, C_REG, 3, 4, REGSB, Mips64},
-	{AMOVV, C_SECON, C_NONE, C_REG, 3, 4, REGSB, Mips64},
-	{AMOVW, C_SACON, C_NONE, C_REG, 3, 4, REGSP, 0},
-	{AMOVV, C_SACON, C_NONE, C_REG, 3, 4, REGSP, Mips64},
-	{AMOVW, C_LECON, C_NONE, C_REG, 52, 8, REGSB, Mips32},
-	{AMOVW, C_LECON, C_NONE, C_REG, 52, 12, REGSB, Mips64},
-	{AMOVV, C_LECON, C_NONE, C_REG, 52, 12, REGSB, Mips64},
-
-	{AMOVW, C_LACON, C_NONE, C_REG, 26, 12, REGSP, 0},
-	{AMOVV, C_LACON, C_NONE, C_REG, 26, 12, REGSP, Mips64},
-	{AMOVW, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, 0},
-	{AMOVV, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, Mips64},
-	{AMOVW, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, 0},
-	{AMOVV, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, Mips64},
-	{AMOVW, C_STCON, C_NONE, C_REG, 55, 8, 0, 0},
-	{AMOVV, C_STCON, C_NONE, C_REG, 55, 8, 0, Mips64},
-
-	{AMOVW, C_UCON, C_NONE, C_REG, 24, 4, 0, 0},
-	{AMOVV, C_UCON, C_NONE, C_REG, 24, 4, 0, Mips64},
-	{AMOVW, C_LCON, C_NONE, C_REG, 19, 8, 0, 0},
-	{AMOVV, C_LCON, C_NONE, C_REG, 19, 8, 0, Mips64},
-
-	{AMOVW, C_HI, C_NONE, C_REG, 20, 4, 0, 0},
-	{AMOVV, C_HI, C_NONE, C_REG, 20, 4, 0, Mips64},
-	{AMOVW, C_LO, C_NONE, C_REG, 20, 4, 0, 0},
-	{AMOVV, C_LO, C_NONE, C_REG, 20, 4, 0, Mips64},
-	{AMOVW, C_REG, C_NONE, C_HI, 21, 4, 0, 0},
-	{AMOVV, C_REG, C_NONE, C_HI, 21, 4, 0, Mips64},
-	{AMOVW, C_REG, C_NONE, C_LO, 21, 4, 0, 0},
-	{AMOVV, C_REG, C_NONE, C_LO, 21, 4, 0, Mips64},
-
-	{AMUL, C_REG, C_REG, C_NONE, 22, 4, 0, 0},
-	{AMUL, C_REG, C_REG, C_REG, 22, 4, 0, 0},
-	{AMULV, C_REG, C_REG, C_NONE, 22, 4, 0, Mips64},
-
-	{AADD, C_ADD0CON, C_REG, C_REG, 4, 4, 0, 0},
-	{AADD, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, 0},
-	{AADD, C_ANDCON, C_REG, C_REG, 10, 8, 0, 0},
-	{AADD, C_ANDCON, C_NONE, C_REG, 10, 8, 0, 0},
-
-	{AADDV, C_ADD0CON, C_REG, C_REG, 4, 4, 0, Mips64},
-	{AADDV, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, Mips64},
-	{AADDV, C_ANDCON, C_REG, C_REG, 10, 8, 0, Mips64},
-	{AADDV, C_ANDCON, C_NONE, C_REG, 10, 8, 0, Mips64},
-
-	{AAND, C_AND0CON, C_REG, C_REG, 4, 4, 0, 0},
-	{AAND, C_AND0CON, C_NONE, C_REG, 4, 4, 0, 0},
-	{AAND, C_ADDCON, C_REG, C_REG, 10, 8, 0, 0},
-	{AAND, C_ADDCON, C_NONE, C_REG, 10, 8, 0, 0},
-
-	{AADD, C_UCON, C_REG, C_REG, 25, 8, 0, 0},
-	{AADD, C_UCON, C_NONE, C_REG, 25, 8, 0, 0},
-	{AADDV, C_UCON, C_REG, C_REG, 25, 8, 0, Mips64},
-	{AADDV, C_UCON, C_NONE, C_REG, 25, 8, 0, Mips64},
-	{AAND, C_UCON, C_REG, C_REG, 25, 8, 0, 0},
-	{AAND, C_UCON, C_NONE, C_REG, 25, 8, 0, 0},
-
-	{AADD, C_LCON, C_NONE, C_REG, 23, 12, 0, 0},
-	{AADDV, C_LCON, C_NONE, C_REG, 23, 12, 0, Mips64},
-	{AAND, C_LCON, C_NONE, C_REG, 23, 12, 0, 0},
-	{AADD, C_LCON, C_REG, C_REG, 23, 12, 0, 0},
-	{AADDV, C_LCON, C_REG, C_REG, 23, 12, 0, Mips64},
-	{AAND, C_LCON, C_REG, C_REG, 23, 12, 0, 0},
-
-	{ASLL, C_SCON, C_REG, C_REG, 16, 4, 0, 0},
-	{ASLL, C_SCON, C_NONE, C_REG, 16, 4, 0, 0},
-
-	{ASLLV, C_SCON, C_REG, C_REG, 16, 4, 0, Mips64},
-	{ASLLV, C_SCON, C_NONE, C_REG, 16, 4, 0, Mips64},
-
-	{ASYSCALL, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0},
-
-	{ABEQ, C_REG, C_REG, C_SBRA, 6, 4, 0, 0},
-	{ABEQ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0},
-	{ABLEZ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0},
-	{ABFPT, C_NONE, C_NONE, C_SBRA, 6, 8, 0, 0},
-
-	{AJMP, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0},
-	{AJAL, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0},
-
-	{AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0},
-	{AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0},
-
-	{AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, Mips64},
-	{AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, Mips64},
-	{AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, Mips64},
-	{AMOVW, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, Mips64},
-	{AMOVF, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0},
-	{AMOVD, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0},
-	{AMOVW, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, Mips64},
-	{AMOVF, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0},
-	{AMOVD, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0},
-
-	{AMOVW, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, Mips64},
-	{AMOVF, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, Mips64},
-	{AMOVD, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, Mips64},
-	{AMOVW, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, Mips64},
-	{AMOVF, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0},
-	{AMOVD, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0},
-	{AMOVW, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, Mips64},
-	{AMOVF, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0},
-	{AMOVD, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0},
-	{AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, Mips32},
-	{AMOVF, C_ADDR, C_NONE, C_FREG, 51, 12, 0, Mips64},
-	{AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, Mips32},
-	{AMOVD, C_ADDR, C_NONE, C_FREG, 51, 12, 0, Mips64},
-
-	{AMOVW, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, Mips64},
-	{AMOVD, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, Mips64},
-	{AMOVW, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0},
-	{AMOVD, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0},
-	{AMOVW, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0},
-	{AMOVD, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0},
-
-	{AMOVW, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, Mips64},
-	{AMOVD, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, Mips64},
-	{AMOVW, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0},
-	{AMOVD, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0},
-	{AMOVW, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, Mips64},
-	{AMOVF, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0},
-	{AMOVD, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0},
-	{AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, Mips32},
-	{AMOVF, C_FREG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-	{AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, Mips32},
-	{AMOVD, C_FREG, C_NONE, C_ADDR, 50, 12, 0, Mips64},
-
-	{AMOVW, C_REG, C_NONE, C_FREG, 30, 4, 0, 0},
-	{AMOVW, C_FREG, C_NONE, C_REG, 31, 4, 0, 0},
-	{AMOVV, C_REG, C_NONE, C_FREG, 47, 4, 0, Mips64},
-	{AMOVV, C_FREG, C_NONE, C_REG, 48, 4, 0, Mips64},
-
-	{AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, Mips64},
-	{AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, Mips64},
-
-	{AMOVW, C_REG, C_NONE, C_MREG, 37, 4, 0, 0},
-	{AMOVV, C_REG, C_NONE, C_MREG, 37, 4, 0, Mips64},
-	{AMOVW, C_MREG, C_NONE, C_REG, 38, 4, 0, 0},
-	{AMOVV, C_MREG, C_NONE, C_REG, 38, 4, 0, Mips64},
-
-	{AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0},
-
-	{AMOVW, C_REG, C_NONE, C_FCREG, 41, 8, 0, 0},
-	{AMOVV, C_REG, C_NONE, C_FCREG, 41, 8, 0, Mips64},
-	{AMOVW, C_FCREG, C_NONE, C_REG, 42, 4, 0, 0},
-	{AMOVV, C_FCREG, C_NONE, C_REG, 42, 4, 0, Mips64},
-
-	{ATEQ, C_SCON, C_REG, C_REG, 15, 4, 0, 0},
-	{ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0},
-	{ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0},
-
-	{ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, Mips64}, /* really CACHE instruction */
-	{ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, Mips64},
-	{ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, Mips64},
-	{ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0},
-
-	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0},
-	{obj.AUSEFIELD, C_ADDR, C_NONE, C_NONE, 0, 0, 0, 0},
-	{obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0},
-	{obj.AFUNCDATA, C_SCON, C_NONE, C_ADDR, 0, 0, 0, 0},
-	{obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
-	{obj.ADUFFZERO, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0}, // same as AJMP
-	{obj.ADUFFCOPY, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0}, // same as AJMP
-
-	{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0},
-}
-
-var oprange [ALAST & obj.AMask][]Optab
-
-var xcmp [C_NCLASS][C_NCLASS]bool
-
-func span0(ctxt *obj.Link, cursym *obj.LSym) {
-	p := cursym.Text
-	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
-		return
-	}
-	ctxt.Cursym = cursym
-	ctxt.Autosize = int32(p.To.Offset + ctxt.FixedFrameSize())
-
-	if oprange[AOR&obj.AMask] == nil {
-		buildop(ctxt)
-	}
-
-	c := int64(0)
-	p.Pc = c
-
-	var m int
-	var o *Optab
-	for p = p.Link; p != nil; p = p.Link {
-		ctxt.Curp = p
-		p.Pc = c
-		o = oplook(ctxt, p)
-		m = int(o.size)
-		if m == 0 {
-			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
-				ctxt.Diag("zero-width instruction\n%v", p)
-			}
-			continue
-		}
-
-		c += int64(m)
-	}
-
-	cursym.Size = c
-
-	/*
-	 * if any procedure is large enough to
-	 * generate a large SBRA branch, then
-	 * generate extra passes putting branches
-	 * around jmps to fix. this is rare.
-	 */
-	bflag := 1
-
-	var otxt int64
-	var q *obj.Prog
-	for bflag != 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f span1\n", obj.Cputime())
-		}
-		bflag = 0
-		c = 0
-		for p = cursym.Text.Link; p != nil; p = p.Link {
-			p.Pc = c
-			o = oplook(ctxt, p)
-
-			// very large conditional branches
-			if o.type_ == 6 && p.Pcond != nil {
-				otxt = p.Pcond.Pc - c
-				if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 {
-					q = ctxt.NewProg()
-					q.Link = p.Link
-					p.Link = q
-					q.As = AJMP
-					q.Lineno = p.Lineno
-					q.To.Type = obj.TYPE_BRANCH
-					q.Pcond = p.Pcond
-					p.Pcond = q
-					q = ctxt.NewProg()
-					q.Link = p.Link
-					p.Link = q
-					q.As = AJMP
-					q.Lineno = p.Lineno
-					q.To.Type = obj.TYPE_BRANCH
-					q.Pcond = q.Link.Link
-
-					addnop(ctxt, p.Link)
-					addnop(ctxt, p)
-					bflag = 1
-				}
-			}
-
-			m = int(o.size)
-			if m == 0 {
-				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
-					ctxt.Diag("zero-width instruction\n%v", p)
-				}
-				continue
-			}
-
-			c += int64(m)
-		}
-
-		cursym.Size = c
-	}
-	if ctxt.Mode&Mips64 != 0 {
-		c += -c & (mips64FuncAlign - 1)
-	}
-	cursym.Size = c
-
-	/*
-	 * lay out the code, emitting code and data relocations.
-	 */
-
-	cursym.Grow(cursym.Size)
-
-	bp := cursym.P
-	var i int32
-	var out [4]uint32
-	for p := cursym.Text.Link; p != nil; p = p.Link {
-		ctxt.Pc = p.Pc
-		ctxt.Curp = p
-		o = oplook(ctxt, p)
-		if int(o.size) > 4*len(out) {
-			log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p)
-		}
-		asmout(ctxt, p, o, out[:])
-		for i = 0; i < int32(o.size/4); i++ {
-			ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
-			bp = bp[4:]
-		}
-	}
-}
-
-func isint32(v int64) bool {
-	return int64(int32(v)) == v
-}
-
-func isuint32(v uint64) bool {
-	return uint64(uint32(v)) == v
-}
-
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
-	switch a.Type {
-	case obj.TYPE_NONE:
-		return C_NONE
-
-	case obj.TYPE_REG:
-		if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
-			return C_REG
-		}
-		if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
-			return C_FREG
-		}
-		if REG_M0 <= a.Reg && a.Reg <= REG_M31 {
-			return C_MREG
-		}
-		if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 {
-			return C_FCREG
-		}
-		if a.Reg == REG_LO {
-			return C_LO
-		}
-		if a.Reg == REG_HI {
-			return C_HI
-		}
-		return C_GOK
-
-	case obj.TYPE_MEM:
-		switch a.Name {
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			if a.Sym == nil {
-				break
-			}
-			ctxt.Instoffset = a.Offset
-			if a.Sym != nil { // use relocation
-				if a.Sym.Type == obj.STLSBSS {
-					return C_TLS
-				}
-				return C_ADDR
-			}
-			return C_LEXT
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SAUTO
-			}
-			return C_LAUTO
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SAUTO
-			}
-			return C_LAUTO
-
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if ctxt.Instoffset == 0 {
-				return C_ZOREG
-			}
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SOREG
-			}
-			return C_LOREG
-		}
-
-		return C_GOK
-
-	case obj.TYPE_TEXTSIZE:
-		return C_TEXTSIZE
-
-	case obj.TYPE_CONST,
-		obj.TYPE_ADDR:
-		switch a.Name {
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if a.Reg != 0 {
-				if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
-					return C_SACON
-				}
-				if isint32(ctxt.Instoffset) {
-					return C_LACON
-				}
-				return C_DACON
-			}
-
-			goto consize
-
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			s := a.Sym
-			if s == nil {
-				break
-			}
-			if s.Type == obj.SCONST {
-				ctxt.Instoffset = a.Offset
-				goto consize
-			}
-
-			ctxt.Instoffset = a.Offset
-			if s.Type == obj.STLSBSS {
-				return C_STCON // address of TLS variable
-			}
-			return C_LECON
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SACON
-			}
-			return C_LACON
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SACON
-			}
-			return C_LACON
-		}
-
-		return C_GOK
-
-	consize:
-		if ctxt.Instoffset >= 0 {
-			if ctxt.Instoffset == 0 {
-				return C_ZCON
-			}
-			if ctxt.Instoffset <= 0x7fff {
-				return C_SCON
-			}
-			if ctxt.Instoffset <= 0xffff {
-				return C_ANDCON
-			}
-			if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
-				return C_UCON
-			}
-			if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
-				return C_LCON
-			}
-			return C_LCON // C_DCON
-		}
-
-		if ctxt.Instoffset >= -0x8000 {
-			return C_ADDCON
-		}
-		if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
-			return C_UCON
-		}
-		if isint32(ctxt.Instoffset) {
-			return C_LCON
-		}
-		return C_LCON // C_DCON
-
-	case obj.TYPE_BRANCH:
-		return C_SBRA
-	}
-
-	return C_GOK
-}
-
-func prasm(p *obj.Prog) {
-	fmt.Printf("%v\n", p)
-}
-
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
-	if oprange[AOR&obj.AMask] == nil {
-		buildop(ctxt)
-	}
-
-	a1 := int(p.Optab)
-	if a1 != 0 {
-		return &optab[a1-1]
-	}
-	a1 = int(p.From.Class)
-	if a1 == 0 {
-		a1 = aclass(ctxt, &p.From) + 1
-		p.From.Class = int8(a1)
-	}
-
-	a1--
-	a3 := int(p.To.Class)
-	if a3 == 0 {
-		a3 = aclass(ctxt, &p.To) + 1
-		p.To.Class = int8(a3)
-	}
-
-	a3--
-	a2 := C_NONE
-	if p.Reg != 0 {
-		a2 = C_REG
-	}
-
-	//print("oplook %P %d %d %d\n", p, a1, a2, a3);
-
-	ops := oprange[p.As&obj.AMask]
-	c1 := &xcmp[a1]
-	c3 := &xcmp[a3]
-	for i := range ops {
-		op := &ops[i]
-		if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (ctxt.Mode&op.mode == op.mode) {
-			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
-			return op
-		}
-	}
-
-	ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3))
-	prasm(p)
-	if ops == nil {
-		ops = optab
-	}
-	return &ops[0]
-}
-
-func cmp(a int, b int) bool {
-	if a == b {
-		return true
-	}
-	switch a {
-	case C_LCON:
-		if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
-			return true
-		}
-
-	case C_ADD0CON:
-		if b == C_ADDCON {
-			return true
-		}
-		fallthrough
-
-	case C_ADDCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_AND0CON:
-		if b == C_ANDCON {
-			return true
-		}
-		fallthrough
-
-	case C_ANDCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_UCON:
-		if b == C_ZCON {
-			return true
-		}
-
-	case C_SCON:
-		if b == C_ZCON {
-			return true
-		}
-
-	case C_LACON:
-		if b == C_SACON {
-			return true
-		}
-
-	case C_LBRA:
-		if b == C_SBRA {
-			return true
-		}
-
-	case C_LEXT:
-		if b == C_SEXT {
-			return true
-		}
-
-	case C_LAUTO:
-		if b == C_SAUTO {
-			return true
-		}
-
-	case C_REG:
-		if b == C_ZCON {
-			return r0iszero != 0 /*TypeKind(100016)*/
-		}
-
-	case C_LOREG:
-		if b == C_ZOREG || b == C_SOREG {
-			return true
-		}
-
-	case C_SOREG:
-		if b == C_ZOREG {
-			return true
-		}
-	}
-
-	return false
-}
-
-type ocmp []Optab
-
-func (x ocmp) Len() int {
-	return len(x)
-}
-
-func (x ocmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x ocmp) Less(i, j int) bool {
-	p1 := &x[i]
-	p2 := &x[j]
-	n := int(p1.as) - int(p2.as)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a1) - int(p2.a1)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a2) - int(p2.a2)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a3) - int(p2.a3)
-	if n != 0 {
-		return n < 0
-	}
-	return false
-}
-
-func opset(a, b0 obj.As) {
-	oprange[a&obj.AMask] = oprange[b0]
-}
-
-func buildop(ctxt *obj.Link) {
-	var n int
-
-	for i := 0; i < C_NCLASS; i++ {
-		for n = 0; n < C_NCLASS; n++ {
-			if cmp(n, i) {
-				xcmp[i][n] = true
-			}
-		}
-	}
-	for n = 0; optab[n].as != obj.AXXX; n++ {
-	}
-	sort.Sort(ocmp(optab[:n]))
-	for i := 0; i < n; i++ {
-		r := optab[i].as
-		r0 := r & obj.AMask
-		start := i
-		for optab[i].as == r {
-			i++
-		}
-		oprange[r0] = optab[start:i]
-		i--
-
-		switch r {
-		default:
-			ctxt.Diag("unknown op in build: %v", r)
-			log.Fatalf("bad code")
-
-		case AABSF:
-			opset(AMOVFD, r0)
-			opset(AMOVDF, r0)
-			opset(AMOVWF, r0)
-			opset(AMOVFW, r0)
-			opset(AMOVWD, r0)
-			opset(AMOVDW, r0)
-			opset(ANEGF, r0)
-			opset(ANEGD, r0)
-			opset(AABSD, r0)
-			opset(ATRUNCDW, r0)
-			opset(ATRUNCFW, r0)
-			opset(ASQRTF, r0)
-			opset(ASQRTD, r0)
-
-		case AMOVVF:
-			opset(AMOVVD, r0)
-			opset(AMOVFV, r0)
-			opset(AMOVDV, r0)
-			opset(ATRUNCDV, r0)
-			opset(ATRUNCFV, r0)
-
-		case AADD:
-			opset(ASGT, r0)
-			opset(ASGTU, r0)
-			opset(AADDU, r0)
-
-		case AADDV:
-			opset(AADDVU, r0)
-
-		case AADDF:
-			opset(ADIVF, r0)
-			opset(ADIVD, r0)
-			opset(AMULF, r0)
-			opset(AMULD, r0)
-			opset(ASUBF, r0)
-			opset(ASUBD, r0)
-			opset(AADDD, r0)
-
-		case AAND:
-			opset(AOR, r0)
-			opset(AXOR, r0)
-
-		case ABEQ:
-			opset(ABNE, r0)
-
-		case ABLEZ:
-			opset(ABGEZ, r0)
-			opset(ABGEZAL, r0)
-			opset(ABLTZ, r0)
-			opset(ABLTZAL, r0)
-			opset(ABGTZ, r0)
-
-		case AMOVB:
-			opset(AMOVH, r0)
-
-		case AMOVBU:
-			opset(AMOVHU, r0)
-
-		case AMUL:
-			opset(AREM, r0)
-			opset(AREMU, r0)
-			opset(ADIVU, r0)
-			opset(AMULU, r0)
-			opset(ADIV, r0)
-
-		case AMULV:
-			opset(ADIVV, r0)
-			opset(ADIVVU, r0)
-			opset(AMULVU, r0)
-			opset(AREMV, r0)
-			opset(AREMVU, r0)
-
-		case ASLL:
-			opset(ASRL, r0)
-			opset(ASRA, r0)
-
-		case ASLLV:
-			opset(ASRAV, r0)
-			opset(ASRLV, r0)
-
-		case ASUB:
-			opset(ASUBU, r0)
-			opset(ANOR, r0)
-
-		case ASUBV:
-			opset(ASUBVU, r0)
-
-		case ASYSCALL:
-			opset(ASYNC, r0)
-			opset(ATLBP, r0)
-			opset(ATLBR, r0)
-			opset(ATLBWI, r0)
-			opset(ATLBWR, r0)
-
-		case ACMPEQF:
-			opset(ACMPGTF, r0)
-			opset(ACMPGTD, r0)
-			opset(ACMPGEF, r0)
-			opset(ACMPGED, r0)
-			opset(ACMPEQD, r0)
-
-		case ABFPT:
-			opset(ABFPF, r0)
-
-		case AMOVWL:
-			opset(AMOVWR, r0)
-
-		case AMOVVL:
-			opset(AMOVVR, r0)
-
-		case AMOVW,
-			AMOVD,
-			AMOVF,
-			AMOVV,
-			ABREAK,
-			ARFE,
-			AJAL,
-			AJMP,
-			AMOVWU,
-			ALL,
-			ASC,
-			AWORD,
-			obj.ANOP,
-			obj.ATEXT,
-			obj.AUNDEF,
-			obj.AUSEFIELD,
-			obj.AFUNCDATA,
-			obj.APCDATA,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			break
-
-		case ACMOVN:
-			opset(ACMOVZ, r0)
-
-		case ACMOVT:
-			opset(ACMOVF, r0)
-
-		case ACLO:
-			opset(ACLZ, r0)
-
-		case ATEQ:
-			opset(ATNE, r0)
-		}
-	}
-}
-
-func OP(x uint32, y uint32) uint32 {
-	return x<<3 | y<<0
-}
-
-func SP(x uint32, y uint32) uint32 {
-	return x<<29 | y<<26
-}
-
-func BCOND(x uint32, y uint32) uint32 {
-	return x<<19 | y<<16
-}
-
-func MMU(x uint32, y uint32) uint32 {
-	return SP(2, 0) | 16<<21 | x<<3 | y<<0
-}
-
-func FPF(x uint32, y uint32) uint32 {
-	return SP(2, 1) | 16<<21 | x<<3 | y<<0
-}
-
-func FPD(x uint32, y uint32) uint32 {
-	return SP(2, 1) | 17<<21 | x<<3 | y<<0
-}
-
-func FPW(x uint32, y uint32) uint32 {
-	return SP(2, 1) | 20<<21 | x<<3 | y<<0
-}
-
-func FPV(x uint32, y uint32) uint32 {
-	return SP(2, 1) | 21<<21 | x<<3 | y<<0
-}
-
-func OP_RRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
-	return op | (r1&31)<<16 | (r2&31)<<21 | (r3&31)<<11
-}
-
-func OP_IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 {
-	return op | i&0xFFFF | (r2&31)<<21 | (r3&31)<<16
-}
-
-func OP_SRR(op uint32, s uint32, r2 uint32, r3 uint32) uint32 {
-	return op | (s&31)<<6 | (r2&31)<<16 | (r3&31)<<11
-}
-
-func OP_FRRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 {
-	return op | (r1&31)<<16 | (r2&31)<<11 | (r3&31)<<6
-}
-
-func OP_JMP(op uint32, i uint32) uint32 {
-	return op | i&0x3FFFFFF
-}
-
-func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
-	o1 := uint32(0)
-	o2 := uint32(0)
-	o3 := uint32(0)
-	o4 := uint32(0)
-
-	add := AADDU
-
-	if ctxt.Mode&Mips64 != 0 {
-		add = AADDVU
-	}
-	switch o.type_ {
-	default:
-		ctxt.Diag("unknown type %d %v", o.type_)
-		prasm(p)
-
-	case 0: /* pseudo ops */
-		break
-
-	case 1: /* mov r1,r2 ==> OR r1,r0,r2 */
-		a := AOR
-		if p.As == AMOVW && ctxt.Mode&Mips64 != 0 {
-			a = AADDU // sign-extended to high 32 bits
-		}
-		o1 = OP_RRR(oprrr(ctxt, a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
-
-	case 2: /* add/sub r1,[r2],r3 */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
-
-	case 3: /* mov $soreg, r ==> or/add $i,o,r */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		a := add
-		if o.a1 == C_ANDCON {
-			a = AOR
-		}
-
-		o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg))
-
-	case 4: /* add $scon,[r1],r2 */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-
-		o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
-
-	case 5: /* syscall */
-		o1 = oprrr(ctxt, p.As)
-
-	case 6: /* beq r1,[r2],sbra */
-		v := int32(0)
-		if p.Pcond == nil {
-			v = int32(-4) >> 2
-		} else {
-			v = int32(p.Pcond.Pc-p.Pc-4) >> 2
-		}
-		if (v<<16)>>16 != v {
-			ctxt.Diag("short branch too far\n%v", p)
-		}
-		o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
-		// for ABFPT and ABFPF only: always fill delay slot with 0
-		// see comments in func preprocess for details.
-		o2 = 0
-
-	case 7: /* mov r, soreg ==> sw o(r) */
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		v := regoff(ctxt, &p.To)
-		o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.From.Reg))
-
-	case 8: /* mov soreg, r ==> lw o(r) */
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		v := regoff(ctxt, &p.From)
-		o1 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(r), uint32(p.To.Reg))
-
-	case 9: /* sll r1,[r2],r3 */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = OP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
-
-	case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
-		v := regoff(ctxt, &p.From)
-		a := AOR
-		if v < 0 {
-			a = AADDU
-		}
-		o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
-
-	case 11: /* jmp lbra */
-		v := int32(0)
-		if aclass(ctxt, &p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP {
-			// use PC-relative branch for short branches
-			// BEQ	R0, R0, sbra
-			if p.Pcond == nil {
-				v = int32(-4) >> 2
-			} else {
-				v = int32(p.Pcond.Pc-p.Pc-4) >> 2
-			}
-			if (v<<16)>>16 == v {
-				o1 = OP_IRR(opirr(ctxt, ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO))
-				break
-			}
-		}
-		if p.Pcond == nil {
-			v = int32(p.Pc) >> 2
-		} else {
-			v = int32(p.Pcond.Pc) >> 2
-		}
-		o1 = OP_JMP(opirr(ctxt, p.As), uint32(v))
-		if p.To.Sym == nil {
-			p.To.Sym = ctxt.Cursym.Text.From.Sym
-			p.To.Offset = p.Pcond.Pc
-		}
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.To.Sym
-		rel.Add = p.To.Offset
-		if p.As == AJAL {
-			rel.Type = obj.R_CALLMIPS
-		} else {
-			rel.Type = obj.R_JMPMIPS
-		}
-
-	case 12: /* movbs r,r */
-		v := 16
-		if p.As == AMOVB {
-			v = 24
-		}
-		o1 = OP_SRR(opirr(ctxt, ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg))
-		o2 = OP_SRR(opirr(ctxt, ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
-
-	case 13: /* movbu r,r */
-		if p.As == AMOVBU {
-			o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg))
-		} else {
-			o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg))
-		}
-
-	case 14: /* movwu r,r */
-		o1 = OP_SRR(opirr(ctxt, -ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
-		o2 = OP_SRR(opirr(ctxt, -ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
-
-	case 15: /* teq $c r,r */
-		v := regoff(ctxt, &p.From)
-		r := int(p.Reg)
-		if r == 0 {
-			r = REGZERO
-		}
-		/* only use 10 bits of trap code */
-		o1 = OP_IRR(opirr(ctxt, p.As), (uint32(v)&0x3FF)<<6, uint32(p.Reg), uint32(p.To.Reg))
-
-	case 16: /* sll $c,[r1],r2 */
-		v := regoff(ctxt, &p.From)
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-
-		/* OP_SRR will use only the low 5 bits of the shift value */
-		if v >= 32 && vshift(p.As) {
-			o1 = OP_SRR(opirr(ctxt, -p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
-		} else {
-			o1 = OP_SRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
-		}
-
-	case 17:
-		o1 = OP_RRR(oprrr(ctxt, p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg))
-
-	case 18: /* jmp [r1],0(r2) */
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = OP_RRR(oprrr(ctxt, p.As), uint32(0), uint32(p.To.Reg), uint32(r))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 0
-		rel.Type = obj.R_CALLIND
-
-	case 19: /* mov $lcon,r ==> lu+or */
-		v := regoff(ctxt, &p.From)
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
-		o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
-
-	case 20: /* mov lo/hi,r */
-		a := OP(2, 0) /* mfhi */
-		if p.From.Reg == REG_LO {
-			a = OP(2, 2) /* mflo */
-		}
-		o1 = OP_RRR(a, uint32(REGZERO), uint32(REGZERO), uint32(p.To.Reg))
-
-	case 21: /* mov r,lo/hi */
-		a := OP(2, 1) /* mthi */
-		if p.To.Reg == REG_LO {
-			a = OP(2, 3) /* mtlo */
-		}
-		o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO))
-
-	case 22: /* mul r1,r2 [r3]*/
-		if p.To.Reg != 0 {
-			r := int(p.Reg)
-			if r == 0 {
-				r = int(p.To.Reg)
-			}
-			a := SP(3, 4) | 2 /* mul */
-			o1 = OP_RRR(a, uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
-		} else {
-			o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
-		}
-
-	case 23: /* add $lcon,r1,r2 ==> lu+or+add */
-		v := regoff(ctxt, &p.From)
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o3 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
-
-	case 24: /* mov $ucon,r ==> lu r */
-		v := regoff(ctxt, &p.From)
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
-
-	case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */
-		v := regoff(ctxt, &p.From)
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
-
-	case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
-		v := regoff(ctxt, &p.From)
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o3 = OP_RRR(oprrr(ctxt, add), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
-
-	case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */
-		v := regoff(ctxt, &p.From)
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		a := -AMOVF
-		if p.As == AMOVD {
-			a = -AMOVD
-		}
-		switch o.size {
-		case 12:
-			o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-			o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-			o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
-
-		case 4:
-			o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg))
-		}
-
-	case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */
-		v := regoff(ctxt, &p.To)
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		a := AMOVF
-		if p.As == AMOVD {
-			a = AMOVD
-		}
-		switch o.size {
-		case 12:
-			o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-			o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-			o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
-
-		case 4:
-			o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.From.Reg))
-		}
-
-	case 30: /* movw r,fr */
-		a := SP(2, 1) | (4 << 21) /* mtc1 */
-		o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
-
-	case 31: /* movw fr,r */
-		a := SP(2, 1) | (0 << 21) /* mtc1 */
-		o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
-
-	case 32: /* fadd fr1,[fr2],fr3 */
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
-
-	case 33: /* fabs fr1, fr3 */
-		o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
-
-	case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
-		v := regoff(ctxt, &p.From)
-		a := AADDU
-		if o.a1 == C_ANDCON {
-			a = AOR
-		}
-		o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP))
-		o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */
-
-	case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */
-		v := regoff(ctxt, &p.To)
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-		o3 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg))
-
-	case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */
-		v := regoff(ctxt, &p.From)
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP))
-		o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP))
-		o3 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg))
-
-	case 37: /* movw r,mr */
-		a := SP(2, 0) | (4 << 21) /* mtc0 */
-		if p.As == AMOVV {
-			a = SP(2, 0) | (5 << 21) /* dmtc0 */
-		}
-		o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
-
-	case 38: /* movw mr,r */
-		a := SP(2, 0) | (0 << 21) /* mfc0 */
-		if p.As == AMOVV {
-			a = SP(2, 0) | (1 << 21) /* dmfc0 */
-		}
-		o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
-
-	case 40: /* word */
-		o1 = uint32(regoff(ctxt, &p.From))
-
-	case 41: /* movw f,fcr */
-		o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(REGZERO), uint32(0), uint32(p.To.Reg))    /* mfcc1 */
-		o2 = OP_RRR(SP(2, 1)|(6<<21), uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) /* mtcc1 */
-
-	case 42: /* movw fcr,r */
-		o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) /* mfcc1 */
-
-	case 47: /* movv r,fr */
-		a := SP(2, 1) | (5 << 21) /* dmtc1 */
-		o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg))
-
-	case 48: /* movv fr,r */
-		a := SP(2, 1) | (1 << 21) /* dmtc1 */
-		o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
-
-	case 49: /* undef */
-		o1 = 52 /* trap -- teq r0, r0 */
-
-	/* relocation operations */
-	case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.To.Sym
-		rel.Add = p.To.Offset
-		rel.Type = obj.R_ADDRMIPSU
-		o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
-		rel2 := obj.Addrel(ctxt.Cursym)
-		rel2.Off = int32(ctxt.Pc + 4)
-		rel2.Siz = 4
-		rel2.Sym = p.To.Sym
-		rel2.Add = p.To.Offset
-		rel2.Type = obj.R_ADDRMIPS
-
-		if o.size == 12 {
-			o3 = o2
-			o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
-			rel2.Off += 4
-		}
-
-	case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Add = p.From.Offset
-		rel.Type = obj.R_ADDRMIPSU
-		o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
-		rel2 := obj.Addrel(ctxt.Cursym)
-		rel2.Off = int32(ctxt.Pc + 4)
-		rel2.Siz = 4
-		rel2.Sym = p.From.Sym
-		rel2.Add = p.From.Offset
-		rel2.Type = obj.R_ADDRMIPS
-
-		if o.size == 12 {
-			o3 = o2
-			o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP))
-			rel2.Off += 4
-		}
-
-	case 52: /* mov $lext, r ==> lu + add REGSB, r + add */
-		o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Add = p.From.Offset
-		rel.Type = obj.R_ADDRMIPSU
-		o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
-		rel2 := obj.Addrel(ctxt.Cursym)
-		rel2.Off = int32(ctxt.Pc + 4)
-		rel2.Siz = 4
-		rel2.Sym = p.From.Sym
-		rel2.Add = p.From.Offset
-		rel2.Type = obj.R_ADDRMIPS
-
-		if o.size == 12 {
-			o3 = o2
-			o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg))
-			rel2.Off += 4
-		}
-
-	case 53: /* mov r, tlsvar ==> rdhwr + sw o(r3) */
-		// clobbers R3 !
-		// load thread pointer with RDHWR, R3 is used for fast kernel emulation on Linux
-		o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
-		o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + 4)
-		rel.Siz = 4
-		rel.Sym = p.To.Sym
-		rel.Add = p.To.Offset
-		rel.Type = obj.R_ADDRMIPSTLS
-
-	case 54: /* mov tlsvar, r ==> rdhwr + lw o(r3) */
-		// clobbers R3 !
-		o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
-		o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + 4)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Add = p.From.Offset
-		rel.Type = obj.R_ADDRMIPSTLS
-
-	case 55: /* mov $tlsvar, r ==> rdhwr + add */
-		// clobbers R3 !
-		o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3
-		o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(REG_R3), uint32(p.To.Reg))
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + 4)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Add = p.From.Offset
-		rel.Type = obj.R_ADDRMIPSTLS
-	}
-
-	out[0] = o1
-	out[1] = o2
-	out[2] = o3
-	out[3] = o4
-	return
-}
-
-func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
-	ctxt.Instoffset = 0
-	aclass(ctxt, a)
-	return ctxt.Instoffset
-}
-
-func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
-	return int32(vregoff(ctxt, a))
-}
-
-func oprrr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AADD:
-		return OP(4, 0)
-	case AADDU:
-		return OP(4, 1)
-	case ASGT:
-		return OP(5, 2)
-	case ASGTU:
-		return OP(5, 3)
-	case AAND:
-		return OP(4, 4)
-	case AOR:
-		return OP(4, 5)
-	case AXOR:
-		return OP(4, 6)
-	case ASUB:
-		return OP(4, 2)
-	case ASUBU:
-		return OP(4, 3)
-	case ANOR:
-		return OP(4, 7)
-	case ASLL:
-		return OP(0, 4)
-	case ASRL:
-		return OP(0, 6)
-	case ASRA:
-		return OP(0, 7)
-	case ASLLV:
-		return OP(2, 4)
-	case ASRLV:
-		return OP(2, 6)
-	case ASRAV:
-		return OP(2, 7)
-	case AADDV:
-		return OP(5, 4)
-	case AADDVU:
-		return OP(5, 5)
-	case ASUBV:
-		return OP(5, 6)
-	case ASUBVU:
-		return OP(5, 7)
-	case AREM,
-		ADIV:
-		return OP(3, 2)
-	case AREMU,
-		ADIVU:
-		return OP(3, 3)
-	case AMUL:
-		return OP(3, 0)
-	case AMULU:
-		return OP(3, 1)
-	case AREMV,
-		ADIVV:
-		return OP(3, 6)
-	case AREMVU,
-		ADIVVU:
-		return OP(3, 7)
-	case AMULV:
-		return OP(3, 4)
-	case AMULVU:
-		return OP(3, 5)
-
-	case AJMP:
-		return OP(1, 0)
-	case AJAL:
-		return OP(1, 1)
-
-	case ABREAK:
-		return OP(1, 5)
-	case ASYSCALL:
-		return OP(1, 4)
-	case ATLBP:
-		return MMU(1, 0)
-	case ATLBR:
-		return MMU(0, 1)
-	case ATLBWI:
-		return MMU(0, 2)
-	case ATLBWR:
-		return MMU(0, 6)
-	case ARFE:
-		return MMU(2, 0)
-
-	case ADIVF:
-		return FPF(0, 3)
-	case ADIVD:
-		return FPD(0, 3)
-	case AMULF:
-		return FPF(0, 2)
-	case AMULD:
-		return FPD(0, 2)
-	case ASUBF:
-		return FPF(0, 1)
-	case ASUBD:
-		return FPD(0, 1)
-	case AADDF:
-		return FPF(0, 0)
-	case AADDD:
-		return FPD(0, 0)
-	case ATRUNCFV:
-		return FPF(1, 1)
-	case ATRUNCDV:
-		return FPD(1, 1)
-	case ATRUNCFW:
-		return FPF(1, 5)
-	case ATRUNCDW:
-		return FPD(1, 5)
-	case AMOVFV:
-		return FPF(4, 5)
-	case AMOVDV:
-		return FPD(4, 5)
-	case AMOVVF:
-		return FPV(4, 0)
-	case AMOVVD:
-		return FPV(4, 1)
-	case AMOVFW:
-		return FPF(4, 4)
-	case AMOVDW:
-		return FPD(4, 4)
-	case AMOVWF:
-		return FPW(4, 0)
-	case AMOVDF:
-		return FPD(4, 0)
-	case AMOVWD:
-		return FPW(4, 1)
-	case AMOVFD:
-		return FPF(4, 1)
-	case AABSF:
-		return FPF(0, 5)
-	case AABSD:
-		return FPD(0, 5)
-	case AMOVF:
-		return FPF(0, 6)
-	case AMOVD:
-		return FPD(0, 6)
-	case ANEGF:
-		return FPF(0, 7)
-	case ANEGD:
-		return FPD(0, 7)
-	case ACMPEQF:
-		return FPF(6, 2)
-	case ACMPEQD:
-		return FPD(6, 2)
-	case ACMPGTF:
-		return FPF(7, 4)
-	case ACMPGTD:
-		return FPD(7, 4)
-	case ACMPGEF:
-		return FPF(7, 6)
-	case ACMPGED:
-		return FPD(7, 6)
-
-	case ASQRTF:
-		return FPF(0, 4)
-	case ASQRTD:
-		return FPD(0, 4)
-
-	case ASYNC:
-		return OP(1, 7)
-
-	case ACMOVN:
-		return OP(1, 3)
-	case ACMOVZ:
-		return OP(1, 2)
-	case ACMOVT:
-		return OP(0, 1) | (1 << 16)
-	case ACMOVF:
-		return OP(0, 1) | (0 << 16)
-	case ACLO:
-		return SP(3, 4) | OP(4, 1)
-	case ACLZ:
-		return SP(3, 4) | OP(4, 0)
-	}
-
-	if a < 0 {
-		ctxt.Diag("bad rrr opcode -%v", -a)
-	} else {
-		ctxt.Diag("bad rrr opcode %v", a)
-	}
-	return 0
-}
-
-func opirr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AADD:
-		return SP(1, 0)
-	case AADDU:
-		return SP(1, 1)
-	case ASGT:
-		return SP(1, 2)
-	case ASGTU:
-		return SP(1, 3)
-	case AAND:
-		return SP(1, 4)
-	case AOR:
-		return SP(1, 5)
-	case AXOR:
-		return SP(1, 6)
-	case ALUI:
-		return SP(1, 7)
-	case ASLL:
-		return OP(0, 0)
-	case ASRL:
-		return OP(0, 2)
-	case ASRA:
-		return OP(0, 3)
-	case AADDV:
-		return SP(3, 0)
-	case AADDVU:
-		return SP(3, 1)
-
-	case AJMP:
-		return SP(0, 2)
-	case AJAL,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
-		return SP(0, 3)
-	case ABEQ:
-		return SP(0, 4)
-	case -ABEQ:
-		return SP(2, 4) /* likely */
-	case ABNE:
-		return SP(0, 5)
-	case -ABNE:
-		return SP(2, 5) /* likely */
-	case ABGEZ:
-		return SP(0, 1) | BCOND(0, 1)
-	case -ABGEZ:
-		return SP(0, 1) | BCOND(0, 3) /* likely */
-	case ABGEZAL:
-		return SP(0, 1) | BCOND(2, 1)
-	case -ABGEZAL:
-		return SP(0, 1) | BCOND(2, 3) /* likely */
-	case ABGTZ:
-		return SP(0, 7)
-	case -ABGTZ:
-		return SP(2, 7) /* likely */
-	case ABLEZ:
-		return SP(0, 6)
-	case -ABLEZ:
-		return SP(2, 6) /* likely */
-	case ABLTZ:
-		return SP(0, 1) | BCOND(0, 0)
-	case -ABLTZ:
-		return SP(0, 1) | BCOND(0, 2) /* likely */
-	case ABLTZAL:
-		return SP(0, 1) | BCOND(2, 0)
-	case -ABLTZAL:
-		return SP(0, 1) | BCOND(2, 2) /* likely */
-	case ABFPT:
-		return SP(2, 1) | (257 << 16)
-	case -ABFPT:
-		return SP(2, 1) | (259 << 16) /* likely */
-	case ABFPF:
-		return SP(2, 1) | (256 << 16)
-	case -ABFPF:
-		return SP(2, 1) | (258 << 16) /* likely */
-
-	case AMOVB,
-		AMOVBU:
-		return SP(5, 0)
-	case AMOVH,
-		AMOVHU:
-		return SP(5, 1)
-	case AMOVW,
-		AMOVWU:
-		return SP(5, 3)
-	case AMOVV:
-		return SP(7, 7)
-	case AMOVF:
-		return SP(7, 1)
-	case AMOVD:
-		return SP(7, 5)
-	case AMOVWL:
-		return SP(5, 2)
-	case AMOVWR:
-		return SP(5, 6)
-	case AMOVVL:
-		return SP(5, 4)
-	case AMOVVR:
-		return SP(5, 5)
-
-	case ABREAK:
-		return SP(5, 7)
-
-	case -AMOVWL:
-		return SP(4, 2)
-	case -AMOVWR:
-		return SP(4, 6)
-	case -AMOVVL:
-		return SP(3, 2)
-	case -AMOVVR:
-		return SP(3, 3)
-	case -AMOVB:
-		return SP(4, 0)
-	case -AMOVBU:
-		return SP(4, 4)
-	case -AMOVH:
-		return SP(4, 1)
-	case -AMOVHU:
-		return SP(4, 5)
-	case -AMOVW:
-		return SP(4, 3)
-	case -AMOVWU:
-		return SP(4, 7)
-	case -AMOVV:
-		return SP(6, 7)
-	case -AMOVF:
-		return SP(6, 1)
-	case -AMOVD:
-		return SP(6, 5)
-
-	case ASLLV:
-		return OP(7, 0)
-	case ASRLV:
-		return OP(7, 2)
-	case ASRAV:
-		return OP(7, 3)
-	case -ASLLV:
-		return OP(7, 4)
-	case -ASRLV:
-		return OP(7, 6)
-	case -ASRAV:
-		return OP(7, 7)
-
-	case ATEQ:
-		return OP(6, 4)
-	case ATNE:
-		return OP(6, 6)
-	case -ALL:
-		return SP(6, 0)
-	case ASC:
-		return SP(7, 0)
-	}
-
-	if a < 0 {
-		ctxt.Diag("bad irr opcode -%v", -a)
-	} else {
-		ctxt.Diag("bad irr opcode %v", a)
-	}
-	return 0
-}
-
-func vshift(a obj.As) bool {
-	switch a {
-	case ASLLV,
-		ASRLV,
-		ASRAV:
-		return true
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/list0.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/list0.go
deleted file mode 100644
index e36a5c2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/list0.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/list0.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/list0.go:1
-// cmd/9l/list.c from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-func init() {
-	obj.RegisterRegister(obj.RBaseMIPS, REG_LAST+1, Rconv)
-	obj.RegisterOpcode(obj.ABaseMIPS, Anames)
-}
-
-func Rconv(r int) string {
-	if r == 0 {
-		return "NONE"
-	}
-	if r == REGG {
-		// Special case.
-		return "g"
-	}
-	if r == REGSB {
-		// Special case.
-		return "RSB"
-	}
-	if REG_R0 <= r && r <= REG_R31 {
-		return fmt.Sprintf("R%d", r-REG_R0)
-	}
-	if REG_F0 <= r && r <= REG_F31 {
-		return fmt.Sprintf("F%d", r-REG_F0)
-	}
-	if REG_M0 <= r && r <= REG_M31 {
-		return fmt.Sprintf("M%d", r-REG_M0)
-	}
-	if REG_FCR0 <= r && r <= REG_FCR31 {
-		return fmt.Sprintf("FCR%d", r-REG_FCR0)
-	}
-	if r == REG_HI {
-		return "HI"
-	}
-	if r == REG_LO {
-		return "LO"
-	}
-
-	return fmt.Sprintf("Rgok(%d)", r-obj.RBaseMIPS)
-}
-
-func DRconv(a int) string {
-	s := "C_??"
-	if a >= C_NONE && a <= C_NCLASS {
-		s = cnames0[a]
-	}
-	var fp string
-	fp += s
-	return fp
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/obj0.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/obj0.go
deleted file mode 100644
index 22e7d71..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/mips/obj0.go
+++ /dev/null
@@ -1,1594 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/obj0.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/mips/obj0.go:1
-// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"encoding/binary"
-	"fmt"
-	"math"
-)
-
-func progedit(ctxt *obj.Link, p *obj.Prog) {
-	// Maintain information about code generation mode.
-	if ctxt.Mode == 0 {
-		switch ctxt.Arch.Family {
-		default:
-			ctxt.Diag("unsupported arch family")
-		case sys.MIPS:
-			ctxt.Mode = Mips32
-		case sys.MIPS64:
-			ctxt.Mode = Mips64
-		}
-	}
-
-	p.From.Class = 0
-	p.To.Class = 0
-
-	// Rewrite JMP/JAL to symbol as TYPE_BRANCH.
-	switch p.As {
-	case AJMP,
-		AJAL,
-		ARET,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
-		if p.To.Sym != nil {
-			p.To.Type = obj.TYPE_BRANCH
-		}
-	}
-
-	// Rewrite float constants to values stored in memory.
-	switch p.As {
-	case AMOVF:
-		if p.From.Type == obj.TYPE_FCONST {
-			f32 := float32(p.From.Val.(float64))
-			i32 := math.Float32bits(f32)
-			if i32 == 0 {
-				p.As = AMOVW
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REGZERO
-				break
-			}
-			literal := fmt.Sprintf("$f32.%08x", i32)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 4
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-	case AMOVD:
-		if p.From.Type == obj.TYPE_FCONST {
-			i64 := math.Float64bits(p.From.Val.(float64))
-			if i64 == 0 && ctxt.Mode&Mips64 != 0 {
-				p.As = AMOVV
-				p.From.Type = obj.TYPE_REG
-				p.From.Reg = REGZERO
-				break
-			}
-			literal := fmt.Sprintf("$f64.%016x", i64)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 8
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-		// Put >32-bit constants in memory and load them
-	case AMOVV:
-		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
-			literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 8
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-	}
-
-	// Rewrite SUB constants into ADD.
-	switch p.As {
-	case ASUB:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADD
-		}
-
-	case ASUBU:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADDU
-		}
-
-	case ASUBV:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADDV
-		}
-
-	case ASUBVU:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADDVU
-		}
-	}
-}
-
-func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
-	// TODO(minux): add morestack short-cuts with small fixed frame-size.
-	ctxt.Cursym = cursym
-
-	// a switch for enabling/disabling instruction scheduling
-	nosched := true
-
-	if cursym.Text == nil || cursym.Text.Link == nil {
-		return
-	}
-
-	p := cursym.Text
-	textstksiz := p.To.Offset
-
-	cursym.Args = p.To.Val.(int32)
-	cursym.Locals = int32(textstksiz)
-
-	/*
-	 * find leaf subroutines
-	 * strip NOPs
-	 * expand RET
-	 * expand BECOME pseudo
-	 */
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f noops\n", obj.Cputime())
-	}
-
-	var q *obj.Prog
-	var q1 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		switch p.As {
-		/* too hard, just leave alone */
-		case obj.ATEXT:
-			q = p
-
-			p.Mark |= LABEL | LEAF | SYNC
-			if p.Link != nil {
-				p.Link.Mark |= LABEL
-			}
-
-		/* too hard, just leave alone */
-		case AMOVW,
-			AMOVV:
-			q = p
-			if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL {
-				p.Mark |= LABEL | SYNC
-				break
-			}
-			if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL {
-				p.Mark |= LABEL | SYNC
-			}
-
-		/* too hard, just leave alone */
-		case ASYSCALL,
-			AWORD,
-			ATLBWR,
-			ATLBWI,
-			ATLBP,
-			ATLBR:
-			q = p
-			p.Mark |= LABEL | SYNC
-
-		case ANOR:
-			q = p
-			if p.To.Type == obj.TYPE_REG {
-				if p.To.Reg == REGZERO {
-					p.Mark |= LABEL | SYNC
-				}
-			}
-
-		case ABGEZAL,
-			ABLTZAL,
-			AJAL,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			cursym.Text.Mark &^= LEAF
-			fallthrough
-
-		case AJMP,
-			ABEQ,
-			ABGEZ,
-			ABGTZ,
-			ABLEZ,
-			ABLTZ,
-			ABNE,
-			ABFPT, ABFPF:
-			if p.As == ABFPT || p.As == ABFPF {
-				// We don't treat ABFPT and ABFPF as branches here,
-				// so that we always fill a nop (0x0) in their
-				// delay slot during assembly.
-				// This is to work around a kernel FPU emulator bug
-				// where it uses the user stack to simulate the
-				// instruction in the delay slot if it's not 0x0,
-				// and somehow that leads to SIGSEGV when the kernel
-				// jumps to the stack.
-				p.Mark |= SYNC
-			} else {
-				p.Mark |= BRANCH
-			}
-			q = p
-			q1 = p.Pcond
-			if q1 != nil {
-				for q1.As == obj.ANOP {
-					q1 = q1.Link
-					p.Pcond = q1
-				}
-
-				if q1.Mark&LEAF == 0 {
-					q1.Mark |= LABEL
-				}
-			}
-			//else {
-			//	p.Mark |= LABEL
-			//}
-			q1 = p.Link
-			if q1 != nil {
-				q1.Mark |= LABEL
-			}
-			continue
-
-		case ARET:
-			q = p
-			if p.Link != nil {
-				p.Link.Mark |= LABEL
-			}
-			continue
-
-		case obj.ANOP:
-			q1 = p.Link
-			q.Link = q1 /* q is non-nop */
-			q1.Mark |= p.Mark
-			continue
-
-		default:
-			q = p
-			continue
-		}
-	}
-
-	var mov, add obj.As
-	if ctxt.Mode&Mips64 != 0 {
-		add = AADDV
-		mov = AMOVV
-	} else {
-		add = AADDU
-		mov = AMOVW
-	}
-
-	autosize := int32(0)
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		o := p.As
-		switch o {
-		case obj.ATEXT:
-			autosize = int32(textstksiz + ctxt.FixedFrameSize())
-			if (p.Mark&LEAF != 0) && autosize <= int32(ctxt.FixedFrameSize()) {
-				autosize = 0
-			} else if autosize&4 != 0 && ctxt.Mode&Mips64 != 0 {
-				autosize += 4
-			}
-
-			p.To.Offset = int64(autosize) - ctxt.FixedFrameSize()
-
-			if p.From3.Offset&obj.NOSPLIT == 0 {
-				p = stacksplit(ctxt, p, autosize) // emit split check
-			}
-
-			q = p
-
-			if autosize != 0 {
-				// Make sure to save link register for non-empty frame, even if
-				// it is a leaf function, so that traceback works.
-				// Store the link register before decrementing SP, so that if a signal comes
-				// during the execution of the function prologue, the traceback
-				// code will not see a half-updated stack frame.
-				q = obj.Appendp(ctxt, q)
-				q.As = mov
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REGLINK
-				q.To.Type = obj.TYPE_MEM
-				q.To.Offset = int64(-autosize)
-				q.To.Reg = REGSP
-
-				q = obj.Appendp(ctxt, q)
-				q.As = add
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(-autosize)
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGSP
-				q.Spadj = +autosize
-			} else if cursym.Text.Mark&LEAF == 0 {
-				if cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
-					if ctxt.Debugvlog != 0 {
-						ctxt.Logf("save suppressed in: %s\n", cursym.Name)
-					}
-
-					cursym.Text.Mark |= LEAF
-				}
-			}
-
-			if cursym.Text.Mark&LEAF != 0 {
-				cursym.Set(obj.AttrLeaf, true)
-				break
-			}
-
-			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
-				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
-				//
-				//	MOV	g_panic(g), R1
-				//	BEQ	R1, end
-				//	MOV	panic_argp(R1), R2
-				//	ADD	$(autosize+FIXED_FRAME), R29, R3
-				//	BNE	R2, R3, end
-				//	ADD	$FIXED_FRAME, R29, R2
-				//	MOV	R2, panic_argp(R1)
-				// end:
-				//	NOP
-				//
-				// The NOP is needed to give the jumps somewhere to land.
-				// It is a liblink NOP, not a MIPS NOP: it encodes to 0 instruction bytes.
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = mov
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REGG
-				q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R1
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABEQ
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R1
-				q.To.Type = obj.TYPE_BRANCH
-				q.Mark |= BRANCH
-				p1 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = mov
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REG_R1
-				q.From.Offset = 0 // Panic.argp
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R2
-
-				q = obj.Appendp(ctxt, q)
-				q.As = add
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(autosize) + ctxt.FixedFrameSize()
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R3
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABNE
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R2
-				q.Reg = REG_R3
-				q.To.Type = obj.TYPE_BRANCH
-				q.Mark |= BRANCH
-				p2 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = add
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = ctxt.FixedFrameSize()
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R2
-
-				q = obj.Appendp(ctxt, q)
-				q.As = mov
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R2
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REG_R1
-				q.To.Offset = 0 // Panic.argp
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = obj.ANOP
-				p1.Pcond = q
-				p2.Pcond = q
-			}
-
-		case ARET:
-			if p.From.Type == obj.TYPE_CONST {
-				ctxt.Diag("using BECOME (%v) is not supported!", p)
-				break
-			}
-
-			retSym := p.To.Sym
-			p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction
-			p.To.Sym = nil
-
-			if cursym.Text.Mark&LEAF != 0 {
-				if autosize == 0 {
-					p.As = AJMP
-					p.From = obj.Addr{}
-					if retSym != nil { // retjmp
-						p.To.Type = obj.TYPE_BRANCH
-						p.To.Name = obj.NAME_EXTERN
-						p.To.Sym = retSym
-					} else {
-						p.To.Type = obj.TYPE_MEM
-						p.To.Reg = REGLINK
-						p.To.Offset = 0
-					}
-					p.Mark |= BRANCH
-					break
-				}
-
-				p.As = add
-				p.From.Type = obj.TYPE_CONST
-				p.From.Offset = int64(autosize)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REGSP
-				p.Spadj = -autosize
-
-				q = ctxt.NewProg()
-				q.As = AJMP
-				q.Lineno = p.Lineno
-				q.To.Type = obj.TYPE_MEM
-				q.To.Offset = 0
-				q.To.Reg = REGLINK
-				q.Mark |= BRANCH
-				q.Spadj = +autosize
-
-				q.Link = p.Link
-				p.Link = q
-				break
-			}
-
-			p.As = mov
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = 0
-			p.From.Reg = REGSP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_R4
-			if retSym != nil { // retjmp from non-leaf, need to restore LINK register
-				p.To.Reg = REGLINK
-			}
-
-			if autosize != 0 {
-				q = ctxt.NewProg()
-				q.As = add
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(autosize)
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGSP
-				q.Spadj = -autosize
-
-				q.Link = p.Link
-				p.Link = q
-			}
-
-			q1 = ctxt.NewProg()
-			q1.As = AJMP
-			q1.Lineno = p.Lineno
-			if retSym != nil { // retjmp
-				q1.To.Type = obj.TYPE_BRANCH
-				q1.To.Name = obj.NAME_EXTERN
-				q1.To.Sym = retSym
-			} else {
-				q1.To.Type = obj.TYPE_MEM
-				q1.To.Offset = 0
-				q1.To.Reg = REG_R4
-			}
-			q1.Mark |= BRANCH
-			q1.Spadj = +autosize
-
-			q1.Link = q.Link
-			q.Link = q1
-
-		case AADD,
-			AADDU,
-			AADDV,
-			AADDVU:
-			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
-				p.Spadj = int32(-p.From.Offset)
-			}
-		}
-	}
-
-	if ctxt.Mode&Mips32 != 0 {
-		// rewrite MOVD into two MOVF in 32-bit mode to avoid unaligned memory access
-		for p = cursym.Text; p != nil; p = p1 {
-			p1 = p.Link
-
-			if p.As != AMOVD {
-				continue
-			}
-			if p.From.Type != obj.TYPE_MEM && p.To.Type != obj.TYPE_MEM {
-				continue
-			}
-
-			p.As = AMOVF
-			q = ctxt.NewProg()
-			*q = *p
-			q.Link = p.Link
-			p.Link = q
-			p1 = q.Link
-
-			var regOff int16
-			if ctxt.Arch.ByteOrder == binary.BigEndian {
-				regOff = 1 // load odd register first
-			}
-			if p.From.Type == obj.TYPE_MEM {
-				reg := REG_F0 + (p.To.Reg-REG_F0)&^1
-				p.To.Reg = reg + regOff
-				q.To.Reg = reg + 1 - regOff
-				q.From.Offset += 4
-			} else if p.To.Type == obj.TYPE_MEM {
-				reg := REG_F0 + (p.From.Reg-REG_F0)&^1
-				p.From.Reg = reg + regOff
-				q.From.Reg = reg + 1 - regOff
-				q.To.Offset += 4
-			}
-		}
-	}
-
-	if nosched {
-		// if we don't do instruction scheduling, simply add
-		// NOP after each branch instruction.
-		for p = cursym.Text; p != nil; p = p.Link {
-			if p.Mark&BRANCH != 0 {
-				addnop(ctxt, p)
-			}
-		}
-		return
-	}
-
-	// instruction scheduling
-	q = nil          // p - 1
-	q1 = cursym.Text // top of block
-	o := 0           // count of instructions
-	for p = cursym.Text; p != nil; p = p1 {
-		p1 = p.Link
-		o++
-		if p.Mark&NOSCHED != 0 {
-			if q1 != p {
-				sched(ctxt, q1, q)
-			}
-			for ; p != nil; p = p.Link {
-				if p.Mark&NOSCHED == 0 {
-					break
-				}
-				q = p
-			}
-			p1 = p
-			q1 = p
-			o = 0
-			continue
-		}
-		if p.Mark&(LABEL|SYNC) != 0 {
-			if q1 != p {
-				sched(ctxt, q1, q)
-			}
-			q1 = p
-			o = 1
-		}
-		if p.Mark&(BRANCH|SYNC) != 0 {
-			sched(ctxt, q1, p)
-			q1 = p1
-			o = 0
-		}
-		if o >= NSCHED {
-			sched(ctxt, q1, p)
-			q1 = p1
-			o = 0
-		}
-		q = p
-	}
-}
-
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
-	// Leaf function with no frame is effectively NOSPLIT.
-	if framesize == 0 {
-		return p
-	}
-
-	var mov, add, sub obj.As
-
-	if ctxt.Mode&Mips64 != 0 {
-		add = AADDV
-		mov = AMOVV
-		sub = ASUBVU
-	} else {
-		add = AADDU
-		mov = AMOVW
-		sub = ASUBU
-	}
-
-	// MOV	g_stackguard(g), R1
-	p = obj.Appendp(ctxt, p)
-
-	p.As = mov
-	p.From.Type = obj.TYPE_MEM
-	p.From.Reg = REGG
-	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-	if ctxt.Cursym.CFunc() {
-		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-	}
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R1
-
-	var q *obj.Prog
-	if framesize <= obj.StackSmall {
-		// small stack: SP < stackguard
-		//	AGTU	SP, stackguard, R1
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ASGTU
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REGSP
-		p.Reg = REG_R1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R1
-	} else if framesize <= obj.StackBig {
-		// large stack: SP-framesize < stackguard-StackSmall
-		//	ADD	$-framesize, SP, R2
-		//	SGTU	R2, stackguard, R1
-		p = obj.Appendp(ctxt, p)
-
-		p.As = add
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-framesize)
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ASGTU
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R2
-		p.Reg = REG_R1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R1
-	} else {
-		// Such a large stack we need to protect against wraparound.
-		// If SP is close to zero:
-		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
-		// The +StackGuard on both sides is required to keep the left side positive:
-		// SP is allowed to be slightly below stackguard. See stack.h.
-		//
-		// Preemption sets stackguard to StackPreempt, a very large value.
-		// That breaks the math above, so we have to check for that explicitly.
-		//	// stackguard is R1
-		//	MOV	$StackPreempt, R2
-		//	BEQ	R1, R2, label-of-call-to-morestack
-		//	ADD	$StackGuard, SP, R2
-		//	SUB	R1, R2
-		//	MOV	$(framesize+(StackGuard-StackSmall)), R1
-		//	SGTU	R2, R1, R1
-		p = obj.Appendp(ctxt, p)
-
-		p.As = mov
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = obj.StackPreempt
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		q = p
-		p.As = ABEQ
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.Reg = REG_R2
-		p.To.Type = obj.TYPE_BRANCH
-		p.Mark |= BRANCH
-
-		p = obj.Appendp(ctxt, p)
-		p.As = add
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = obj.StackGuard
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = sub
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-
-		p = obj.Appendp(ctxt, p)
-		p.As = mov
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R1
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ASGTU
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R2
-		p.Reg = REG_R1
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R1
-	}
-
-	// q1: BNE	R1, done
-	p = obj.Appendp(ctxt, p)
-	q1 := p
-
-	p.As = ABNE
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = REG_R1
-	p.To.Type = obj.TYPE_BRANCH
-	p.Mark |= BRANCH
-
-	// MOV	LINK, R3
-	p = obj.Appendp(ctxt, p)
-
-	p.As = mov
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = REGLINK
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R3
-	if q != nil {
-		q.Pcond = p
-		p.Mark |= LABEL
-	}
-
-	// JAL	runtime.morestack(SB)
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AJAL
-	p.To.Type = obj.TYPE_BRANCH
-	if ctxt.Cursym.CFunc() {
-		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
-	} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
-		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
-	} else {
-		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
-	}
-	p.Mark |= BRANCH
-
-	// JMP	start
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AJMP
-	p.To.Type = obj.TYPE_BRANCH
-	p.Pcond = ctxt.Cursym.Text.Link
-	p.Mark |= BRANCH
-
-	// placeholder for q1's jump target
-	p = obj.Appendp(ctxt, p)
-
-	p.As = obj.ANOP // zero-width place holder
-	q1.Pcond = p
-
-	return p
-}
-
-func addnop(ctxt *obj.Link, p *obj.Prog) {
-	q := ctxt.NewProg()
-	// we want to use the canonical NOP (SLL $0,R0,R0) here;
-	// however, as the assembler will always replace $0
-	// with R0, we have to resort to manually encoding the SLL
-	// instruction as WORD $0.
-	q.As = AWORD
-	q.Lineno = p.Lineno
-	q.From.Type = obj.TYPE_CONST
-	q.From.Name = obj.NAME_NONE
-	q.From.Offset = 0
-
-	q.Link = p.Link
-	p.Link = q
-}
-
-const (
-	E_HILO  = 1 << 0
-	E_FCR   = 1 << 1
-	E_MCR   = 1 << 2
-	E_MEM   = 1 << 3
-	E_MEMSP = 1 << 4 /* uses offset and size */
-	E_MEMSB = 1 << 5 /* uses offset and size */
-	ANYMEM  = E_MEM | E_MEMSP | E_MEMSB
-	//DELAY = LOAD|BRANCH|FCMP
-	DELAY = BRANCH /* only schedule branch */
-)
-
-type Dep struct {
-	ireg uint32
-	freg uint32
-	cc   uint32
-}
-
-type Sch struct {
-	p       obj.Prog
-	set     Dep
-	used    Dep
-	soffset int32
-	size    uint8
-	nop     uint8
-	comp    bool
-}
-
-func sched(ctxt *obj.Link, p0, pe *obj.Prog) {
-	var sch [NSCHED]Sch
-
-	/*
-	 * build side structure
-	 */
-	s := sch[:]
-	for p := p0; ; p = p.Link {
-		s[0].p = *p
-		markregused(ctxt, &s[0])
-		if p == pe {
-			break
-		}
-		s = s[1:]
-	}
-	se := s
-
-	for i := cap(sch) - cap(se); i >= 0; i-- {
-		s = sch[i:]
-		if s[0].p.Mark&DELAY == 0 {
-			continue
-		}
-		if -cap(s) < -cap(se) {
-			if !conflict(&s[0], &s[1]) {
-				continue
-			}
-		}
-
-		var t []Sch
-		var j int
-		for j = cap(sch) - cap(s) - 1; j >= 0; j-- {
-			t = sch[j:]
-			if t[0].comp {
-				if s[0].p.Mark&BRANCH != 0 {
-					goto no2
-				}
-			}
-			if t[0].p.Mark&DELAY != 0 {
-				if -cap(s) >= -cap(se) || conflict(&t[0], &s[1]) {
-					goto no2
-				}
-			}
-			for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] {
-				if depend(ctxt, &u[0], &t[0]) {
-					goto no2
-				}
-			}
-			goto out2
-		no2:
-		}
-
-		if s[0].p.Mark&BRANCH != 0 {
-			s[0].nop = 1
-		}
-		continue
-
-	out2:
-		// t[0] is the instruction being moved to fill the delay
-		stmp := t[0]
-		copy(t[:i-j], t[1:i-j+1])
-		s[0] = stmp
-
-		if t[i-j-1].p.Mark&BRANCH != 0 {
-			// t[i-j] is being put into a branch delay slot
-			// combine its Spadj with the branch instruction
-			t[i-j-1].p.Spadj += t[i-j].p.Spadj
-			t[i-j].p.Spadj = 0
-		}
-
-		i--
-	}
-
-	/*
-	 * put it all back
-	 */
-	var p *obj.Prog
-	var q *obj.Prog
-	for s, p = sch[:], p0; -cap(s) <= -cap(se); s, p = s[1:], q {
-		q = p.Link
-		if q != s[0].p.Link {
-			*p = s[0].p
-			p.Link = q
-		}
-		for s[0].nop != 0 {
-			s[0].nop--
-			addnop(ctxt, p)
-		}
-	}
-}
-
-func markregused(ctxt *obj.Link, s *Sch) {
-	p := &s.p
-	s.comp = compound(ctxt, p)
-	s.nop = 0
-	if s.comp {
-		s.set.ireg |= 1 << (REGTMP - REG_R0)
-		s.used.ireg |= 1 << (REGTMP - REG_R0)
-	}
-
-	ar := 0  /* dest is really reference */
-	ad := 0  /* source/dest is really address */
-	ld := 0  /* opcode is load instruction */
-	sz := 20 /* size of load/store for overlap computation */
-
-	/*
-	 * flags based on opcode
-	 */
-	switch p.As {
-	case obj.ATEXT:
-		ctxt.Autosize = int32(p.To.Offset + 8)
-		ad = 1
-
-	case AJAL:
-		c := p.Reg
-		if c == 0 {
-			c = REGLINK
-		}
-		s.set.ireg |= 1 << uint(c-REG_R0)
-		ar = 1
-		ad = 1
-
-	case ABGEZAL,
-		ABLTZAL:
-		s.set.ireg |= 1 << (REGLINK - REG_R0)
-		fallthrough
-	case ABEQ,
-		ABGEZ,
-		ABGTZ,
-		ABLEZ,
-		ABLTZ,
-		ABNE:
-		ar = 1
-		ad = 1
-
-	case ABFPT,
-		ABFPF:
-		ad = 1
-		s.used.cc |= E_FCR
-
-	case ACMPEQD,
-		ACMPEQF,
-		ACMPGED,
-		ACMPGEF,
-		ACMPGTD,
-		ACMPGTF:
-		ar = 1
-		s.set.cc |= E_FCR
-		p.Mark |= FCMP
-
-	case AJMP:
-		ar = 1
-		ad = 1
-
-	case AMOVB,
-		AMOVBU:
-		sz = 1
-		ld = 1
-
-	case AMOVH,
-		AMOVHU:
-		sz = 2
-		ld = 1
-
-	case AMOVF,
-		AMOVW,
-		AMOVWL,
-		AMOVWR:
-		sz = 4
-		ld = 1
-
-	case AMOVD,
-		AMOVV,
-		AMOVVL,
-		AMOVVR:
-		sz = 8
-		ld = 1
-
-	case ADIV,
-		ADIVU,
-		AMUL,
-		AMULU,
-		AREM,
-		AREMU,
-		ADIVV,
-		ADIVVU,
-		AMULV,
-		AMULVU,
-		AREMV,
-		AREMVU:
-		s.set.cc = E_HILO
-		fallthrough
-	case AADD,
-		AADDU,
-		AADDV,
-		AADDVU,
-		AAND,
-		ANOR,
-		AOR,
-		ASGT,
-		ASGTU,
-		ASLL,
-		ASRA,
-		ASRL,
-		ASLLV,
-		ASRAV,
-		ASRLV,
-		ASUB,
-		ASUBU,
-		ASUBV,
-		ASUBVU,
-		AXOR,
-
-		AADDD,
-		AADDF,
-		AADDW,
-		ASUBD,
-		ASUBF,
-		ASUBW,
-		AMULF,
-		AMULD,
-		AMULW,
-		ADIVF,
-		ADIVD,
-		ADIVW:
-		if p.Reg == 0 {
-			if p.To.Type == obj.TYPE_REG {
-				p.Reg = p.To.Reg
-			}
-			//if(p->reg == NREG)
-			//	print("botch %P\n", p);
-		}
-	}
-
-	/*
-	 * flags based on 'to' field
-	 */
-	c := int(p.To.Class)
-	if c == 0 {
-		c = aclass(ctxt, &p.To) + 1
-		p.To.Class = int8(c)
-	}
-	c--
-	switch c {
-	default:
-		fmt.Printf("unknown class %d %v\n", c, p)
-
-	case C_ZCON,
-		C_SCON,
-		C_ADD0CON,
-		C_AND0CON,
-		C_ADDCON,
-		C_ANDCON,
-		C_UCON,
-		C_LCON,
-		C_NONE,
-		C_SBRA,
-		C_LBRA,
-		C_ADDR,
-		C_TEXTSIZE:
-		break
-
-	case C_HI,
-		C_LO:
-		s.set.cc |= E_HILO
-
-	case C_FCREG:
-		s.set.cc |= E_FCR
-
-	case C_MREG:
-		s.set.cc |= E_MCR
-
-	case C_ZOREG,
-		C_SOREG,
-		C_LOREG:
-		c = int(p.To.Reg)
-		s.used.ireg |= 1 << uint(c-REG_R0)
-		if ad != 0 {
-			break
-		}
-		s.size = uint8(sz)
-		s.soffset = regoff(ctxt, &p.To)
-
-		m := uint32(ANYMEM)
-		if c == REGSB {
-			m = E_MEMSB
-		}
-		if c == REGSP {
-			m = E_MEMSP
-		}
-
-		if ar != 0 {
-			s.used.cc |= m
-		} else {
-			s.set.cc |= m
-		}
-
-	case C_SACON,
-		C_LACON:
-		s.used.ireg |= 1 << (REGSP - REG_R0)
-
-	case C_SECON,
-		C_LECON:
-		s.used.ireg |= 1 << (REGSB - REG_R0)
-
-	case C_REG:
-		if ar != 0 {
-			s.used.ireg |= 1 << uint(p.To.Reg-REG_R0)
-		} else {
-			s.set.ireg |= 1 << uint(p.To.Reg-REG_R0)
-		}
-
-	case C_FREG:
-		if ar != 0 {
-			s.used.freg |= 1 << uint(p.To.Reg-REG_F0)
-		} else {
-			s.set.freg |= 1 << uint(p.To.Reg-REG_F0)
-		}
-		if ld != 0 && p.From.Type == obj.TYPE_REG {
-			p.Mark |= LOAD
-		}
-
-	case C_SAUTO,
-		C_LAUTO:
-		s.used.ireg |= 1 << (REGSP - REG_R0)
-		if ad != 0 {
-			break
-		}
-		s.size = uint8(sz)
-		s.soffset = regoff(ctxt, &p.To)
-
-		if ar != 0 {
-			s.used.cc |= E_MEMSP
-		} else {
-			s.set.cc |= E_MEMSP
-		}
-
-	case C_SEXT,
-		C_LEXT:
-		s.used.ireg |= 1 << (REGSB - REG_R0)
-		if ad != 0 {
-			break
-		}
-		s.size = uint8(sz)
-		s.soffset = regoff(ctxt, &p.To)
-
-		if ar != 0 {
-			s.used.cc |= E_MEMSB
-		} else {
-			s.set.cc |= E_MEMSB
-		}
-	}
-
-	/*
-	 * flags based on 'from' field
-	 */
-	c = int(p.From.Class)
-	if c == 0 {
-		c = aclass(ctxt, &p.From) + 1
-		p.From.Class = int8(c)
-	}
-	c--
-	switch c {
-	default:
-		fmt.Printf("unknown class %d %v\n", c, p)
-
-	case C_ZCON,
-		C_SCON,
-		C_ADD0CON,
-		C_AND0CON,
-		C_ADDCON,
-		C_ANDCON,
-		C_UCON,
-		C_LCON,
-		C_NONE,
-		C_SBRA,
-		C_LBRA,
-		C_ADDR,
-		C_TEXTSIZE:
-		break
-
-	case C_HI,
-		C_LO:
-		s.used.cc |= E_HILO
-
-	case C_FCREG:
-		s.used.cc |= E_FCR
-
-	case C_MREG:
-		s.used.cc |= E_MCR
-
-	case C_ZOREG,
-		C_SOREG,
-		C_LOREG:
-		c = int(p.From.Reg)
-		s.used.ireg |= 1 << uint(c-REG_R0)
-		if ld != 0 {
-			p.Mark |= LOAD
-		}
-		s.size = uint8(sz)
-		s.soffset = regoff(ctxt, &p.From)
-
-		m := uint32(ANYMEM)
-		if c == REGSB {
-			m = E_MEMSB
-		}
-		if c == REGSP {
-			m = E_MEMSP
-		}
-
-		s.used.cc |= m
-
-	case C_SACON,
-		C_LACON:
-		c = int(p.From.Reg)
-		if c == 0 {
-			c = REGSP
-		}
-		s.used.ireg |= 1 << uint(c-REG_R0)
-
-	case C_SECON,
-		C_LECON:
-		s.used.ireg |= 1 << (REGSB - REG_R0)
-
-	case C_REG:
-		s.used.ireg |= 1 << uint(p.From.Reg-REG_R0)
-
-	case C_FREG:
-		s.used.freg |= 1 << uint(p.From.Reg-REG_F0)
-		if ld != 0 && p.To.Type == obj.TYPE_REG {
-			p.Mark |= LOAD
-		}
-
-	case C_SAUTO,
-		C_LAUTO:
-		s.used.ireg |= 1 << (REGSP - REG_R0)
-		if ld != 0 {
-			p.Mark |= LOAD
-		}
-		if ad != 0 {
-			break
-		}
-		s.size = uint8(sz)
-		s.soffset = regoff(ctxt, &p.From)
-
-		s.used.cc |= E_MEMSP
-
-	case C_SEXT:
-	case C_LEXT:
-		s.used.ireg |= 1 << (REGSB - REG_R0)
-		if ld != 0 {
-			p.Mark |= LOAD
-		}
-		if ad != 0 {
-			break
-		}
-		s.size = uint8(sz)
-		s.soffset = regoff(ctxt, &p.From)
-
-		s.used.cc |= E_MEMSB
-	}
-
-	c = int(p.Reg)
-	if c != 0 {
-		if REG_F0 <= c && c <= REG_F31 {
-			s.used.freg |= 1 << uint(c-REG_F0)
-		} else {
-			s.used.ireg |= 1 << uint(c-REG_R0)
-		}
-	}
-	s.set.ireg &^= (1 << (REGZERO - REG_R0)) /* R0 can't be set */
-}
-
-/*
- * test to see if two instructions can be
- * interchanged without changing semantics
- */
-func depend(ctxt *obj.Link, sa, sb *Sch) bool {
-	if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 {
-		return true
-	}
-	if sb.set.ireg&sa.used.ireg != 0 {
-		return true
-	}
-
-	if sa.set.freg&(sb.set.freg|sb.used.freg) != 0 {
-		return true
-	}
-	if sb.set.freg&sa.used.freg != 0 {
-		return true
-	}
-
-	/*
-	 * special case.
-	 * loads from same address cannot pass.
-	 * this is for hardware FIFOs and the like
-	 */
-	if sa.used.cc&sb.used.cc&E_MEM != 0 {
-		if sa.p.Reg == sb.p.Reg {
-			if regoff(ctxt, &sa.p.From) == regoff(ctxt, &sb.p.From) {
-				return true
-			}
-		}
-	}
-
-	x := (sa.set.cc & (sb.set.cc | sb.used.cc)) | (sb.set.cc & sa.used.cc)
-	if x != 0 {
-		/*
-		 * allow SB and SP to pass each other.
-		 * allow SB to pass SB iff doffsets are ok
-		 * anything else conflicts
-		 */
-		if x != E_MEMSP && x != E_MEMSB {
-			return true
-		}
-		x = sa.set.cc | sb.set.cc | sa.used.cc | sb.used.cc
-		if x&E_MEM != 0 {
-			return true
-		}
-		if offoverlap(sa, sb) {
-			return true
-		}
-	}
-
-	return false
-}
-
-func offoverlap(sa, sb *Sch) bool {
-	if sa.soffset < sb.soffset {
-		if sa.soffset+int32(sa.size) > sb.soffset {
-			return true
-		}
-		return false
-	}
-	if sb.soffset+int32(sb.size) > sa.soffset {
-		return true
-	}
-	return false
-}
-
-/*
- * test 2 adjacent instructions
- * and find out whether inserted instructions
- * are needed to prevent stalls.
- */
-func conflict(sa, sb *Sch) bool {
-	if sa.set.ireg&sb.used.ireg != 0 {
-		return true
-	}
-	if sa.set.freg&sb.used.freg != 0 {
-		return true
-	}
-	if sa.set.cc&sb.used.cc != 0 {
-		return true
-	}
-	return false
-}
-
-func compound(ctxt *obj.Link, p *obj.Prog) bool {
-	o := oplook(ctxt, p)
-	if o.size != 4 {
-		return true
-	}
-	if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSB {
-		return true
-	}
-	return false
-}
-
-func follow(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	firstp := ctxt.NewProg()
-	lastp := firstp
-	xfol(ctxt, s.Text, &lastp)
-	lastp.Link = nil
-	s.Text = firstp.Link
-}
-
-func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
-	var q *obj.Prog
-	var r *obj.Prog
-	var i int
-
-loop:
-	if p == nil {
-		return
-	}
-	a := p.As
-	if a == AJMP {
-		q = p.Pcond
-		if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
-			p.Mark |= FOLL
-			(*last).Link = p
-			*last = p
-			p = p.Link
-			xfol(ctxt, p, last)
-			p = q
-			if p != nil && p.Mark&FOLL == 0 {
-				goto loop
-			}
-			return
-		}
-
-		if q != nil {
-			p.Mark |= FOLL
-			p = q
-			if p.Mark&FOLL == 0 {
-				goto loop
-			}
-		}
-	}
-
-	if p.Mark&FOLL != 0 {
-		i = 0
-		q = p
-		for ; i < 4; i, q = i+1, q.Link {
-			if q == *last || (q.Mark&NOSCHED != 0) {
-				break
-			}
-			a = q.As
-			if a == obj.ANOP {
-				i--
-				continue
-			}
-
-			if a == AJMP || a == ARET || a == ARFE {
-				goto copy
-			}
-			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
-				continue
-			}
-			if a != ABEQ && a != ABNE {
-				continue
-			}
-
-		copy:
-			for {
-				r = ctxt.NewProg()
-				*r = *p
-				if r.Mark&FOLL == 0 {
-					fmt.Printf("can't happen 1\n")
-				}
-				r.Mark |= FOLL
-				if p != q {
-					p = p.Link
-					(*last).Link = r
-					*last = r
-					continue
-				}
-
-				(*last).Link = r
-				*last = r
-				if a == AJMP || a == ARET || a == ARFE {
-					return
-				}
-				r.As = ABNE
-				if a == ABNE {
-					r.As = ABEQ
-				}
-				r.Pcond = p.Link
-				r.Link = p.Pcond
-				if r.Link.Mark&FOLL == 0 {
-					xfol(ctxt, r.Link, last)
-				}
-				if r.Pcond.Mark&FOLL == 0 {
-					fmt.Printf("can't happen 2\n")
-				}
-				return
-			}
-		}
-
-		a = AJMP
-		q = ctxt.NewProg()
-		q.As = a
-		q.Lineno = p.Lineno
-		q.To.Type = obj.TYPE_BRANCH
-		q.To.Offset = p.Pc
-		q.Pcond = p
-		p = q
-	}
-
-	p.Mark |= FOLL
-	(*last).Link = p
-	*last = p
-	if a == AJMP || a == ARET || a == ARFE {
-		if p.Mark&NOSCHED != 0 {
-			p = p.Link
-			goto loop
-		}
-
-		return
-	}
-
-	if p.Pcond != nil {
-		if a != AJAL && p.Link != nil {
-			xfol(ctxt, p.Link, last)
-			p = p.Pcond
-			if p == nil || (p.Mark&FOLL != 0) {
-				return
-			}
-			goto loop
-		}
-	}
-
-	p = p.Link
-	goto loop
-}
-
-var Linkmips64 = obj.LinkArch{
-	Arch:       sys.ArchMIPS64,
-	Preprocess: preprocess,
-	Assemble:   span0,
-	Follow:     follow,
-	Progedit:   progedit,
-}
-
-var Linkmips64le = obj.LinkArch{
-	Arch:       sys.ArchMIPS64LE,
-	Preprocess: preprocess,
-	Assemble:   span0,
-	Follow:     follow,
-	Progedit:   progedit,
-}
-
-var Linkmips = obj.LinkArch{
-	Arch:       sys.ArchMIPS,
-	Preprocess: preprocess,
-	Assemble:   span0,
-	Follow:     follow,
-	Progedit:   progedit,
-}
-
-var Linkmipsle = obj.LinkArch{
-	Arch:       sys.ArchMIPSLE,
-	Preprocess: preprocess,
-	Assemble:   span0,
-	Follow:     follow,
-	Progedit:   progedit,
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/objfile.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/objfile.go
deleted file mode 100644
index 2752412..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/objfile.go
+++ /dev/null
@@ -1,608 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/objfile.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/objfile.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Writing of Go object files.
-//
-// Originally, Go object files were Plan 9 object files, but no longer.
-// Now they are more like standard object files, in that each symbol is defined
-// by an associated memory image (bytes) and a list of relocations to apply
-// during linking. We do not (yet?) use a standard file format, however.
-// For now, the format is chosen to be as simple as possible to read and write.
-// It may change for reasons of efficiency, or we may even switch to a
-// standard file format if there are compelling benefits to doing so.
-// See golang.org/s/go13linker for more background.
-//
-// The file format is:
-//
-//	- magic header: "\x00\x00go17ld"
-//	- byte 1 - version number
-//	- sequence of strings giving dependencies (imported packages)
-//	- empty string (marks end of sequence)
-//	- sequence of symbol references used by the defined symbols
-//	- byte 0xff (marks end of sequence)
-//	- sequence of integer lengths:
-//		- total data length
-//		- total number of relocations
-//		- total number of pcdata
-//		- total number of automatics
-//		- total number of funcdata
-//		- total number of files
-//	- data, the content of the defined symbols
-//	- sequence of defined symbols
-//	- byte 0xff (marks end of sequence)
-//	- magic footer: "\xff\xffgo17ld"
-//
-// All integers are stored in a zigzag varint format.
-// See golang.org/s/go12symtab for a definition.
-//
-// Data blocks and strings are both stored as an integer
-// followed by that many bytes.
-//
-// A symbol reference is a string name followed by a version.
-//
-// A symbol points to other symbols using an index into the symbol
-// reference sequence. Index 0 corresponds to a nil LSym* pointer.
-// In the symbol layout described below "symref index" stands for this
-// index.
-//
-// Each symbol is laid out as the following fields (taken from LSym*):
-//
-//	- byte 0xfe (sanity check for synchronization)
-//	- type [int]
-//	- name & version [symref index]
-//	- flags [int]
-//		1<<0 dupok
-//		1<<1 local
-//		1<<2 add to typelink table
-//	- size [int]
-//	- gotype [symref index]
-//	- p [data block]
-//	- nr [int]
-//	- r [nr relocations, sorted by off]
-//
-// If type == STEXT, there are a few more fields:
-//
-//	- args [int]
-//	- locals [int]
-//	- nosplit [int]
-//	- flags [int]
-//		1<<0 leaf
-//		1<<1 C function
-//		1<<2 function may call reflect.Type.Method
-//	- nlocal [int]
-//	- local [nlocal automatics]
-//	- pcln [pcln table]
-//
-// Each relocation has the encoding:
-//
-//	- off [int]
-//	- siz [int]
-//	- type [int]
-//	- add [int]
-//	- sym [symref index]
-//
-// Each local has the encoding:
-//
-//	- asym [symref index]
-//	- offset [int]
-//	- type [int]
-//	- gotype [symref index]
-//
-// The pcln table has the encoding:
-//
-//	- pcsp [data block]
-//	- pcfile [data block]
-//	- pcline [data block]
-//	- npcdata [int]
-//	- pcdata [npcdata data blocks]
-//	- nfuncdata [int]
-//	- funcdata [nfuncdata symref index]
-//	- funcdatasym [nfuncdata ints]
-//	- nfile [int]
-//	- file [nfile symref index]
-//
-// The file layout and meaning of type integers are architecture-independent.
-//
-// TODO(rsc): The file format is good for a first pass but needs work.
-//	- There are SymID in the object file that should really just be strings.
-
-package obj
-
-import (
-	"bufio"
-	"bootstrap/cmd/internal/dwarf"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"log"
-	"path/filepath"
-	"sort"
-)
-
-// The Go and C compilers, and the assembler, call writeobj to write
-// out a Go object file. The linker does not call this; the linker
-// does not write out object files.
-func Writeobjdirect(ctxt *Link, b *bufio.Writer) {
-	Flushplist(ctxt)
-	WriteObjFile(ctxt, b)
-}
-
-// objWriter writes Go object files.
-type objWriter struct {
-	wr   *bufio.Writer
-	ctxt *Link
-	// Temporary buffer for zigzag int writing.
-	varintbuf [10]uint8
-
-	// Provide the index of a symbol reference by symbol name.
-	// One map for versioned symbols and one for unversioned symbols.
-	// Used for deduplicating the symbol reference list.
-	refIdx  map[string]int
-	vrefIdx map[string]int
-
-	// Number of objects written of each type.
-	nRefs     int
-	nData     int
-	nReloc    int
-	nPcdata   int
-	nAutom    int
-	nFuncdata int
-	nFile     int
-}
-
-func (w *objWriter) addLengths(s *LSym) {
-	w.nData += len(s.P)
-	w.nReloc += len(s.R)
-
-	if s.Type != STEXT {
-		return
-	}
-
-	pc := s.Pcln
-
-	data := 0
-	data += len(pc.Pcsp.P)
-	data += len(pc.Pcfile.P)
-	data += len(pc.Pcline.P)
-	for i := 0; i < len(pc.Pcdata); i++ {
-		data += len(pc.Pcdata[i].P)
-	}
-
-	w.nData += data
-	w.nPcdata += len(pc.Pcdata)
-
-	autom := 0
-	for a := s.Autom; a != nil; a = a.Link {
-		autom++
-	}
-	w.nAutom += autom
-	w.nFuncdata += len(pc.Funcdataoff)
-	w.nFile += len(pc.File)
-}
-
-func (w *objWriter) writeLengths() {
-	w.writeInt(int64(w.nData))
-	w.writeInt(int64(w.nReloc))
-	w.writeInt(int64(w.nPcdata))
-	w.writeInt(int64(w.nAutom))
-	w.writeInt(int64(w.nFuncdata))
-	w.writeInt(int64(w.nFile))
-}
-
-func newObjWriter(ctxt *Link, b *bufio.Writer) *objWriter {
-	return &objWriter{
-		ctxt:    ctxt,
-		wr:      b,
-		vrefIdx: make(map[string]int),
-		refIdx:  make(map[string]int),
-	}
-}
-
-func WriteObjFile(ctxt *Link, b *bufio.Writer) {
-	w := newObjWriter(ctxt, b)
-
-	// Magic header
-	w.wr.WriteString("\x00\x00go17ld")
-
-	// Version
-	w.wr.WriteByte(1)
-
-	// Autolib
-	for _, pkg := range ctxt.Imports {
-		w.writeString(pkg)
-	}
-	w.writeString("")
-
-	// Symbol references
-	for _, s := range ctxt.Text {
-		w.writeRefs(s)
-		w.addLengths(s)
-	}
-	for _, s := range ctxt.Data {
-		w.writeRefs(s)
-		w.addLengths(s)
-	}
-	// End symbol references
-	w.wr.WriteByte(0xff)
-
-	// Lengths
-	w.writeLengths()
-
-	// Data block
-	for _, s := range ctxt.Text {
-		w.wr.Write(s.P)
-		pc := s.Pcln
-		w.wr.Write(pc.Pcsp.P)
-		w.wr.Write(pc.Pcfile.P)
-		w.wr.Write(pc.Pcline.P)
-		for i := 0; i < len(pc.Pcdata); i++ {
-			w.wr.Write(pc.Pcdata[i].P)
-		}
-	}
-	for _, s := range ctxt.Data {
-		w.wr.Write(s.P)
-	}
-
-	// Symbols
-	for _, s := range ctxt.Text {
-		w.writeSym(s)
-	}
-	for _, s := range ctxt.Data {
-		w.writeSym(s)
-	}
-
-	// Magic footer
-	w.wr.WriteString("\xff\xffgo17ld")
-}
-
-// Symbols are prefixed so their content doesn't get confused with the magic footer.
-const symPrefix = 0xfe
-
-func (w *objWriter) writeRef(s *LSym, isPath bool) {
-	if s == nil || s.RefIdx != 0 {
-		return
-	}
-	var m map[string]int
-	switch s.Version {
-	case 0:
-		m = w.refIdx
-	case 1:
-		m = w.vrefIdx
-	default:
-		log.Fatalf("%s: invalid version number %d", s.Name, s.Version)
-	}
-
-	idx := m[s.Name]
-	if idx != 0 {
-		s.RefIdx = idx
-		return
-	}
-	w.wr.WriteByte(symPrefix)
-	if isPath {
-		w.writeString(filepath.ToSlash(s.Name))
-	} else {
-		w.writeString(s.Name)
-	}
-	w.writeInt(int64(s.Version))
-	w.nRefs++
-	s.RefIdx = w.nRefs
-	m[s.Name] = w.nRefs
-}
-
-func (w *objWriter) writeRefs(s *LSym) {
-	w.writeRef(s, false)
-	w.writeRef(s.Gotype, false)
-	for i := range s.R {
-		w.writeRef(s.R[i].Sym, false)
-	}
-
-	if s.Type == STEXT {
-		for a := s.Autom; a != nil; a = a.Link {
-			w.writeRef(a.Asym, false)
-			w.writeRef(a.Gotype, false)
-		}
-		pc := s.Pcln
-		for _, d := range pc.Funcdata {
-			w.writeRef(d, false)
-		}
-		for _, f := range pc.File {
-			w.writeRef(f, true)
-		}
-	}
-}
-
-func (w *objWriter) writeSymDebug(s *LSym) {
-	ctxt := w.ctxt
-	fmt.Fprintf(ctxt.Bso, "%s ", s.Name)
-	if s.Version != 0 {
-		fmt.Fprintf(ctxt.Bso, "v=%d ", s.Version)
-	}
-	if s.Type != 0 {
-		fmt.Fprintf(ctxt.Bso, "t=%d ", s.Type)
-	}
-	if s.DuplicateOK() {
-		fmt.Fprintf(ctxt.Bso, "dupok ")
-	}
-	if s.CFunc() {
-		fmt.Fprintf(ctxt.Bso, "cfunc ")
-	}
-	if s.NoSplit() {
-		fmt.Fprintf(ctxt.Bso, "nosplit ")
-	}
-	fmt.Fprintf(ctxt.Bso, "size=%d", s.Size)
-	if s.Type == STEXT {
-		fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x", uint64(s.Args), uint64(s.Locals))
-		if s.Leaf() {
-			fmt.Fprintf(ctxt.Bso, " leaf")
-		}
-	}
-
-	fmt.Fprintf(ctxt.Bso, "\n")
-	for p := s.Text; p != nil; p = p.Link {
-		fmt.Fprintf(ctxt.Bso, "\t%#04x %v\n", uint(int(p.Pc)), p)
-	}
-	var c int
-	var j int
-	for i := 0; i < len(s.P); {
-		fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
-		for j = i; j < i+16 && j < len(s.P); j++ {
-			fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
-		}
-		for ; j < i+16; j++ {
-			fmt.Fprintf(ctxt.Bso, "   ")
-		}
-		fmt.Fprintf(ctxt.Bso, "  ")
-		for j = i; j < i+16 && j < len(s.P); j++ {
-			c = int(s.P[j])
-			if ' ' <= c && c <= 0x7e {
-				fmt.Fprintf(ctxt.Bso, "%c", c)
-			} else {
-				fmt.Fprintf(ctxt.Bso, ".")
-			}
-		}
-
-		fmt.Fprintf(ctxt.Bso, "\n")
-		i += 16
-	}
-
-	sort.Sort(relocByOff(s.R)) // generate stable output
-	for _, r := range s.R {
-		name := ""
-		if r.Sym != nil {
-			name = r.Sym.Name
-		} else if r.Type == R_TLS_LE {
-			name = "TLS"
-		}
-		if ctxt.Arch.InFamily(sys.ARM, sys.PPC64) {
-			fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%x\n", int(r.Off), r.Siz, r.Type, name, uint64(r.Add))
-		} else {
-			fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, name, r.Add)
-		}
-	}
-}
-
-func (w *objWriter) writeSym(s *LSym) {
-	ctxt := w.ctxt
-	if ctxt.Debugasm != 0 {
-		w.writeSymDebug(s)
-	}
-
-	w.wr.WriteByte(symPrefix)
-	w.writeInt(int64(s.Type))
-	w.writeRefIndex(s)
-	flags := int64(0)
-	if s.DuplicateOK() {
-		flags |= 1
-	}
-	if s.Local() {
-		flags |= 1 << 1
-	}
-	if s.MakeTypelink() {
-		flags |= 1 << 2
-	}
-	w.writeInt(flags)
-	w.writeInt(s.Size)
-	w.writeRefIndex(s.Gotype)
-	w.writeInt(int64(len(s.P)))
-
-	w.writeInt(int64(len(s.R)))
-	var r *Reloc
-	for i := 0; i < len(s.R); i++ {
-		r = &s.R[i]
-		w.writeInt(int64(r.Off))
-		w.writeInt(int64(r.Siz))
-		w.writeInt(int64(r.Type))
-		w.writeInt(r.Add)
-		w.writeRefIndex(r.Sym)
-	}
-
-	if s.Type != STEXT {
-		return
-	}
-
-	w.writeInt(int64(s.Args))
-	w.writeInt(int64(s.Locals))
-	if s.NoSplit() {
-		w.writeInt(1)
-	} else {
-		w.writeInt(0)
-	}
-	flags = int64(0)
-	if s.Leaf() {
-		flags |= 1
-	}
-	if s.CFunc() {
-		flags |= 1 << 1
-	}
-	if s.ReflectMethod() {
-		flags |= 1 << 2
-	}
-	w.writeInt(flags)
-	n := 0
-	for a := s.Autom; a != nil; a = a.Link {
-		n++
-	}
-	w.writeInt(int64(n))
-	for a := s.Autom; a != nil; a = a.Link {
-		w.writeRefIndex(a.Asym)
-		w.writeInt(int64(a.Aoffset))
-		if a.Name == NAME_AUTO {
-			w.writeInt(A_AUTO)
-		} else if a.Name == NAME_PARAM {
-			w.writeInt(A_PARAM)
-		} else {
-			log.Fatalf("%s: invalid local variable type %d", s.Name, a.Name)
-		}
-		w.writeRefIndex(a.Gotype)
-	}
-
-	pc := s.Pcln
-	w.writeInt(int64(len(pc.Pcsp.P)))
-	w.writeInt(int64(len(pc.Pcfile.P)))
-	w.writeInt(int64(len(pc.Pcline.P)))
-	w.writeInt(int64(len(pc.Pcdata)))
-	for i := 0; i < len(pc.Pcdata); i++ {
-		w.writeInt(int64(len(pc.Pcdata[i].P)))
-	}
-	w.writeInt(int64(len(pc.Funcdataoff)))
-	for i := 0; i < len(pc.Funcdataoff); i++ {
-		w.writeRefIndex(pc.Funcdata[i])
-	}
-	for i := 0; i < len(pc.Funcdataoff); i++ {
-		w.writeInt(pc.Funcdataoff[i])
-	}
-	w.writeInt(int64(len(pc.File)))
-	for _, f := range pc.File {
-		w.writeRefIndex(f)
-	}
-}
-
-func (w *objWriter) writeInt(sval int64) {
-	var v uint64
-	uv := (uint64(sval) << 1) ^ uint64(sval>>63)
-	p := w.varintbuf[:]
-	for v = uv; v >= 0x80; v >>= 7 {
-		p[0] = uint8(v | 0x80)
-		p = p[1:]
-	}
-	p[0] = uint8(v)
-	p = p[1:]
-	w.wr.Write(w.varintbuf[:len(w.varintbuf)-len(p)])
-}
-
-func (w *objWriter) writeString(s string) {
-	w.writeInt(int64(len(s)))
-	w.wr.WriteString(s)
-}
-
-func (w *objWriter) writeRefIndex(s *LSym) {
-	if s == nil {
-		w.writeInt(0)
-		return
-	}
-	if s.RefIdx == 0 {
-		log.Fatalln("writing an unreferenced symbol", s.Name)
-	}
-	w.writeInt(int64(s.RefIdx))
-}
-
-// relocByOff sorts relocations by their offsets.
-type relocByOff []Reloc
-
-func (x relocByOff) Len() int           { return len(x) }
-func (x relocByOff) Less(i, j int) bool { return x[i].Off < x[j].Off }
-func (x relocByOff) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-
-// implement dwarf.Context
-type dwCtxt struct{ *Link }
-
-func (c dwCtxt) PtrSize() int {
-	return c.Arch.PtrSize
-}
-func (c dwCtxt) AddInt(s dwarf.Sym, size int, i int64) {
-	ls := s.(*LSym)
-	ls.WriteInt(c.Link, ls.Size, size, i)
-}
-func (c dwCtxt) AddBytes(s dwarf.Sym, b []byte) {
-	ls := s.(*LSym)
-	ls.WriteBytes(c.Link, ls.Size, b)
-}
-func (c dwCtxt) AddString(s dwarf.Sym, v string) {
-	ls := s.(*LSym)
-	ls.WriteString(c.Link, ls.Size, len(v), v)
-	ls.WriteInt(c.Link, ls.Size, 1, 0)
-}
-func (c dwCtxt) SymValue(s dwarf.Sym) int64 {
-	return 0
-}
-func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) {
-	rsym := data.(*LSym)
-	ls := s.(*LSym)
-	size := c.PtrSize()
-	ls.WriteAddr(c.Link, ls.Size, size, rsym, value)
-}
-func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) {
-	ls := s.(*LSym)
-	rsym := t.(*LSym)
-	ls.WriteAddr(c.Link, ls.Size, size, rsym, ofs)
-	r := &ls.R[len(ls.R)-1]
-	r.Type = R_DWARFREF
-}
-
-func gendwarf(ctxt *Link, text []*LSym) []*LSym {
-	dctxt := dwCtxt{ctxt}
-	var dw []*LSym
-
-	for _, s := range text {
-		dsym := Linklookup(ctxt, dwarf.InfoPrefix+s.Name, int(s.Version))
-		if dsym.Size != 0 {
-			continue
-		}
-		dw = append(dw, dsym)
-		dsym.Type = SDWARFINFO
-		dsym.Set(AttrDuplicateOK, s.DuplicateOK())
-		var vars dwarf.Var
-		var abbrev int
-		var offs int32
-		for a := s.Autom; a != nil; a = a.Link {
-			switch a.Name {
-			case NAME_AUTO:
-				abbrev = dwarf.DW_ABRV_AUTO
-				offs = a.Aoffset
-				if ctxt.FixedFrameSize() == 0 {
-					offs -= int32(ctxt.Arch.PtrSize)
-				}
-				if Framepointer_enabled(GOOS, GOARCH) {
-					offs -= int32(ctxt.Arch.PtrSize)
-				}
-
-			case NAME_PARAM:
-				abbrev = dwarf.DW_ABRV_PARAM
-				offs = a.Aoffset + int32(ctxt.FixedFrameSize())
-
-			default:
-				continue
-			}
-			typename := dwarf.InfoPrefix + a.Gotype.Name[len("type."):]
-			dwvar := &dwarf.Var{
-				Name:   a.Asym.Name,
-				Abbrev: abbrev,
-				Offset: int32(offs),
-				Type:   Linklookup(ctxt, typename, 0),
-			}
-			dws := &vars.Link
-			for ; *dws != nil; dws = &(*dws).Link {
-				if offs <= (*dws).Offset {
-					break
-				}
-			}
-			dwvar.Link = *dws
-			*dws = dwvar
-		}
-		dwarf.PutFunc(dctxt, dsym, s.Name, s.Version == 0, s, s.Size, vars.Link)
-	}
-	return dw
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/pass.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/pass.go
deleted file mode 100644
index c210caf..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/pass.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/pass.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/pass.go:1
-// Inferno utils/6l/pass.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/pass.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package obj
-
-// Code and data passes.
-
-func Brchain(ctxt *Link, p *Prog) *Prog {
-	for i := 0; i < 20; i++ {
-		if p == nil || p.As != AJMP || p.Pcond == nil {
-			return p
-		}
-		p = p.Pcond
-	}
-
-	return nil
-}
-
-func brloop(ctxt *Link, p *Prog) *Prog {
-	var q *Prog
-
-	c := 0
-	for q = p; q != nil; q = q.Pcond {
-		if q.As != AJMP || q.Pcond == nil {
-			break
-		}
-		c++
-		if c >= 5000 {
-			return nil
-		}
-	}
-
-	return q
-}
-
-func checkaddr(ctxt *Link, p *Prog, a *Addr) {
-	// Check expected encoding, especially TYPE_CONST vs TYPE_ADDR.
-	switch a.Type {
-	case TYPE_NONE:
-		return
-
-	case TYPE_BRANCH:
-		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 {
-			break
-		}
-		return
-
-	case TYPE_TEXTSIZE:
-		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 {
-			break
-		}
-		return
-
-		//if(a->u.bits != 0)
-	//	break;
-	case TYPE_MEM:
-		return
-
-		// TODO(rsc): After fixing SHRQ, check a->index != 0 too.
-	case TYPE_CONST:
-		if a.Name != 0 || a.Sym != nil || a.Reg != 0 {
-			ctxt.Diag("argument is TYPE_CONST, should be TYPE_ADDR, in %v", p)
-			return
-		}
-
-		if a.Reg != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.Val != nil {
-			break
-		}
-		return
-
-	case TYPE_FCONST, TYPE_SCONST:
-		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Offset != 0 || a.Sym != nil {
-			break
-		}
-		return
-
-	// TODO(rsc): After fixing PINSRQ, check a->offset != 0 too.
-	// TODO(rsc): After fixing SHRQ, check a->index != 0 too.
-	case TYPE_REG:
-		if a.Scale != 0 || a.Name != 0 || a.Sym != nil {
-			break
-		}
-		return
-
-	case TYPE_ADDR:
-		if a.Val != nil {
-			break
-		}
-		if a.Reg == 0 && a.Index == 0 && a.Scale == 0 && a.Name == 0 && a.Sym == nil {
-			ctxt.Diag("argument is TYPE_ADDR, should be TYPE_CONST, in %v", p)
-		}
-		return
-
-	case TYPE_SHIFT:
-		if a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.Val != nil {
-			break
-		}
-		return
-
-	case TYPE_REGREG:
-		if a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.Val != nil {
-			break
-		}
-		return
-
-	case TYPE_REGREG2:
-		return
-
-	case TYPE_REGLIST:
-		return
-
-	// Expect sym and name to be set, nothing else.
-	// Technically more is allowed, but this is only used for *name(SB).
-	case TYPE_INDIR:
-		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name == 0 || a.Offset != 0 || a.Sym == nil || a.Val != nil {
-			break
-		}
-		return
-	}
-
-	ctxt.Diag("invalid encoding for argument %v", p)
-}
-
-func linkpatch(ctxt *Link, sym *LSym) {
-	var c int32
-	var name string
-	var q *Prog
-
-	ctxt.Cursym = sym
-
-	for p := sym.Text; p != nil; p = p.Link {
-		checkaddr(ctxt, p, &p.From)
-		if p.From3 != nil {
-			checkaddr(ctxt, p, p.From3)
-		}
-		checkaddr(ctxt, p, &p.To)
-
-		if ctxt.Arch.Progedit != nil {
-			ctxt.Arch.Progedit(ctxt, p)
-		}
-		if p.To.Type != TYPE_BRANCH {
-			continue
-		}
-		if p.To.Val != nil {
-			// TODO: Remove To.Val.(*Prog) in favor of p->pcond.
-			p.Pcond = p.To.Val.(*Prog)
-			continue
-		}
-
-		if p.To.Sym != nil {
-			continue
-		}
-		c = int32(p.To.Offset)
-		for q = sym.Text; q != nil; {
-			if int64(c) == q.Pc {
-				break
-			}
-			if q.Forwd != nil && int64(c) >= q.Forwd.Pc {
-				q = q.Forwd
-			} else {
-				q = q.Link
-			}
-		}
-
-		if q == nil {
-			name = "<nil>"
-			if p.To.Sym != nil {
-				name = p.To.Sym.Name
-			}
-			ctxt.Diag("branch out of range (%#x)\n%v [%s]", uint32(c), p, name)
-			p.To.Type = TYPE_NONE
-		}
-
-		p.To.Val = q
-		p.Pcond = q
-	}
-
-	if ctxt.Flag_optimize {
-		for p := sym.Text; p != nil; p = p.Link {
-			if p.Pcond != nil {
-				p.Pcond = brloop(ctxt, p.Pcond)
-				if p.Pcond != nil {
-					if p.To.Type == TYPE_BRANCH {
-						p.To.Offset = p.Pcond.Pc
-					}
-				}
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/pcln.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/pcln.go
deleted file mode 100644
index 29bc131..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/pcln.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/pcln.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/pcln.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import "log"
-
-func addvarint(d *Pcdata, v uint32) {
-	for ; v >= 0x80; v >>= 7 {
-		d.P = append(d.P, uint8(v|0x80))
-	}
-	d.P = append(d.P, uint8(v))
-}
-
-// funcpctab writes to dst a pc-value table mapping the code in func to the values
-// returned by valfunc parameterized by arg. The invocation of valfunc to update the
-// current value is, for each p,
-//
-//	val = valfunc(func, val, p, 0, arg);
-//	record val as value at p->pc;
-//	val = valfunc(func, val, p, 1, arg);
-//
-// where func is the function, val is the current value, p is the instruction being
-// considered, and arg can be used to further parameterize valfunc.
-func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) {
-	// To debug a specific function, uncomment lines and change name.
-	dbg := 0
-
-	//if func_.Name == "main.main" || desc == "pctospadj" {
-	//	dbg = 1
-	//}
-
-	ctxt.Debugpcln += int32(dbg)
-
-	dst.P = dst.P[:0]
-
-	if ctxt.Debugpcln != 0 {
-		ctxt.Logf("funcpctab %s [valfunc=%s]\n", func_.Name, desc)
-	}
-
-	val := int32(-1)
-	oldval := val
-	if func_.Text == nil {
-		ctxt.Debugpcln -= int32(dbg)
-		return
-	}
-
-	pc := func_.Text.Pc
-
-	if ctxt.Debugpcln != 0 {
-		ctxt.Logf("%6x %6d %v\n", uint64(pc), val, func_.Text)
-	}
-
-	started := int32(0)
-	var delta uint32
-	for p := func_.Text; p != nil; p = p.Link {
-		// Update val. If it's not changing, keep going.
-		val = valfunc(ctxt, func_, val, p, 0, arg)
-
-		if val == oldval && started != 0 {
-			val = valfunc(ctxt, func_, val, p, 1, arg)
-			if ctxt.Debugpcln != 0 {
-				ctxt.Logf("%6x %6s %v\n", uint64(p.Pc), "", p)
-			}
-			continue
-		}
-
-		// If the pc of the next instruction is the same as the
-		// pc of this instruction, this instruction is not a real
-		// instruction. Keep going, so that we only emit a delta
-		// for a true instruction boundary in the program.
-		if p.Link != nil && p.Link.Pc == p.Pc {
-			val = valfunc(ctxt, func_, val, p, 1, arg)
-			if ctxt.Debugpcln != 0 {
-				ctxt.Logf("%6x %6s %v\n", uint64(p.Pc), "", p)
-			}
-			continue
-		}
-
-		// The table is a sequence of (value, pc) pairs, where each
-		// pair states that the given value is in effect from the current position
-		// up to the given pc, which becomes the new current position.
-		// To generate the table as we scan over the program instructions,
-		// we emit a "(value" when pc == func->value, and then
-		// each time we observe a change in value we emit ", pc) (value".
-		// When the scan is over, we emit the closing ", pc)".
-		//
-		// The table is delta-encoded. The value deltas are signed and
-		// transmitted in zig-zag form, where a complement bit is placed in bit 0,
-		// and the pc deltas are unsigned. Both kinds of deltas are sent
-		// as variable-length little-endian base-128 integers,
-		// where the 0x80 bit indicates that the integer continues.
-
-		if ctxt.Debugpcln != 0 {
-			ctxt.Logf("%6x %6d %v\n", uint64(p.Pc), val, p)
-		}
-
-		if started != 0 {
-			addvarint(dst, uint32((p.Pc-pc)/int64(ctxt.Arch.MinLC)))
-			pc = p.Pc
-		}
-
-		delta = uint32(val) - uint32(oldval)
-		if delta>>31 != 0 {
-			delta = 1 | ^(delta << 1)
-		} else {
-			delta <<= 1
-		}
-		addvarint(dst, delta)
-		oldval = val
-		started = 1
-		val = valfunc(ctxt, func_, val, p, 1, arg)
-	}
-
-	if started != 0 {
-		if ctxt.Debugpcln != 0 {
-			ctxt.Logf("%6x done\n", uint64(func_.Text.Pc+func_.Size))
-		}
-		addvarint(dst, uint32((func_.Size-pc)/int64(ctxt.Arch.MinLC)))
-		addvarint(dst, 0) // terminator
-	}
-
-	if ctxt.Debugpcln != 0 {
-		ctxt.Logf("wrote %d bytes to %p\n", len(dst.P), dst)
-		for i := 0; i < len(dst.P); i++ {
-			ctxt.Logf(" %02x", dst.P[i])
-		}
-		ctxt.Logf("\n")
-	}
-
-	ctxt.Debugpcln -= int32(dbg)
-}
-
-// pctofileline computes either the file number (arg == 0)
-// or the line number (arg == 1) to use at p.
-// Because p->lineno applies to p, phase == 0 (before p)
-// takes care of the update.
-func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
-	if p.As == ATEXT || p.As == ANOP || p.As == AUSEFIELD || p.Lineno == 0 || phase == 1 {
-		return oldval
-	}
-	f, l := linkgetline(ctxt, p.Lineno)
-	if f == nil {
-		//	print("getline failed for %s %v\n", ctxt->cursym->name, p);
-		return oldval
-	}
-
-	if arg == nil {
-		return l
-	}
-	pcln := arg.(*Pcln)
-
-	if f == pcln.Lastfile {
-		return int32(pcln.Lastindex)
-	}
-
-	for i, file := range pcln.File {
-		if file == f {
-			pcln.Lastfile = f
-			pcln.Lastindex = i
-			return int32(i)
-		}
-	}
-	i := len(pcln.File)
-	pcln.File = append(pcln.File, f)
-	pcln.Lastfile = f
-	pcln.Lastindex = i
-	return int32(i)
-}
-
-// pctospadj computes the sp adjustment in effect.
-// It is oldval plus any adjustment made by p itself.
-// The adjustment by p takes effect only after p, so we
-// apply the change during phase == 1.
-func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
-	if oldval == -1 { // starting
-		oldval = 0
-	}
-	if phase == 0 {
-		return oldval
-	}
-	if oldval+p.Spadj < -10000 || oldval+p.Spadj > 1100000000 {
-		ctxt.Diag("overflow in spadj: %d + %d = %d", oldval, p.Spadj, oldval+p.Spadj)
-		log.Fatalf("bad code")
-	}
-
-	return oldval + p.Spadj
-}
-
-// pctopcdata computes the pcdata value in effect at p.
-// A PCDATA instruction sets the value in effect at future
-// non-PCDATA instructions.
-// Since PCDATA instructions have no width in the final code,
-// it does not matter which phase we use for the update.
-func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
-	if phase == 0 || p.As != APCDATA || p.From.Offset != int64(arg.(uint32)) {
-		return oldval
-	}
-	if int64(int32(p.To.Offset)) != p.To.Offset {
-		ctxt.Diag("overflow in PCDATA instruction: %v", p)
-		log.Fatalf("bad code")
-	}
-
-	return int32(p.To.Offset)
-}
-
-func linkpcln(ctxt *Link, cursym *LSym) {
-	ctxt.Cursym = cursym
-
-	pcln := new(Pcln)
-	cursym.Pcln = pcln
-
-	npcdata := 0
-	nfuncdata := 0
-	for p := cursym.Text; p != nil; p = p.Link {
-		// Find the highest ID of any used PCDATA table. This ignores PCDATA table
-		// that consist entirely of "-1", since that's the assumed default value.
-		//   From.Offset is table ID
-		//   To.Offset is data
-		if p.As == APCDATA && p.From.Offset >= int64(npcdata) && p.To.Offset != -1 { // ignore -1 as we start at -1, if we only see -1, nothing changed
-			npcdata = int(p.From.Offset + 1)
-		}
-		// Find the highest ID of any FUNCDATA table.
-		//   From.Offset is table ID
-		if p.As == AFUNCDATA && p.From.Offset >= int64(nfuncdata) {
-			nfuncdata = int(p.From.Offset + 1)
-		}
-	}
-
-	pcln.Pcdata = make([]Pcdata, npcdata)
-	pcln.Pcdata = pcln.Pcdata[:npcdata]
-	pcln.Funcdata = make([]*LSym, nfuncdata)
-	pcln.Funcdataoff = make([]int64, nfuncdata)
-	pcln.Funcdataoff = pcln.Funcdataoff[:nfuncdata]
-
-	funcpctab(ctxt, &pcln.Pcsp, cursym, "pctospadj", pctospadj, nil)
-	funcpctab(ctxt, &pcln.Pcfile, cursym, "pctofile", pctofileline, pcln)
-	funcpctab(ctxt, &pcln.Pcline, cursym, "pctoline", pctofileline, nil)
-
-	// tabulate which pc and func data we have.
-	havepc := make([]uint32, (npcdata+31)/32)
-	havefunc := make([]uint32, (nfuncdata+31)/32)
-	for p := cursym.Text; p != nil; p = p.Link {
-		if p.As == AFUNCDATA {
-			if (havefunc[p.From.Offset/32]>>uint64(p.From.Offset%32))&1 != 0 {
-				ctxt.Diag("multiple definitions for FUNCDATA $%d", p.From.Offset)
-			}
-			havefunc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32)
-		}
-
-		if p.As == APCDATA && p.To.Offset != -1 {
-			havepc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32)
-		}
-	}
-
-	// pcdata.
-	for i := 0; i < npcdata; i++ {
-		if (havepc[i/32]>>uint(i%32))&1 == 0 {
-			continue
-		}
-		funcpctab(ctxt, &pcln.Pcdata[i], cursym, "pctopcdata", pctopcdata, interface{}(uint32(i)))
-	}
-
-	// funcdata
-	if nfuncdata > 0 {
-		var i int
-		for p := cursym.Text; p != nil; p = p.Link {
-			if p.As == AFUNCDATA {
-				i = int(p.From.Offset)
-				pcln.Funcdataoff[i] = p.To.Offset
-				if p.To.Type != TYPE_CONST {
-					// TODO: Dedup.
-					//funcdata_bytes += p->to.sym->size;
-					pcln.Funcdata[i] = p.To.Sym
-				}
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/plist.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/plist.go
deleted file mode 100644
index 57855a6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/plist.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/plist.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/plist.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import (
-	"fmt"
-	"log"
-	"strings"
-)
-
-type Plist struct {
-	Firstpc *Prog
-}
-
-/*
- * start a new Prog list.
- */
-func Linknewplist(ctxt *Link) *Plist {
-	pl := new(Plist)
-	ctxt.Plists = append(ctxt.Plists, pl)
-	return pl
-}
-
-func Flushplist(ctxt *Link) {
-	flushplist(ctxt, ctxt.Debugasm == 0)
-}
-func FlushplistNoFree(ctxt *Link) {
-	flushplist(ctxt, false)
-}
-func flushplist(ctxt *Link, freeProgs bool) {
-	// Build list of symbols, and assign instructions to lists.
-	// Ignore ctxt->plist boundaries. There are no guarantees there,
-	// and the assemblers just use one big list.
-	var curtext *LSym
-	var etext *Prog
-	var text []*LSym
-
-	for _, pl := range ctxt.Plists {
-		var plink *Prog
-		for p := pl.Firstpc; p != nil; p = plink {
-			if ctxt.Debugasm != 0 && ctxt.Debugvlog != 0 {
-				fmt.Printf("obj: %v\n", p)
-			}
-			plink = p.Link
-			p.Link = nil
-
-			switch p.As {
-			case AEND:
-				continue
-
-			case ATYPE:
-				// Assume each TYPE instruction describes
-				// a different local variable or parameter,
-				// so no dedup.
-				// Using only the TYPE instructions means
-				// that we discard location information about local variables
-				// in C and assembly functions; that information is inferred
-				// from ordinary references, because there are no TYPE
-				// instructions there. Without the type information, gdb can't
-				// use the locations, so we don't bother to save them.
-				// If something else could use them, we could arrange to
-				// preserve them.
-				if curtext == nil {
-					continue
-				}
-				a := new(Auto)
-				a.Asym = p.From.Sym
-				a.Aoffset = int32(p.From.Offset)
-				a.Name = int16(p.From.Name)
-				a.Gotype = p.To.Sym
-				a.Link = curtext.Autom
-				curtext.Autom = a
-				continue
-
-			case ATEXT:
-				s := p.From.Sym
-				if s == nil {
-					// func _() { }
-					curtext = nil
-
-					continue
-				}
-
-				if s.Text != nil {
-					log.Fatalf("duplicate TEXT for %s", s.Name)
-				}
-				if s.OnList() {
-					log.Fatalf("symbol %s listed multiple times", s.Name)
-				}
-				s.Set(AttrOnList, true)
-				text = append(text, s)
-				flag := int(p.From3Offset())
-				if flag&DUPOK != 0 {
-					s.Set(AttrDuplicateOK, true)
-				}
-				if flag&NOSPLIT != 0 {
-					s.Set(AttrNoSplit, true)
-				}
-				if flag&REFLECTMETHOD != 0 {
-					s.Set(AttrReflectMethod, true)
-				}
-				s.Type = STEXT
-				s.Text = p
-				etext = p
-				curtext = s
-				continue
-
-			case AFUNCDATA:
-				// Rewrite reference to go_args_stackmap(SB) to the Go-provided declaration information.
-				if curtext == nil { // func _() {}
-					continue
-				}
-				if p.To.Sym.Name == "go_args_stackmap" {
-					if p.From.Type != TYPE_CONST || p.From.Offset != FUNCDATA_ArgsPointerMaps {
-						ctxt.Diag("FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps")
-					}
-					p.To.Sym = Linklookup(ctxt, fmt.Sprintf("%s.args_stackmap", curtext.Name), int(curtext.Version))
-				}
-
-			}
-
-			if curtext == nil {
-				etext = nil
-				continue
-			}
-			etext.Link = p
-			etext = p
-		}
-	}
-
-	// Add reference to Go arguments for C or assembly functions without them.
-	for _, s := range text {
-		if !strings.HasPrefix(s.Name, "\"\".") {
-			continue
-		}
-		found := false
-		var p *Prog
-		for p = s.Text; p != nil; p = p.Link {
-			if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == FUNCDATA_ArgsPointerMaps {
-				found = true
-				break
-			}
-		}
-
-		if !found {
-			p = Appendp(ctxt, s.Text)
-			p.As = AFUNCDATA
-			p.From.Type = TYPE_CONST
-			p.From.Offset = FUNCDATA_ArgsPointerMaps
-			p.To.Type = TYPE_MEM
-			p.To.Name = NAME_EXTERN
-			p.To.Sym = Linklookup(ctxt, fmt.Sprintf("%s.args_stackmap", s.Name), int(s.Version))
-		}
-	}
-
-	// Turn functions into machine code images.
-	for _, s := range text {
-		mkfwd(s)
-		linkpatch(ctxt, s)
-		if ctxt.Flag_optimize {
-			ctxt.Arch.Follow(ctxt, s)
-		}
-		ctxt.Arch.Preprocess(ctxt, s)
-		ctxt.Arch.Assemble(ctxt, s)
-		fieldtrack(ctxt, s)
-		linkpcln(ctxt, s)
-		if freeProgs {
-			s.Text = nil
-		}
-	}
-
-	// Add to running list in ctxt.
-	ctxt.Text = append(ctxt.Text, text...)
-	ctxt.Data = append(ctxt.Data, gendwarf(ctxt, text)...)
-	ctxt.Plists = nil
-	ctxt.Curp = nil
-	if freeProgs {
-		ctxt.freeProgs()
-	}
-}
-
-func (ctxt *Link) Globl(s *LSym, size int64, flag int) {
-	if s.SeenGlobl() {
-		fmt.Printf("duplicate %v\n", s)
-	}
-	s.Set(AttrSeenGlobl, true)
-	if s.OnList() {
-		log.Fatalf("symbol %s listed multiple times", s.Name)
-	}
-	s.Set(AttrOnList, true)
-	ctxt.Data = append(ctxt.Data, s)
-	s.Size = size
-	if s.Type == 0 || s.Type == SXREF {
-		s.Type = SBSS
-	}
-	if flag&DUPOK != 0 {
-		s.Set(AttrDuplicateOK, true)
-	}
-	if flag&RODATA != 0 {
-		s.Type = SRODATA
-	} else if flag&NOPTR != 0 {
-		s.Type = SNOPTRBSS
-	} else if flag&TLSBSS != 0 {
-		s.Type = STLSBSS
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/a.out.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/a.out.go
deleted file mode 100644
index d9cccac..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/a.out.go
+++ /dev/null
@@ -1,944 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/a.out.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/a.out.go:1
-// cmd/9c/9.out.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import "bootstrap/cmd/internal/obj"
-
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p ppc64
-
-/*
- * powerpc 64
- */
-const (
-	NSNAME = 8
-	NSYM   = 50
-	NREG   = 32 /* number of general registers */
-	NFREG  = 32 /* number of floating point registers */
-)
-
-const (
-	/* RBasePPC64 = 4096 */
-	/* R0=4096 ... R31=4127 */
-	REG_R0 = obj.RBasePPC64 + iota
-	REG_R1
-	REG_R2
-	REG_R3
-	REG_R4
-	REG_R5
-	REG_R6
-	REG_R7
-	REG_R8
-	REG_R9
-	REG_R10
-	REG_R11
-	REG_R12
-	REG_R13
-	REG_R14
-	REG_R15
-	REG_R16
-	REG_R17
-	REG_R18
-	REG_R19
-	REG_R20
-	REG_R21
-	REG_R22
-	REG_R23
-	REG_R24
-	REG_R25
-	REG_R26
-	REG_R27
-	REG_R28
-	REG_R29
-	REG_R30
-	REG_R31
-
-	/* F0=4128 ... F31=4159 */
-	REG_F0
-	REG_F1
-	REG_F2
-	REG_F3
-	REG_F4
-	REG_F5
-	REG_F6
-	REG_F7
-	REG_F8
-	REG_F9
-	REG_F10
-	REG_F11
-	REG_F12
-	REG_F13
-	REG_F14
-	REG_F15
-	REG_F16
-	REG_F17
-	REG_F18
-	REG_F19
-	REG_F20
-	REG_F21
-	REG_F22
-	REG_F23
-	REG_F24
-	REG_F25
-	REG_F26
-	REG_F27
-	REG_F28
-	REG_F29
-	REG_F30
-	REG_F31
-
-	/* V0=4160 ... V31=4191 */
-	REG_V0
-	REG_V1
-	REG_V2
-	REG_V3
-	REG_V4
-	REG_V5
-	REG_V6
-	REG_V7
-	REG_V8
-	REG_V9
-	REG_V10
-	REG_V11
-	REG_V12
-	REG_V13
-	REG_V14
-	REG_V15
-	REG_V16
-	REG_V17
-	REG_V18
-	REG_V19
-	REG_V20
-	REG_V21
-	REG_V22
-	REG_V23
-	REG_V24
-	REG_V25
-	REG_V26
-	REG_V27
-	REG_V28
-	REG_V29
-	REG_V30
-	REG_V31
-
-	/* VS0=4192 ... VS63=4255 */
-	REG_VS0
-	REG_VS1
-	REG_VS2
-	REG_VS3
-	REG_VS4
-	REG_VS5
-	REG_VS6
-	REG_VS7
-	REG_VS8
-	REG_VS9
-	REG_VS10
-	REG_VS11
-	REG_VS12
-	REG_VS13
-	REG_VS14
-	REG_VS15
-	REG_VS16
-	REG_VS17
-	REG_VS18
-	REG_VS19
-	REG_VS20
-	REG_VS21
-	REG_VS22
-	REG_VS23
-	REG_VS24
-	REG_VS25
-	REG_VS26
-	REG_VS27
-	REG_VS28
-	REG_VS29
-	REG_VS30
-	REG_VS31
-	REG_VS32
-	REG_VS33
-	REG_VS34
-	REG_VS35
-	REG_VS36
-	REG_VS37
-	REG_VS38
-	REG_VS39
-	REG_VS40
-	REG_VS41
-	REG_VS42
-	REG_VS43
-	REG_VS44
-	REG_VS45
-	REG_VS46
-	REG_VS47
-	REG_VS48
-	REG_VS49
-	REG_VS50
-	REG_VS51
-	REG_VS52
-	REG_VS53
-	REG_VS54
-	REG_VS55
-	REG_VS56
-	REG_VS57
-	REG_VS58
-	REG_VS59
-	REG_VS60
-	REG_VS61
-	REG_VS62
-	REG_VS63
-
-	REG_CR0
-	REG_CR1
-	REG_CR2
-	REG_CR3
-	REG_CR4
-	REG_CR5
-	REG_CR6
-	REG_CR7
-
-	REG_MSR
-	REG_FPSCR
-	REG_CR
-
-	REG_SPECIAL = REG_CR0
-
-	REG_SPR0 = obj.RBasePPC64 + 1024 // first of 1024 registers
-	REG_DCR0 = obj.RBasePPC64 + 2048 // first of 1024 registers
-
-	REG_XER = REG_SPR0 + 1
-	REG_LR  = REG_SPR0 + 8
-	REG_CTR = REG_SPR0 + 9
-
-	REGZERO = REG_R0 /* set to zero */
-	REGSP   = REG_R1
-	REGSB   = REG_R2
-	REGRET  = REG_R3
-	REGARG  = -1      /* -1 disables passing the first argument in register */
-	REGRT1  = REG_R3  /* reserved for runtime, duffzero and duffcopy */
-	REGRT2  = REG_R4  /* reserved for runtime, duffcopy */
-	REGMIN  = REG_R7  /* register variables allocated from here to REGMAX */
-	REGCTXT = REG_R11 /* context for closures */
-	REGTLS  = REG_R13 /* C ABI TLS base pointer */
-	REGMAX  = REG_R27
-	REGEXT  = REG_R30 /* external registers allocated from here down */
-	REGG    = REG_R30 /* G */
-	REGTMP  = REG_R31 /* used by the linker */
-	FREGRET = REG_F0
-	FREGMIN = REG_F17 /* first register variable */
-	FREGMAX = REG_F26 /* last register variable for 9g only */
-	FREGEXT = REG_F26 /* first external register */
-)
-
-/*
- * GENERAL:
- *
- * compiler allocates R3 up as temps
- * compiler allocates register variables R7-R27
- * compiler allocates external registers R30 down
- *
- * compiler allocates register variables F17-F26
- * compiler allocates external registers F26 down
- */
-const (
-	BIG = 32768 - 8
-)
-
-const (
-	/* mark flags */
-	LABEL   = 1 << 0
-	LEAF    = 1 << 1
-	FLOAT   = 1 << 2
-	BRANCH  = 1 << 3
-	LOAD    = 1 << 4
-	FCMP    = 1 << 5
-	SYNC    = 1 << 6
-	LIST    = 1 << 7
-	FOLL    = 1 << 8
-	NOSCHED = 1 << 9
-)
-
-// Values for use in branch instruction BC
-// BC B0,BI,label
-// BO is type of branch + likely bits described below
-// BI is CR value + branch type
-// ex: BEQ CR2,label is BC 12,10,label
-//   12 = BO_BCR
-//   10 = BI_CR2 + BI_EQ
-
-const (
-	BI_CR0 = 0
-	BI_CR1 = 4
-	BI_CR2 = 8
-	BI_CR3 = 12
-	BI_CR4 = 16
-	BI_CR5 = 20
-	BI_CR6 = 24
-	BI_CR7 = 28
-	BI_LT  = 0
-	BI_GT  = 1
-	BI_EQ  = 2
-	BI_OVF = 3
-)
-
-// Values for the BO field.  Add the branch type to
-// the likely bits, if a likely setting is known.
-// If branch likely or unlikely is not known, don't set it.
-// e.g. branch on cr+likely = 15
-
-const (
-	BO_BCTR     = 16 // branch on ctr value
-	BO_BCR      = 12 // branch on cr value
-	BO_BCRBCTR  = 8  // branch on ctr and cr value
-	BO_NOTBCR   = 4  // branch on not cr value
-	BO_UNLIKELY = 2  // value for unlikely
-	BO_LIKELY   = 3  // value for likely
-)
-
-// Bit settings from the CR
-
-const (
-	C_COND_LT = iota // 0 result is negative
-	C_COND_GT        // 1 result is positive
-	C_COND_EQ        // 2 result is zero
-	C_COND_SO        // 3 summary overflow or FP compare w/ NaN
-)
-
-const (
-	C_NONE = iota
-	C_REG
-	C_FREG
-	C_VREG
-	C_VSREG
-	C_CREG
-	C_SPR /* special processor register */
-	C_ZCON
-	C_SCON   /* 16 bit signed */
-	C_UCON   /* 32 bit signed, low 16 bits 0 */
-	C_ADDCON /* -0x8000 <= v < 0 */
-	C_ANDCON /* 0 < v <= 0xFFFF */
-	C_LCON   /* other 32 */
-	C_DCON   /* other 64 (could subdivide further) */
-	C_SACON  /* $n(REG) where n <= int16 */
-	C_SECON
-	C_LACON /* $n(REG) where int16 < n <= int32 */
-	C_LECON
-	C_DACON /* $n(REG) where int32 < n */
-	C_SBRA
-	C_LBRA
-	C_LBRAPIC
-	C_SAUTO
-	C_LAUTO
-	C_SEXT
-	C_LEXT
-	C_ZOREG // conjecture: either (1) register + zeroed offset, or (2) "R0" implies zero or C_REG
-	C_SOREG // register + signed offset
-	C_LOREG
-	C_FPSCR
-	C_MSR
-	C_XER
-	C_LR
-	C_CTR
-	C_ANY
-	C_GOK
-	C_ADDR
-	C_GOTADDR
-	C_TLS_LE
-	C_TLS_IE
-	C_TEXTSIZE
-
-	C_NCLASS /* must be the last */
-)
-
-const (
-	AADD = obj.ABasePPC64 + obj.A_ARCHSPECIFIC + iota
-	AADDCC
-	AADDV
-	AADDVCC
-	AADDC
-	AADDCCC
-	AADDCV
-	AADDCVCC
-	AADDME
-	AADDMECC
-	AADDMEVCC
-	AADDMEV
-	AADDE
-	AADDECC
-	AADDEVCC
-	AADDEV
-	AADDZE
-	AADDZECC
-	AADDZEVCC
-	AADDZEV
-	AAND
-	AANDCC
-	AANDN
-	AANDNCC
-	ABC
-	ABCL
-	ABEQ
-	ABGE // not LT = G/E/U
-	ABGT
-	ABLE // not GT = L/E/U
-	ABLT
-	ABNE // not EQ = L/G/U
-	ABVC // Unordered-clear
-	ABVS // Unordered-set
-	ACMP
-	ACMPU
-	ACNTLZW
-	ACNTLZWCC
-	ACRAND
-	ACRANDN
-	ACREQV
-	ACRNAND
-	ACRNOR
-	ACROR
-	ACRORN
-	ACRXOR
-	ADIVW
-	ADIVWCC
-	ADIVWVCC
-	ADIVWV
-	ADIVWU
-	ADIVWUCC
-	ADIVWUVCC
-	ADIVWUV
-	AEQV
-	AEQVCC
-	AEXTSB
-	AEXTSBCC
-	AEXTSH
-	AEXTSHCC
-	AFABS
-	AFABSCC
-	AFADD
-	AFADDCC
-	AFADDS
-	AFADDSCC
-	AFCMPO
-	AFCMPU
-	AFCTIW
-	AFCTIWCC
-	AFCTIWZ
-	AFCTIWZCC
-	AFDIV
-	AFDIVCC
-	AFDIVS
-	AFDIVSCC
-	AFMADD
-	AFMADDCC
-	AFMADDS
-	AFMADDSCC
-	AFMOVD
-	AFMOVDCC
-	AFMOVDU
-	AFMOVS
-	AFMOVSU
-	AFMOVSX
-	AFMOVSZ
-	AFMSUB
-	AFMSUBCC
-	AFMSUBS
-	AFMSUBSCC
-	AFMUL
-	AFMULCC
-	AFMULS
-	AFMULSCC
-	AFNABS
-	AFNABSCC
-	AFNEG
-	AFNEGCC
-	AFNMADD
-	AFNMADDCC
-	AFNMADDS
-	AFNMADDSCC
-	AFNMSUB
-	AFNMSUBCC
-	AFNMSUBS
-	AFNMSUBSCC
-	AFRSP
-	AFRSPCC
-	AFSUB
-	AFSUBCC
-	AFSUBS
-	AFSUBSCC
-	AISEL
-	AMOVMW
-	ALBAR
-	ALSW
-	ALWAR
-	ALWSYNC
-	AMOVDBR
-	AMOVWBR
-	AMOVB
-	AMOVBU
-	AMOVBZ
-	AMOVBZU
-	AMOVH
-	AMOVHBR
-	AMOVHU
-	AMOVHZ
-	AMOVHZU
-	AMOVW
-	AMOVWU
-	AMOVFL
-	AMOVCRFS
-	AMTFSB0
-	AMTFSB0CC
-	AMTFSB1
-	AMTFSB1CC
-	AMULHW
-	AMULHWCC
-	AMULHWU
-	AMULHWUCC
-	AMULLW
-	AMULLWCC
-	AMULLWVCC
-	AMULLWV
-	ANAND
-	ANANDCC
-	ANEG
-	ANEGCC
-	ANEGVCC
-	ANEGV
-	ANOR
-	ANORCC
-	AOR
-	AORCC
-	AORN
-	AORNCC
-	AREM
-	AREMCC
-	AREMV
-	AREMVCC
-	AREMU
-	AREMUCC
-	AREMUV
-	AREMUVCC
-	ARFI
-	ARLWMI
-	ARLWMICC
-	ARLWNM
-	ARLWNMCC
-	ASLW
-	ASLWCC
-	ASRW
-	ASRAW
-	ASRAWCC
-	ASRWCC
-	ASTBCCC
-	ASTSW
-	ASTWCCC
-	ASUB
-	ASUBCC
-	ASUBVCC
-	ASUBC
-	ASUBCCC
-	ASUBCV
-	ASUBCVCC
-	ASUBME
-	ASUBMECC
-	ASUBMEVCC
-	ASUBMEV
-	ASUBV
-	ASUBE
-	ASUBECC
-	ASUBEV
-	ASUBEVCC
-	ASUBZE
-	ASUBZECC
-	ASUBZEVCC
-	ASUBZEV
-	ASYNC
-	AXOR
-	AXORCC
-
-	ADCBF
-	ADCBI
-	ADCBST
-	ADCBT
-	ADCBTST
-	ADCBZ
-	AECIWX
-	AECOWX
-	AEIEIO
-	AICBI
-	AISYNC
-	APTESYNC
-	ATLBIE
-	ATLBIEL
-	ATLBSYNC
-	ATW
-
-	ASYSCALL
-	AWORD
-
-	ARFCI
-
-	/* optional on 32-bit */
-	AFRES
-	AFRESCC
-	AFRIM
-	AFRIMCC
-	AFRIP
-	AFRIPCC
-	AFRIZ
-	AFRIZCC
-	AFRSQRTE
-	AFRSQRTECC
-	AFSEL
-	AFSELCC
-	AFSQRT
-	AFSQRTCC
-	AFSQRTS
-	AFSQRTSCC
-
-	/* 64-bit */
-
-	ACNTLZD
-	ACNTLZDCC
-	ACMPW /* CMP with L=0 */
-	ACMPWU
-	ADIVD
-	ADIVDCC
-	ADIVDE
-	ADIVDECC
-	ADIVDEU
-	ADIVDEUCC
-	ADIVDVCC
-	ADIVDV
-	ADIVDU
-	ADIVDUCC
-	ADIVDUVCC
-	ADIVDUV
-	AEXTSW
-	AEXTSWCC
-	/* AFCFIW; AFCFIWCC */
-	AFCFID
-	AFCFIDCC
-	AFCFIDU
-	AFCFIDUCC
-	AFCTID
-	AFCTIDCC
-	AFCTIDZ
-	AFCTIDZCC
-	ALDAR
-	AMOVD
-	AMOVDU
-	AMOVWZ
-	AMOVWZU
-	AMULHD
-	AMULHDCC
-	AMULHDU
-	AMULHDUCC
-	AMULLD
-	AMULLDCC
-	AMULLDVCC
-	AMULLDV
-	ARFID
-	ARLDMI
-	ARLDMICC
-	ARLDIMI
-	ARLDIMICC
-	ARLDC
-	ARLDCCC
-	ARLDCR
-	ARLDCRCC
-	ARLDICR
-	ARLDICRCC
-	ARLDCL
-	ARLDCLCC
-	ARLDICL
-	ARLDICLCC
-	ASLBIA
-	ASLBIE
-	ASLBMFEE
-	ASLBMFEV
-	ASLBMTE
-	ASLD
-	ASLDCC
-	ASRD
-	ASRAD
-	ASRADCC
-	ASRDCC
-	ASTDCCC
-	ATD
-
-	/* 64-bit pseudo operation */
-	ADWORD
-	AREMD
-	AREMDCC
-	AREMDV
-	AREMDVCC
-	AREMDU
-	AREMDUCC
-	AREMDUV
-	AREMDUVCC
-
-	/* more 64-bit operations */
-	AHRFID
-
-	/* Vector */
-	ALV
-	ALVEBX
-	ALVEHX
-	ALVEWX
-	ALVX
-	ALVXL
-	ALVSL
-	ALVSR
-	ASTV
-	ASTVEBX
-	ASTVEHX
-	ASTVEWX
-	ASTVX
-	ASTVXL
-	AVAND
-	AVANDL
-	AVANDC
-	AVNAND
-	AVOR
-	AVORL
-	AVORC
-	AVNOR
-	AVXOR
-	AVEQV
-	AVADDUM
-	AVADDUBM
-	AVADDUHM
-	AVADDUWM
-	AVADDUDM
-	AVADDUQM
-	AVADDCU
-	AVADDCUQ
-	AVADDCUW
-	AVADDUS
-	AVADDUBS
-	AVADDUHS
-	AVADDUWS
-	AVADDSS
-	AVADDSBS
-	AVADDSHS
-	AVADDSWS
-	AVADDE
-	AVADDEUQM
-	AVADDECUQ
-	AVSUBUM
-	AVSUBUBM
-	AVSUBUHM
-	AVSUBUWM
-	AVSUBUDM
-	AVSUBUQM
-	AVSUBCU
-	AVSUBCUQ
-	AVSUBCUW
-	AVSUBUS
-	AVSUBUBS
-	AVSUBUHS
-	AVSUBUWS
-	AVSUBSS
-	AVSUBSBS
-	AVSUBSHS
-	AVSUBSWS
-	AVSUBE
-	AVSUBEUQM
-	AVSUBECUQ
-	AVR
-	AVRLB
-	AVRLH
-	AVRLW
-	AVRLD
-	AVS
-	AVSLB
-	AVSLH
-	AVSLW
-	AVSL
-	AVSLO
-	AVSRB
-	AVSRH
-	AVSRW
-	AVSR
-	AVSRO
-	AVSLD
-	AVSRD
-	AVSA
-	AVSRAB
-	AVSRAH
-	AVSRAW
-	AVSRAD
-	AVSOI
-	AVSLDOI
-	AVCLZ
-	AVCLZB
-	AVCLZH
-	AVCLZW
-	AVCLZD
-	AVPOPCNT
-	AVPOPCNTB
-	AVPOPCNTH
-	AVPOPCNTW
-	AVPOPCNTD
-	AVCMPEQ
-	AVCMPEQUB
-	AVCMPEQUBCC
-	AVCMPEQUH
-	AVCMPEQUHCC
-	AVCMPEQUW
-	AVCMPEQUWCC
-	AVCMPEQUD
-	AVCMPEQUDCC
-	AVCMPGT
-	AVCMPGTUB
-	AVCMPGTUBCC
-	AVCMPGTUH
-	AVCMPGTUHCC
-	AVCMPGTUW
-	AVCMPGTUWCC
-	AVCMPGTUD
-	AVCMPGTUDCC
-	AVCMPGTSB
-	AVCMPGTSBCC
-	AVCMPGTSH
-	AVCMPGTSHCC
-	AVCMPGTSW
-	AVCMPGTSWCC
-	AVCMPGTSD
-	AVCMPGTSDCC
-	AVPERM
-	AVSEL
-	AVSPLT
-	AVSPLTB
-	AVSPLTH
-	AVSPLTW
-	AVSPLTI
-	AVSPLTISB
-	AVSPLTISH
-	AVSPLTISW
-	AVCIPH
-	AVCIPHER
-	AVCIPHERLAST
-	AVNCIPH
-	AVNCIPHER
-	AVNCIPHERLAST
-	AVSBOX
-	AVSHASIGMA
-	AVSHASIGMAW
-	AVSHASIGMAD
-
-	/* VSX */
-	ALXV
-	ALXVD2X
-	ALXVDSX
-	ALXVW4X
-	ASTXV
-	ASTXVD2X
-	ASTXVW4X
-	ALXS
-	ALXSDX
-	ASTXS
-	ASTXSDX
-	ALXSI
-	ALXSIWAX
-	ALXSIWZX
-	ASTXSI
-	ASTXSIWX
-	AMFVSR
-	AMFVSRD
-	AMFVSRWZ
-	AMTVSR
-	AMTVSRD
-	AMTVSRWA
-	AMTVSRWZ
-	AXXLAND
-	AXXLANDQ
-	AXXLANDC
-	AXXLEQV
-	AXXLNAND
-	AXXLOR
-	AXXLORC
-	AXXLNOR
-	AXXLORQ
-	AXXLXOR
-	AXXSEL
-	AXXMRG
-	AXXMRGHW
-	AXXMRGLW
-	AXXSPLT
-	AXXSPLTW
-	AXXPERM
-	AXXPERMDI
-	AXXSI
-	AXXSLDWI
-	AXSCV
-	AXSCVDPSP
-	AXSCVSPDP
-	AXSCVDPSPN
-	AXSCVSPDPN
-	AXVCV
-	AXVCVDPSP
-	AXVCVSPDP
-	AXSCVX
-	AXSCVDPSXDS
-	AXSCVDPSXWS
-	AXSCVDPUXDS
-	AXSCVDPUXWS
-	AXSCVXP
-	AXSCVSXDDP
-	AXSCVUXDDP
-	AXSCVSXDSP
-	AXSCVUXDSP
-	AXVCVX
-	AXVCVDPSXDS
-	AXVCVDPSXWS
-	AXVCVDPUXDS
-	AXVCVDPUXWS
-	AXVCVSPSXDS
-	AXVCVSPSXWS
-	AXVCVSPUXDS
-	AXVCVSPUXWS
-	AXVCVXP
-	AXVCVSXDDP
-	AXVCVSXWDP
-	AXVCVUXDDP
-	AXVCVUXWDP
-	AXVCVSXDSP
-	AXVCVSXWSP
-	AXVCVUXDSP
-	AXVCVUXWSP
-
-	ALAST
-
-	// aliases
-	ABR = obj.AJMP
-	ABL = obj.ACALL
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/anames.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/anames.go
deleted file mode 100644
index 2d27716..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/anames.go
+++ /dev/null
@@ -1,552 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/anames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/anames.go:1
-// Generated by stringer -i a.out.go -o anames.go -p ppc64
-// Do not edit.
-
-package ppc64
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-	obj.A_ARCHSPECIFIC: "ADD",
-	"ADDCC",
-	"ADDV",
-	"ADDVCC",
-	"ADDC",
-	"ADDCCC",
-	"ADDCV",
-	"ADDCVCC",
-	"ADDME",
-	"ADDMECC",
-	"ADDMEVCC",
-	"ADDMEV",
-	"ADDE",
-	"ADDECC",
-	"ADDEVCC",
-	"ADDEV",
-	"ADDZE",
-	"ADDZECC",
-	"ADDZEVCC",
-	"ADDZEV",
-	"AND",
-	"ANDCC",
-	"ANDN",
-	"ANDNCC",
-	"BC",
-	"BCL",
-	"BEQ",
-	"BGE",
-	"BGT",
-	"BLE",
-	"BLT",
-	"BNE",
-	"BVC",
-	"BVS",
-	"CMP",
-	"CMPU",
-	"CNTLZW",
-	"CNTLZWCC",
-	"CRAND",
-	"CRANDN",
-	"CREQV",
-	"CRNAND",
-	"CRNOR",
-	"CROR",
-	"CRORN",
-	"CRXOR",
-	"DIVW",
-	"DIVWCC",
-	"DIVWVCC",
-	"DIVWV",
-	"DIVWU",
-	"DIVWUCC",
-	"DIVWUVCC",
-	"DIVWUV",
-	"EQV",
-	"EQVCC",
-	"EXTSB",
-	"EXTSBCC",
-	"EXTSH",
-	"EXTSHCC",
-	"FABS",
-	"FABSCC",
-	"FADD",
-	"FADDCC",
-	"FADDS",
-	"FADDSCC",
-	"FCMPO",
-	"FCMPU",
-	"FCTIW",
-	"FCTIWCC",
-	"FCTIWZ",
-	"FCTIWZCC",
-	"FDIV",
-	"FDIVCC",
-	"FDIVS",
-	"FDIVSCC",
-	"FMADD",
-	"FMADDCC",
-	"FMADDS",
-	"FMADDSCC",
-	"FMOVD",
-	"FMOVDCC",
-	"FMOVDU",
-	"FMOVS",
-	"FMOVSU",
-	"FMOVSX",
-	"FMOVSZ",
-	"FMSUB",
-	"FMSUBCC",
-	"FMSUBS",
-	"FMSUBSCC",
-	"FMUL",
-	"FMULCC",
-	"FMULS",
-	"FMULSCC",
-	"FNABS",
-	"FNABSCC",
-	"FNEG",
-	"FNEGCC",
-	"FNMADD",
-	"FNMADDCC",
-	"FNMADDS",
-	"FNMADDSCC",
-	"FNMSUB",
-	"FNMSUBCC",
-	"FNMSUBS",
-	"FNMSUBSCC",
-	"FRSP",
-	"FRSPCC",
-	"FSUB",
-	"FSUBCC",
-	"FSUBS",
-	"FSUBSCC",
-	"ISEL",
-	"MOVMW",
-	"LBAR",
-	"LSW",
-	"LWAR",
-	"LWSYNC",
-	"MOVDBR",
-	"MOVWBR",
-	"MOVB",
-	"MOVBU",
-	"MOVBZ",
-	"MOVBZU",
-	"MOVH",
-	"MOVHBR",
-	"MOVHU",
-	"MOVHZ",
-	"MOVHZU",
-	"MOVW",
-	"MOVWU",
-	"MOVFL",
-	"MOVCRFS",
-	"MTFSB0",
-	"MTFSB0CC",
-	"MTFSB1",
-	"MTFSB1CC",
-	"MULHW",
-	"MULHWCC",
-	"MULHWU",
-	"MULHWUCC",
-	"MULLW",
-	"MULLWCC",
-	"MULLWVCC",
-	"MULLWV",
-	"NAND",
-	"NANDCC",
-	"NEG",
-	"NEGCC",
-	"NEGVCC",
-	"NEGV",
-	"NOR",
-	"NORCC",
-	"OR",
-	"ORCC",
-	"ORN",
-	"ORNCC",
-	"REM",
-	"REMCC",
-	"REMV",
-	"REMVCC",
-	"REMU",
-	"REMUCC",
-	"REMUV",
-	"REMUVCC",
-	"RFI",
-	"RLWMI",
-	"RLWMICC",
-	"RLWNM",
-	"RLWNMCC",
-	"SLW",
-	"SLWCC",
-	"SRW",
-	"SRAW",
-	"SRAWCC",
-	"SRWCC",
-	"STBCCC",
-	"STSW",
-	"STWCCC",
-	"SUB",
-	"SUBCC",
-	"SUBVCC",
-	"SUBC",
-	"SUBCCC",
-	"SUBCV",
-	"SUBCVCC",
-	"SUBME",
-	"SUBMECC",
-	"SUBMEVCC",
-	"SUBMEV",
-	"SUBV",
-	"SUBE",
-	"SUBECC",
-	"SUBEV",
-	"SUBEVCC",
-	"SUBZE",
-	"SUBZECC",
-	"SUBZEVCC",
-	"SUBZEV",
-	"SYNC",
-	"XOR",
-	"XORCC",
-	"DCBF",
-	"DCBI",
-	"DCBST",
-	"DCBT",
-	"DCBTST",
-	"DCBZ",
-	"ECIWX",
-	"ECOWX",
-	"EIEIO",
-	"ICBI",
-	"ISYNC",
-	"PTESYNC",
-	"TLBIE",
-	"TLBIEL",
-	"TLBSYNC",
-	"TW",
-	"SYSCALL",
-	"WORD",
-	"RFCI",
-	"FRES",
-	"FRESCC",
-	"FRIM",
-	"FRIMCC",
-	"FRIP",
-	"FRIPCC",
-	"FRIZ",
-	"FRIZCC",
-	"FRSQRTE",
-	"FRSQRTECC",
-	"FSEL",
-	"FSELCC",
-	"FSQRT",
-	"FSQRTCC",
-	"FSQRTS",
-	"FSQRTSCC",
-	"CNTLZD",
-	"CNTLZDCC",
-	"CMPW",
-	"CMPWU",
-	"DIVD",
-	"DIVDCC",
-	"DIVDE",
-	"DIVDECC",
-	"DIVDEU",
-	"DIVDEUCC",
-	"DIVDVCC",
-	"DIVDV",
-	"DIVDU",
-	"DIVDUCC",
-	"DIVDUVCC",
-	"DIVDUV",
-	"EXTSW",
-	"EXTSWCC",
-	"FCFID",
-	"FCFIDCC",
-	"FCFIDU",
-	"FCFIDUCC",
-	"FCTID",
-	"FCTIDCC",
-	"FCTIDZ",
-	"FCTIDZCC",
-	"LDAR",
-	"MOVD",
-	"MOVDU",
-	"MOVWZ",
-	"MOVWZU",
-	"MULHD",
-	"MULHDCC",
-	"MULHDU",
-	"MULHDUCC",
-	"MULLD",
-	"MULLDCC",
-	"MULLDVCC",
-	"MULLDV",
-	"RFID",
-	"RLDMI",
-	"RLDMICC",
-	"RLDIMI",
-	"RLDIMICC",
-	"RLDC",
-	"RLDCCC",
-	"RLDCR",
-	"RLDCRCC",
-	"RLDICR",
-	"RLDICRCC",
-	"RLDCL",
-	"RLDCLCC",
-	"RLDICL",
-	"RLDICLCC",
-	"SLBIA",
-	"SLBIE",
-	"SLBMFEE",
-	"SLBMFEV",
-	"SLBMTE",
-	"SLD",
-	"SLDCC",
-	"SRD",
-	"SRAD",
-	"SRADCC",
-	"SRDCC",
-	"STDCCC",
-	"TD",
-	"DWORD",
-	"REMD",
-	"REMDCC",
-	"REMDV",
-	"REMDVCC",
-	"REMDU",
-	"REMDUCC",
-	"REMDUV",
-	"REMDUVCC",
-	"HRFID",
-	"LV",
-	"LVEBX",
-	"LVEHX",
-	"LVEWX",
-	"LVX",
-	"LVXL",
-	"LVSL",
-	"LVSR",
-	"STV",
-	"STVEBX",
-	"STVEHX",
-	"STVEWX",
-	"STVX",
-	"STVXL",
-	"VAND",
-	"VANDL",
-	"VANDC",
-	"VNAND",
-	"VOR",
-	"VORL",
-	"VORC",
-	"VNOR",
-	"VXOR",
-	"VEQV",
-	"VADDUM",
-	"VADDUBM",
-	"VADDUHM",
-	"VADDUWM",
-	"VADDUDM",
-	"VADDUQM",
-	"VADDCU",
-	"VADDCUQ",
-	"VADDCUW",
-	"VADDUS",
-	"VADDUBS",
-	"VADDUHS",
-	"VADDUWS",
-	"VADDSS",
-	"VADDSBS",
-	"VADDSHS",
-	"VADDSWS",
-	"VADDE",
-	"VADDEUQM",
-	"VADDECUQ",
-	"VSUBUM",
-	"VSUBUBM",
-	"VSUBUHM",
-	"VSUBUWM",
-	"VSUBUDM",
-	"VSUBUQM",
-	"VSUBCU",
-	"VSUBCUQ",
-	"VSUBCUW",
-	"VSUBUS",
-	"VSUBUBS",
-	"VSUBUHS",
-	"VSUBUWS",
-	"VSUBSS",
-	"VSUBSBS",
-	"VSUBSHS",
-	"VSUBSWS",
-	"VSUBE",
-	"VSUBEUQM",
-	"VSUBECUQ",
-	"VR",
-	"VRLB",
-	"VRLH",
-	"VRLW",
-	"VRLD",
-	"VS",
-	"VSLB",
-	"VSLH",
-	"VSLW",
-	"VSL",
-	"VSLO",
-	"VSRB",
-	"VSRH",
-	"VSRW",
-	"VSR",
-	"VSRO",
-	"VSLD",
-	"VSRD",
-	"VSA",
-	"VSRAB",
-	"VSRAH",
-	"VSRAW",
-	"VSRAD",
-	"VSOI",
-	"VSLDOI",
-	"VCLZ",
-	"VCLZB",
-	"VCLZH",
-	"VCLZW",
-	"VCLZD",
-	"VPOPCNT",
-	"VPOPCNTB",
-	"VPOPCNTH",
-	"VPOPCNTW",
-	"VPOPCNTD",
-	"VCMPEQ",
-	"VCMPEQUB",
-	"VCMPEQUBCC",
-	"VCMPEQUH",
-	"VCMPEQUHCC",
-	"VCMPEQUW",
-	"VCMPEQUWCC",
-	"VCMPEQUD",
-	"VCMPEQUDCC",
-	"VCMPGT",
-	"VCMPGTUB",
-	"VCMPGTUBCC",
-	"VCMPGTUH",
-	"VCMPGTUHCC",
-	"VCMPGTUW",
-	"VCMPGTUWCC",
-	"VCMPGTUD",
-	"VCMPGTUDCC",
-	"VCMPGTSB",
-	"VCMPGTSBCC",
-	"VCMPGTSH",
-	"VCMPGTSHCC",
-	"VCMPGTSW",
-	"VCMPGTSWCC",
-	"VCMPGTSD",
-	"VCMPGTSDCC",
-	"VPERM",
-	"VSEL",
-	"VSPLT",
-	"VSPLTB",
-	"VSPLTH",
-	"VSPLTW",
-	"VSPLTI",
-	"VSPLTISB",
-	"VSPLTISH",
-	"VSPLTISW",
-	"VCIPH",
-	"VCIPHER",
-	"VCIPHERLAST",
-	"VNCIPH",
-	"VNCIPHER",
-	"VNCIPHERLAST",
-	"VSBOX",
-	"VSHASIGMA",
-	"VSHASIGMAW",
-	"VSHASIGMAD",
-	"LXV",
-	"LXVD2X",
-	"LXVDSX",
-	"LXVW4X",
-	"STXV",
-	"STXVD2X",
-	"STXVW4X",
-	"LXS",
-	"LXSDX",
-	"STXS",
-	"STXSDX",
-	"LXSI",
-	"LXSIWAX",
-	"LXSIWZX",
-	"STXSI",
-	"STXSIWX",
-	"MFVSR",
-	"MFVSRD",
-	"MFVSRWZ",
-	"MTVSR",
-	"MTVSRD",
-	"MTVSRWA",
-	"MTVSRWZ",
-	"XXLAND",
-	"XXLANDQ",
-	"XXLANDC",
-	"XXLEQV",
-	"XXLNAND",
-	"XXLOR",
-	"XXLORC",
-	"XXLNOR",
-	"XXLORQ",
-	"XXLXOR",
-	"XXSEL",
-	"XXMRG",
-	"XXMRGHW",
-	"XXMRGLW",
-	"XXSPLT",
-	"XXSPLTW",
-	"XXPERM",
-	"XXPERMDI",
-	"XXSI",
-	"XXSLDWI",
-	"XSCV",
-	"XSCVDPSP",
-	"XSCVSPDP",
-	"XSCVDPSPN",
-	"XSCVSPDPN",
-	"XVCV",
-	"XVCVDPSP",
-	"XVCVSPDP",
-	"XSCVX",
-	"XSCVDPSXDS",
-	"XSCVDPSXWS",
-	"XSCVDPUXDS",
-	"XSCVDPUXWS",
-	"XSCVXP",
-	"XSCVSXDDP",
-	"XSCVUXDDP",
-	"XSCVSXDSP",
-	"XSCVUXDSP",
-	"XVCVX",
-	"XVCVDPSXDS",
-	"XVCVDPSXWS",
-	"XVCVDPUXDS",
-	"XVCVDPUXWS",
-	"XVCVSPSXDS",
-	"XVCVSPSXWS",
-	"XVCVSPUXDS",
-	"XVCVSPUXWS",
-	"XVCVXP",
-	"XVCVSXDDP",
-	"XVCVSXWDP",
-	"XVCVUXDDP",
-	"XVCVUXWDP",
-	"XVCVSXDSP",
-	"XVCVSXWSP",
-	"XVCVUXDSP",
-	"XVCVUXWSP",
-	"LAST",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/anames9.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/anames9.go
deleted file mode 100644
index 9218c2f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/anames9.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/anames9.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/anames9.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ppc64
-
-var cnames9 = []string{
-	"NONE",
-	"REG",
-	"FREG",
-	"VREG",
-	"VSREG",
-	"CREG",
-	"SPR",
-	"ZCON",
-	"SCON",
-	"UCON",
-	"ADDCON",
-	"ANDCON",
-	"LCON",
-	"DCON",
-	"SACON",
-	"SECON",
-	"LACON",
-	"LECON",
-	"DACON",
-	"SBRA",
-	"LBRA",
-	"SAUTO",
-	"LAUTO",
-	"SEXT",
-	"LEXT",
-	"ZOREG",
-	"SOREG",
-	"LOREG",
-	"FPSCR",
-	"MSR",
-	"XER",
-	"LR",
-	"CTR",
-	"ANY",
-	"GOK",
-	"ADDR",
-	"GOTADDR",
-	"TLS_LE",
-	"TLS_IE",
-	"TEXTSIZE",
-	"NCLASS",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/asm9.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/asm9.go
deleted file mode 100644
index fb4c730..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/asm9.go
+++ /dev/null
@@ -1,4554 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/asm9.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/asm9.go:1
-// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"encoding/binary"
-	"fmt"
-	"log"
-	"sort"
-)
-
-// Instruction layout.
-
-const (
-	funcAlign = 8
-)
-
-const (
-	r0iszero = 1
-)
-
-type Optab struct {
-	as    obj.As // Opcode
-	a1    uint8
-	a2    uint8
-	a3    uint8
-	a4    uint8
-	type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
-	size  int8
-	param int16
-}
-
-var optab = []Optab{
-	{obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
-	{obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
-	{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
-	{obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
-	/* move register */
-	{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
-	{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
-	{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
-	{AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
-	{AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
-	{AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
-	{AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
-	{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
-	{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
-	{AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
-	{AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
-	{AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
-	{AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
-	{AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
-	{AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
-	{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
-	{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
-	{AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
-	{AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
-	{AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
-	{AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
-	{AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
-	{AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
-	{AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
-	{AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
-	{AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
-	{AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
-	{AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
-	{AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
-	{AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
-	{AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
-	{ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
-	{ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
-	{ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
-	{ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
-	{AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
-	{AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
-	{AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
-	{AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
-	{AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
-	{AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
-	{AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
-	{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
-	{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
-	{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
-	{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
-	{ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
-	{ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
-	{ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
-	{ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
-	{ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
-	{ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
-	{ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
-	{ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
-	{ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
-	{ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
-	{ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
-	{ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
-	{ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
-	{ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
-	{ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
-	{ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
-	{ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
-	{ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
-	{ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
-	{ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
-	{ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
-	{ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
-	{AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
-	{AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
-	{AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
-	{AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
-	{AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
-	{AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
-	{AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
-
-	/* store, short offset */
-	{AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-
-	/* load, short offset */
-	{AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
-	{AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
-	{AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
-	{AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
-	{AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
-	{AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
-	{AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
-	{AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
-	{AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
-	{AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
-	{AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
-	{AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
-	{AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
-	{AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
-	{AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
-
-	/* store, long offset */
-	{AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
-	{AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
-	{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
-
-	/* load, long offset */
-	{AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
-	{AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
-	{AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
-	{AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
-	{AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
-	{AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
-	{AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
-	{AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
-	{AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
-	{AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
-	{AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
-	{AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
-	{AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
-	{AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
-	{AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
-	{AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
-	{AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
-	{AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
-	{AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
-	{AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
-
-	{AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
-	{AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
-
-	{AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
-
-	/* load constant */
-	{AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
-	{AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
-	{AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
-	{AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
-	{AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
-	{AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
-	{AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
-	{AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
-	{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
-	{AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
-	{AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
-	{AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
-	{AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
-	{AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
-	{AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
-
-	/* load unsigned/long constants (TO DO: check) */
-	{AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
-	{AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
-	{AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
-	{AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
-	{AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
-	{AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
-	{AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
-	{AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
-	{AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
-	{AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
-	{ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
-	{ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
-	{ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
-	{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
-	{ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
-	{ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
-	{ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
-	{ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
-	{ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
-	{ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
-	{ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
-	{ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
-	{ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
-	{ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
-	{ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
-	{ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
-	{ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
-	{ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
-	{AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
-	{AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
-	{AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
-	{AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
-	{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
-	{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
-	{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
-	{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
-	{AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
-	{AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
-	{AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
-	{AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
-	{AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
-	{AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
-	{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
-	{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
-	{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
-	{ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
-	{AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
-	{AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
-	{AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
-	{AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
-	{AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
-	{ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
-	{ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
-	{AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
-	{AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
-	{AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
-	{AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
-	{AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
-	{AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
-	{AREMDU, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
-	{AREMDU, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
-	{AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
-	{AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
-	{AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
-	{AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
-	{AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
-	{AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0},  /* mfmsr */
-	{AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0},  /* mtmsrd */
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
-
-	/* Vector instructions */
-
-	/* Vector load */
-	{ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
-
-	/* Vector store */
-	{ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
-
-	/* Vector logical */
-	{AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
-	{AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0},  /* vector or, vx-form */
-
-	/* Vector add */
-	{AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
-	{AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
-	{AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
-	{AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
-	{AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0},  /* vector add extended, va-form */
-
-	/* Vector subtract */
-	{AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
-	{AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
-	{AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
-	{AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
-	{AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0},  /* vector subtract extended, va-form */
-
-	/* Vector rotate */
-	{AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
-
-	/* Vector shift */
-	{AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0},     /* vector shift, vx-form */
-	{AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0},    /* vector shift algebraic, vx-form */
-	{AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
-
-	/* Vector count */
-	{AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0},    /* vector count leading zeros, vx-form */
-	{AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
-
-	/* Vector compare */
-	{AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
-	{AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
-
-	/* Vector permute */
-	{AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
-
-	/* Vector select */
-	{AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
-
-	/* Vector splat */
-	{AVSPLT, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
-	{AVSPLT, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
-	{AVSPLTI, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
-	{AVSPLTI, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
-
-	/* Vector AES */
-	{AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0},  /* vector AES cipher, vx-form */
-	{AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
-	{AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0},  /* vector AES subbytes, vx-form */
-
-	/* Vector SHA */
-	{AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
-
-	/* VSX vector load */
-	{ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
-
-	/* VSX vector store */
-	{ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
-
-	/* VSX scalar load */
-	{ALXS, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
-
-	/* VSX scalar store */
-	{ASTXS, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
-
-	/* VSX scalar as integer load */
-	{ALXSI, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
-
-	/* VSX scalar store as integer */
-	{ASTXSI, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
-
-	/* VSX move from VSR */
-	{AMFVSR, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
-
-	/* VSX move to VSR */
-	{AMTVSR, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
-
-	/* VSX logical */
-	{AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
-	{AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0},  /* vsx or, xx3-form */
-
-	/* VSX select */
-	{AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
-
-	/* VSX merge */
-	{AXXMRG, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
-
-	/* VSX splat */
-	{AXXSPLT, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
-
-	/* VSX permute */
-	{AXXPERM, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
-
-	/* VSX shift */
-	{AXXSI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
-
-	/* VSX scalar FP-FP conversion */
-	{AXSCV, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
-
-	/* VSX vector FP-FP conversion */
-	{AXVCV, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
-
-	/* VSX scalar FP-integer conversion */
-	{AXSCVX, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
-
-	/* VSX scalar integer-FP conversion */
-	{AXSCVXP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
-
-	/* VSX vector FP-integer conversion */
-	{AXVCVX, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
-
-	/* VSX vector integer-FP conversion */
-	{AXVCVXP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
-
-	/* 64-bit special registers */
-	{AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
-	{AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
-	{AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
-
-	/* 32-bit special registers (gloss over sign-extension or not?) */
-	{AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
-	{AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
-	{AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
-	{AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
-	{AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
-	{AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
-	{AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
-	{AMOVFL, C_REG, C_NONE, C_LCON, C_CREG, 69, 4, 0},
-	{AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
-	{AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
-	{AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
-	{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
-	{ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
-	{ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
-	{ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
-	{ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
-	{ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
-	{ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
-	{ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
-	{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
-	{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
-	{ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
-	{ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
-	{ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
-	{ADCBF, C_ZOREG, C_REG, C_NONE, C_NONE, 43, 4, 0},
-	{AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
-	{AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
-	{AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
-	{AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
-	{AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
-	{ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
-	{ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
-	{ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
-	{ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
-	{ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
-	{ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
-	{ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
-	{ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
-	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
-	{obj.AUSEFIELD, C_ADDR, C_NONE, C_NONE, C_NONE, 0, 0, 0},
-	{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
-	{obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
-	{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
-	{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
-	{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
-
-	{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
-}
-
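-// oprange maps each opcode (masked with obj.AMask) to the slice of optab
-// entries that can encode it.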
-var oprange [ALAST & obj.AMask][]Optab
-
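-// xcmp caches the operand-class compatibility relation computed by cmp,
-// so oplook can test class matches with a table lookup.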
-var xcmp [C_NCLASS][C_NCLASS]bool
-
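-// span9 assigns a PC to every instruction in cursym, inserts extra
-// unconditional branches when a conditional branch target falls outside the
-// 16-bit displacement range, and then encodes the instructions into
-// cursym.P using asmout.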
-func span9(ctxt *obj.Link, cursym *obj.LSym) {
-	p := cursym.Text
-	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
-		return
-	}
-	ctxt.Cursym = cursym
-	ctxt.Autosize = int32(p.To.Offset)
-
-	if oprange[AANDN&obj.AMask] == nil {
-		buildop(ctxt)
-	}
-
-	c := int64(0)
-	p.Pc = c
-
-	var m int
-	var o *Optab
-	for p = p.Link; p != nil; p = p.Link {
-		ctxt.Curp = p
-		p.Pc = c
-		o = oplook(ctxt, p)
-		m = int(o.size)
-		if m == 0 {
-			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
-				ctxt.Diag("zero-width instruction\n%v", p)
-			}
-			continue
-		}
-
-		c += int64(m)
-	}
-
-	cursym.Size = c
-
-	/*
-	 * if any procedure is large enough to
-	 * generate a large SBRA branch, then
-	 * generate extra passes putting branches
-	 * around jmps to fix. this is rare.
-	 */
-	bflag := 1
-
-	var otxt int64
-	var q *obj.Prog
-	for bflag != 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f span1\n", obj.Cputime())
-		}
-		bflag = 0
-		c = 0
-		for p = cursym.Text.Link; p != nil; p = p.Link {
-			p.Pc = c
-			o = oplook(ctxt, p)
-
-			// very large conditional branches
-			if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
-				otxt = p.Pcond.Pc - c
-				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
-					q = ctxt.NewProg()
-					q.Link = p.Link
-					p.Link = q
-					q.As = ABR
-					q.To.Type = obj.TYPE_BRANCH
-					q.Pcond = p.Pcond
-					p.Pcond = q
-					q = ctxt.NewProg()
-					q.Link = p.Link
-					p.Link = q
-					q.As = ABR
-					q.To.Type = obj.TYPE_BRANCH
-					q.Pcond = q.Link.Link
-
-					//addnop(p->link);
-					//addnop(p);
-					bflag = 1
-				}
-			}
-
-			m = int(o.size)
-			if m == 0 {
-				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
-					ctxt.Diag("zero-width instruction\n%v", p)
-				}
-				continue
-			}
-
-			c += int64(m)
-		}
-
-		cursym.Size = c
-	}
-
-	c += -c & (funcAlign - 1)
-	cursym.Size = c
-
-	/*
-	 * lay out the code, emitting code and data relocations.
-	 */
-
-	cursym.Grow(cursym.Size)
-
-	bp := cursym.P
-	var i int32
-	var out [6]uint32
-	for p := cursym.Text.Link; p != nil; p = p.Link {
-		ctxt.Pc = p.Pc
-		ctxt.Curp = p
-		o = oplook(ctxt, p)
-		if int(o.size) > 4*len(out) {
-			log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
-		}
-		asmout(ctxt, p, o, out[:])
-		for i = 0; i < int32(o.size/4); i++ {
-			ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
-			bp = bp[4:]
-		}
-	}
-}
-
-func isint32(v int64) bool {
-	return int64(int32(v)) == v
-}
-
-func isuint32(v uint64) bool {
-	return uint64(uint32(v)) == v
-}
-
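-// aclass classifies the operand a into one of the C_* operand classes used
-// to index optab, recording any evaluated constant or offset in
-// ctxt.Instoffset for the encoding pass.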
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
-	switch a.Type {
-	case obj.TYPE_NONE:
-		return C_NONE
-
-	case obj.TYPE_REG:
-		if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
-			return C_REG
-		}
-		if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
-			return C_FREG
-		}
-		if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
-			return C_VREG
-		}
-		if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
-			return C_VSREG
-		}
-		if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
-			return C_CREG
-		}
-		if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
-			switch a.Reg {
-			case REG_LR:
-				return C_LR
-
-			case REG_XER:
-				return C_XER
-
-			case REG_CTR:
-				return C_CTR
-			}
-
-			return C_SPR
-		}
-
-		if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
-			return C_SPR
-		}
-		if a.Reg == REG_FPSCR {
-			return C_FPSCR
-		}
-		if a.Reg == REG_MSR {
-			return C_MSR
-		}
-		return C_GOK
-
-	case obj.TYPE_MEM:
-		switch a.Name {
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			if a.Sym == nil {
-				break
-			}
-			ctxt.Instoffset = a.Offset
-			if a.Sym != nil { // use relocation
-				if a.Sym.Type == obj.STLSBSS {
-					if ctxt.Flag_shared {
-						return C_TLS_IE
-					} else {
-						return C_TLS_LE
-					}
-				}
-				return C_ADDR
-			}
-			return C_LEXT
-
-		case obj.NAME_GOTREF:
-			return C_GOTADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SAUTO
-			}
-			return C_LAUTO
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SAUTO
-			}
-			return C_LAUTO
-
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if ctxt.Instoffset == 0 {
-				return C_ZOREG
-			}
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SOREG
-			}
-			return C_LOREG
-		}
-
-		return C_GOK
-
-	case obj.TYPE_TEXTSIZE:
-		return C_TEXTSIZE
-
-	case obj.TYPE_CONST,
-		obj.TYPE_ADDR:
-		switch a.Name {
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if a.Reg != 0 {
-				if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
-					return C_SACON
-				}
-				if isint32(ctxt.Instoffset) {
-					return C_LACON
-				}
-				return C_DACON
-			}
-
-			goto consize
-
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			s := a.Sym
-			if s == nil {
-				break
-			}
-			if s.Type == obj.SCONST {
-				ctxt.Instoffset = a.Offset
-				goto consize
-			}
-
-			ctxt.Instoffset = a.Offset
-
-			/* not sure why this barfs */
-			return C_LCON
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SACON
-			}
-			return C_LACON
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SACON
-			}
-			return C_LACON
-		}
-
-		return C_GOK
-
-	consize:
-		if ctxt.Instoffset >= 0 {
-			if ctxt.Instoffset == 0 {
-				return C_ZCON
-			}
-			if ctxt.Instoffset <= 0x7fff {
-				return C_SCON
-			}
-			if ctxt.Instoffset <= 0xffff {
-				return C_ANDCON
-			}
-			if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
-				return C_UCON
-			}
-			if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
-				return C_LCON
-			}
-			return C_DCON
-		}
-
-		if ctxt.Instoffset >= -0x8000 {
-			return C_ADDCON
-		}
-		if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
-			return C_UCON
-		}
-		if isint32(ctxt.Instoffset) {
-			return C_LCON
-		}
-		return C_DCON
-
-	case obj.TYPE_BRANCH:
-		if a.Sym != nil && ctxt.Flag_dynlink {
-			return C_LBRAPIC
-		}
-		return C_SBRA
-	}
-
-	return C_GOK
-}
-
-func prasm(p *obj.Prog) {
-	fmt.Printf("%v\n", p)
-}
-
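-// oplook returns the optab entry matching the operand classes of p and
-// caches the result in p.Optab so later passes skip the search; an
-// unmatched combination is reported through ctxt.Diag.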
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
-	a1 := int(p.Optab)
-	if a1 != 0 {
-		return &optab[a1-1]
-	}
-	a1 = int(p.From.Class)
-	if a1 == 0 {
-		a1 = aclass(ctxt, &p.From) + 1
-		p.From.Class = int8(a1)
-	}
-
-	a1--
-	a3 := C_NONE + 1
-	if p.From3 != nil {
-		a3 = int(p.From3.Class)
-		if a3 == 0 {
-			a3 = aclass(ctxt, p.From3) + 1
-			p.From3.Class = int8(a3)
-		}
-	}
-
-	a3--
-	a4 := int(p.To.Class)
-	if a4 == 0 {
-		a4 = aclass(ctxt, &p.To) + 1
-		p.To.Class = int8(a4)
-	}
-
-	a4--
-	a2 := C_NONE
-	if p.Reg != 0 {
-		if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
-			a2 = C_REG
-		} else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
-			a2 = C_VREG
-		} else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
-			a2 = C_VSREG
-		} else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
-			a2 = C_FREG
-		}
-	}
-
-	//print("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4);
-	ops := oprange[p.As&obj.AMask]
-	c1 := &xcmp[a1]
-	c3 := &xcmp[a3]
-	c4 := &xcmp[a4]
-	for i := range ops {
-		op := &ops[i]
-		if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
-			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
-			return op
-		}
-	}
-
-	ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
-	prasm(p)
-	if ops == nil {
-		ops = optab
-	}
-	return &ops[0]
-}
-
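-// cmp reports whether an operand of class b is acceptable where an optab
-// entry expects class a (for example, a small constant C_SCON satisfies a
-// slot expecting C_LCON).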
-func cmp(a int, b int) bool {
-	if a == b {
-		return true
-	}
-	switch a {
-	case C_LCON:
-		if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
-			return true
-		}
-
-	case C_ADDCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_ANDCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_SPR:
-		if b == C_LR || b == C_XER || b == C_CTR {
-			return true
-		}
-
-	case C_UCON:
-		if b == C_ZCON {
-			return true
-		}
-
-	case C_SCON:
-		if b == C_ZCON {
-			return true
-		}
-
-	case C_LACON:
-		if b == C_SACON {
-			return true
-		}
-
-	case C_LBRA:
-		if b == C_SBRA {
-			return true
-		}
-
-	case C_LEXT:
-		if b == C_SEXT {
-			return true
-		}
-
-	case C_LAUTO:
-		if b == C_SAUTO {
-			return true
-		}
-
-	case C_REG:
-		if b == C_ZCON {
-			return r0iszero != 0 /*TypeKind(100016)*/
-		}
-
-	case C_LOREG:
-		if b == C_ZOREG || b == C_SOREG {
-			return true
-		}
-
-	case C_SOREG:
-		if b == C_ZOREG {
-			return true
-		}
-
-	case C_ANY:
-		return true
-	}
-
-	return false
-}
-
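-// ocmp orders optab entries by opcode and then by operand classes, so
-// buildop can slice oprange into contiguous per-opcode runs.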
-type ocmp []Optab
-
-func (x ocmp) Len() int {
-	return len(x)
-}
-
-func (x ocmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x ocmp) Less(i, j int) bool {
-	p1 := &x[i]
-	p2 := &x[j]
-	n := int(p1.as) - int(p2.as)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a1) - int(p2.a1)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a2) - int(p2.a2)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a3) - int(p2.a3)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a4) - int(p2.a4)
-	if n != 0 {
-		return n < 0
-	}
-	return false
-}
-func opset(a, b0 obj.As) {
-	oprange[a&obj.AMask] = oprange[b0]
-}
-
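-// buildop fills in xcmp, sorts optab, and records each opcode's entries in
-// oprange; opset registers aliases that reuse another opcode's entries.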
-func buildop(ctxt *obj.Link) {
-	var n int
-
-	for i := 0; i < C_NCLASS; i++ {
-		for n = 0; n < C_NCLASS; n++ {
-			if cmp(n, i) {
-				xcmp[i][n] = true
-			}
-		}
-	}
-	for n = 0; optab[n].as != obj.AXXX; n++ {
-	}
-	sort.Sort(ocmp(optab[:n]))
-	for i := 0; i < n; i++ {
-		r := optab[i].as
-		r0 := r & obj.AMask
-		start := i
-		for optab[i].as == r {
-			i++
-		}
-		oprange[r0] = optab[start:i]
-		i--
-
-		switch r {
-		default:
-			ctxt.Diag("unknown op in build: %v", r)
-			log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
-
-		case ADCBF: /* unary indexed: op (b+a); op (b) */
-			opset(ADCBI, r0)
-
-			opset(ADCBST, r0)
-			opset(ADCBT, r0)
-			opset(ADCBTST, r0)
-			opset(ADCBZ, r0)
-			opset(AICBI, r0)
-
-		case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
-			opset(ASTWCCC, r0)
-			opset(ASTBCCC, r0)
-
-			opset(ASTDCCC, r0)
-
-		case AREM: /* macro */
-			opset(AREMCC, r0)
-
-			opset(AREMV, r0)
-			opset(AREMVCC, r0)
-
-		case AREMU:
-			opset(AREMU, r0)
-			opset(AREMUCC, r0)
-			opset(AREMUV, r0)
-			opset(AREMUVCC, r0)
-
-		case AREMD:
-			opset(AREMDCC, r0)
-			opset(AREMDV, r0)
-			opset(AREMDVCC, r0)
-
-		case AREMDU:
-			opset(AREMDU, r0)
-			opset(AREMDUCC, r0)
-			opset(AREMDUV, r0)
-			opset(AREMDUVCC, r0)
-
-		case ADIVW: /* op Rb[,Ra],Rd */
-			opset(AMULHW, r0)
-
-			opset(AMULHWCC, r0)
-			opset(AMULHWU, r0)
-			opset(AMULHWUCC, r0)
-			opset(AMULLWCC, r0)
-			opset(AMULLWVCC, r0)
-			opset(AMULLWV, r0)
-			opset(ADIVWCC, r0)
-			opset(ADIVWV, r0)
-			opset(ADIVWVCC, r0)
-			opset(ADIVWU, r0)
-			opset(ADIVWUCC, r0)
-			opset(ADIVWUV, r0)
-			opset(ADIVWUVCC, r0)
-			opset(AADDCC, r0)
-			opset(AADDCV, r0)
-			opset(AADDCVCC, r0)
-			opset(AADDV, r0)
-			opset(AADDVCC, r0)
-			opset(AADDE, r0)
-			opset(AADDECC, r0)
-			opset(AADDEV, r0)
-			opset(AADDEVCC, r0)
-			opset(ACRAND, r0)
-			opset(ACRANDN, r0)
-			opset(ACREQV, r0)
-			opset(ACRNAND, r0)
-			opset(ACRNOR, r0)
-			opset(ACROR, r0)
-			opset(ACRORN, r0)
-			opset(ACRXOR, r0)
-			opset(AMULHD, r0)
-			opset(AMULHDCC, r0)
-			opset(AMULHDU, r0)
-			opset(AMULHDUCC, r0)
-			opset(AMULLD, r0)
-			opset(AMULLDCC, r0)
-			opset(AMULLDVCC, r0)
-			opset(AMULLDV, r0)
-			opset(ADIVD, r0)
-			opset(ADIVDCC, r0)
-			opset(ADIVDE, r0)
-			opset(ADIVDEU, r0)
-			opset(ADIVDECC, r0)
-			opset(ADIVDEUCC, r0)
-			opset(ADIVDVCC, r0)
-			opset(ADIVDV, r0)
-			opset(ADIVDU, r0)
-			opset(ADIVDUCC, r0)
-			opset(ADIVDUVCC, r0)
-
-		case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
-			opset(AMOVH, r0)
-
-			opset(AMOVHZ, r0)
-
-		case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
-			opset(AMOVHU, r0)
-
-			opset(AMOVHZU, r0)
-			opset(AMOVWU, r0)
-			opset(AMOVWZU, r0)
-			opset(AMOVDU, r0)
-			opset(AMOVMW, r0)
-
-		case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
-			opset(ALVEBX, r0)
-			opset(ALVEHX, r0)
-			opset(ALVEWX, r0)
-			opset(ALVX, r0)
-			opset(ALVXL, r0)
-			opset(ALVSL, r0)
-			opset(ALVSR, r0)
-
-		case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
-			opset(ASTVEBX, r0)
-			opset(ASTVEHX, r0)
-			opset(ASTVEWX, r0)
-			opset(ASTVX, r0)
-			opset(ASTVXL, r0)
-
-		case AVAND: /* vand, vandc, vnand */
-			opset(AVANDL, r0)
-			opset(AVANDC, r0)
-			opset(AVNAND, r0)
-
-		case AVOR: /* vor, vorc, vxor, vnor, veqv */
-			opset(AVORL, r0)
-			opset(AVORC, r0)
-			opset(AVXOR, r0)
-			opset(AVNOR, r0)
-			opset(AVEQV, r0)
-
-		case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
-			opset(AVADDUBM, r0)
-			opset(AVADDUHM, r0)
-			opset(AVADDUWM, r0)
-			opset(AVADDUDM, r0)
-			opset(AVADDUQM, r0)
-
-		case AVADDCU: /* vaddcuq, vaddcuw */
-			opset(AVADDCUQ, r0)
-			opset(AVADDCUW, r0)
-
-		case AVADDUS: /* vaddubs, vadduhs, vadduws */
-			opset(AVADDUBS, r0)
-			opset(AVADDUHS, r0)
-			opset(AVADDUWS, r0)
-
-		case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
-			opset(AVADDSBS, r0)
-			opset(AVADDSHS, r0)
-			opset(AVADDSWS, r0)
-
-		case AVADDE: /* vaddeuqm, vaddecuq */
-			opset(AVADDEUQM, r0)
-			opset(AVADDECUQ, r0)
-
-		case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
-			opset(AVSUBUBM, r0)
-			opset(AVSUBUHM, r0)
-			opset(AVSUBUWM, r0)
-			opset(AVSUBUDM, r0)
-			opset(AVSUBUQM, r0)
-
-		case AVSUBCU: /* vsubcuq, vsubcuw */
-			opset(AVSUBCUQ, r0)
-			opset(AVSUBCUW, r0)
-
-		case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
-			opset(AVSUBUBS, r0)
-			opset(AVSUBUHS, r0)
-			opset(AVSUBUWS, r0)
-
-		case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
-			opset(AVSUBSBS, r0)
-			opset(AVSUBSHS, r0)
-			opset(AVSUBSWS, r0)
-
-		case AVSUBE: /* vsubeuqm, vsubecuq */
-			opset(AVSUBEUQM, r0)
-			opset(AVSUBECUQ, r0)
-
-		case AVR: /* vrlb, vrlh, vrlw, vrld */
-			opset(AVRLB, r0)
-			opset(AVRLH, r0)
-			opset(AVRLW, r0)
-			opset(AVRLD, r0)
-
-		case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
-			opset(AVSLB, r0)
-			opset(AVSLH, r0)
-			opset(AVSLW, r0)
-			opset(AVSL, r0)
-			opset(AVSLO, r0)
-			opset(AVSRB, r0)
-			opset(AVSRH, r0)
-			opset(AVSRW, r0)
-			opset(AVSR, r0)
-			opset(AVSRO, r0)
-			opset(AVSLD, r0)
-			opset(AVSRD, r0)
-
-		case AVSA: /* vsrab, vsrah, vsraw, vsrad */
-			opset(AVSRAB, r0)
-			opset(AVSRAH, r0)
-			opset(AVSRAW, r0)
-			opset(AVSRAD, r0)
-
-		case AVSOI: /* vsldoi */
-			opset(AVSLDOI, r0)
-
-		case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
-			opset(AVCLZB, r0)
-			opset(AVCLZH, r0)
-			opset(AVCLZW, r0)
-			opset(AVCLZD, r0)
-
-		case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
-			opset(AVPOPCNTB, r0)
-			opset(AVPOPCNTH, r0)
-			opset(AVPOPCNTW, r0)
-			opset(AVPOPCNTD, r0)
-
-		case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
-			opset(AVCMPEQUB, r0)
-			opset(AVCMPEQUBCC, r0)
-			opset(AVCMPEQUH, r0)
-			opset(AVCMPEQUHCC, r0)
-			opset(AVCMPEQUW, r0)
-			opset(AVCMPEQUWCC, r0)
-			opset(AVCMPEQUD, r0)
-			opset(AVCMPEQUDCC, r0)
-
-		case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
-			opset(AVCMPGTUB, r0)
-			opset(AVCMPGTUBCC, r0)
-			opset(AVCMPGTUH, r0)
-			opset(AVCMPGTUHCC, r0)
-			opset(AVCMPGTUW, r0)
-			opset(AVCMPGTUWCC, r0)
-			opset(AVCMPGTUD, r0)
-			opset(AVCMPGTUDCC, r0)
-			opset(AVCMPGTSB, r0)
-			opset(AVCMPGTSBCC, r0)
-			opset(AVCMPGTSH, r0)
-			opset(AVCMPGTSHCC, r0)
-			opset(AVCMPGTSW, r0)
-			opset(AVCMPGTSWCC, r0)
-			opset(AVCMPGTSD, r0)
-			opset(AVCMPGTSDCC, r0)
-
-		case AVPERM: /* vperm */
-			opset(AVPERM, r0)
-
-		case AVSEL: /* vsel */
-			opset(AVSEL, r0)
-
-		case AVSPLT: /* vspltb, vsplth, vspltw */
-			opset(AVSPLTB, r0)
-			opset(AVSPLTH, r0)
-			opset(AVSPLTW, r0)
-
-		case AVSPLTI: /* vspltisb, vspltish, vspltisw */
-			opset(AVSPLTISB, r0)
-			opset(AVSPLTISH, r0)
-			opset(AVSPLTISW, r0)
-
-		case AVCIPH: /* vcipher, vcipherlast */
-			opset(AVCIPHER, r0)
-			opset(AVCIPHERLAST, r0)
-
-		case AVNCIPH: /* vncipher, vncipherlast */
-			opset(AVNCIPHER, r0)
-			opset(AVNCIPHERLAST, r0)
-
-		case AVSBOX: /* vsbox */
-			opset(AVSBOX, r0)
-
-		case AVSHASIGMA: /* vshasigmaw, vshasigmad */
-			opset(AVSHASIGMAW, r0)
-			opset(AVSHASIGMAD, r0)
-
-		case ALXV: /* lxvd2x, lxvdsx, lxvw4x */
-			opset(ALXVD2X, r0)
-			opset(ALXVDSX, r0)
-			opset(ALXVW4X, r0)
-
-		case ASTXV: /* stxvd2x, stxvw4x */
-			opset(ASTXVD2X, r0)
-			opset(ASTXVW4X, r0)
-
-		case ALXS: /* lxsdx  */
-			opset(ALXSDX, r0)
-
-		case ASTXS: /* stxsdx */
-			opset(ASTXSDX, r0)
-
-		case ALXSI: /* lxsiwax, lxsiwzx  */
-			opset(ALXSIWAX, r0)
-			opset(ALXSIWZX, r0)
-
-		case ASTXSI: /* stxsiwx */
-			opset(ASTXSIWX, r0)
-
-		case AMFVSR: /* mfvsrd, mfvsrwz */
-			opset(AMFVSRD, r0)
-			opset(AMFVSRWZ, r0)
-
-		case AMTVSR: /* mtvsrd, mtvsrwa, mtvsrwz */
-			opset(AMTVSRD, r0)
-			opset(AMTVSRWA, r0)
-			opset(AMTVSRWZ, r0)
-
-		case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
-			opset(AXXLANDQ, r0)
-			opset(AXXLANDC, r0)
-			opset(AXXLEQV, r0)
-			opset(AXXLNAND, r0)
-
-		case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
-			opset(AXXLORC, r0)
-			opset(AXXLNOR, r0)
-			opset(AXXLORQ, r0)
-			opset(AXXLXOR, r0)
-
-		case AXXSEL: /* xxsel */
-			opset(AXXSEL, r0)
-
-		case AXXMRG: /* xxmrghw, xxmrglw */
-			opset(AXXMRGHW, r0)
-			opset(AXXMRGLW, r0)
-
-		case AXXSPLT: /* xxspltw */
-			opset(AXXSPLTW, r0)
-
-		case AXXPERM: /* xxpermdi */
-			opset(AXXPERMDI, r0)
-
-		case AXXSI: /* xxsldwi */
-			opset(AXXSLDWI, r0)
-
-		case AXSCV: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
-			opset(AXSCVDPSP, r0)
-			opset(AXSCVSPDP, r0)
-			opset(AXSCVDPSPN, r0)
-			opset(AXSCVSPDPN, r0)
-
-		case AXVCV: /* xvcvdpsp, xvcvspdp */
-			opset(AXVCVDPSP, r0)
-			opset(AXVCVSPDP, r0)
-
-		case AXSCVX: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
-			opset(AXSCVDPSXDS, r0)
-			opset(AXSCVDPSXWS, r0)
-			opset(AXSCVDPUXDS, r0)
-			opset(AXSCVDPUXWS, r0)
-
-		case AXSCVXP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
-			opset(AXSCVSXDDP, r0)
-			opset(AXSCVUXDDP, r0)
-			opset(AXSCVSXDSP, r0)
-			opset(AXSCVUXDSP, r0)
-
-		case AXVCVX: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
-			opset(AXVCVDPSXDS, r0)
-			opset(AXVCVDPSXWS, r0)
-			opset(AXVCVDPUXDS, r0)
-			opset(AXVCVDPUXWS, r0)
-			opset(AXVCVSPSXDS, r0)
-			opset(AXVCVSPSXWS, r0)
-			opset(AXVCVSPUXDS, r0)
-			opset(AXVCVSPUXWS, r0)
-
-		case AXVCVXP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
-			opset(AXVCVSXDDP, r0)
-			opset(AXVCVSXWDP, r0)
-			opset(AXVCVUXDDP, r0)
-			opset(AXVCVUXWDP, r0)
-			opset(AXVCVSXDSP, r0)
-			opset(AXVCVSXWSP, r0)
-			opset(AXVCVUXDSP, r0)
-			opset(AXVCVUXWSP, r0)
-
-		case AAND: /* logical op Rb,Rs,Ra; no literal */
-			opset(AANDN, r0)
-
-			opset(AANDNCC, r0)
-			opset(AEQV, r0)
-			opset(AEQVCC, r0)
-			opset(ANAND, r0)
-			opset(ANANDCC, r0)
-			opset(ANOR, r0)
-			opset(ANORCC, r0)
-			opset(AORCC, r0)
-			opset(AORN, r0)
-			opset(AORNCC, r0)
-			opset(AXORCC, r0)
-
-		case AADDME: /* op Ra, Rd */
-			opset(AADDMECC, r0)
-
-			opset(AADDMEV, r0)
-			opset(AADDMEVCC, r0)
-			opset(AADDZE, r0)
-			opset(AADDZECC, r0)
-			opset(AADDZEV, r0)
-			opset(AADDZEVCC, r0)
-			opset(ASUBME, r0)
-			opset(ASUBMECC, r0)
-			opset(ASUBMEV, r0)
-			opset(ASUBMEVCC, r0)
-			opset(ASUBZE, r0)
-			opset(ASUBZECC, r0)
-			opset(ASUBZEV, r0)
-			opset(ASUBZEVCC, r0)
-
-		case AADDC:
-			opset(AADDCCC, r0)
-
-		case ABEQ:
-			opset(ABGE, r0)
-			opset(ABGT, r0)
-			opset(ABLE, r0)
-			opset(ABLT, r0)
-			opset(ABNE, r0)
-			opset(ABVC, r0)
-			opset(ABVS, r0)
-
-		case ABR:
-			opset(ABL, r0)
-
-		case ABC:
-			opset(ABCL, r0)
-
-		case AEXTSB: /* op Rs, Ra */
-			opset(AEXTSBCC, r0)
-
-			opset(AEXTSH, r0)
-			opset(AEXTSHCC, r0)
-			opset(ACNTLZW, r0)
-			opset(ACNTLZWCC, r0)
-			opset(ACNTLZD, r0)
-			opset(AEXTSW, r0)
-			opset(AEXTSWCC, r0)
-			opset(ACNTLZDCC, r0)
-
-		case AFABS: /* fop [s,]d */
-			opset(AFABSCC, r0)
-
-			opset(AFNABS, r0)
-			opset(AFNABSCC, r0)
-			opset(AFNEG, r0)
-			opset(AFNEGCC, r0)
-			opset(AFRSP, r0)
-			opset(AFRSPCC, r0)
-			opset(AFCTIW, r0)
-			opset(AFCTIWCC, r0)
-			opset(AFCTIWZ, r0)
-			opset(AFCTIWZCC, r0)
-			opset(AFCTID, r0)
-			opset(AFCTIDCC, r0)
-			opset(AFCTIDZ, r0)
-			opset(AFCTIDZCC, r0)
-			opset(AFCFID, r0)
-			opset(AFCFIDCC, r0)
-			opset(AFCFIDU, r0)
-			opset(AFCFIDUCC, r0)
-			opset(AFRES, r0)
-			opset(AFRESCC, r0)
-			opset(AFRIM, r0)
-			opset(AFRIMCC, r0)
-			opset(AFRIP, r0)
-			opset(AFRIPCC, r0)
-			opset(AFRIZ, r0)
-			opset(AFRIZCC, r0)
-			opset(AFRSQRTE, r0)
-			opset(AFRSQRTECC, r0)
-			opset(AFSQRT, r0)
-			opset(AFSQRTCC, r0)
-			opset(AFSQRTS, r0)
-			opset(AFSQRTSCC, r0)
-
-		case AFADD:
-			opset(AFADDS, r0)
-			opset(AFADDCC, r0)
-			opset(AFADDSCC, r0)
-			opset(AFDIV, r0)
-			opset(AFDIVS, r0)
-			opset(AFDIVCC, r0)
-			opset(AFDIVSCC, r0)
-			opset(AFSUB, r0)
-			opset(AFSUBS, r0)
-			opset(AFSUBCC, r0)
-			opset(AFSUBSCC, r0)
-
-		case AFMADD:
-			opset(AFMADDCC, r0)
-			opset(AFMADDS, r0)
-			opset(AFMADDSCC, r0)
-			opset(AFMSUB, r0)
-			opset(AFMSUBCC, r0)
-			opset(AFMSUBS, r0)
-			opset(AFMSUBSCC, r0)
-			opset(AFNMADD, r0)
-			opset(AFNMADDCC, r0)
-			opset(AFNMADDS, r0)
-			opset(AFNMADDSCC, r0)
-			opset(AFNMSUB, r0)
-			opset(AFNMSUBCC, r0)
-			opset(AFNMSUBS, r0)
-			opset(AFNMSUBSCC, r0)
-			opset(AFSEL, r0)
-			opset(AFSELCC, r0)
-
-		case AFMUL:
-			opset(AFMULS, r0)
-			opset(AFMULCC, r0)
-			opset(AFMULSCC, r0)
-
-		case AFCMPO:
-			opset(AFCMPU, r0)
-
-		case AISEL:
-			opset(AISEL, r0)
-
-		case AMTFSB0:
-			opset(AMTFSB0CC, r0)
-			opset(AMTFSB1, r0)
-			opset(AMTFSB1CC, r0)
-
-		case ANEG: /* op [Ra,] Rd */
-			opset(ANEGCC, r0)
-
-			opset(ANEGV, r0)
-			opset(ANEGVCC, r0)
-
-		case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,Ra; oris/xoris $uimm,Rs,Ra */
-			opset(AXOR, r0)
-
-		case ASLW:
-			opset(ASLWCC, r0)
-			opset(ASRW, r0)
-			opset(ASRWCC, r0)
-
-		case ASLD:
-			opset(ASLDCC, r0)
-			opset(ASRD, r0)
-			opset(ASRDCC, r0)
-
-		case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
-			opset(ASRAWCC, r0)
-
-		case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
-			opset(ASRADCC, r0)
-
-		case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
-			opset(ASUB, r0)
-
-			opset(ASUBCC, r0)
-			opset(ASUBV, r0)
-			opset(ASUBVCC, r0)
-			opset(ASUBCCC, r0)
-			opset(ASUBCV, r0)
-			opset(ASUBCVCC, r0)
-			opset(ASUBE, r0)
-			opset(ASUBECC, r0)
-			opset(ASUBEV, r0)
-			opset(ASUBEVCC, r0)
-
-		case ASYNC:
-			opset(AISYNC, r0)
-			opset(ALWSYNC, r0)
-			opset(APTESYNC, r0)
-			opset(ATLBSYNC, r0)
-
-		case ARLWMI:
-			opset(ARLWMICC, r0)
-			opset(ARLWNM, r0)
-			opset(ARLWNMCC, r0)
-
-		case ARLDMI:
-			opset(ARLDMICC, r0)
-			opset(ARLDIMI, r0)
-			opset(ARLDIMICC, r0)
-
-		case ARLDC:
-			opset(ARLDCCC, r0)
-
-		case ARLDCL:
-			opset(ARLDCR, r0)
-			opset(ARLDCLCC, r0)
-			opset(ARLDCRCC, r0)
-
-		case ARLDICL:
-			opset(ARLDICLCC, r0)
-			opset(ARLDICR, r0)
-			opset(ARLDICRCC, r0)
-
-		case AFMOVD:
-			opset(AFMOVDCC, r0)
-			opset(AFMOVDU, r0)
-			opset(AFMOVS, r0)
-			opset(AFMOVSU, r0)
-
-		case AECIWX:
-			opset(ALBAR, r0)
-			opset(ALWAR, r0)
-			opset(ALDAR, r0)
-
-		case ASYSCALL: /* just the op; flow of control */
-			opset(ARFI, r0)
-
-			opset(ARFCI, r0)
-			opset(ARFID, r0)
-			opset(AHRFID, r0)
-
-		case AMOVHBR:
-			opset(AMOVWBR, r0)
-			opset(AMOVDBR, r0)
-
-		case ASLBMFEE:
-			opset(ASLBMFEV, r0)
-
-		case ATW:
-			opset(ATD, r0)
-
-		case ATLBIE:
-			opset(ASLBIE, r0)
-			opset(ATLBIEL, r0)
-
-		case AEIEIO:
-			opset(ASLBIA, r0)
-
-		case ACMP:
-			opset(ACMPW, r0)
-
-		case ACMPU:
-			opset(ACMPWU, r0)
-
-		case AADD,
-			AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra; andis. $uimm,Rs,Ra */
-			AFMOVSX,
-			AFMOVSZ,
-			ALSW,
-			AMOVW,
-			/* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
-			AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals  */
-			AMOVD,  /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
-			AMOVB,  /* macro: move byte with sign extension */
-			AMOVBU, /* macro: move byte with sign extension & update */
-			AMOVFL,
-			AMULLW,
-			/* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
-			ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
-			ASTSW,
-			ASLBMTE,
-			AWORD,
-			ADWORD,
-			obj.ANOP,
-			obj.ATEXT,
-			obj.AUNDEF,
-			obj.AUSEFIELD,
-			obj.AFUNCDATA,
-			obj.APCDATA,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			break
-		}
-	}
-}
-
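-// The OP* helpers below pack primary opcode, extended opcode, and flag
-// fields into 32-bit instruction templates for the various PowerPC
-// instruction forms.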
-func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
-	return o<<26 | xo<<1 | oe<<11
-}
-
-func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
-	return o<<26 | xo<<2 | oe<<11
-}
-
-func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
-	return o<<26 | xo<<3 | oe<<11
-}
-
-func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
-	return o<<26 | xo<<4 | oe<<11
-}
-
-func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
-	return o<<26 | xo | oe<<11 | rc&1
-}
-
-func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
-	return o<<26 | xo | oe<<11 | (rc&1)<<10
-}
-
-func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
-	return o<<26 | xo<<1 | oe<<10 | rc&1
-}
-
-func OPCC(o uint32, xo uint32, rc uint32) uint32 {
-	return OPVCC(o, xo, 0, rc)
-}
-
-func OP(o uint32, xo uint32) uint32 {
-	return OPVCC(o, xo, 0, 0)
-}
-
-/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
-func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
-	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
-}
-
-/* VX-form 2-register operands, r/r/none */
-func AOP_RR(op uint32, d uint32, a uint32) uint32 {
-	return op | (d&31)<<21 | (a&31)<<11
-}
-
-/* VA-form 4-register operands */
-func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
-	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
-}
-
-func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
-	return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
-}
-
-/* VX-form 2-register + UIM operands */
-func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
-	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
-}
-
-/* VX-form 2-register + ST + SIX operands */
-func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
-	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
-}
-
-/* VA-form 3-register + SHB operands */
-func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
-	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
-}
-
-/* VX-form 1-register + SIM operands */
-func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
-	return op | (d&31)<<21 | (simm&31)<<16
-}
-
-/* XX1-form 3-register operands, 1 VSR operand */
-func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
-	/* For the XX-form encodings the VSX register number must be in the range 0-63, */
-	/* so it is rebased from REG_VS0 before the split register-number bits are placed. */
-	r := d - REG_VS0
-	return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
-}
-
-/* XX2-form 3-register operands, 2 VSR operands */
-func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
-	xt := d - REG_VS0
-	xb := b - REG_VS0
-	return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
-}
-
-/* XX3-form 3 VSR operands */
-func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
-	xt := d - REG_VS0
-	xa := a - REG_VS0
-	xb := b - REG_VS0
-	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
-}
-
-/* XX3-form 3 VSR operands + immediate */
-func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
-	xt := d - REG_VS0
-	xa := a - REG_VS0
-	xb := b - REG_VS0
-	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
-}
-
-/* XX4-form, 4 VSR operands */
-func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
-	xt := d - REG_VS0
-	xa := a - REG_VS0
-	xb := b - REG_VS0
-	xc := c - REG_VS0
-	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
-}
-
-func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
-	return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
-}
-
-func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
-	return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
-}
-
-func OP_BR(op uint32, li uint32, aa uint32) uint32 {
-	return op | li&0x03FFFFFC | aa<<1
-}
-
-func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
-	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
-}
-
-func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
-	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
-}
-
-func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
-	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
-}
-
-func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
-	return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
-}
-
-const (
-	/* each rhs is OPVCC(_, _, _, _) */
-	OP_ADD    = 31<<26 | 266<<1 | 0<<10 | 0
-	OP_ADDI   = 14<<26 | 0<<1 | 0<<10 | 0
-	OP_ADDIS  = 15<<26 | 0<<1 | 0<<10 | 0
-	OP_ANDI   = 28<<26 | 0<<1 | 0<<10 | 0
-	OP_EXTSB  = 31<<26 | 954<<1 | 0<<10 | 0
-	OP_EXTSH  = 31<<26 | 922<<1 | 0<<10 | 0
-	OP_EXTSW  = 31<<26 | 986<<1 | 0<<10 | 0
-	OP_ISEL   = 31<<26 | 15<<1 | 0<<10 | 0
-	OP_MCRF   = 19<<26 | 0<<1 | 0<<10 | 0
-	OP_MCRFS  = 63<<26 | 64<<1 | 0<<10 | 0
-	OP_MCRXR  = 31<<26 | 512<<1 | 0<<10 | 0
-	OP_MFCR   = 31<<26 | 19<<1 | 0<<10 | 0
-	OP_MFFS   = 63<<26 | 583<<1 | 0<<10 | 0
-	OP_MFMSR  = 31<<26 | 83<<1 | 0<<10 | 0
-	OP_MFSPR  = 31<<26 | 339<<1 | 0<<10 | 0
-	OP_MFSR   = 31<<26 | 595<<1 | 0<<10 | 0
-	OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
-	OP_MTCRF  = 31<<26 | 144<<1 | 0<<10 | 0
-	OP_MTFSF  = 63<<26 | 711<<1 | 0<<10 | 0
-	OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
-	OP_MTMSR  = 31<<26 | 146<<1 | 0<<10 | 0
-	OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
-	OP_MTSPR  = 31<<26 | 467<<1 | 0<<10 | 0
-	OP_MTSR   = 31<<26 | 210<<1 | 0<<10 | 0
-	OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
-	OP_MULLW  = 31<<26 | 235<<1 | 0<<10 | 0
-	OP_MULLD  = 31<<26 | 233<<1 | 0<<10 | 0
-	OP_OR     = 31<<26 | 444<<1 | 0<<10 | 0
-	OP_ORI    = 24<<26 | 0<<1 | 0<<10 | 0
-	OP_ORIS   = 25<<26 | 0<<1 | 0<<10 | 0
-	OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
-	OP_SUBF   = 31<<26 | 40<<1 | 0<<10 | 0
-	OP_RLDIC  = 30<<26 | 4<<1 | 0<<10 | 0
-	OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
-	OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
-)
-
-func oclass(a *obj.Addr) int {
-	return int(a.Class) - 1
-}
-
-const (
-	D_FORM = iota
-	DS_FORM
-)
-
-// opform returns the form (D_FORM or DS_FORM) of an instruction. Used to decide on
-// which relocation to use with a load or store and only supports the needed
-// instructions.
-func opform(ctxt *obj.Link, insn uint32) int {
-	switch insn {
-	default:
-		ctxt.Diag("bad insn in opform: %x", insn)
-	case OPVCC(58, 0, 0, 0), // ld
-		OPVCC(58, 0, 0, 0) | 1<<1, // lwa
-		OPVCC(62, 0, 0, 0):        // std
-		return DS_FORM
-	case OP_ADDI, // add
-		OPVCC(32, 0, 0, 0), // lwz
-		OPVCC(42, 0, 0, 0), // lha
-		OPVCC(40, 0, 0, 0), // lhz
-		OPVCC(34, 0, 0, 0), // lbz
-		OPVCC(50, 0, 0, 0), // lfd
-		OPVCC(48, 0, 0, 0), // lfs
-		OPVCC(36, 0, 0, 0), // stw
-		OPVCC(44, 0, 0, 0), // sth
-		OPVCC(38, 0, 0, 0), // stb
-		OPVCC(54, 0, 0, 0), // stfd
-		OPVCC(52, 0, 0, 0): // stfs
-		return D_FORM
-	}
-	return 0
-}
-
-// Encode instructions and create relocation for accessing s+d according to the
-// instruction op with source or destination (as appropriate) register reg.
-func symbolAccess(ctxt *obj.Link, s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
-	var base uint32
-	form := opform(ctxt, op)
-	if ctxt.Flag_shared {
-		base = REG_R2
-	} else {
-		base = REG_R0
-	}
-	o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
-	o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc)
-	rel.Siz = 8
-	rel.Sym = s
-	rel.Add = d
-	if ctxt.Flag_shared {
-		switch form {
-		case D_FORM:
-			rel.Type = obj.R_ADDRPOWER_TOCREL
-		case DS_FORM:
-			rel.Type = obj.R_ADDRPOWER_TOCREL_DS
-		}
-
-	} else {
-		switch form {
-		case D_FORM:
-			rel.Type = obj.R_ADDRPOWER
-		case DS_FORM:
-			rel.Type = obj.R_ADDRPOWER_DS
-		}
-	}
-	return
-}
-
-/*
- * 32-bit masks
- */
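-// getmask computes the begin (m[0]=MB) and end (m[1]=ME) bit positions of the
-// contiguous mask v, handling masks that wrap around bit 0, and reports
-// whether v is a valid rotate mask.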
-func getmask(m []byte, v uint32) bool {
-	m[1] = 0
-	m[0] = m[1]
-	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
-		if getmask(m, ^v) {
-			i := int(m[0])
-			m[0] = m[1] + 1
-			m[1] = byte(i - 1)
-			return true
-		}
-
-		return false
-	}
-
-	for i := 0; i < 32; i++ {
-		if v&(1<<uint(31-i)) != 0 {
-			m[0] = byte(i)
-			for {
-				m[1] = byte(i)
-				i++
-				if i >= 32 || v&(1<<uint(31-i)) == 0 {
-					break
-				}
-			}
-
-			for ; i < 32; i++ {
-				if v&(1<<uint(31-i)) != 0 {
-					return false
-				}
-			}
-			return true
-		}
-	}
-
-	return false
-}
-
-func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
-	if !getmask(m, v) {
-		ctxt.Diag("cannot generate mask #%x\n%v", v, p)
-	}
-}
-
-/*
- * 64-bit masks (rldic etc)
- */
-func getmask64(m []byte, v uint64) bool {
-	m[1] = 0
-	m[0] = m[1]
-	for i := 0; i < 64; i++ {
-		if v&(uint64(1)<<uint(63-i)) != 0 {
-			m[0] = byte(i)
-			for {
-				m[1] = byte(i)
-				i++
-				if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
-					break
-				}
-			}
-
-			for ; i < 64; i++ {
-				if v&(uint64(1)<<uint(63-i)) != 0 {
-					return false
-				}
-			}
-			return true
-		}
-	}
-
-	return false
-}
-
-func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
-	if !getmask64(m, v) {
-		ctxt.Diag("cannot generate mask #%x\n%v", v, p)
-	}
-}
-
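-// loadu32 returns the instruction that loads the upper 16 bits of d into
-// register r: oris for values representable as unsigned 32-bit, addis
-// (sign-extending) otherwise.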
-func loadu32(r int, d int64) uint32 {
-	v := int32(d >> 16)
-	if isuint32(uint64(d)) {
-		return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
-	}
-	return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
-}
-
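-// high16adjusted returns the high 16 bits of d, incremented by one when bit
-// 15 of d is set, so that adding the sign-extended low 16 bits afterwards
-// reproduces d.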
-func high16adjusted(d int32) uint16 {
-	if d&0x8000 != 0 {
-		return uint16((d >> 16) + 1)
-	}
-	return uint16(d >> 16)
-}
-
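-// asmout encodes p into at most len(out) 32-bit instruction words according
-// to the optab entry o; span9 writes out the first o.size/4 words.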
-func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
-	o1 := uint32(0)
-	o2 := uint32(0)
-	o3 := uint32(0)
-	o4 := uint32(0)
-	o5 := uint32(0)
-
-	//print("%v => case %d\n", p, o->type);
-	switch o.type_ {
-	default:
-		ctxt.Diag("unknown type %d", o.type_)
-		prasm(p)
-
-	case 0: /* pseudo ops */
-		break
-
-	case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
-		if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
-			v := regoff(ctxt, &p.From)
-			if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
-				//nerrors--;
-				ctxt.Diag("literal operation on R0\n%v", p)
-			}
-
-			o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
-			break
-		}
-
-		o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
-
-	case 2: /* int/cr/fp op Rb,[Ra],Rd */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
-
-	case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
-		d := vregoff(ctxt, &p.From)
-
-		v := int32(d)
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
-			ctxt.Diag("literal operation on R0\n%v", p)
-		}
-		a := OP_ADDI
-		if o.a1 == C_UCON {
-			if d&0xffff != 0 {
-				log.Fatalf("invalid handling of %v", p)
-			}
-			v >>= 16
-			if r == REGZERO && isuint32(uint64(d)) {
-				o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
-				break
-			}
-
-			a = OP_ADDIS
-		} else {
-			if int64(int16(d)) != d {
-				log.Fatalf("invalid handling of %v", p)
-			}
-		}
-
-		o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
-
-	case 4: /* add/mul $scon,[r1],r2 */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
-			ctxt.Diag("literal operation on R0\n%v", p)
-		}
-		if int32(int16(v)) != v {
-			log.Fatalf("mishandled instruction %v", p)
-		}
-		o1 = AOP_IRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-
-	case 5: /* syscall */
-		o1 = oprrr(ctxt, p.As)
-
-	case 6: /* logical op Rb,[Rs,]Ra; no literal */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
-
-	case 7: /* mov r, soreg ==> stw o(r) */
-		r := int(p.To.Reg)
-
-		if r == 0 {
-			r = int(o.param)
-		}
-		v := regoff(ctxt, &p.To)
-		if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
-			if v != 0 {
-				ctxt.Diag("illegal indexed instruction\n%v", p)
-			}
-			if ctxt.Flag_shared && r == REG_R13 {
-				rel := obj.Addrel(ctxt.Cursym)
-				rel.Off = int32(ctxt.Pc)
-				rel.Siz = 4
-				// This (and the matching part in the load case
-				// below) are the only places in the ppc64 toolchain
-				// that knows the name of the tls variable. Possibly
-				// we could add some assembly syntax so that the name
-				// of the variable does not have to be assumed.
-				rel.Sym = obj.Linklookup(ctxt, "runtime.tls_g", 0)
-				rel.Type = obj.R_POWER_TLS
-			}
-			o1 = AOP_RRR(opstorex(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
-		} else {
-			if int32(int16(v)) != v {
-				log.Fatalf("mishandled instruction %v", p)
-			}
-			o1 = AOP_IRR(opstore(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(v))
-		}
-
-	case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
-		r := int(p.From.Reg)
-
-		if r == 0 {
-			r = int(o.param)
-		}
-		v := regoff(ctxt, &p.From)
-		if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
-			if v != 0 {
-				ctxt.Diag("illegal indexed instruction\n%v", p)
-			}
-			if ctxt.Flag_shared && r == REG_R13 {
-				rel := obj.Addrel(ctxt.Cursym)
-				rel.Off = int32(ctxt.Pc)
-				rel.Siz = 4
-				rel.Sym = obj.Linklookup(ctxt, "runtime.tls_g", 0)
-				rel.Type = obj.R_POWER_TLS
-			}
-			o1 = AOP_RRR(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
-		} else {
-			if int32(int16(v)) != v {
-				log.Fatalf("mishandled instruction %v", p)
-			}
-			o1 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-		}
-
-	case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
-		r := int(p.From.Reg)
-
-		if r == 0 {
-			r = int(o.param)
-		}
-		v := regoff(ctxt, &p.From)
-		if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
-			if v != 0 {
-				ctxt.Diag("illegal indexed instruction\n%v", p)
-			}
-			o1 = AOP_RRR(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
-		} else {
-			o1 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-		}
-		o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
-
-	case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
-
-	case 11: /* br/bl lbra */
-		v := int32(0)
-
-		if p.Pcond != nil {
-			v = int32(p.Pcond.Pc - p.Pc)
-			if v&03 != 0 {
-				ctxt.Diag("odd branch target address\n%v", p)
-				v &^= 03
-			}
-
-			if v < -(1<<25) || v >= 1<<24 {
-				ctxt.Diag("branch too far\n%v", p)
-			}
-		}
-
-		o1 = OP_BR(opirr(ctxt, p.As), uint32(v), 0)
-		if p.To.Sym != nil {
-			rel := obj.Addrel(ctxt.Cursym)
-			rel.Off = int32(ctxt.Pc)
-			rel.Siz = 4
-			rel.Sym = p.To.Sym
-			v += int32(p.To.Offset)
-			if v&03 != 0 {
-				ctxt.Diag("odd branch target address\n%v", p)
-				v &^= 03
-			}
-
-			rel.Add = int64(v)
-			rel.Type = obj.R_CALLPOWER
-		}
-		o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
-
-	case 12: /* movb r,r (extsb); movw r,r (extsw) */
-		if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
-			v := regoff(ctxt, &p.From)
-			if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
-				ctxt.Diag("literal operation on R0\n%v", p)
-			}
-
-			o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
-			break
-		}
-
-		if p.As == AMOVW {
-			o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
-		} else {
-			o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
-		}
-
-	case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
-		if p.As == AMOVBZ {
-			o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
-		} else if p.As == AMOVH {
-			o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
-		} else if p.As == AMOVHZ {
-			o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
-		} else if p.As == AMOVWZ {
-			o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
-		} else {
-			ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
-		}
-
-	case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		d := vregoff(ctxt, p.From3)
-		var a int
-		switch p.As {
-
-		// These opcodes expect a mask operand that has to be converted into the
-		// appropriate operand.  The way these were defined, not all valid masks are possible.
-		// Left here for compatibility in case they were used or generated.
-		case ARLDCL, ARLDCLCC:
-			var mask [2]uint8
-			maskgen64(ctxt, p, mask[:], uint64(d))
-
-			a = int(mask[0]) /* MB */
-			if mask[1] != 63 {
-				ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
-			}
-
-		case ARLDCR, ARLDCRCC:
-			var mask [2]uint8
-			maskgen64(ctxt, p, mask[:], uint64(d))
-
-			a = int(mask[1]) /* ME */
-			if mask[0] != 0 {
-				ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
-			}
-
-		// These opcodes use a shift count like the ppc64 asm, no mask conversion done
-		case ARLDICR, ARLDICRCC, ARLDICL, ARLDICLCC:
-			a = int(d)
-
-		default:
-			ctxt.Diag("unexpected op in rldc case\n%v", p)
-			a = 0
-		}
-
-		o1 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
-		o1 |= (uint32(a) & 31) << 6
-		if a&0x20 != 0 {
-			o1 |= 1 << 5 /* mb[5] is top bit */
-		}
-
-	case 17, /* bc bo,bi,lbra (same for now) */
-		16: /* bc bo,bi,sbra */
-		a := 0
-
-		r := int(p.Reg)
-
-		if p.From.Type == obj.TYPE_CONST {
-			a = int(regoff(ctxt, &p.From))
-		} else if p.From.Type == obj.TYPE_REG {
-			if r != 0 {
-				ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
-			}
-			// BI values for the CR
-			switch p.From.Reg {
-			case REG_CR0:
-				r = BI_CR0
-			case REG_CR1:
-				r = BI_CR1
-			case REG_CR2:
-				r = BI_CR2
-			case REG_CR3:
-				r = BI_CR3
-			case REG_CR4:
-				r = BI_CR4
-			case REG_CR5:
-				r = BI_CR5
-			case REG_CR6:
-				r = BI_CR6
-			case REG_CR7:
-				r = BI_CR7
-			default:
-				ctxt.Diag("unrecognized register: expecting CR\n")
-			}
-		}
-		v := int32(0)
-		if p.Pcond != nil {
-			v = int32(p.Pcond.Pc - p.Pc)
-		}
-		if v&03 != 0 {
-			ctxt.Diag("odd branch target address\n%v", p)
-			v &^= 03
-		}
-
-		if v < -(1<<16) || v >= 1<<15 {
-			ctxt.Diag("branch too far\n%v", p)
-		}
-		o1 = OP_BC(opirr(ctxt, p.As), uint32(a), uint32(r), uint32(v), 0)
-
-	case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
-		var v int32
-		if p.As == ABC || p.As == ABCL {
-			v = regoff(ctxt, &p.To) & 31
-		} else {
-			v = 20 /* unconditional */
-		}
-		o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
-		o2 = OPVCC(19, 16, 0, 0)
-		if p.As == ABL || p.As == ABCL {
-			o2 |= 1
-		}
-		o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
-
-	case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
-		var v int32
-		if p.As == ABC || p.As == ABCL {
-			v = regoff(ctxt, &p.From) & 31
-		} else {
-			v = 20 /* unconditional */
-		}
-		r := int(p.Reg)
-		switch oclass(&p.To) {
-		case C_CTR:
-			o1 = OPVCC(19, 528, 0, 0)
-
-		case C_LR:
-			o1 = OPVCC(19, 16, 0, 0)
-
-		default:
-			ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
-			v = 0
-		}
-
-		if p.As == ABL || p.As == ABCL {
-			o1 |= 1
-		}
-		o1 = OP_BCR(o1, uint32(v), uint32(r))
-
-	case 19: /* mov $lcon,r ==> cau+or */
-		d := vregoff(ctxt, &p.From)
-
-		if p.From.Sym == nil {
-			o1 = loadu32(int(p.To.Reg), d)
-			o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
-		} else {
-			o1, o2 = symbolAccess(ctxt, p.From.Sym, d, p.To.Reg, OP_ADDI)
-		}
-
-	//if(dlm) reloc(&p->from, p->pc, 0);
-
-	case 20: /* add $ucon,,r */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
-			ctxt.Diag("literal operation on R0\n%v", p)
-		}
-		o1 = AOP_IRR(opirr(ctxt, -p.As), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
-
-	case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */
-		if p.To.Reg == REGTMP || p.Reg == REGTMP {
-			ctxt.Diag("can't synthesize large constant\n%v", p)
-		}
-		d := vregoff(ctxt, &p.From)
-		o1 = loadu32(REGTMP, d)
-		o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o3 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(r))
-		if p.From.Sym != nil {
-			ctxt.Diag("%v is not supported", p)
-		}
-
-	//if(dlm) reloc(&p->from, p->pc, 0);
-
-	case 23: /* and $lcon,r1,r2 ==> cau+or+and */ /* masks could be done using rlnm etc. */
-		if p.To.Reg == REGTMP || p.Reg == REGTMP {
-			ctxt.Diag("can't synthesize large constant\n%v", p)
-		}
-		d := vregoff(ctxt, &p.From)
-		o1 = loadu32(REGTMP, d)
-		o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o3 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(r))
-		if p.From.Sym != nil {
-			ctxt.Diag("%v is not supported", p)
-		}
-
-		//if(dlm) reloc(&p->from, p->pc, 0);
-
-		/*24*/
-	case 25:
-		/* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
-		v := regoff(ctxt, &p.From)
-
-		if v < 0 {
-			v = 0
-		} else if v > 63 {
-			v = 63
-		}
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		var a int
-		switch p.As {
-		case ASLD, ASLDCC:
-			a = int(63 - v)
-			o1 = OP_RLDICR
-
-		case ASRD, ASRDCC:
-			a = int(v)
-			v = 64 - v
-			o1 = OP_RLDICL
-
-		default:
-			ctxt.Diag("unexpected op in sldi case\n%v", p)
-			a = 0
-			o1 = 0
-		}
-
-		o1 = AOP_RRR(o1, uint32(r), uint32(p.To.Reg), (uint32(v) & 0x1F))
-		o1 |= (uint32(a) & 31) << 6
-		if v&0x20 != 0 {
-			o1 |= 1 << 1
-		}
-		if a&0x20 != 0 {
-			o1 |= 1 << 5 /* mb[5] is top bit */
-		}
-		if p.As == ASLDCC || p.As == ASRDCC {
-			o1 |= 1 /* Rc */
-		}
-
-	case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
-		if p.To.Reg == REGTMP {
-			ctxt.Diag("can't synthesize large constant\n%v", p)
-		}
-		v := regoff(ctxt, &p.From)
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
-		o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
-
-	case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
-		v := regoff(ctxt, p.From3)
-
-		r := int(p.From.Reg)
-		o1 = AOP_IRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-
-	case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
-		if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
-			ctxt.Diag("can't synthesize large constant\n%v", p)
-		}
-		v := regoff(ctxt, p.From3)
-		o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
-		o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
-		o3 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
-		if p.From.Sym != nil {
-			ctxt.Diag("%v is not supported", p)
-		}
-
-	//if(dlm) reloc(&p->from3, p->pc, 0);
-
-	case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
-		v := regoff(ctxt, &p.From)
-
-		d := vregoff(ctxt, p.From3)
-		var mask [2]uint8
-		maskgen64(ctxt, p, mask[:], uint64(d))
-		var a int
-		switch p.As {
-		case ARLDC, ARLDCCC:
-			a = int(mask[0]) /* MB */
-			if int32(mask[1]) != (63 - v) {
-				ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
-			}
-
-		case ARLDCL, ARLDCLCC:
-			a = int(mask[0]) /* MB */
-			if mask[1] != 63 {
-				ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
-			}
-
-		case ARLDCR, ARLDCRCC:
-			a = int(mask[1]) /* ME */
-			if mask[0] != 0 {
-				ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
-			}
-
-		default:
-			ctxt.Diag("unexpected op in rldic case\n%v", p)
-			a = 0
-		}
-
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
-		o1 |= (uint32(a) & 31) << 6
-		if v&0x20 != 0 {
-			o1 |= 1 << 1
-		}
-		if a&0x20 != 0 {
-			o1 |= 1 << 5 /* mb[5] is top bit */
-		}
-
-	case 30: /* rldimi $sh,s,$mask,a */
-		v := regoff(ctxt, &p.From)
-
-		d := vregoff(ctxt, p.From3)
-
-		// Original opcodes had mask operands which had to be converted to a shift count as expected by
-		// the ppc64 asm.
-		switch p.As {
-		case ARLDMI, ARLDMICC:
-			var mask [2]uint8
-			maskgen64(ctxt, p, mask[:], uint64(d))
-			if int32(mask[1]) != (63 - v) {
-				ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
-			}
-			o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
-			o1 |= (uint32(mask[0]) & 31) << 6
-			if v&0x20 != 0 {
-				o1 |= 1 << 1
-			}
-			if mask[0]&0x20 != 0 {
-				o1 |= 1 << 5 /* mb[5] is top bit */
-			}
-
-		// Opcodes with shift count operands.
-		case ARLDIMI, ARLDIMICC:
-			o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
-			o1 |= (uint32(d) & 31) << 6
-			if v&0x20 != 0 {
-				o1 |= 1 << 1
-			}
-		}
-
-	case 31: /* dword */
-		d := vregoff(ctxt, &p.From)
-
-		if ctxt.Arch.ByteOrder == binary.BigEndian {
-			o1 = uint32(d >> 32)
-			o2 = uint32(d)
-		} else {
-			o1 = uint32(d)
-			o2 = uint32(d >> 32)
-		}
-
-		if p.From.Sym != nil {
-			rel := obj.Addrel(ctxt.Cursym)
-			rel.Off = int32(ctxt.Pc)
-			rel.Siz = 8
-			rel.Sym = p.From.Sym
-			rel.Add = p.From.Offset
-			rel.Type = obj.R_ADDR
-			o2 = 0
-			o1 = o2
-		}
-
-	case 32: /* fmul frc,fra,frd */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
-
-	case 33: /* fabs [frb,]frd; fmr. frb,frd */
-		r := int(p.From.Reg)
-
-		if oclass(&p.From) == C_NONE {
-			r = int(p.To.Reg)
-		}
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), 0, uint32(r))
-
-	case 34: /* FMADDx fra,frb,frc,frd (d=a*b+c); FSELx a<0? (d=b): (d=c) */
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
-
-	case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
-		v := regoff(ctxt, &p.To)
-
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
-		o2 = AOP_IRR(opstore(ctxt, p.As), uint32(p.From.Reg), REGTMP, uint32(v))
-
-	case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
-		o2 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(v))
-
-	case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.From.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
-		o2 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(v))
-		o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
-
-	case 40: /* word */
-		o1 = uint32(regoff(ctxt, &p.From))
-
-	case 41: /* stswi */
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(regoff(ctxt, p.From3))&0x7F)<<11
-
-	case 42: /* lswi */
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, p.From3))&0x7F)<<11
-
-	case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
-		o1 = AOP_RRR(oprrr(ctxt, p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
-
-	case 44: /* indexed store */
-		o1 = AOP_RRR(opstorex(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
-
-	case 45: /* indexed load */
-		o1 = AOP_RRR(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
-
-	case 46: /* plain op */
-		o1 = oprrr(ctxt, p.As)
-
-	case 47: /* op Ra, Rd; also op [Ra,] Rd */
-		r := int(p.From.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), 0)
-
-	case 48: /* op Rs, Ra */
-		r := int(p.From.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), 0)
-
-	case 49: /* op Rb; op $n, Rb */
-		if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
-			v := regoff(ctxt, &p.From) & 1
-			o1 = AOP_RRR(oprrr(ctxt, p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
-		} else {
-			o1 = AOP_RRR(oprrr(ctxt, p.As), 0, 0, uint32(p.From.Reg))
-		}
-
-	case 50: /* rem[u] r1[,r2],r3 */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		v := oprrr(ctxt, p.As)
-		t := v & (1<<10 | 1) /* OE|Rc */
-		o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
-		o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
-		o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
-		if p.As == AREMU {
-			o4 = o3
-
-			/* Clear top 32 bits */
-			o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
-		}
-
-	case 51: /* remd[u] r1[,r2],r3 */
-		r := int(p.Reg)
-
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		v := oprrr(ctxt, p.As)
-		t := v & (1<<10 | 1) /* OE|Rc */
-		o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
-		o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
-		o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
-
-	case 52: /* mtfsbNx cr(n) */
-		v := regoff(ctxt, &p.From) & 31
-
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(v), 0, 0)
-
-	case 53: /* mffsX ,fr1 */
-		o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
-
-	case 54: /* mov msr,r1; mov r1, msr*/
-		if oclass(&p.From) == C_REG {
-			if p.As == AMOVD {
-				o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
-			} else {
-				o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
-			}
-		} else {
-			o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
-		}
-
-	case 55: /* op Rb, Rd */
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
-
-	case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
-		if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
-			o1 |= 1 << 1 /* mb[5] */
-		}
-
-	case 57: /* slw $sh,[s,]a -> rlwinm ... */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-
-		/*
-			 * Let user (gs) shoot himself in the foot.
-			 * qc has already complained.
-			 *
-			if(v < 0 || v > 31)
-				ctxt->diag("illegal shift %ld\n%v", v, p);
-		*/
-		if v < 0 {
-			v = 0
-		} else if v > 32 {
-			v = 32
-		}
-		var mask [2]uint8
-		if p.As == ASRW || p.As == ASRWCC { /* shift right */
-			mask[0] = uint8(v)
-			mask[1] = 31
-			v = 32 - v
-		} else {
-			mask[0] = 0
-			mask[1] = uint8(31 - v)
-		}
-
-		o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
-		if p.As == ASLWCC || p.As == ASRWCC {
-			o1 |= 1 /* Rc */
-		}
-
-	case 58: /* logical $andcon,[s],a */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = LOP_IRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
-
-	case 59: /* or/and $ucon,,r */
-		v := regoff(ctxt, &p.From)
-
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-		o1 = LOP_IRR(opirr(ctxt, -p.As), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
-
-	case 60: /* tw to,a,b */
-		r := int(regoff(ctxt, &p.From) & 31)
-
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
-
-	case 61: /* tw to,a,$simm */
-		r := int(regoff(ctxt, &p.From) & 31)
-
-		v := regoff(ctxt, &p.To)
-		o1 = AOP_IRR(opirr(ctxt, p.As), uint32(r), uint32(p.Reg), uint32(v))
-
-	case 62: /* rlwmi $sh,s,$mask,a */
-		v := regoff(ctxt, &p.From)
-
-		var mask [2]uint8
-		maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, p.From3)))
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
-		o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
-
-	case 63: /* rlwmi b,s,$mask,a */
-		var mask [2]uint8
-		maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, p.From3)))
-
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
-		o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
-
-	case 64: /* mtfsf fr[, $m] {,fpcsr} */
-		var v int32
-		if p.From3Type() != obj.TYPE_NONE {
-			v = regoff(ctxt, p.From3) & 255
-		} else {
-			v = 255
-		}
-		o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
-
-	case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
-		if p.To.Reg == 0 {
-			ctxt.Diag("must specify FPSCR(n)\n%v", p)
-		}
-		o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(regoff(ctxt, &p.From))&31)<<12
-
-	case 66: /* mov spr,r1; mov r1,spr, also dcr */
-		var r int
-		var v int32
-		if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
-			r = int(p.From.Reg)
-			v = int32(p.To.Reg)
-			if REG_DCR0 <= v && v <= REG_DCR0+1023 {
-				o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
-			} else {
-				o1 = OPVCC(31, 467, 0, 0) /* mtspr */
-			}
-		} else {
-			r = int(p.To.Reg)
-			v = int32(p.From.Reg)
-			if REG_DCR0 <= v && v <= REG_DCR0+1023 {
-				o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
-			} else {
-				o1 = OPVCC(31, 339, 0, 0) /* mfspr */
-			}
-		}
-
-		o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
-
-	case 67: /* mcrf crfD,crfS */
-		if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
-			ctxt.Diag("illegal CR field number\n%v", p)
-		}
-		o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
-
-	case 68: /* mfcr rD; mfocrf CRM,rD */
-		if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
-			v := int32(1 << uint(7-(p.To.Reg&7)))                                 /* CR(n) */
-			o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
-		} else {
-			o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
-		}
-
-	case 69: /* mtcrf CRM,rS */
-		var v int32
-		if p.From3Type() != obj.TYPE_NONE {
-			if p.To.Reg != 0 {
-				ctxt.Diag("can't use both mask and CR(n)\n%v", p)
-			}
-			v = regoff(ctxt, p.From3) & 0xff
-		} else {
-			if p.To.Reg == 0 {
-				v = 0xff /* CR */
-			} else {
-				v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
-			}
-		}
-
-		o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
-
-	case 70: /* [f]cmp r,r,cr*/
-		var r int
-		if p.Reg == 0 {
-			r = 0
-		} else {
-			r = (int(p.Reg) & 7) << 2
-		}
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
-
-	case 71: /* cmp[l] r,i,cr*/
-		var r int
-		if p.Reg == 0 {
-			r = 0
-		} else {
-			r = (int(p.Reg) & 7) << 2
-		}
-		o1 = AOP_RRR(opirr(ctxt, p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(regoff(ctxt, &p.To))&0xffff
-
-	case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
-		o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
-
-	case 73: /* mcrfs crfD,crfS */
-		if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
-			ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
-		}
-		o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
-
-	case 77: /* syscall $scon, syscall Rx */
-		if p.From.Type == obj.TYPE_CONST {
-			if p.From.Offset > BIG || p.From.Offset < -BIG {
-				ctxt.Diag("illegal syscall, sysnum too large: %v", p)
-			}
-			o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
-		} else if p.From.Type == obj.TYPE_REG {
-			o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
-		} else {
-			ctxt.Diag("illegal syscall: %v", p)
-			o1 = 0x7fe00008 // trap always
-		}
-
-		o2 = oprrr(ctxt, p.As)
-		o3 = AOP_RRR(oprrr(ctxt, AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
-
-	case 78: /* undef */
-		o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
-		   always to be an illegal instruction."  */
-
-	/* relocation operations */
-	case 74:
-		v := vregoff(ctxt, &p.To)
-		o1, o2 = symbolAccess(ctxt, p.To.Sym, v, p.From.Reg, opstore(ctxt, p.As))
-
-	//if(dlm) reloc(&p->to, p->pc, 1);
-
-	case 75:
-		v := vregoff(ctxt, &p.From)
-		o1, o2 = symbolAccess(ctxt, p.From.Sym, v, p.To.Reg, opload(ctxt, p.As))
-
-	//if(dlm) reloc(&p->from, p->pc, 1);
-
-	case 76:
-		v := vregoff(ctxt, &p.From)
-		o1, o2 = symbolAccess(ctxt, p.From.Sym, v, p.To.Reg, opload(ctxt, p.As))
-		o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
-
-		//if(dlm) reloc(&p->from, p->pc, 1);
-
-	case 79:
-		if p.From.Offset != 0 {
-			ctxt.Diag("invalid offset against tls var %v", p)
-		}
-		o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Type = obj.R_POWER_TLS_LE
-
-	case 80:
-		if p.From.Offset != 0 {
-			ctxt.Diag("invalid offset against tls var %v", p)
-		}
-		o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
-		o2 = AOP_IRR(opload(ctxt, AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Type = obj.R_POWER_TLS_IE
-
-	case 81:
-		v := vregoff(ctxt, &p.To)
-		if v != 0 {
-			ctxt.Diag("invalid offset against GOT slot %v", p)
-		}
-
-		o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
-		o2 = AOP_IRR(opload(ctxt, AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Type = obj.R_ADDRPOWER_GOT
-	case 82: /* vector instructions, VX-form and VC-form */
-		if p.From.Type == obj.TYPE_REG {
-			/* reg reg none OR reg reg reg */
-			/* 3-register operand order: VRA, VRB, VRT */
-			/* 2-register operand order: VRA, VRT */
-			o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
-		} else if p.From3Type() == obj.TYPE_CONST {
-			/* imm imm reg reg */
-			/* operand order: SIX, VRA, ST, VRT */
-			six := int(regoff(ctxt, &p.From))
-			st := int(regoff(ctxt, p.From3))
-			o1 = AOP_IIRR(opiirr(ctxt, p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
-		} else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
-			/* imm reg reg */
-			/* operand order: UIM, VRB, VRT */
-			uim := int(regoff(ctxt, &p.From))
-			o1 = AOP_VIRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
-		} else {
-			/* imm reg */
-			/* operand order: SIM, VRT */
-			sim := int(regoff(ctxt, &p.From))
-			o1 = AOP_IR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(sim))
-		}
-
-	case 83: /* vector instructions, VA-form */
-		if p.From.Type == obj.TYPE_REG {
-			/* reg reg reg reg */
-			/* 4-register operand order: VRA, VRB, VRC, VRT */
-			o1 = AOP_RRRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.From3.Reg))
-		} else if p.From.Type == obj.TYPE_CONST {
-			/* imm reg reg reg */
-			/* operand order: SHB, VRA, VRB, VRT */
-			shb := int(regoff(ctxt, &p.From))
-			o1 = AOP_IRRR(opirrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), uint32(shb))
-		}
-
-	case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
-		bc := vregoff(ctxt, &p.From)
-
-		// rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
-		o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), uint32(bc))
-
-	case 85: /* vector instructions, VX-form */
-		/* reg none reg */
-		/* 2-register operand order: VRB, VRT */
-		o1 = AOP_RR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg))
-
-	case 86: /* VSX indexed store, XX1-form */
-		/* reg reg reg */
-		/* 3-register operand order: XT, (RB)(RA*1) */
-		o1 = AOP_XX1(opstorex(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
-
-	case 87: /* VSX indexed load, XX1-form */
-		/* reg reg reg */
-		/* 3-register operand order: (RB)(RA*1), XT */
-		o1 = AOP_XX1(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
-
-	case 88: /* VSX instructions, XX1-form */
-		/* reg reg none OR reg reg reg */
-		/* 3-register operand order: RA, RB, XT */
-		/* 2-register operand order: XS, RA or RA, XT */
-		xt := int32(p.To.Reg)
-		xs := int32(p.From.Reg)
-		if REG_VS0 <= xt && xt <= REG_VS63 {
-			o1 = AOP_XX1(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
-		} else if REG_VS0 <= xs && xs <= REG_VS63 {
-			o1 = AOP_XX1(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
-		}
-
-	case 89: /* VSX instructions, XX2-form */
-		/* reg none reg OR reg imm reg */
-		/* 2-register operand order: XB, XT or XB, UIM, XT*/
-		uim := int(regoff(ctxt, p.From3))
-		o1 = AOP_XX2(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
-
-	case 90: /* VSX instructions, XX3-form */
-		if p.From3Type() == obj.TYPE_NONE {
-			/* reg reg reg */
-			/* 3-register operand order: XA, XB, XT */
-			o1 = AOP_XX3(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
-		} else if p.From3Type() == obj.TYPE_CONST {
-			/* reg reg reg imm */
-			/* operand order: XA, XB, DM, XT */
-			dm := int(regoff(ctxt, p.From3))
-			o1 = AOP_XX3I(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
-		}
-
-	case 91: /* VSX instructions, XX4-form */
-		/* reg reg reg reg */
-		/* 3-register operand order: XA, XB, XC, XT */
-		o1 = AOP_XX4(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.From3.Reg))
-
-	}
-
-	out[0] = o1
-	out[1] = o2
-	out[2] = o3
-	out[3] = o4
-	out[4] = o5
-	return
-}
-
-func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
-	ctxt.Instoffset = 0
-	if a != nil {
-		aclass(ctxt, a)
-	}
-	return ctxt.Instoffset
-}
-
-func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
-	return int32(vregoff(ctxt, a))
-}
-
-func oprrr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AADD:
-		return OPVCC(31, 266, 0, 0)
-	case AADDCC:
-		return OPVCC(31, 266, 0, 1)
-	case AADDV:
-		return OPVCC(31, 266, 1, 0)
-	case AADDVCC:
-		return OPVCC(31, 266, 1, 1)
-	case AADDC:
-		return OPVCC(31, 10, 0, 0)
-	case AADDCCC:
-		return OPVCC(31, 10, 0, 1)
-	case AADDCV:
-		return OPVCC(31, 10, 1, 0)
-	case AADDCVCC:
-		return OPVCC(31, 10, 1, 1)
-	case AADDE:
-		return OPVCC(31, 138, 0, 0)
-	case AADDECC:
-		return OPVCC(31, 138, 0, 1)
-	case AADDEV:
-		return OPVCC(31, 138, 1, 0)
-	case AADDEVCC:
-		return OPVCC(31, 138, 1, 1)
-	case AADDME:
-		return OPVCC(31, 234, 0, 0)
-	case AADDMECC:
-		return OPVCC(31, 234, 0, 1)
-	case AADDMEV:
-		return OPVCC(31, 234, 1, 0)
-	case AADDMEVCC:
-		return OPVCC(31, 234, 1, 1)
-	case AADDZE:
-		return OPVCC(31, 202, 0, 0)
-	case AADDZECC:
-		return OPVCC(31, 202, 0, 1)
-	case AADDZEV:
-		return OPVCC(31, 202, 1, 0)
-	case AADDZEVCC:
-		return OPVCC(31, 202, 1, 1)
-
-	case AAND:
-		return OPVCC(31, 28, 0, 0)
-	case AANDCC:
-		return OPVCC(31, 28, 0, 1)
-	case AANDN:
-		return OPVCC(31, 60, 0, 0)
-	case AANDNCC:
-		return OPVCC(31, 60, 0, 1)
-
-	case ACMP:
-		return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
-	case ACMPU:
-		return OPVCC(31, 32, 0, 0) | 1<<21
-	case ACMPW:
-		return OPVCC(31, 0, 0, 0) /* L=0 */
-	case ACMPWU:
-		return OPVCC(31, 32, 0, 0)
-
-	case ACNTLZW:
-		return OPVCC(31, 26, 0, 0)
-	case ACNTLZWCC:
-		return OPVCC(31, 26, 0, 1)
-	case ACNTLZD:
-		return OPVCC(31, 58, 0, 0)
-	case ACNTLZDCC:
-		return OPVCC(31, 58, 0, 1)
-
-	case ACRAND:
-		return OPVCC(19, 257, 0, 0)
-	case ACRANDN:
-		return OPVCC(19, 129, 0, 0)
-	case ACREQV:
-		return OPVCC(19, 289, 0, 0)
-	case ACRNAND:
-		return OPVCC(19, 225, 0, 0)
-	case ACRNOR:
-		return OPVCC(19, 33, 0, 0)
-	case ACROR:
-		return OPVCC(19, 449, 0, 0)
-	case ACRORN:
-		return OPVCC(19, 417, 0, 0)
-	case ACRXOR:
-		return OPVCC(19, 193, 0, 0)
-
-	case ADCBF:
-		return OPVCC(31, 86, 0, 0)
-	case ADCBI:
-		return OPVCC(31, 470, 0, 0)
-	case ADCBST:
-		return OPVCC(31, 54, 0, 0)
-	case ADCBT:
-		return OPVCC(31, 278, 0, 0)
-	case ADCBTST:
-		return OPVCC(31, 246, 0, 0)
-	case ADCBZ:
-		return OPVCC(31, 1014, 0, 0)
-
-	case AREM, ADIVW:
-		return OPVCC(31, 491, 0, 0)
-
-	case AREMCC, ADIVWCC:
-		return OPVCC(31, 491, 0, 1)
-
-	case AREMV, ADIVWV:
-		return OPVCC(31, 491, 1, 0)
-
-	case AREMVCC, ADIVWVCC:
-		return OPVCC(31, 491, 1, 1)
-
-	case AREMU, ADIVWU:
-		return OPVCC(31, 459, 0, 0)
-
-	case AREMUCC, ADIVWUCC:
-		return OPVCC(31, 459, 0, 1)
-
-	case AREMUV, ADIVWUV:
-		return OPVCC(31, 459, 1, 0)
-
-	case AREMUVCC, ADIVWUVCC:
-		return OPVCC(31, 459, 1, 1)
-
-	case AREMD, ADIVD:
-		return OPVCC(31, 489, 0, 0)
-
-	case AREMDCC, ADIVDCC:
-		return OPVCC(31, 489, 0, 1)
-
-	case ADIVDE:
-		return OPVCC(31, 425, 0, 0)
-
-	case ADIVDECC:
-		return OPVCC(31, 425, 0, 1)
-
-	case ADIVDEU:
-		return OPVCC(31, 393, 0, 0)
-
-	case ADIVDEUCC:
-		return OPVCC(31, 393, 0, 1)
-
-	case AREMDV, ADIVDV:
-		return OPVCC(31, 489, 1, 0)
-
-	case AREMDVCC, ADIVDVCC:
-		return OPVCC(31, 489, 1, 1)
-
-	case AREMDU, ADIVDU:
-		return OPVCC(31, 457, 0, 0)
-
-	case AREMDUCC, ADIVDUCC:
-		return OPVCC(31, 457, 0, 1)
-
-	case AREMDUV, ADIVDUV:
-		return OPVCC(31, 457, 1, 0)
-
-	case AREMDUVCC, ADIVDUVCC:
-		return OPVCC(31, 457, 1, 1)
-
-	case AEIEIO:
-		return OPVCC(31, 854, 0, 0)
-
-	case AEQV:
-		return OPVCC(31, 284, 0, 0)
-	case AEQVCC:
-		return OPVCC(31, 284, 0, 1)
-
-	case AEXTSB:
-		return OPVCC(31, 954, 0, 0)
-	case AEXTSBCC:
-		return OPVCC(31, 954, 0, 1)
-	case AEXTSH:
-		return OPVCC(31, 922, 0, 0)
-	case AEXTSHCC:
-		return OPVCC(31, 922, 0, 1)
-	case AEXTSW:
-		return OPVCC(31, 986, 0, 0)
-	case AEXTSWCC:
-		return OPVCC(31, 986, 0, 1)
-
-	case AFABS:
-		return OPVCC(63, 264, 0, 0)
-	case AFABSCC:
-		return OPVCC(63, 264, 0, 1)
-	case AFADD:
-		return OPVCC(63, 21, 0, 0)
-	case AFADDCC:
-		return OPVCC(63, 21, 0, 1)
-	case AFADDS:
-		return OPVCC(59, 21, 0, 0)
-	case AFADDSCC:
-		return OPVCC(59, 21, 0, 1)
-	case AFCMPO:
-		return OPVCC(63, 32, 0, 0)
-	case AFCMPU:
-		return OPVCC(63, 0, 0, 0)
-	case AFCFID:
-		return OPVCC(63, 846, 0, 0)
-	case AFCFIDCC:
-		return OPVCC(63, 846, 0, 1)
-	case AFCFIDU:
-		return OPVCC(63, 974, 0, 0)
-	case AFCFIDUCC:
-		return OPVCC(63, 974, 0, 1)
-	case AFCTIW:
-		return OPVCC(63, 14, 0, 0)
-	case AFCTIWCC:
-		return OPVCC(63, 14, 0, 1)
-	case AFCTIWZ:
-		return OPVCC(63, 15, 0, 0)
-	case AFCTIWZCC:
-		return OPVCC(63, 15, 0, 1)
-	case AFCTID:
-		return OPVCC(63, 814, 0, 0)
-	case AFCTIDCC:
-		return OPVCC(63, 814, 0, 1)
-	case AFCTIDZ:
-		return OPVCC(63, 815, 0, 0)
-	case AFCTIDZCC:
-		return OPVCC(63, 815, 0, 1)
-	case AFDIV:
-		return OPVCC(63, 18, 0, 0)
-	case AFDIVCC:
-		return OPVCC(63, 18, 0, 1)
-	case AFDIVS:
-		return OPVCC(59, 18, 0, 0)
-	case AFDIVSCC:
-		return OPVCC(59, 18, 0, 1)
-	case AFMADD:
-		return OPVCC(63, 29, 0, 0)
-	case AFMADDCC:
-		return OPVCC(63, 29, 0, 1)
-	case AFMADDS:
-		return OPVCC(59, 29, 0, 0)
-	case AFMADDSCC:
-		return OPVCC(59, 29, 0, 1)
-
-	case AFMOVS, AFMOVD:
-		return OPVCC(63, 72, 0, 0) /* load */
-	case AFMOVDCC:
-		return OPVCC(63, 72, 0, 1)
-	case AFMSUB:
-		return OPVCC(63, 28, 0, 0)
-	case AFMSUBCC:
-		return OPVCC(63, 28, 0, 1)
-	case AFMSUBS:
-		return OPVCC(59, 28, 0, 0)
-	case AFMSUBSCC:
-		return OPVCC(59, 28, 0, 1)
-	case AFMUL:
-		return OPVCC(63, 25, 0, 0)
-	case AFMULCC:
-		return OPVCC(63, 25, 0, 1)
-	case AFMULS:
-		return OPVCC(59, 25, 0, 0)
-	case AFMULSCC:
-		return OPVCC(59, 25, 0, 1)
-	case AFNABS:
-		return OPVCC(63, 136, 0, 0)
-	case AFNABSCC:
-		return OPVCC(63, 136, 0, 1)
-	case AFNEG:
-		return OPVCC(63, 40, 0, 0)
-	case AFNEGCC:
-		return OPVCC(63, 40, 0, 1)
-	case AFNMADD:
-		return OPVCC(63, 31, 0, 0)
-	case AFNMADDCC:
-		return OPVCC(63, 31, 0, 1)
-	case AFNMADDS:
-		return OPVCC(59, 31, 0, 0)
-	case AFNMADDSCC:
-		return OPVCC(59, 31, 0, 1)
-	case AFNMSUB:
-		return OPVCC(63, 30, 0, 0)
-	case AFNMSUBCC:
-		return OPVCC(63, 30, 0, 1)
-	case AFNMSUBS:
-		return OPVCC(59, 30, 0, 0)
-	case AFNMSUBSCC:
-		return OPVCC(59, 30, 0, 1)
-	case AFRES:
-		return OPVCC(59, 24, 0, 0)
-	case AFRESCC:
-		return OPVCC(59, 24, 0, 1)
-	case AFRIM:
-		return OPVCC(63, 488, 0, 0)
-	case AFRIMCC:
-		return OPVCC(63, 488, 0, 1)
-	case AFRIP:
-		return OPVCC(63, 456, 0, 0)
-	case AFRIPCC:
-		return OPVCC(63, 456, 0, 1)
-	case AFRIZ:
-		return OPVCC(63, 424, 0, 0)
-	case AFRIZCC:
-		return OPVCC(63, 424, 0, 1)
-	case AFRSP:
-		return OPVCC(63, 12, 0, 0)
-	case AFRSPCC:
-		return OPVCC(63, 12, 0, 1)
-	case AFRSQRTE:
-		return OPVCC(63, 26, 0, 0)
-	case AFRSQRTECC:
-		return OPVCC(63, 26, 0, 1)
-	case AFSEL:
-		return OPVCC(63, 23, 0, 0)
-	case AFSELCC:
-		return OPVCC(63, 23, 0, 1)
-	case AFSQRT:
-		return OPVCC(63, 22, 0, 0)
-	case AFSQRTCC:
-		return OPVCC(63, 22, 0, 1)
-	case AFSQRTS:
-		return OPVCC(59, 22, 0, 0)
-	case AFSQRTSCC:
-		return OPVCC(59, 22, 0, 1)
-	case AFSUB:
-		return OPVCC(63, 20, 0, 0)
-	case AFSUBCC:
-		return OPVCC(63, 20, 0, 1)
-	case AFSUBS:
-		return OPVCC(59, 20, 0, 0)
-	case AFSUBSCC:
-		return OPVCC(59, 20, 0, 1)
-
-	case AICBI:
-		return OPVCC(31, 982, 0, 0)
-	case AISYNC:
-		return OPVCC(19, 150, 0, 0)
-
-	case AMTFSB0:
-		return OPVCC(63, 70, 0, 0)
-	case AMTFSB0CC:
-		return OPVCC(63, 70, 0, 1)
-	case AMTFSB1:
-		return OPVCC(63, 38, 0, 0)
-	case AMTFSB1CC:
-		return OPVCC(63, 38, 0, 1)
-
-	case AMULHW:
-		return OPVCC(31, 75, 0, 0)
-	case AMULHWCC:
-		return OPVCC(31, 75, 0, 1)
-	case AMULHWU:
-		return OPVCC(31, 11, 0, 0)
-	case AMULHWUCC:
-		return OPVCC(31, 11, 0, 1)
-	case AMULLW:
-		return OPVCC(31, 235, 0, 0)
-	case AMULLWCC:
-		return OPVCC(31, 235, 0, 1)
-	case AMULLWV:
-		return OPVCC(31, 235, 1, 0)
-	case AMULLWVCC:
-		return OPVCC(31, 235, 1, 1)
-
-	case AMULHD:
-		return OPVCC(31, 73, 0, 0)
-	case AMULHDCC:
-		return OPVCC(31, 73, 0, 1)
-	case AMULHDU:
-		return OPVCC(31, 9, 0, 0)
-	case AMULHDUCC:
-		return OPVCC(31, 9, 0, 1)
-	case AMULLD:
-		return OPVCC(31, 233, 0, 0)
-	case AMULLDCC:
-		return OPVCC(31, 233, 0, 1)
-	case AMULLDV:
-		return OPVCC(31, 233, 1, 0)
-	case AMULLDVCC:
-		return OPVCC(31, 233, 1, 1)
-
-	case ANAND:
-		return OPVCC(31, 476, 0, 0)
-	case ANANDCC:
-		return OPVCC(31, 476, 0, 1)
-	case ANEG:
-		return OPVCC(31, 104, 0, 0)
-	case ANEGCC:
-		return OPVCC(31, 104, 0, 1)
-	case ANEGV:
-		return OPVCC(31, 104, 1, 0)
-	case ANEGVCC:
-		return OPVCC(31, 104, 1, 1)
-	case ANOR:
-		return OPVCC(31, 124, 0, 0)
-	case ANORCC:
-		return OPVCC(31, 124, 0, 1)
-	case AOR:
-		return OPVCC(31, 444, 0, 0)
-	case AORCC:
-		return OPVCC(31, 444, 0, 1)
-	case AORN:
-		return OPVCC(31, 412, 0, 0)
-	case AORNCC:
-		return OPVCC(31, 412, 0, 1)
-
-	case ARFI:
-		return OPVCC(19, 50, 0, 0)
-	case ARFCI:
-		return OPVCC(19, 51, 0, 0)
-	case ARFID:
-		return OPVCC(19, 18, 0, 0)
-	case AHRFID:
-		return OPVCC(19, 274, 0, 0)
-
-	case ARLWMI:
-		return OPVCC(20, 0, 0, 0)
-	case ARLWMICC:
-		return OPVCC(20, 0, 0, 1)
-	case ARLWNM:
-		return OPVCC(23, 0, 0, 0)
-	case ARLWNMCC:
-		return OPVCC(23, 0, 0, 1)
-
-	case ARLDCL:
-		return OPVCC(30, 8, 0, 0)
-	case ARLDCR:
-		return OPVCC(30, 9, 0, 0)
-
-	case ARLDICL:
-		return OPVCC(30, 0, 0, 0)
-	case ARLDICLCC:
-		return OPVCC(30, 0, 0, 1)
-	case ARLDICR:
-		return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
-	case ARLDICRCC:
-		return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
-
-	case ASYSCALL:
-		return OPVCC(17, 1, 0, 0)
-
-	case ASLW:
-		return OPVCC(31, 24, 0, 0)
-	case ASLWCC:
-		return OPVCC(31, 24, 0, 1)
-	case ASLD:
-		return OPVCC(31, 27, 0, 0)
-	case ASLDCC:
-		return OPVCC(31, 27, 0, 1)
-
-	case ASRAW:
-		return OPVCC(31, 792, 0, 0)
-	case ASRAWCC:
-		return OPVCC(31, 792, 0, 1)
-	case ASRAD:
-		return OPVCC(31, 794, 0, 0)
-	case ASRADCC:
-		return OPVCC(31, 794, 0, 1)
-
-	case ASRW:
-		return OPVCC(31, 536, 0, 0)
-	case ASRWCC:
-		return OPVCC(31, 536, 0, 1)
-	case ASRD:
-		return OPVCC(31, 539, 0, 0)
-	case ASRDCC:
-		return OPVCC(31, 539, 0, 1)
-
-	case ASUB:
-		return OPVCC(31, 40, 0, 0)
-	case ASUBCC:
-		return OPVCC(31, 40, 0, 1)
-	case ASUBV:
-		return OPVCC(31, 40, 1, 0)
-	case ASUBVCC:
-		return OPVCC(31, 40, 1, 1)
-	case ASUBC:
-		return OPVCC(31, 8, 0, 0)
-	case ASUBCCC:
-		return OPVCC(31, 8, 0, 1)
-	case ASUBCV:
-		return OPVCC(31, 8, 1, 0)
-	case ASUBCVCC:
-		return OPVCC(31, 8, 1, 1)
-	case ASUBE:
-		return OPVCC(31, 136, 0, 0)
-	case ASUBECC:
-		return OPVCC(31, 136, 0, 1)
-	case ASUBEV:
-		return OPVCC(31, 136, 1, 0)
-	case ASUBEVCC:
-		return OPVCC(31, 136, 1, 1)
-	case ASUBME:
-		return OPVCC(31, 232, 0, 0)
-	case ASUBMECC:
-		return OPVCC(31, 232, 0, 1)
-	case ASUBMEV:
-		return OPVCC(31, 232, 1, 0)
-	case ASUBMEVCC:
-		return OPVCC(31, 232, 1, 1)
-	case ASUBZE:
-		return OPVCC(31, 200, 0, 0)
-	case ASUBZECC:
-		return OPVCC(31, 200, 0, 1)
-	case ASUBZEV:
-		return OPVCC(31, 200, 1, 0)
-	case ASUBZEVCC:
-		return OPVCC(31, 200, 1, 1)
-
-	case ASYNC:
-		return OPVCC(31, 598, 0, 0)
-	case ALWSYNC:
-		return OPVCC(31, 598, 0, 0) | 1<<21
-
-	case APTESYNC:
-		return OPVCC(31, 598, 0, 0) | 2<<21
-
-	case ATLBIE:
-		return OPVCC(31, 306, 0, 0)
-	case ATLBIEL:
-		return OPVCC(31, 274, 0, 0)
-	case ATLBSYNC:
-		return OPVCC(31, 566, 0, 0)
-	case ASLBIA:
-		return OPVCC(31, 498, 0, 0)
-	case ASLBIE:
-		return OPVCC(31, 434, 0, 0)
-	case ASLBMFEE:
-		return OPVCC(31, 915, 0, 0)
-	case ASLBMFEV:
-		return OPVCC(31, 851, 0, 0)
-	case ASLBMTE:
-		return OPVCC(31, 402, 0, 0)
-
-	case ATW:
-		return OPVCC(31, 4, 0, 0)
-	case ATD:
-		return OPVCC(31, 68, 0, 0)
-
-	/* Vector (VMX/Altivec) instructions */
-	/* ISA 2.03 enables these for PPC970. For POWERx processors, these */
-	/* are enabled starting at POWER6 (ISA 2.05). */
-	case AVANDL:
-		return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
-	case AVANDC:
-		return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
-	case AVNAND:
-		return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
-
-	case AVORL:
-		return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
-	case AVORC:
-		return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
-	case AVNOR:
-		return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
-	case AVXOR:
-		return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
-	case AVEQV:
-		return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
-
-	case AVADDUBM:
-		return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
-	case AVADDUHM:
-		return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
-	case AVADDUWM:
-		return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
-	case AVADDUDM:
-		return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
-	case AVADDUQM:
-		return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
-
-	case AVADDCUQ:
-		return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
-	case AVADDCUW:
-		return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
-
-	case AVADDUBS:
-		return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
-	case AVADDUHS:
-		return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
-	case AVADDUWS:
-		return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
-
-	case AVADDSBS:
-		return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
-	case AVADDSHS:
-		return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
-	case AVADDSWS:
-		return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
-
-	case AVADDEUQM:
-		return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
-	case AVADDECUQ:
-		return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
-
-	case AVSUBUBM:
-		return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
-	case AVSUBUHM:
-		return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
-	case AVSUBUWM:
-		return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
-	case AVSUBUDM:
-		return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
-	case AVSUBUQM:
-		return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
-
-	case AVSUBCUQ:
-		return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
-	case AVSUBCUW:
-		return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
-
-	case AVSUBUBS:
-		return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
-	case AVSUBUHS:
-		return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
-	case AVSUBUWS:
-		return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
-
-	case AVSUBSBS:
-		return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
-	case AVSUBSHS:
-		return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
-	case AVSUBSWS:
-		return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
-
-	case AVSUBEUQM:
-		return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
-	case AVSUBECUQ:
-		return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
-
-	case AVRLB:
-		return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
-	case AVRLH:
-		return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
-	case AVRLW:
-		return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
-	case AVRLD:
-		return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
-
-	case AVSLB:
-		return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
-	case AVSLH:
-		return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
-	case AVSLW:
-		return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
-	case AVSL:
-		return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
-	case AVSLO:
-		return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
-	case AVSRB:
-		return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
-	case AVSRH:
-		return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
-	case AVSRW:
-		return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
-	case AVSR:
-		return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
-	case AVSRO:
-		return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
-	case AVSLD:
-		return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
-	case AVSRD:
-		return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
-
-	case AVSRAB:
-		return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
-	case AVSRAH:
-		return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
-	case AVSRAW:
-		return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
-	case AVSRAD:
-		return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
-
-	case AVCLZB:
-		return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
-	case AVCLZH:
-		return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
-	case AVCLZW:
-		return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
-	case AVCLZD:
-		return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
-
-	case AVPOPCNTB:
-		return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
-	case AVPOPCNTH:
-		return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
-	case AVPOPCNTW:
-		return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
-	case AVPOPCNTD:
-		return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
-
-	case AVCMPEQUB:
-		return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
-	case AVCMPEQUBCC:
-		return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
-	case AVCMPEQUH:
-		return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
-	case AVCMPEQUHCC:
-		return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
-	case AVCMPEQUW:
-		return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
-	case AVCMPEQUWCC:
-		return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
-	case AVCMPEQUD:
-		return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
-	case AVCMPEQUDCC:
-		return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
-
-	case AVCMPGTUB:
-		return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
-	case AVCMPGTUBCC:
-		return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
-	case AVCMPGTUH:
-		return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
-	case AVCMPGTUHCC:
-		return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
-	case AVCMPGTUW:
-		return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
-	case AVCMPGTUWCC:
-		return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
-	case AVCMPGTUD:
-		return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
-	case AVCMPGTUDCC:
-		return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
-	case AVCMPGTSB:
-		return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
-	case AVCMPGTSBCC:
-		return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
-	case AVCMPGTSH:
-		return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
-	case AVCMPGTSHCC:
-		return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
-	case AVCMPGTSW:
-		return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
-	case AVCMPGTSWCC:
-		return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
-	case AVCMPGTSD:
-		return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
-	case AVCMPGTSDCC:
-		return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
-
-	case AVPERM:
-		return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
-
-	case AVSEL:
-		return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
-
-	case AVCIPHER:
-		return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
-	case AVCIPHERLAST:
-		return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
-	case AVNCIPHER:
-		return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
-	case AVNCIPHERLAST:
-		return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
-	case AVSBOX:
-		return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
-	/* End of vector instructions */
-
-	/* Vector scalar (VSX) instructions */
-	/* ISA 2.06 enables these for POWER7. */
-	case AMFVSRD:
-		return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
-	case AMFVSRWZ:
-		return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
-
-	case AMTVSRD:
-		return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
-	case AMTVSRWA:
-		return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
-	case AMTVSRWZ:
-		return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
-
-	case AXXLANDQ:
-		return OPVXX3(60, 130, 0) /* xxland - v2.06 */
-	case AXXLANDC:
-		return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
-	case AXXLEQV:
-		return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
-	case AXXLNAND:
-		return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
-
-	case AXXLORC:
-		return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
-	case AXXLNOR:
-		return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
-	case AXXLORQ:
-		return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
-	case AXXLXOR:
-		return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
-
-	case AXXSEL:
-		return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
-
-	case AXXMRGHW:
-		return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
-	case AXXMRGLW:
-		return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
-
-	case AXXSPLTW:
-		return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
-
-	case AXXPERMDI:
-		return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
-
-	case AXXSLDWI:
-		return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
-
-	case AXSCVDPSP:
-		return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
-	case AXSCVSPDP:
-		return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
-	case AXSCVDPSPN:
-		return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
-	case AXSCVSPDPN:
-		return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
-
-	case AXVCVDPSP:
-		return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
-	case AXVCVSPDP:
-		return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
-
-	case AXSCVDPSXDS:
-		return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
-	case AXSCVDPSXWS:
-		return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
-	case AXSCVDPUXDS:
-		return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
-	case AXSCVDPUXWS:
-		return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
-
-	case AXSCVSXDDP:
-		return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
-	case AXSCVUXDDP:
-		return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
-	case AXSCVSXDSP:
-		return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
-	case AXSCVUXDSP:
-		return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
-
-	case AXVCVDPSXDS:
-		return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
-	case AXVCVDPSXWS:
-		return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
-	case AXVCVDPUXDS:
-		return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
-	case AXVCVDPUXWS:
-		return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
-	case AXVCVSPSXDS:
-		return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
-	case AXVCVSPSXWS:
-		return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
-	case AXVCVSPUXDS:
-		return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
-	case AXVCVSPUXWS:
-		return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
-
-	case AXVCVSXDDP:
-		return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
-	case AXVCVSXWDP:
-		return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
-	case AXVCVUXDDP:
-		return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
-	case AXVCVUXWDP:
-		return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
-	case AXVCVSXDSP:
-		return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
-	case AXVCVSXWSP:
-		return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
-	case AXVCVUXDSP:
-		return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
-	case AXVCVUXWSP:
-		return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
-	/* End of VSX instructions */
-
-	case AXOR:
-		return OPVCC(31, 316, 0, 0)
-	case AXORCC:
-		return OPVCC(31, 316, 0, 1)
-	}
-
-	ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
-	return 0
-}
-
-func opirrr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	/* Vector (VMX/Altivec) instructions */
-	/* ISA 2.03 enables these for PPC970. For POWERx processors, these */
-	/* are enabled starting at POWER6 (ISA 2.05). */
-	case AVSLDOI:
-		return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
-	}
-
-	ctxt.Diag("bad i/r/r/r opcode %v", a)
-	return 0
-}
-
-func opiirr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	/* Vector (VMX/Altivec) instructions */
-	/* ISA 2.07 enables these for POWER8 and beyond. */
-	case AVSHASIGMAW:
-		return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
-	case AVSHASIGMAD:
-		return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
-	}
-
-	ctxt.Diag("bad i/i/r/r opcode %v", a)
-	return 0
-}
-
-func opirr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AADD:
-		return OPVCC(14, 0, 0, 0)
-	case AADDC:
-		return OPVCC(12, 0, 0, 0)
-	case AADDCCC:
-		return OPVCC(13, 0, 0, 0)
-	case -AADD:
-		return OPVCC(15, 0, 0, 0) /* ADDIS/CAU */
-
-	case AANDCC:
-		return OPVCC(28, 0, 0, 0)
-	case -AANDCC:
-		return OPVCC(29, 0, 0, 0) /* ANDIS./ANDIU. */
-
-	case ABR:
-		return OPVCC(18, 0, 0, 0)
-	case ABL:
-		return OPVCC(18, 0, 0, 0) | 1
-	case obj.ADUFFZERO:
-		return OPVCC(18, 0, 0, 0) | 1
-	case obj.ADUFFCOPY:
-		return OPVCC(18, 0, 0, 0) | 1
-	case ABC:
-		return OPVCC(16, 0, 0, 0)
-	case ABCL:
-		return OPVCC(16, 0, 0, 0) | 1
-
-	case ABEQ:
-		return AOP_RRR(16<<26, 12, 2, 0)
-	case ABGE:
-		return AOP_RRR(16<<26, 4, 0, 0)
-	case ABGT:
-		return AOP_RRR(16<<26, 12, 1, 0)
-	case ABLE:
-		return AOP_RRR(16<<26, 4, 1, 0)
-	case ABLT:
-		return AOP_RRR(16<<26, 12, 0, 0)
-	case ABNE:
-		return AOP_RRR(16<<26, 4, 2, 0)
-	case ABVC:
-		return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
-	case ABVS:
-		return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
-
-	case ACMP:
-		return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
-	case ACMPU:
-		return OPVCC(10, 0, 0, 0) | 1<<21
-	case ACMPW:
-		return OPVCC(11, 0, 0, 0) /* L=0 */
-	case ACMPWU:
-		return OPVCC(10, 0, 0, 0)
-	case ALSW:
-		return OPVCC(31, 597, 0, 0)
-
-	case AMULLW:
-		return OPVCC(7, 0, 0, 0)
-
-	case AOR:
-		return OPVCC(24, 0, 0, 0)
-	case -AOR:
-		return OPVCC(25, 0, 0, 0) /* ORIS/ORIU */
-
-	case ARLWMI:
-		return OPVCC(20, 0, 0, 0) /* rlwimi */
-	case ARLWMICC:
-		return OPVCC(20, 0, 0, 1)
-	case ARLDMI:
-		return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
-	case ARLDMICC:
-		return OPVCC(30, 0, 0, 1) | 3<<2
-	case ARLDIMI:
-		return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
-	case ARLDIMICC:
-		return OPVCC(30, 0, 0, 1) | 3<<2
-	case ARLWNM:
-		return OPVCC(21, 0, 0, 0) /* rlwinm */
-	case ARLWNMCC:
-		return OPVCC(21, 0, 0, 1)
-
-	case ARLDCL:
-		return OPVCC(30, 0, 0, 0) /* rldicl */
-	case ARLDCLCC:
-		return OPVCC(30, 0, 0, 1)
-	case ARLDCR:
-		return OPVCC(30, 1, 0, 0) /* rldicr */
-	case ARLDCRCC:
-		return OPVCC(30, 1, 0, 1)
-	case ARLDC:
-		return OPVCC(30, 0, 0, 0) | 2<<2
-	case ARLDCCC:
-		return OPVCC(30, 0, 0, 1) | 2<<2
-
-	case ASRAW:
-		return OPVCC(31, 824, 0, 0)
-	case ASRAWCC:
-		return OPVCC(31, 824, 0, 1)
-	case ASRAD:
-		return OPVCC(31, (413 << 1), 0, 0)
-	case ASRADCC:
-		return OPVCC(31, (413 << 1), 0, 1)
-
-	case ASTSW:
-		return OPVCC(31, 725, 0, 0)
-
-	case ASUBC:
-		return OPVCC(8, 0, 0, 0)
-
-	case ATW:
-		return OPVCC(3, 0, 0, 0)
-	case ATD:
-		return OPVCC(2, 0, 0, 0)
-
-	/* Vector (VMX/Altivec) instructions */
-	/* ISA 2.03 enables these for PPC970. For POWERx processors, these */
-	/* are enabled starting at POWER6 (ISA 2.05). */
-	case AVSPLTB:
-		return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
-	case AVSPLTH:
-		return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
-	case AVSPLTW:
-		return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
-
-	case AVSPLTISB:
-		return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
-	case AVSPLTISH:
-		return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
-	case AVSPLTISW:
-		return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
-	/* End of vector instructions */
-
-	case AXOR:
-		return OPVCC(26, 0, 0, 0) /* XORIL */
-	case -AXOR:
-		return OPVCC(27, 0, 0, 0) /* XORIU */
-	}
-
-	ctxt.Diag("bad opcode i/r or i/r/r %v", a)
-	return 0
-}
-
-/*
- * load o(a),d
- */
-func opload(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVD:
-		return OPVCC(58, 0, 0, 0) /* ld */
-	case AMOVDU:
-		return OPVCC(58, 0, 0, 1) /* ldu */
-	case AMOVWZ:
-		return OPVCC(32, 0, 0, 0) /* lwz */
-	case AMOVWZU:
-		return OPVCC(33, 0, 0, 0) /* lwzu */
-	case AMOVW:
-		return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
-
-		/* no AMOVWU */
-	case AMOVB, AMOVBZ:
-		return OPVCC(34, 0, 0, 0)
-		/* load */
-
-	case AMOVBU, AMOVBZU:
-		return OPVCC(35, 0, 0, 0)
-	case AFMOVD:
-		return OPVCC(50, 0, 0, 0)
-	case AFMOVDU:
-		return OPVCC(51, 0, 0, 0)
-	case AFMOVS:
-		return OPVCC(48, 0, 0, 0)
-	case AFMOVSU:
-		return OPVCC(49, 0, 0, 0)
-	case AMOVH:
-		return OPVCC(42, 0, 0, 0)
-	case AMOVHU:
-		return OPVCC(43, 0, 0, 0)
-	case AMOVHZ:
-		return OPVCC(40, 0, 0, 0)
-	case AMOVHZU:
-		return OPVCC(41, 0, 0, 0)
-	case AMOVMW:
-		return OPVCC(46, 0, 0, 0) /* lmw */
-	}
-
-	ctxt.Diag("bad load opcode %v", a)
-	return 0
-}
-
-/*
- * indexed load a(b),d
- */
-func oploadx(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVWZ:
-		return OPVCC(31, 23, 0, 0) /* lwzx */
-	case AMOVWZU:
-		return OPVCC(31, 55, 0, 0) /* lwzux */
-	case AMOVW:
-		return OPVCC(31, 341, 0, 0) /* lwax */
-	case AMOVWU:
-		return OPVCC(31, 373, 0, 0) /* lwaux */
-
-	case AMOVB, AMOVBZ:
-		return OPVCC(31, 87, 0, 0) /* lbzx */
-
-	case AMOVBU, AMOVBZU:
-		return OPVCC(31, 119, 0, 0) /* lbzux */
-	case AFMOVD:
-		return OPVCC(31, 599, 0, 0) /* lfdx */
-	case AFMOVDU:
-		return OPVCC(31, 631, 0, 0) /*  lfdux */
-	case AFMOVS:
-		return OPVCC(31, 535, 0, 0) /* lfsx */
-	case AFMOVSU:
-		return OPVCC(31, 567, 0, 0) /* lfsux */
-	case AFMOVSX:
-		return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
-	case AFMOVSZ:
-		return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
-	case AMOVH:
-		return OPVCC(31, 343, 0, 0) /* lhax */
-	case AMOVHU:
-		return OPVCC(31, 375, 0, 0) /* lhaux */
-	case AMOVHBR:
-		return OPVCC(31, 790, 0, 0) /* lhbrx */
-	case AMOVWBR:
-		return OPVCC(31, 534, 0, 0) /* lwbrx */
-	case AMOVDBR:
-		return OPVCC(31, 532, 0, 0) /* ldbrx */
-	case AMOVHZ:
-		return OPVCC(31, 279, 0, 0) /* lhzx */
-	case AMOVHZU:
-		return OPVCC(31, 311, 0, 0) /* lhzux */
-	case AECIWX:
-		return OPVCC(31, 310, 0, 0) /* eciwx */
-	case ALBAR:
-		return OPVCC(31, 52, 0, 0) /* lbarx */
-	case ALWAR:
-		return OPVCC(31, 20, 0, 0) /* lwarx */
-	case ALDAR:
-		return OPVCC(31, 84, 0, 0)
-	case ALSW:
-		return OPVCC(31, 533, 0, 0) /* lswx */
-	case AMOVD:
-		return OPVCC(31, 21, 0, 0) /* ldx */
-	case AMOVDU:
-		return OPVCC(31, 53, 0, 0) /* ldux */
-
-	/* Vector (VMX/Altivec) instructions */
-	/* ISA 2.03 enables these for PPC970. For POWERx processors, these */
-	/* are enabled starting at POWER6 (ISA 2.05). */
-	case ALVEBX:
-		return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
-	case ALVEHX:
-		return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
-	case ALVEWX:
-		return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
-	case ALVX:
-		return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
-	case ALVXL:
-		return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
-	case ALVSL:
-		return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
-	case ALVSR:
-		return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
-		/* End of vector instructions */
-
-	/* Vector scalar (VSX) instructions */
-	/* ISA 2.06 enables these for POWER7. */
-	case ALXVD2X:
-		return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
-	case ALXVDSX:
-		return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
-	case ALXVW4X:
-		return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
-
-	case ALXSDX:
-		return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
-
-	case ALXSIWAX:
-		return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
-	case ALXSIWZX:
-		return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
-		/* End of vector scalar instructions */
-
-	}
-
-	ctxt.Diag("bad loadx opcode %v", a)
-	return 0
-}
-
-/*
- * store s,o(d)
- */
-func opstore(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVB, AMOVBZ:
-		return OPVCC(38, 0, 0, 0) /* stb */
-
-	case AMOVBU, AMOVBZU:
-		return OPVCC(39, 0, 0, 0) /* stbu */
-	case AFMOVD:
-		return OPVCC(54, 0, 0, 0) /* stfd */
-	case AFMOVDU:
-		return OPVCC(55, 0, 0, 0) /* stfdu */
-	case AFMOVS:
-		return OPVCC(52, 0, 0, 0) /* stfs */
-	case AFMOVSU:
-		return OPVCC(53, 0, 0, 0) /* stfsu */
-
-	case AMOVHZ, AMOVH:
-		return OPVCC(44, 0, 0, 0) /* sth */
-
-	case AMOVHZU, AMOVHU:
-		return OPVCC(45, 0, 0, 0) /* sthu */
-	case AMOVMW:
-		return OPVCC(47, 0, 0, 0) /* stmw */
-	case ASTSW:
-		return OPVCC(31, 725, 0, 0) /* stswi */
-
-	case AMOVWZ, AMOVW:
-		return OPVCC(36, 0, 0, 0) /* stw */
-
-	case AMOVWZU, AMOVWU:
-		return OPVCC(37, 0, 0, 0) /* stwu */
-	case AMOVD:
-		return OPVCC(62, 0, 0, 0) /* std */
-	case AMOVDU:
-		return OPVCC(62, 0, 0, 1) /* stdu */
-	}
-
-	ctxt.Diag("unknown store opcode %v", a)
-	return 0
-}
-
-/*
- * indexed store s,a(b)
- */
-func opstorex(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case AMOVB, AMOVBZ:
-		return OPVCC(31, 215, 0, 0) /* stbx */
-
-	case AMOVBU, AMOVBZU:
-		return OPVCC(31, 247, 0, 0) /* stbux */
-	case AFMOVD:
-		return OPVCC(31, 727, 0, 0) /* stfdx */
-	case AFMOVDU:
-		return OPVCC(31, 759, 0, 0) /* stfdux */
-	case AFMOVS:
-		return OPVCC(31, 663, 0, 0) /* stfsx */
-	case AFMOVSU:
-		return OPVCC(31, 695, 0, 0) /* stfsux */
-	case AFMOVSX:
-		return OPVCC(31, 983, 0, 0) /* stfiwx */
-
-	case AMOVHZ, AMOVH:
-		return OPVCC(31, 407, 0, 0) /* sthx */
-	case AMOVHBR:
-		return OPVCC(31, 918, 0, 0) /* sthbrx */
-
-	case AMOVHZU, AMOVHU:
-		return OPVCC(31, 439, 0, 0) /* sthux */
-
-	case AMOVWZ, AMOVW:
-		return OPVCC(31, 151, 0, 0) /* stwx */
-
-	case AMOVWZU, AMOVWU:
-		return OPVCC(31, 183, 0, 0) /* stwux */
-	case ASTSW:
-		return OPVCC(31, 661, 0, 0) /* stswx */
-	case AMOVWBR:
-		return OPVCC(31, 662, 0, 0) /* stwbrx */
-	case ASTBCCC:
-		return OPVCC(31, 694, 0, 1) /* stbcx. */
-	case ASTWCCC:
-		return OPVCC(31, 150, 0, 1) /* stwcx. */
-	case ASTDCCC:
-		return OPVCC(31, 214, 0, 1) /* stwdx. */
-	case AECOWX:
-		return OPVCC(31, 438, 0, 0) /* ecowx */
-	case AMOVD:
-		return OPVCC(31, 149, 0, 0) /* stdx */
-	case AMOVDU:
-		return OPVCC(31, 181, 0, 0) /* stdux */
-
-	/* Vector (VMX/Altivec) instructions */
-	/* ISA 2.03 enables these for PPC970. For POWERx processors, these */
-	/* are enabled starting at POWER6 (ISA 2.05). */
-	case ASTVEBX:
-		return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
-	case ASTVEHX:
-		return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
-	case ASTVEWX:
-		return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
-	case ASTVX:
-		return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
-	case ASTVXL:
-		return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
-		/* End of vector instructions */
-
-	/* Vector scalar (VSX) instructions */
-	/* ISA 2.06 enables these for POWER7. */
-	case ASTXVD2X:
-		return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
-	case ASTXVW4X:
-		return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
-
-	case ASTXSDX:
-		return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
-
-	case ASTXSIWX:
-		return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
-		/* End of vector scalar instructions */
-
-	}
-
-	ctxt.Diag("unknown storex opcode %v", a)
-	return 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/list9.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/list9.go
deleted file mode 100644
index 6dc30b8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/list9.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/list9.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/list9.go:1
-// cmd/9l/list.c from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-func init() {
-	obj.RegisterRegister(obj.RBasePPC64, REG_DCR0+1024, Rconv)
-	obj.RegisterOpcode(obj.ABasePPC64, Anames)
-}
-
-func Rconv(r int) string {
-	if r == 0 {
-		return "NONE"
-	}
-	if r == REGG {
-		// Special case.
-		return "g"
-	}
-	if REG_R0 <= r && r <= REG_R31 {
-		return fmt.Sprintf("R%d", r-REG_R0)
-	}
-	if REG_F0 <= r && r <= REG_F31 {
-		return fmt.Sprintf("F%d", r-REG_F0)
-	}
-	if REG_V0 <= r && r <= REG_V31 {
-		return fmt.Sprintf("V%d", r-REG_V0)
-	}
-	if REG_VS0 <= r && r <= REG_VS63 {
-		return fmt.Sprintf("VS%d", r-REG_VS0)
-	}
-	if REG_CR0 <= r && r <= REG_CR7 {
-		return fmt.Sprintf("CR%d", r-REG_CR0)
-	}
-	if r == REG_CR {
-		return "CR"
-	}
-	if REG_SPR0 <= r && r <= REG_SPR0+1023 {
-		switch r {
-		case REG_XER:
-			return "XER"
-
-		case REG_LR:
-			return "LR"
-
-		case REG_CTR:
-			return "CTR"
-		}
-
-		return fmt.Sprintf("SPR(%d)", r-REG_SPR0)
-	}
-
-	if REG_DCR0 <= r && r <= REG_DCR0+1023 {
-		return fmt.Sprintf("DCR(%d)", r-REG_DCR0)
-	}
-	if r == REG_FPSCR {
-		return "FPSCR"
-	}
-	if r == REG_MSR {
-		return "MSR"
-	}
-
-	return fmt.Sprintf("Rgok(%d)", r-obj.RBasePPC64)
-}
-
-func DRconv(a int) string {
-	s := "C_??"
-	if a >= C_NONE && a <= C_NCLASS {
-		s = cnames9[a]
-	}
-	var fp string
-	fp += s
-	return fp
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/obj9.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/obj9.go
deleted file mode 100644
index a0d371b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/ppc64/obj9.go
+++ /dev/null
@@ -1,1253 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/obj9.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/ppc64/obj9.go:1
-// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"math"
-)
-
-func progedit(ctxt *obj.Link, p *obj.Prog) {
-	p.From.Class = 0
-	p.To.Class = 0
-
-	// Rewrite BR/BL to symbol as TYPE_BRANCH.
-	switch p.As {
-	case ABR,
-		ABL,
-		obj.ARET,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
-		if p.To.Sym != nil {
-			p.To.Type = obj.TYPE_BRANCH
-		}
-	}
-
-	// Rewrite float constants to values stored in memory.
-	switch p.As {
-	case AFMOVS:
-		if p.From.Type == obj.TYPE_FCONST {
-			f32 := float32(p.From.Val.(float64))
-			i32 := math.Float32bits(f32)
-			literal := fmt.Sprintf("$f32.%08x", i32)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 4
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-	case AFMOVD:
-		if p.From.Type == obj.TYPE_FCONST {
-			i64 := math.Float64bits(p.From.Val.(float64))
-			literal := fmt.Sprintf("$f64.%016x", i64)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 8
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-		// Put >32-bit constants in memory and load them
-	case AMOVD:
-		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
-			literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 8
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-	}
-
-	// Rewrite SUB constants into ADD.
-	switch p.As {
-	case ASUBC:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADDC
-		}
-
-	case ASUBCCC:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADDCCC
-		}
-
-	case ASUB:
-		if p.From.Type == obj.TYPE_CONST {
-			p.From.Offset = -p.From.Offset
-			p.As = AADD
-		}
-	}
-	if ctxt.Flag_dynlink {
-		rewriteToUseGot(ctxt, p)
-	}
-}
-
-// Rewrite p, if necessary, to access global data via the global offset table.
-func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
-	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
-		//     ADUFFxxx $offset
-		// becomes
-		//     MOVD runtime.duffxxx@GOT, R12
-		//     ADD $offset, R12
-		//     MOVD R12, CTR
-		//     BL (CTR)
-		var sym *obj.LSym
-		if p.As == obj.ADUFFZERO {
-			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
-		} else {
-			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
-		}
-		offset := p.To.Offset
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		p.From.Sym = sym
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R12
-		p.To.Name = obj.NAME_NONE
-		p.To.Offset = 0
-		p.To.Sym = nil
-		p1 := obj.Appendp(ctxt, p)
-		p1.As = AADD
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = offset
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = REG_R12
-		p2 := obj.Appendp(ctxt, p1)
-		p2.As = AMOVD
-		p2.From.Type = obj.TYPE_REG
-		p2.From.Reg = REG_R12
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = REG_CTR
-		p3 := obj.Appendp(ctxt, p2)
-		p3.As = obj.ACALL
-		p3.From.Type = obj.TYPE_REG
-		p3.From.Reg = REG_R12
-		p3.To.Type = obj.TYPE_REG
-		p3.To.Reg = REG_CTR
-	}
-
-	// We only care about global data: NAME_EXTERN means a global
-	// symbol in the Go sense, and p.Sym.Local is true for a few
-	// internally defined symbols.
-	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		// MOVD $sym, Rx becomes MOVD sym@GOT, Rx
-		// MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx
-		if p.As != AMOVD {
-			ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
-		}
-		if p.To.Type != obj.TYPE_REG {
-			ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		if p.From.Offset != 0 {
-			q := obj.Appendp(ctxt, p)
-			q.As = AADD
-			q.From.Type = obj.TYPE_CONST
-			q.From.Offset = p.From.Offset
-			q.To = p.To
-			p.From.Offset = 0
-		}
-	}
-	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	var source *obj.Addr
-	// MOVx sym, Ry becomes MOVD sym@GOT, REGTMP; MOVx (REGTMP), Ry
-	// MOVx Ry, sym becomes MOVD sym@GOT, REGTMP; MOVx Ry, (REGTMP)
-	// An addition may be inserted between the two MOVs if there is an offset.
-	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
-		}
-		source = &p.From
-	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-		source = &p.To
-	} else {
-		return
-	}
-	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
-		return
-	}
-	if source.Sym.Type == obj.STLSBSS {
-		return
-	}
-	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	p1 := obj.Appendp(ctxt, p)
-	p2 := obj.Appendp(ctxt, p1)
-
-	p1.As = AMOVD
-	p1.From.Type = obj.TYPE_MEM
-	p1.From.Sym = source.Sym
-	p1.From.Name = obj.NAME_GOTREF
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = REGTMP
-
-	p2.As = p.As
-	p2.From = p.From
-	p2.To = p.To
-	if p.From.Name == obj.NAME_EXTERN {
-		p2.From.Reg = REGTMP
-		p2.From.Name = obj.NAME_NONE
-		p2.From.Sym = nil
-	} else if p.To.Name == obj.NAME_EXTERN {
-		p2.To.Reg = REGTMP
-		p2.To.Name = obj.NAME_NONE
-		p2.To.Sym = nil
-	} else {
-		return
-	}
-	obj.Nopout(p)
-}
-
-func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
-	// TODO(minux): add morestack short-cuts with small fixed frame-size.
-	ctxt.Cursym = cursym
-
-	if cursym.Text == nil || cursym.Text.Link == nil {
-		return
-	}
-
-	p := cursym.Text
-	textstksiz := p.To.Offset
-	if textstksiz == -8 {
-		// Compatibility hack.
-		p.From3.Offset |= obj.NOFRAME
-		textstksiz = 0
-	}
-	if textstksiz%8 != 0 {
-		ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
-	}
-	if p.From3.Offset&obj.NOFRAME != 0 {
-		if textstksiz != 0 {
-			ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
-		}
-	}
-
-	cursym.Args = p.To.Val.(int32)
-	cursym.Locals = int32(textstksiz)
-
-	/*
-	 * find leaf subroutines
-	 * strip NOPs
-	 * expand RET
-	 * expand BECOME pseudo
-	 */
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f noops\n", obj.Cputime())
-	}
-
-	var q *obj.Prog
-	var q1 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		switch p.As {
-		/* too hard, just leave alone */
-		case obj.ATEXT:
-			q = p
-
-			p.Mark |= LABEL | LEAF | SYNC
-			if p.Link != nil {
-				p.Link.Mark |= LABEL
-			}
-
-		case ANOR:
-			q = p
-			if p.To.Type == obj.TYPE_REG {
-				if p.To.Reg == REGZERO {
-					p.Mark |= LABEL | SYNC
-				}
-			}
-
-		case ALWAR,
-			ALBAR,
-			ASTBCCC,
-			ASTWCCC,
-			AECIWX,
-			AECOWX,
-			AEIEIO,
-			AICBI,
-			AISYNC,
-			ATLBIE,
-			ATLBIEL,
-			ASLBIA,
-			ASLBIE,
-			ASLBMFEE,
-			ASLBMFEV,
-			ASLBMTE,
-			ADCBF,
-			ADCBI,
-			ADCBST,
-			ADCBT,
-			ADCBTST,
-			ADCBZ,
-			ASYNC,
-			ATLBSYNC,
-			APTESYNC,
-			ALWSYNC,
-			ATW,
-			AWORD,
-			ARFI,
-			ARFCI,
-			ARFID,
-			AHRFID:
-			q = p
-			p.Mark |= LABEL | SYNC
-			continue
-
-		case AMOVW, AMOVWZ, AMOVD:
-			q = p
-			if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL {
-				p.Mark |= LABEL | SYNC
-			}
-			continue
-
-		case AFABS,
-			AFABSCC,
-			AFADD,
-			AFADDCC,
-			AFCTIW,
-			AFCTIWCC,
-			AFCTIWZ,
-			AFCTIWZCC,
-			AFDIV,
-			AFDIVCC,
-			AFMADD,
-			AFMADDCC,
-			AFMOVD,
-			AFMOVDU,
-			/* case AFMOVDS: */
-			AFMOVS,
-			AFMOVSU,
-
-			/* case AFMOVSD: */
-			AFMSUB,
-			AFMSUBCC,
-			AFMUL,
-			AFMULCC,
-			AFNABS,
-			AFNABSCC,
-			AFNEG,
-			AFNEGCC,
-			AFNMADD,
-			AFNMADDCC,
-			AFNMSUB,
-			AFNMSUBCC,
-			AFRSP,
-			AFRSPCC,
-			AFSUB,
-			AFSUBCC:
-			q = p
-
-			p.Mark |= FLOAT
-			continue
-
-		case ABL,
-			ABCL,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			cursym.Text.Mark &^= LEAF
-			fallthrough
-
-		case ABC,
-			ABEQ,
-			ABGE,
-			ABGT,
-			ABLE,
-			ABLT,
-			ABNE,
-			ABR,
-			ABVC,
-			ABVS:
-			p.Mark |= BRANCH
-			q = p
-			q1 = p.Pcond
-			if q1 != nil {
-				for q1.As == obj.ANOP {
-					q1 = q1.Link
-					p.Pcond = q1
-				}
-
-				if q1.Mark&LEAF == 0 {
-					q1.Mark |= LABEL
-				}
-			} else {
-				p.Mark |= LABEL
-			}
-			q1 = p.Link
-			if q1 != nil {
-				q1.Mark |= LABEL
-			}
-			continue
-
-		case AFCMPO, AFCMPU:
-			q = p
-			p.Mark |= FCMP | FLOAT
-			continue
-
-		case obj.ARET:
-			q = p
-			if p.Link != nil {
-				p.Link.Mark |= LABEL
-			}
-			continue
-
-		case obj.ANOP:
-			q1 = p.Link
-			q.Link = q1 /* q is non-nop */
-			q1.Mark |= p.Mark
-			continue
-
-		default:
-			q = p
-			continue
-		}
-	}
-
-	autosize := int32(0)
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		o := p.As
-		switch o {
-		case obj.ATEXT:
-			autosize = int32(textstksiz)
-
-			if p.Mark&LEAF != 0 && autosize == 0 {
-				// A leaf function with no locals has no frame.
-				p.From3.Offset |= obj.NOFRAME
-			}
-
-			if p.From3.Offset&obj.NOFRAME == 0 {
-				// If there is a stack frame at all, it includes
-				// space to save the LR.
-				autosize += int32(ctxt.FixedFrameSize())
-			}
-
-			if p.Mark&LEAF != 0 && autosize < obj.StackSmall {
-				// A leaf function with a small stack can be marked
-				// NOSPLIT, avoiding a stack check.
-				p.From3.Offset |= obj.NOSPLIT
-			}
-
-			p.To.Offset = int64(autosize)
-
-			q = p
-
-			if ctxt.Flag_shared && cursym.Name != "runtime.duffzero" && cursym.Name != "runtime.duffcopy" && cursym.Name != "runtime.stackBarrier" {
-				// When compiling Go into PIC, all functions must start
-				// with instructions to load the TOC pointer into r2:
-				//
-				//	addis r2, r12, .TOC.-func@ha
-				//	addi r2, r2, .TOC.-func@l+4
-				//
-				// We could probably skip this prologue in some situations
-				// but it's a bit subtle. However, it is both safe and
-				// necessary to leave the prologue off duffzero and
-				// duffcopy as we rely on being able to jump to a specific
-				// instruction offset for them, and stackBarrier is only
-				// ever called from an overwritten LR-save slot on the
-				// stack (when r12 will not be remotely the right thing)
-				// but fortunately does not access global data.
-				//
-				// These are AWORDS because there is no (afaict) way to
-				// generate the addis instruction except as part of the
-				// load of a large constant, and in that case there is no
-				// way to use r12 as the source.
-				q = obj.Appendp(ctxt, q)
-				q.As = AWORD
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = 0x3c4c0000
-				q = obj.Appendp(ctxt, q)
-				q.As = AWORD
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = 0x38420000
-				rel := obj.Addrel(ctxt.Cursym)
-				rel.Off = 0
-				rel.Siz = 8
-				rel.Sym = obj.Linklookup(ctxt, ".TOC.", 0)
-				rel.Type = obj.R_ADDRPOWER_PCREL
-			}
-
-			if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
-				q = stacksplit(ctxt, q, autosize) // emit split check
-			}
-
-			if autosize != 0 {
-				// Make sure to save link register for non-empty frame, even if
-				// it is a leaf function, so that traceback works.
-				if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG {
-					// Use MOVDU to adjust R1 when saving R31, if autosize is small.
-					q = obj.Appendp(ctxt, q)
-					q.As = AMOVD
-					q.Lineno = p.Lineno
-					q.From.Type = obj.TYPE_REG
-					q.From.Reg = REG_LR
-					q.To.Type = obj.TYPE_REG
-					q.To.Reg = REGTMP
-
-					q = obj.Appendp(ctxt, q)
-					q.As = AMOVDU
-					q.Lineno = p.Lineno
-					q.From.Type = obj.TYPE_REG
-					q.From.Reg = REGTMP
-					q.To.Type = obj.TYPE_MEM
-					q.To.Offset = int64(-autosize)
-					q.To.Reg = REGSP
-					q.Spadj = int32(autosize)
-				} else {
-					// Frame size is too large for a MOVDU instruction.
-					// Store link register before decrementing SP, so if a signal comes
-					// during the execution of the function prologue, the traceback
-					// code will not see a half-updated stack frame.
-					q = obj.Appendp(ctxt, q)
-					q.As = AMOVD
-					q.Lineno = p.Lineno
-					q.From.Type = obj.TYPE_REG
-					q.From.Reg = REG_LR
-					q.To.Type = obj.TYPE_REG
-					q.To.Reg = REG_R29 // REGTMP may be used to synthesize large offset in the next instruction
-
-					q = obj.Appendp(ctxt, q)
-					q.As = AMOVD
-					q.Lineno = p.Lineno
-					q.From.Type = obj.TYPE_REG
-					q.From.Reg = REG_R29
-					q.To.Type = obj.TYPE_MEM
-					q.To.Offset = int64(-autosize)
-					q.To.Reg = REGSP
-
-					q = obj.Appendp(ctxt, q)
-					q.As = AADD
-					q.Lineno = p.Lineno
-					q.From.Type = obj.TYPE_CONST
-					q.From.Offset = int64(-autosize)
-					q.To.Type = obj.TYPE_REG
-					q.To.Reg = REGSP
-					q.Spadj = +autosize
-				}
-			} else if cursym.Text.Mark&LEAF == 0 {
-				// A very few functions that do not return to their caller
-				// (e.g. gogo) are not identified as leaves but still have
-				// no frame.
-				cursym.Text.Mark |= LEAF
-			}
-
-			if cursym.Text.Mark&LEAF != 0 {
-				cursym.Set(obj.AttrLeaf, true)
-				break
-			}
-
-			if ctxt.Flag_shared {
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R2
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REGSP
-				q.To.Offset = 24
-			}
-
-			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
-				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
-				//
-				//	MOVD g_panic(g), R3
-				//	CMP R0, R3
-				//	BEQ end
-				//	MOVD panic_argp(R3), R4
-				//	ADD $(autosize+8), R1, R5
-				//	CMP R4, R5
-				//	BNE end
-				//	ADD $8, R1, R6
-				//	MOVD R6, panic_argp(R3)
-				// end:
-				//	NOP
-				//
-				// The NOP is needed to give the jumps somewhere to land.
-				// It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes.
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REGG
-				q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R3
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ACMP
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R0
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R3
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABEQ
-				q.To.Type = obj.TYPE_BRANCH
-				p1 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REG_R3
-				q.From.Offset = 0 // Panic.argp
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R4
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(autosize) + ctxt.FixedFrameSize()
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R5
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ACMP
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R4
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R5
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABNE
-				q.To.Type = obj.TYPE_BRANCH
-				p2 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = ctxt.FixedFrameSize()
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R6
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R6
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REG_R3
-				q.To.Offset = 0 // Panic.argp
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = obj.ANOP
-				p1.Pcond = q
-				p2.Pcond = q
-			}
-
-		case obj.ARET:
-			if p.From.Type == obj.TYPE_CONST {
-				ctxt.Diag("using BECOME (%v) is not supported!", p)
-				break
-			}
-
-			retTarget := p.To.Sym
-
-			if cursym.Text.Mark&LEAF != 0 {
-				if autosize == 0 {
-					p.As = ABR
-					p.From = obj.Addr{}
-					if retTarget == nil {
-						p.To.Type = obj.TYPE_REG
-						p.To.Reg = REG_LR
-					} else {
-						p.To.Type = obj.TYPE_BRANCH
-						p.To.Sym = retTarget
-					}
-					p.Mark |= BRANCH
-					break
-				}
-
-				p.As = AADD
-				p.From.Type = obj.TYPE_CONST
-				p.From.Offset = int64(autosize)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REGSP
-				p.Spadj = -autosize
-
-				q = ctxt.NewProg()
-				q.As = ABR
-				q.Lineno = p.Lineno
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_LR
-				q.Mark |= BRANCH
-				q.Spadj = +autosize
-
-				q.Link = p.Link
-				p.Link = q
-				break
-			}
-
-			p.As = AMOVD
-			p.From.Type = obj.TYPE_MEM
-			p.From.Offset = 0
-			p.From.Reg = REGSP
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REGTMP
-
-			q = ctxt.NewProg()
-			q.As = AMOVD
-			q.Lineno = p.Lineno
-			q.From.Type = obj.TYPE_REG
-			q.From.Reg = REGTMP
-			q.To.Type = obj.TYPE_REG
-			q.To.Reg = REG_LR
-
-			q.Link = p.Link
-			p.Link = q
-			p = q
-
-			if false {
-				// Debug bad returns
-				q = ctxt.NewProg()
-
-				q.As = AMOVD
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_MEM
-				q.From.Offset = 0
-				q.From.Reg = REGTMP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGTMP
-
-				q.Link = p.Link
-				p.Link = q
-				p = q
-			}
-
-			if autosize != 0 {
-				q = ctxt.NewProg()
-				q.As = AADD
-				q.Lineno = p.Lineno
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(autosize)
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGSP
-				q.Spadj = -autosize
-
-				q.Link = p.Link
-				p.Link = q
-			}
-
-			q1 = ctxt.NewProg()
-			q1.As = ABR
-			q1.Lineno = p.Lineno
-			if retTarget == nil {
-				q1.To.Type = obj.TYPE_REG
-				q1.To.Reg = REG_LR
-			} else {
-				q1.To.Type = obj.TYPE_BRANCH
-				q1.To.Sym = retTarget
-			}
-			q1.Mark |= BRANCH
-			q1.Spadj = +autosize
-
-			q1.Link = q.Link
-			q.Link = q1
-		case AADD:
-			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
-				p.Spadj = int32(-p.From.Offset)
-			}
-		}
-	}
-}
-
-/*
-// instruction scheduling
-	if(debug['Q'] == 0)
-		return;
-
-	curtext = nil;
-	q = nil;	// p - 1
-	q1 = firstp;	// top of block
-	o = 0;		// count of instructions
-	for(p = firstp; p != nil; p = p1) {
-		p1 = p->link;
-		o++;
-		if(p->mark & NOSCHED){
-			if(q1 != p){
-				sched(q1, q);
-			}
-			for(; p != nil; p = p->link){
-				if(!(p->mark & NOSCHED))
-					break;
-				q = p;
-			}
-			p1 = p;
-			q1 = p;
-			o = 0;
-			continue;
-		}
-		if(p->mark & (LABEL|SYNC)) {
-			if(q1 != p)
-				sched(q1, q);
-			q1 = p;
-			o = 1;
-		}
-		if(p->mark & (BRANCH|SYNC)) {
-			sched(q1, p);
-			q1 = p1;
-			o = 0;
-		}
-		if(o >= NSCHED) {
-			sched(q1, p);
-			q1 = p1;
-			o = 0;
-		}
-		q = p;
-	}
-*/
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
-	p0 := p // save entry point, but skipping the two instructions setting R2 in shared mode
-
-	// MOVD	g_stackguard(g), R3
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AMOVD
-	p.From.Type = obj.TYPE_MEM
-	p.From.Reg = REGG
-	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-	if ctxt.Cursym.CFunc() {
-		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-	}
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R3
-
-	var q *obj.Prog
-	if framesize <= obj.StackSmall {
-		// small stack: SP < stackguard
-		//	CMP	stackguard, SP
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ACMPU
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REGSP
-	} else if framesize <= obj.StackBig {
-		// large stack: SP-framesize < stackguard-StackSmall
-		//	ADD $-framesize, SP, R4
-		//	CMP stackguard, R4
-		p = obj.Appendp(ctxt, p)
-
-		p.As = AADD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-framesize)
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMPU
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-	} else {
-		// Such a large stack we need to protect against wraparound.
-		// If SP is close to zero:
-		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
-		// The +StackGuard on both sides is required to keep the left side positive:
-		// SP is allowed to be slightly below stackguard. See stack.h.
-		//
-		// Preemption sets stackguard to StackPreempt, a very large value.
-		// That breaks the math above, so we have to check for that explicitly.
-		//	// stackguard is R3
-		//	CMP	R3, $StackPreempt
-		//	BEQ	label-of-call-to-morestack
-		//	ADD	$StackGuard, SP, R4
-		//	SUB	R3, R4
-		//	MOVD	$(framesize+(StackGuard-StackSmall)), R31
-		//	CMPU	R31, R4
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = obj.StackPreempt
-
-		p = obj.Appendp(ctxt, p)
-		q = p
-		p.As = ABEQ
-		p.To.Type = obj.TYPE_BRANCH
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AADD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = obj.StackGuard
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ASUB
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REGTMP
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMPU
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REGTMP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-	}
-
-	// q1: BLT	done
-	p = obj.Appendp(ctxt, p)
-	q1 := p
-
-	p.As = ABLT
-	p.To.Type = obj.TYPE_BRANCH
-
-	// MOVD	LR, R5
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AMOVD
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = REG_LR
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R5
-	if q != nil {
-		q.Pcond = p
-	}
-
-	var morestacksym *obj.LSym
-	if ctxt.Cursym.CFunc() {
-		morestacksym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
-	} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
-		morestacksym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
-	} else {
-		morestacksym = obj.Linklookup(ctxt, "runtime.morestack", 0)
-	}
-
-	if ctxt.Flag_shared {
-		// In PPC64 PIC code, R2 is used as TOC pointer derived from R12
-		// which is the address of function entry point when entering
-		// the function. We need to preserve R2 across call to morestack.
-		// Fortunately, in shared mode, 8(SP) and 16(SP) are reserved in
-		// the caller's frame, but not used (0(SP) is caller's saved LR,
-		// 24(SP) is caller's saved R2). Use 8(SP) to save this function's R2.
-
-		// MOVD R12, 8(SP)
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R2
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = REGSP
-		p.To.Offset = 8
-	}
-
-	if ctxt.Flag_dynlink {
-		// Avoid calling morestack via a PLT when dynamically linking. The
-		// PLT stubs generated by the system linker on ppc64le when "std r2,
-		// 24(r1)" to save the TOC pointer in their callers stack
-		// frame. Unfortunately (and necessarily) morestack is called before
-		// the function that calls it sets up its frame and so the PLT ends
-		// up smashing the saved TOC pointer for its caller's caller.
-		//
-		// According to the ABI documentation there is a mechanism to avoid
-		// the TOC save that the PLT stub does (put a R_PPC64_TOCSAVE
-		// relocation on the nop after the call to morestack) but at the time
-		// of writing it is not supported at all by gold and my attempt to
-		// use it with ld.bfd caused an internal linker error. So this hack
-		// seems preferable.
-
-		// MOVD $runtime.morestack(SB), R12
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_MEM
-		p.From.Sym = morestacksym
-		p.From.Name = obj.NAME_GOTREF
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R12
-
-		// MOVD R12, CTR
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R12
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_CTR
-
-		// BL CTR
-		p = obj.Appendp(ctxt, p)
-		p.As = obj.ACALL
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R12
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_CTR
-	} else {
-		// BL	runtime.morestack(SB)
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ABL
-		p.To.Type = obj.TYPE_BRANCH
-		p.To.Sym = morestacksym
-	}
-
-	if ctxt.Flag_shared {
-		// MOVD 8(SP), R2
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REGSP
-		p.From.Offset = 8
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R2
-	}
-
-	// BR	start
-	p = obj.Appendp(ctxt, p)
-	p.As = ABR
-	p.To.Type = obj.TYPE_BRANCH
-	p.Pcond = p0.Link
-
-	// placeholder for q1's jump target
-	p = obj.Appendp(ctxt, p)
-
-	p.As = obj.ANOP // zero-width place holder
-	q1.Pcond = p
-
-	return p
-}
-
-func follow(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	firstp := ctxt.NewProg()
-	lastp := firstp
-	xfol(ctxt, s.Text, &lastp)
-	lastp.Link = nil
-	s.Text = firstp.Link
-}
-
-func relinv(a obj.As) obj.As {
-	switch a {
-	case ABEQ:
-		return ABNE
-	case ABNE:
-		return ABEQ
-
-	case ABGE:
-		return ABLT
-	case ABLT:
-		return ABGE
-
-	case ABGT:
-		return ABLE
-	case ABLE:
-		return ABGT
-
-	case ABVC:
-		return ABVS
-	case ABVS:
-		return ABVC
-	}
-
-	return 0
-}
-
-func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
-	var q *obj.Prog
-	var r *obj.Prog
-	var b obj.As
-	var i int
-
-loop:
-	if p == nil {
-		return
-	}
-	a := p.As
-	if a == ABR {
-		q = p.Pcond
-		if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
-			p.Mark |= FOLL
-			(*last).Link = p
-			*last = p
-			p = p.Link
-			xfol(ctxt, p, last)
-			p = q
-			if p != nil && p.Mark&FOLL == 0 {
-				goto loop
-			}
-			return
-		}
-
-		if q != nil {
-			p.Mark |= FOLL
-			p = q
-			if p.Mark&FOLL == 0 {
-				goto loop
-			}
-		}
-	}
-
-	if p.Mark&FOLL != 0 {
-		i = 0
-		q = p
-		for ; i < 4; i, q = i+1, q.Link {
-			if q == *last || (q.Mark&NOSCHED != 0) {
-				break
-			}
-			b = 0 /* set */
-			a = q.As
-			if a == obj.ANOP {
-				i--
-				continue
-			}
-
-			if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
-				goto copy
-			}
-			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
-				continue
-			}
-			b = relinv(a)
-			if b == 0 {
-				continue
-			}
-
-		copy:
-			for {
-				r = ctxt.NewProg()
-				*r = *p
-				if r.Mark&FOLL == 0 {
-					fmt.Printf("can't happen 1\n")
-				}
-				r.Mark |= FOLL
-				if p != q {
-					p = p.Link
-					(*last).Link = r
-					*last = r
-					continue
-				}
-
-				(*last).Link = r
-				*last = r
-				if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
-					return
-				}
-				r.As = b
-				r.Pcond = p.Link
-				r.Link = p.Pcond
-				if r.Link.Mark&FOLL == 0 {
-					xfol(ctxt, r.Link, last)
-				}
-				if r.Pcond.Mark&FOLL == 0 {
-					fmt.Printf("can't happen 2\n")
-				}
-				return
-			}
-		}
-
-		a = ABR
-		q = ctxt.NewProg()
-		q.As = a
-		q.Lineno = p.Lineno
-		q.To.Type = obj.TYPE_BRANCH
-		q.To.Offset = p.Pc
-		q.Pcond = p
-		p = q
-	}
-
-	p.Mark |= FOLL
-	(*last).Link = p
-	*last = p
-	if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
-		if p.Mark&NOSCHED != 0 {
-			p = p.Link
-			goto loop
-		}
-
-		return
-	}
-
-	if p.Pcond != nil {
-		if a != ABL && p.Link != nil {
-			xfol(ctxt, p.Link, last)
-			p = p.Pcond
-			if p == nil || (p.Mark&FOLL != 0) {
-				return
-			}
-			goto loop
-		}
-	}
-
-	p = p.Link
-	goto loop
-}
-
-var Linkppc64 = obj.LinkArch{
-	Arch:       sys.ArchPPC64,
-	Preprocess: preprocess,
-	Assemble:   span9,
-	Follow:     follow,
-	Progedit:   progedit,
-}
-
-var Linkppc64le = obj.LinkArch{
-	Arch:       sys.ArchPPC64LE,
-	Preprocess: preprocess,
-	Assemble:   span9,
-	Follow:     follow,
-	Progedit:   progedit,
-}
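
The comments in the deleted stacksplit above describe three stack-bound checks, chosen by frame size: a plain SP < stackguard compare for small frames, SP-framesize < stackguard-StackSmall for frames up to StackBig, and a wraparound-safe form for anything larger, where StackGuard is added to both sides and a poisoned (StackPreempt) guard is tested explicitly first. The sketch below restates that decision in plain Go; the constants are illustrative stand-ins for the runtime's StackSmall/StackBig/StackGuard/StackPreempt values, not the real ones.

package main

import "fmt"

// Illustrative stand-ins for the stack constants referenced by the deleted
// stacksplit; the real values live in the obj package.
const (
	stackSmall   = 128
	stackBig     = 4096
	stackGuard   = 880
	stackPreempt = ^uint64(1313) // poison value meaning "preemption requested"
)

// needMorestack reports whether a prologue with the given frame size would
// branch to morestack for the given SP and stackguard, mirroring the three
// cases commented in stacksplit.
func needMorestack(sp, stackguard uint64, framesize int64) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: SP < stackguard
		return sp < stackguard
	case framesize <= stackBig:
		// large stack: SP-framesize < stackguard-StackSmall
		return sp-uint64(framesize) < stackguard-stackSmall
	default:
		// Preemption poisons stackguard with a huge value; that breaks the
		// arithmetic below, so it is tested for explicitly first.
		if stackguard == stackPreempt {
			return true
		}
		// Adding StackGuard to both sides keeps the left side positive even
		// when SP is slightly below stackguard.
		return sp-stackguard+stackGuard <= uint64(framesize)+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needMorestack(0x10000, 0x0ff00, 64))   // small-frame check
	fmt.Println(needMorestack(0x10000, 0x0ff00, 2048)) // large-frame check
}
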
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/reloctype_string.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/reloctype_string.go
deleted file mode 100644
index e10a4d2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/reloctype_string.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/reloctype_string.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/reloctype_string.go:1
-// Code generated by "stringer -type=RelocType"; DO NOT EDIT
-
-package obj
-
-import "fmt"
-
-const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLS"
-
-var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 129, 136, 144, 152, 160, 166, 172, 178, 188, 197, 208, 219, 229, 238, 248, 262, 276, 292, 306, 320, 331, 345, 360, 377, 395, 416, 426, 437, 450}
-
-func (i RelocType) String() string {
-	i -= 1
-	if i < 0 || i >= RelocType(len(_RelocType_index)-1) {
-		return fmt.Sprintf("RelocType(%d)", i+1)
-	}
-	return _RelocType_name[_RelocType_index[i]:_RelocType_index[i+1]]
-}
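
The deleted reloctype_string.go shows the usual stringer output: every name concatenated into one string constant plus an index slice of offsets, with the method subtracting one because RelocType values start at 1. The same technique, reduced to a hypothetical three-value type:

package main

import "fmt"

// Color is a hypothetical enum standing in for RelocType; like RelocType,
// its values start at 1, so String subtracts one before indexing.
type Color int

const (
	Red Color = iota + 1
	Green
	Blue
)

// One blob of names plus the offsets where each name starts and ends.
const colorName = "RedGreenBlue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	i := int(c) - 1
	if i < 0 || i >= len(colorIndex)-1 {
		return fmt.Sprintf("Color(%d)", int(c))
	}
	return colorName[colorIndex[i]:colorIndex[i+1]]
}

func main() {
	fmt.Println(Red, Green, Blue, Color(9)) // Red Green Blue Color(9)
}
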
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/a.out.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/a.out.go
deleted file mode 100644
index 46c8bea..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/a.out.go
+++ /dev/null
@@ -1,929 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/a.out.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/a.out.go:1
-// Based on cmd/internal/obj/ppc64/a.out.go.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import "bootstrap/cmd/internal/obj"
-
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p s390x
-
-const (
-	NSNAME = 8
-	NSYM   = 50
-	NREG   = 16 // number of general purpose registers
-	NFREG  = 16 // number of floating point registers
-)
-
-const (
-	// General purpose registers (GPRs).
-	REG_R0 = obj.RBaseS390X + iota
-	REG_R1
-	REG_R2
-	REG_R3
-	REG_R4
-	REG_R5
-	REG_R6
-	REG_R7
-	REG_R8
-	REG_R9
-	REG_R10
-	REG_R11
-	REG_R12
-	REG_R13
-	REG_R14
-	REG_R15
-
-	// Floating point registers (FPRs).
-	REG_F0
-	REG_F1
-	REG_F2
-	REG_F3
-	REG_F4
-	REG_F5
-	REG_F6
-	REG_F7
-	REG_F8
-	REG_F9
-	REG_F10
-	REG_F11
-	REG_F12
-	REG_F13
-	REG_F14
-	REG_F15
-
-	// Vector registers (VRs) - only available when the vector
-	// facility is installed.
-	// V0-V15 are aliases for F0-F15.
-	// We keep them in a separate space to make printing etc. easier
-	// If the code generator ever emits vector instructions it will
-	// need to take into account the aliasing.
-	REG_V0
-	REG_V1
-	REG_V2
-	REG_V3
-	REG_V4
-	REG_V5
-	REG_V6
-	REG_V7
-	REG_V8
-	REG_V9
-	REG_V10
-	REG_V11
-	REG_V12
-	REG_V13
-	REG_V14
-	REG_V15
-	REG_V16
-	REG_V17
-	REG_V18
-	REG_V19
-	REG_V20
-	REG_V21
-	REG_V22
-	REG_V23
-	REG_V24
-	REG_V25
-	REG_V26
-	REG_V27
-	REG_V28
-	REG_V29
-	REG_V30
-	REG_V31
-
-	// Access registers (ARs).
-	// The thread pointer is typically stored in the register pair
-	// AR0 and AR1.
-	REG_AR0
-	REG_AR1
-	REG_AR2
-	REG_AR3
-	REG_AR4
-	REG_AR5
-	REG_AR6
-	REG_AR7
-	REG_AR8
-	REG_AR9
-	REG_AR10
-	REG_AR11
-	REG_AR12
-	REG_AR13
-	REG_AR14
-	REG_AR15
-
-	REG_RESERVED // end of allocated registers
-
-	REGZERO = REG_R0  // set to zero
-	REGARG  = -1      // -1 disables passing the first argument in register
-	REGRT1  = REG_R3  // used during zeroing of the stack - not reserved
-	REGRT2  = REG_R4  // used during zeroing of the stack - not reserved
-	REGTMP  = REG_R10 // scratch register used in the assembler and linker
-	REGTMP2 = REG_R11 // scratch register used in the assembler and linker
-	REGCTXT = REG_R12 // context for closures
-	REGG    = REG_R13 // G
-	REG_LR  = REG_R14 // link register
-	REGSP   = REG_R15 // stack pointer
-)
-
-const (
-	BIG    = 32768 - 8
-	DISP12 = 4096
-	DISP16 = 65536
-	DISP20 = 1048576
-)
-
-const (
-	// mark flags
-	LABEL   = 1 << 0
-	LEAF    = 1 << 1
-	FLOAT   = 1 << 2
-	BRANCH  = 1 << 3
-	LOAD    = 1 << 4
-	FCMP    = 1 << 5
-	SYNC    = 1 << 6
-	LIST    = 1 << 7
-	FOLL    = 1 << 8
-	NOSCHED = 1 << 9
-)
-
-const ( // comments from func aclass in asmz.go
-	C_NONE     = iota
-	C_REG      // general-purpose register (64-bit)
-	C_FREG     // floating-point register (64-bit)
-	C_VREG     // vector register (128-bit)
-	C_AREG     // access register (32-bit)
-	C_ZCON     // constant == 0
-	C_SCON     // 0 <= constant <= 0x7fff (positive int16)
-	C_UCON     // constant & 0xffff == 0 (int16 or uint16)
-	C_ADDCON   // 0 > constant >= -0x8000 (negative int16)
-	C_ANDCON   // constant <= 0xffff
-	C_LCON     // constant (int32 or uint32)
-	C_DCON     // constant (int64 or uint64)
-	C_SACON    // computed address, 16-bit displacement, possibly SP-relative
-	C_LACON    // computed address, 32-bit displacement, possibly SP-relative
-	C_DACON    // computed address, 64-bit displacment?
-	C_SBRA     // short branch
-	C_LBRA     // long branch
-	C_SAUTO    // short auto
-	C_LAUTO    // long auto
-	C_ZOREG    // heap address, register-based, displacement == 0
-	C_SOREG    // heap address, register-based, int16 displacement
-	C_LOREG    // heap address, register-based, int32 displacement
-	C_TLS_LE   // TLS - local exec model (for executables)
-	C_TLS_IE   // TLS - initial exec model (for shared libraries loaded at program startup)
-	C_GOK      // general address
-	C_ADDR     // relocation for extern or static symbols (loads and stores)
-	C_SYMADDR  // relocation for extern or static symbols (address taking)
-	C_GOTADDR  // GOT slot for a symbol in -dynlink mode
-	C_TEXTSIZE // text size
-	C_ANY
-	C_NCLASS // must be the last
-)
-
-const (
-	// integer arithmetic
-	AADD = obj.ABaseS390X + obj.A_ARCHSPECIFIC + iota
-	AADDC
-	AADDE
-	AADDW
-	ADIVW
-	ADIVWU
-	ADIVD
-	ADIVDU
-	AMODW
-	AMODWU
-	AMODD
-	AMODDU
-	AMULLW
-	AMULLD
-	AMULHD
-	AMULHDU
-	ASUB
-	ASUBC
-	ASUBV
-	ASUBE
-	ASUBW
-	ANEG
-	ANEGW
-
-	// integer moves
-	AMOVWBR
-	AMOVB
-	AMOVBZ
-	AMOVH
-	AMOVHBR
-	AMOVHZ
-	AMOVW
-	AMOVWZ
-	AMOVD
-	AMOVDBR
-
-	// conditional moves
-	AMOVDEQ
-	AMOVDGE
-	AMOVDGT
-	AMOVDLE
-	AMOVDLT
-	AMOVDNE
-
-	// find leftmost one
-	AFLOGR
-
-	// integer bitwise
-	AAND
-	AANDW
-	AOR
-	AORW
-	AXOR
-	AXORW
-	ASLW
-	ASLD
-	ASRW
-	ASRAW
-	ASRD
-	ASRAD
-	ARLL
-	ARLLG
-
-	// floating point
-	AFABS
-	AFADD
-	AFADDS
-	AFCMPO
-	AFCMPU
-	ACEBR
-	AFDIV
-	AFDIVS
-	AFMADD
-	AFMADDS
-	AFMOVD
-	AFMOVS
-	AFMSUB
-	AFMSUBS
-	AFMUL
-	AFMULS
-	AFNABS
-	AFNEG
-	AFNEGS
-	AFNMADD
-	AFNMADDS
-	AFNMSUB
-	AFNMSUBS
-	ALEDBR
-	ALDEBR
-	AFSUB
-	AFSUBS
-	AFSQRT
-	AFSQRTS
-	AFIEBR
-	AFIDBR
-
-	// convert from int32/int64 to float/float64
-	ACEFBRA
-	ACDFBRA
-	ACEGBRA
-	ACDGBRA
-
-	// convert from float/float64 to int32/int64
-	ACFEBRA
-	ACFDBRA
-	ACGEBRA
-	ACGDBRA
-
-	// convert from uint32/uint64 to float/float64
-	ACELFBR
-	ACDLFBR
-	ACELGBR
-	ACDLGBR
-
-	// convert from float/float64 to uint32/uint64
-	ACLFEBR
-	ACLFDBR
-	ACLGEBR
-	ACLGDBR
-
-	// compare
-	ACMP
-	ACMPU
-	ACMPW
-	ACMPWU
-
-	// compare and swap
-	ACS
-	ACSG
-
-	// serialize
-	ASYNC
-
-	// branch
-	ABC
-	ABCL
-	ABEQ
-	ABGE
-	ABGT
-	ABLE
-	ABLT
-	ABLEU
-	ABLTU
-	ABNE
-	ABVC
-	ABVS
-	ASYSCALL
-
-	// compare and branch
-	ACMPBEQ
-	ACMPBGE
-	ACMPBGT
-	ACMPBLE
-	ACMPBLT
-	ACMPBNE
-	ACMPUBEQ
-	ACMPUBGE
-	ACMPUBGT
-	ACMPUBLE
-	ACMPUBLT
-	ACMPUBNE
-
-	// storage-and-storage
-	AMVC
-	ACLC
-	AXC
-	AOC
-	ANC
-
-	// load
-	AEXRL
-	ALARL
-	ALA
-	ALAY
-
-	// interlocked load and op
-	ALAA
-	ALAAG
-	ALAAL
-	ALAALG
-	ALAN
-	ALANG
-	ALAX
-	ALAXG
-	ALAO
-	ALAOG
-
-	// load/store multiple
-	ALMY
-	ALMG
-	ASTMY
-	ASTMG
-
-	// store clock
-	ASTCK
-	ASTCKC
-	ASTCKE
-	ASTCKF
-
-	// macros
-	ACLEAR
-
-	// vector
-	AVA
-	AVAB
-	AVAH
-	AVAF
-	AVAG
-	AVAQ
-	AVACC
-	AVACCB
-	AVACCH
-	AVACCF
-	AVACCG
-	AVACCQ
-	AVAC
-	AVACQ
-	AVACCC
-	AVACCCQ
-	AVN
-	AVNC
-	AVAVG
-	AVAVGB
-	AVAVGH
-	AVAVGF
-	AVAVGG
-	AVAVGL
-	AVAVGLB
-	AVAVGLH
-	AVAVGLF
-	AVAVGLG
-	AVCKSM
-	AVCEQ
-	AVCEQB
-	AVCEQH
-	AVCEQF
-	AVCEQG
-	AVCEQBS
-	AVCEQHS
-	AVCEQFS
-	AVCEQGS
-	AVCH
-	AVCHB
-	AVCHH
-	AVCHF
-	AVCHG
-	AVCHBS
-	AVCHHS
-	AVCHFS
-	AVCHGS
-	AVCHL
-	AVCHLB
-	AVCHLH
-	AVCHLF
-	AVCHLG
-	AVCHLBS
-	AVCHLHS
-	AVCHLFS
-	AVCHLGS
-	AVCLZ
-	AVCLZB
-	AVCLZH
-	AVCLZF
-	AVCLZG
-	AVCTZ
-	AVCTZB
-	AVCTZH
-	AVCTZF
-	AVCTZG
-	AVEC
-	AVECB
-	AVECH
-	AVECF
-	AVECG
-	AVECL
-	AVECLB
-	AVECLH
-	AVECLF
-	AVECLG
-	AVERIM
-	AVERIMB
-	AVERIMH
-	AVERIMF
-	AVERIMG
-	AVERLL
-	AVERLLB
-	AVERLLH
-	AVERLLF
-	AVERLLG
-	AVERLLV
-	AVERLLVB
-	AVERLLVH
-	AVERLLVF
-	AVERLLVG
-	AVESLV
-	AVESLVB
-	AVESLVH
-	AVESLVF
-	AVESLVG
-	AVESL
-	AVESLB
-	AVESLH
-	AVESLF
-	AVESLG
-	AVESRA
-	AVESRAB
-	AVESRAH
-	AVESRAF
-	AVESRAG
-	AVESRAV
-	AVESRAVB
-	AVESRAVH
-	AVESRAVF
-	AVESRAVG
-	AVESRL
-	AVESRLB
-	AVESRLH
-	AVESRLF
-	AVESRLG
-	AVESRLV
-	AVESRLVB
-	AVESRLVH
-	AVESRLVF
-	AVESRLVG
-	AVX
-	AVFAE
-	AVFAEB
-	AVFAEH
-	AVFAEF
-	AVFAEBS
-	AVFAEHS
-	AVFAEFS
-	AVFAEZB
-	AVFAEZH
-	AVFAEZF
-	AVFAEZBS
-	AVFAEZHS
-	AVFAEZFS
-	AVFEE
-	AVFEEB
-	AVFEEH
-	AVFEEF
-	AVFEEBS
-	AVFEEHS
-	AVFEEFS
-	AVFEEZB
-	AVFEEZH
-	AVFEEZF
-	AVFEEZBS
-	AVFEEZHS
-	AVFEEZFS
-	AVFENE
-	AVFENEB
-	AVFENEH
-	AVFENEF
-	AVFENEBS
-	AVFENEHS
-	AVFENEFS
-	AVFENEZB
-	AVFENEZH
-	AVFENEZF
-	AVFENEZBS
-	AVFENEZHS
-	AVFENEZFS
-	AVFA
-	AVFADB
-	AWFADB
-	AWFK
-	AWFKDB
-	AVFCE
-	AVFCEDB
-	AVFCEDBS
-	AWFCEDB
-	AWFCEDBS
-	AVFCH
-	AVFCHDB
-	AVFCHDBS
-	AWFCHDB
-	AWFCHDBS
-	AVFCHE
-	AVFCHEDB
-	AVFCHEDBS
-	AWFCHEDB
-	AWFCHEDBS
-	AWFC
-	AWFCDB
-	AVCDG
-	AVCDGB
-	AWCDGB
-	AVCDLG
-	AVCDLGB
-	AWCDLGB
-	AVCGD
-	AVCGDB
-	AWCGDB
-	AVCLGD
-	AVCLGDB
-	AWCLGDB
-	AVFD
-	AVFDDB
-	AWFDDB
-	AVLDE
-	AVLDEB
-	AWLDEB
-	AVLED
-	AVLEDB
-	AWLEDB
-	AVFM
-	AVFMDB
-	AWFMDB
-	AVFMA
-	AVFMADB
-	AWFMADB
-	AVFMS
-	AVFMSDB
-	AWFMSDB
-	AVFPSO
-	AVFPSODB
-	AWFPSODB
-	AVFLCDB
-	AWFLCDB
-	AVFLNDB
-	AWFLNDB
-	AVFLPDB
-	AWFLPDB
-	AVFSQ
-	AVFSQDB
-	AWFSQDB
-	AVFS
-	AVFSDB
-	AWFSDB
-	AVFTCI
-	AVFTCIDB
-	AWFTCIDB
-	AVGFM
-	AVGFMB
-	AVGFMH
-	AVGFMF
-	AVGFMG
-	AVGFMA
-	AVGFMAB
-	AVGFMAH
-	AVGFMAF
-	AVGFMAG
-	AVGEF
-	AVGEG
-	AVGBM
-	AVZERO
-	AVONE
-	AVGM
-	AVGMB
-	AVGMH
-	AVGMF
-	AVGMG
-	AVISTR
-	AVISTRB
-	AVISTRH
-	AVISTRF
-	AVISTRBS
-	AVISTRHS
-	AVISTRFS
-	AVL
-	AVLR
-	AVLREP
-	AVLREPB
-	AVLREPH
-	AVLREPF
-	AVLREPG
-	AVLC
-	AVLCB
-	AVLCH
-	AVLCF
-	AVLCG
-	AVLEH
-	AVLEF
-	AVLEG
-	AVLEB
-	AVLEIH
-	AVLEIF
-	AVLEIG
-	AVLEIB
-	AVFI
-	AVFIDB
-	AWFIDB
-	AVLGV
-	AVLGVB
-	AVLGVH
-	AVLGVF
-	AVLGVG
-	AVLLEZ
-	AVLLEZB
-	AVLLEZH
-	AVLLEZF
-	AVLLEZG
-	AVLM
-	AVLP
-	AVLPB
-	AVLPH
-	AVLPF
-	AVLPG
-	AVLBB
-	AVLVG
-	AVLVGB
-	AVLVGH
-	AVLVGF
-	AVLVGG
-	AVLVGP
-	AVLL
-	AVMX
-	AVMXB
-	AVMXH
-	AVMXF
-	AVMXG
-	AVMXL
-	AVMXLB
-	AVMXLH
-	AVMXLF
-	AVMXLG
-	AVMRH
-	AVMRHB
-	AVMRHH
-	AVMRHF
-	AVMRHG
-	AVMRL
-	AVMRLB
-	AVMRLH
-	AVMRLF
-	AVMRLG
-	AVMN
-	AVMNB
-	AVMNH
-	AVMNF
-	AVMNG
-	AVMNL
-	AVMNLB
-	AVMNLH
-	AVMNLF
-	AVMNLG
-	AVMAE
-	AVMAEB
-	AVMAEH
-	AVMAEF
-	AVMAH
-	AVMAHB
-	AVMAHH
-	AVMAHF
-	AVMALE
-	AVMALEB
-	AVMALEH
-	AVMALEF
-	AVMALH
-	AVMALHB
-	AVMALHH
-	AVMALHF
-	AVMALO
-	AVMALOB
-	AVMALOH
-	AVMALOF
-	AVMAL
-	AVMALB
-	AVMALHW
-	AVMALF
-	AVMAO
-	AVMAOB
-	AVMAOH
-	AVMAOF
-	AVME
-	AVMEB
-	AVMEH
-	AVMEF
-	AVMH
-	AVMHB
-	AVMHH
-	AVMHF
-	AVMLE
-	AVMLEB
-	AVMLEH
-	AVMLEF
-	AVMLH
-	AVMLHB
-	AVMLHH
-	AVMLHF
-	AVMLO
-	AVMLOB
-	AVMLOH
-	AVMLOF
-	AVML
-	AVMLB
-	AVMLHW
-	AVMLF
-	AVMO
-	AVMOB
-	AVMOH
-	AVMOF
-	AVNO
-	AVNOT
-	AVO
-	AVPK
-	AVPKH
-	AVPKF
-	AVPKG
-	AVPKLS
-	AVPKLSH
-	AVPKLSF
-	AVPKLSG
-	AVPKLSHS
-	AVPKLSFS
-	AVPKLSGS
-	AVPKS
-	AVPKSH
-	AVPKSF
-	AVPKSG
-	AVPKSHS
-	AVPKSFS
-	AVPKSGS
-	AVPERM
-	AVPDI
-	AVPOPCT
-	AVREP
-	AVREPB
-	AVREPH
-	AVREPF
-	AVREPG
-	AVREPI
-	AVREPIB
-	AVREPIH
-	AVREPIF
-	AVREPIG
-	AVSCEF
-	AVSCEG
-	AVSEL
-	AVSL
-	AVSLB
-	AVSLDB
-	AVSRA
-	AVSRAB
-	AVSRL
-	AVSRLB
-	AVSEG
-	AVSEGB
-	AVSEGH
-	AVSEGF
-	AVST
-	AVSTEH
-	AVSTEF
-	AVSTEG
-	AVSTEB
-	AVSTM
-	AVSTL
-	AVSTRC
-	AVSTRCB
-	AVSTRCH
-	AVSTRCF
-	AVSTRCBS
-	AVSTRCHS
-	AVSTRCFS
-	AVSTRCZB
-	AVSTRCZH
-	AVSTRCZF
-	AVSTRCZBS
-	AVSTRCZHS
-	AVSTRCZFS
-	AVS
-	AVSB
-	AVSH
-	AVSF
-	AVSG
-	AVSQ
-	AVSCBI
-	AVSCBIB
-	AVSCBIH
-	AVSCBIF
-	AVSCBIG
-	AVSCBIQ
-	AVSBCBI
-	AVSBCBIQ
-	AVSBI
-	AVSBIQ
-	AVSUMG
-	AVSUMGH
-	AVSUMGF
-	AVSUMQ
-	AVSUMQF
-	AVSUMQG
-	AVSUM
-	AVSUMB
-	AVSUMH
-	AVTM
-	AVUPH
-	AVUPHB
-	AVUPHH
-	AVUPHF
-	AVUPLH
-	AVUPLHB
-	AVUPLHH
-	AVUPLHF
-	AVUPLL
-	AVUPLLB
-	AVUPLLH
-	AVUPLLF
-	AVUPL
-	AVUPLB
-	AVUPLHW
-	AVUPLF
-
-	// binary
-	ABYTE
-	AWORD
-	ADWORD
-
-	// end marker
-	ALAST
-
-	// aliases
-	ABR = obj.AJMP
-	ABL = obj.ACALL
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/anames.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/anames.go
deleted file mode 100644
index 1ddaad3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/anames.go
+++ /dev/null
@@ -1,678 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/anames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/anames.go:1
-// Generated by stringer -i a.out.go -o anames.go -p s390x
-// Do not edit.
-
-package s390x
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-	obj.A_ARCHSPECIFIC: "ADD",
-	"ADDC",
-	"ADDE",
-	"ADDW",
-	"DIVW",
-	"DIVWU",
-	"DIVD",
-	"DIVDU",
-	"MODW",
-	"MODWU",
-	"MODD",
-	"MODDU",
-	"MULLW",
-	"MULLD",
-	"MULHD",
-	"MULHDU",
-	"SUB",
-	"SUBC",
-	"SUBV",
-	"SUBE",
-	"SUBW",
-	"NEG",
-	"NEGW",
-	"MOVWBR",
-	"MOVB",
-	"MOVBZ",
-	"MOVH",
-	"MOVHBR",
-	"MOVHZ",
-	"MOVW",
-	"MOVWZ",
-	"MOVD",
-	"MOVDBR",
-	"MOVDEQ",
-	"MOVDGE",
-	"MOVDGT",
-	"MOVDLE",
-	"MOVDLT",
-	"MOVDNE",
-	"FLOGR",
-	"AND",
-	"ANDW",
-	"OR",
-	"ORW",
-	"XOR",
-	"XORW",
-	"SLW",
-	"SLD",
-	"SRW",
-	"SRAW",
-	"SRD",
-	"SRAD",
-	"RLL",
-	"RLLG",
-	"FABS",
-	"FADD",
-	"FADDS",
-	"FCMPO",
-	"FCMPU",
-	"CEBR",
-	"FDIV",
-	"FDIVS",
-	"FMADD",
-	"FMADDS",
-	"FMOVD",
-	"FMOVS",
-	"FMSUB",
-	"FMSUBS",
-	"FMUL",
-	"FMULS",
-	"FNABS",
-	"FNEG",
-	"FNEGS",
-	"FNMADD",
-	"FNMADDS",
-	"FNMSUB",
-	"FNMSUBS",
-	"LEDBR",
-	"LDEBR",
-	"FSUB",
-	"FSUBS",
-	"FSQRT",
-	"FSQRTS",
-	"FIEBR",
-	"FIDBR",
-	"CEFBRA",
-	"CDFBRA",
-	"CEGBRA",
-	"CDGBRA",
-	"CFEBRA",
-	"CFDBRA",
-	"CGEBRA",
-	"CGDBRA",
-	"CELFBR",
-	"CDLFBR",
-	"CELGBR",
-	"CDLGBR",
-	"CLFEBR",
-	"CLFDBR",
-	"CLGEBR",
-	"CLGDBR",
-	"CMP",
-	"CMPU",
-	"CMPW",
-	"CMPWU",
-	"CS",
-	"CSG",
-	"SYNC",
-	"BC",
-	"BCL",
-	"BEQ",
-	"BGE",
-	"BGT",
-	"BLE",
-	"BLT",
-	"BLEU",
-	"BLTU",
-	"BNE",
-	"BVC",
-	"BVS",
-	"SYSCALL",
-	"CMPBEQ",
-	"CMPBGE",
-	"CMPBGT",
-	"CMPBLE",
-	"CMPBLT",
-	"CMPBNE",
-	"CMPUBEQ",
-	"CMPUBGE",
-	"CMPUBGT",
-	"CMPUBLE",
-	"CMPUBLT",
-	"CMPUBNE",
-	"MVC",
-	"CLC",
-	"XC",
-	"OC",
-	"NC",
-	"EXRL",
-	"LARL",
-	"LA",
-	"LAY",
-	"LAA",
-	"LAAG",
-	"LAAL",
-	"LAALG",
-	"LAN",
-	"LANG",
-	"LAX",
-	"LAXG",
-	"LAO",
-	"LAOG",
-	"LMY",
-	"LMG",
-	"STMY",
-	"STMG",
-	"STCK",
-	"STCKC",
-	"STCKE",
-	"STCKF",
-	"CLEAR",
-	"VA",
-	"VAB",
-	"VAH",
-	"VAF",
-	"VAG",
-	"VAQ",
-	"VACC",
-	"VACCB",
-	"VACCH",
-	"VACCF",
-	"VACCG",
-	"VACCQ",
-	"VAC",
-	"VACQ",
-	"VACCC",
-	"VACCCQ",
-	"VN",
-	"VNC",
-	"VAVG",
-	"VAVGB",
-	"VAVGH",
-	"VAVGF",
-	"VAVGG",
-	"VAVGL",
-	"VAVGLB",
-	"VAVGLH",
-	"VAVGLF",
-	"VAVGLG",
-	"VCKSM",
-	"VCEQ",
-	"VCEQB",
-	"VCEQH",
-	"VCEQF",
-	"VCEQG",
-	"VCEQBS",
-	"VCEQHS",
-	"VCEQFS",
-	"VCEQGS",
-	"VCH",
-	"VCHB",
-	"VCHH",
-	"VCHF",
-	"VCHG",
-	"VCHBS",
-	"VCHHS",
-	"VCHFS",
-	"VCHGS",
-	"VCHL",
-	"VCHLB",
-	"VCHLH",
-	"VCHLF",
-	"VCHLG",
-	"VCHLBS",
-	"VCHLHS",
-	"VCHLFS",
-	"VCHLGS",
-	"VCLZ",
-	"VCLZB",
-	"VCLZH",
-	"VCLZF",
-	"VCLZG",
-	"VCTZ",
-	"VCTZB",
-	"VCTZH",
-	"VCTZF",
-	"VCTZG",
-	"VEC",
-	"VECB",
-	"VECH",
-	"VECF",
-	"VECG",
-	"VECL",
-	"VECLB",
-	"VECLH",
-	"VECLF",
-	"VECLG",
-	"VERIM",
-	"VERIMB",
-	"VERIMH",
-	"VERIMF",
-	"VERIMG",
-	"VERLL",
-	"VERLLB",
-	"VERLLH",
-	"VERLLF",
-	"VERLLG",
-	"VERLLV",
-	"VERLLVB",
-	"VERLLVH",
-	"VERLLVF",
-	"VERLLVG",
-	"VESLV",
-	"VESLVB",
-	"VESLVH",
-	"VESLVF",
-	"VESLVG",
-	"VESL",
-	"VESLB",
-	"VESLH",
-	"VESLF",
-	"VESLG",
-	"VESRA",
-	"VESRAB",
-	"VESRAH",
-	"VESRAF",
-	"VESRAG",
-	"VESRAV",
-	"VESRAVB",
-	"VESRAVH",
-	"VESRAVF",
-	"VESRAVG",
-	"VESRL",
-	"VESRLB",
-	"VESRLH",
-	"VESRLF",
-	"VESRLG",
-	"VESRLV",
-	"VESRLVB",
-	"VESRLVH",
-	"VESRLVF",
-	"VESRLVG",
-	"VX",
-	"VFAE",
-	"VFAEB",
-	"VFAEH",
-	"VFAEF",
-	"VFAEBS",
-	"VFAEHS",
-	"VFAEFS",
-	"VFAEZB",
-	"VFAEZH",
-	"VFAEZF",
-	"VFAEZBS",
-	"VFAEZHS",
-	"VFAEZFS",
-	"VFEE",
-	"VFEEB",
-	"VFEEH",
-	"VFEEF",
-	"VFEEBS",
-	"VFEEHS",
-	"VFEEFS",
-	"VFEEZB",
-	"VFEEZH",
-	"VFEEZF",
-	"VFEEZBS",
-	"VFEEZHS",
-	"VFEEZFS",
-	"VFENE",
-	"VFENEB",
-	"VFENEH",
-	"VFENEF",
-	"VFENEBS",
-	"VFENEHS",
-	"VFENEFS",
-	"VFENEZB",
-	"VFENEZH",
-	"VFENEZF",
-	"VFENEZBS",
-	"VFENEZHS",
-	"VFENEZFS",
-	"VFA",
-	"VFADB",
-	"WFADB",
-	"WFK",
-	"WFKDB",
-	"VFCE",
-	"VFCEDB",
-	"VFCEDBS",
-	"WFCEDB",
-	"WFCEDBS",
-	"VFCH",
-	"VFCHDB",
-	"VFCHDBS",
-	"WFCHDB",
-	"WFCHDBS",
-	"VFCHE",
-	"VFCHEDB",
-	"VFCHEDBS",
-	"WFCHEDB",
-	"WFCHEDBS",
-	"WFC",
-	"WFCDB",
-	"VCDG",
-	"VCDGB",
-	"WCDGB",
-	"VCDLG",
-	"VCDLGB",
-	"WCDLGB",
-	"VCGD",
-	"VCGDB",
-	"WCGDB",
-	"VCLGD",
-	"VCLGDB",
-	"WCLGDB",
-	"VFD",
-	"VFDDB",
-	"WFDDB",
-	"VLDE",
-	"VLDEB",
-	"WLDEB",
-	"VLED",
-	"VLEDB",
-	"WLEDB",
-	"VFM",
-	"VFMDB",
-	"WFMDB",
-	"VFMA",
-	"VFMADB",
-	"WFMADB",
-	"VFMS",
-	"VFMSDB",
-	"WFMSDB",
-	"VFPSO",
-	"VFPSODB",
-	"WFPSODB",
-	"VFLCDB",
-	"WFLCDB",
-	"VFLNDB",
-	"WFLNDB",
-	"VFLPDB",
-	"WFLPDB",
-	"VFSQ",
-	"VFSQDB",
-	"WFSQDB",
-	"VFS",
-	"VFSDB",
-	"WFSDB",
-	"VFTCI",
-	"VFTCIDB",
-	"WFTCIDB",
-	"VGFM",
-	"VGFMB",
-	"VGFMH",
-	"VGFMF",
-	"VGFMG",
-	"VGFMA",
-	"VGFMAB",
-	"VGFMAH",
-	"VGFMAF",
-	"VGFMAG",
-	"VGEF",
-	"VGEG",
-	"VGBM",
-	"VZERO",
-	"VONE",
-	"VGM",
-	"VGMB",
-	"VGMH",
-	"VGMF",
-	"VGMG",
-	"VISTR",
-	"VISTRB",
-	"VISTRH",
-	"VISTRF",
-	"VISTRBS",
-	"VISTRHS",
-	"VISTRFS",
-	"VL",
-	"VLR",
-	"VLREP",
-	"VLREPB",
-	"VLREPH",
-	"VLREPF",
-	"VLREPG",
-	"VLC",
-	"VLCB",
-	"VLCH",
-	"VLCF",
-	"VLCG",
-	"VLEH",
-	"VLEF",
-	"VLEG",
-	"VLEB",
-	"VLEIH",
-	"VLEIF",
-	"VLEIG",
-	"VLEIB",
-	"VFI",
-	"VFIDB",
-	"WFIDB",
-	"VLGV",
-	"VLGVB",
-	"VLGVH",
-	"VLGVF",
-	"VLGVG",
-	"VLLEZ",
-	"VLLEZB",
-	"VLLEZH",
-	"VLLEZF",
-	"VLLEZG",
-	"VLM",
-	"VLP",
-	"VLPB",
-	"VLPH",
-	"VLPF",
-	"VLPG",
-	"VLBB",
-	"VLVG",
-	"VLVGB",
-	"VLVGH",
-	"VLVGF",
-	"VLVGG",
-	"VLVGP",
-	"VLL",
-	"VMX",
-	"VMXB",
-	"VMXH",
-	"VMXF",
-	"VMXG",
-	"VMXL",
-	"VMXLB",
-	"VMXLH",
-	"VMXLF",
-	"VMXLG",
-	"VMRH",
-	"VMRHB",
-	"VMRHH",
-	"VMRHF",
-	"VMRHG",
-	"VMRL",
-	"VMRLB",
-	"VMRLH",
-	"VMRLF",
-	"VMRLG",
-	"VMN",
-	"VMNB",
-	"VMNH",
-	"VMNF",
-	"VMNG",
-	"VMNL",
-	"VMNLB",
-	"VMNLH",
-	"VMNLF",
-	"VMNLG",
-	"VMAE",
-	"VMAEB",
-	"VMAEH",
-	"VMAEF",
-	"VMAH",
-	"VMAHB",
-	"VMAHH",
-	"VMAHF",
-	"VMALE",
-	"VMALEB",
-	"VMALEH",
-	"VMALEF",
-	"VMALH",
-	"VMALHB",
-	"VMALHH",
-	"VMALHF",
-	"VMALO",
-	"VMALOB",
-	"VMALOH",
-	"VMALOF",
-	"VMAL",
-	"VMALB",
-	"VMALHW",
-	"VMALF",
-	"VMAO",
-	"VMAOB",
-	"VMAOH",
-	"VMAOF",
-	"VME",
-	"VMEB",
-	"VMEH",
-	"VMEF",
-	"VMH",
-	"VMHB",
-	"VMHH",
-	"VMHF",
-	"VMLE",
-	"VMLEB",
-	"VMLEH",
-	"VMLEF",
-	"VMLH",
-	"VMLHB",
-	"VMLHH",
-	"VMLHF",
-	"VMLO",
-	"VMLOB",
-	"VMLOH",
-	"VMLOF",
-	"VML",
-	"VMLB",
-	"VMLHW",
-	"VMLF",
-	"VMO",
-	"VMOB",
-	"VMOH",
-	"VMOF",
-	"VNO",
-	"VNOT",
-	"VO",
-	"VPK",
-	"VPKH",
-	"VPKF",
-	"VPKG",
-	"VPKLS",
-	"VPKLSH",
-	"VPKLSF",
-	"VPKLSG",
-	"VPKLSHS",
-	"VPKLSFS",
-	"VPKLSGS",
-	"VPKS",
-	"VPKSH",
-	"VPKSF",
-	"VPKSG",
-	"VPKSHS",
-	"VPKSFS",
-	"VPKSGS",
-	"VPERM",
-	"VPDI",
-	"VPOPCT",
-	"VREP",
-	"VREPB",
-	"VREPH",
-	"VREPF",
-	"VREPG",
-	"VREPI",
-	"VREPIB",
-	"VREPIH",
-	"VREPIF",
-	"VREPIG",
-	"VSCEF",
-	"VSCEG",
-	"VSEL",
-	"VSL",
-	"VSLB",
-	"VSLDB",
-	"VSRA",
-	"VSRAB",
-	"VSRL",
-	"VSRLB",
-	"VSEG",
-	"VSEGB",
-	"VSEGH",
-	"VSEGF",
-	"VST",
-	"VSTEH",
-	"VSTEF",
-	"VSTEG",
-	"VSTEB",
-	"VSTM",
-	"VSTL",
-	"VSTRC",
-	"VSTRCB",
-	"VSTRCH",
-	"VSTRCF",
-	"VSTRCBS",
-	"VSTRCHS",
-	"VSTRCFS",
-	"VSTRCZB",
-	"VSTRCZH",
-	"VSTRCZF",
-	"VSTRCZBS",
-	"VSTRCZHS",
-	"VSTRCZFS",
-	"VS",
-	"VSB",
-	"VSH",
-	"VSF",
-	"VSG",
-	"VSQ",
-	"VSCBI",
-	"VSCBIB",
-	"VSCBIH",
-	"VSCBIF",
-	"VSCBIG",
-	"VSCBIQ",
-	"VSBCBI",
-	"VSBCBIQ",
-	"VSBI",
-	"VSBIQ",
-	"VSUMG",
-	"VSUMGH",
-	"VSUMGF",
-	"VSUMQ",
-	"VSUMQF",
-	"VSUMQG",
-	"VSUM",
-	"VSUMB",
-	"VSUMH",
-	"VTM",
-	"VUPH",
-	"VUPHB",
-	"VUPHH",
-	"VUPHF",
-	"VUPLH",
-	"VUPLHB",
-	"VUPLHH",
-	"VUPLHF",
-	"VUPLL",
-	"VUPLLB",
-	"VUPLLH",
-	"VUPLLF",
-	"VUPL",
-	"VUPLB",
-	"VUPLHW",
-	"VUPLF",
-	"BYTE",
-	"WORD",
-	"DWORD",
-	"LAST",
-}
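
The a.out.go and anames.go copies deleted above are a matched pair: the mnemonic constants are declared in one iota block above an architecture-specific base, and Anames lists the matching strings in the same order, its first entry keyed at obj.A_ARCHSPECIFIC so the architecture-specific names start at the same offset as the constants. A compressed sketch of that pairing, with a hypothetical base and only three opcodes:

package main

import "fmt"

type As int

// aBase is a hypothetical architecture-specific opcode base, standing in for
// the combined obj base used by the deleted files.
const aBase As = 2000

const (
	AADD As = aBase + iota // iota is 0 here, so AADD-aBase indexes anames[0]
	ASUB
	AMOVD
)

// anames parallels the constant block above, entry for entry.
var anames = []string{"ADD", "SUB", "MOVD"}

// aconv maps an opcode back to its mnemonic, with a numeric fallback.
func aconv(a As) string {
	if a >= aBase && int(a-aBase) < len(anames) {
		return anames[a-aBase]
	}
	return fmt.Sprintf("A???%d", int(a))
}

func main() {
	fmt.Println(aconv(AMOVD), aconv(As(9999))) // MOVD A???9999
}
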
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/anamesz.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/anamesz.go
deleted file mode 100644
index 1db2fda..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/anamesz.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/anamesz.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/anamesz.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-var cnamesz = []string{
-	"NONE",
-	"REG",
-	"FREG",
-	"VREG",
-	"AREG",
-	"ZCON",
-	"SCON",
-	"UCON",
-	"ADDCON",
-	"ANDCON",
-	"LCON",
-	"DCON",
-	"SACON",
-	"LACON",
-	"DACON",
-	"SBRA",
-	"LBRA",
-	"SAUTO",
-	"LAUTO",
-	"ZOREG",
-	"SOREG",
-	"LOREG",
-	"TLS_LE",
-	"TLS_IE",
-	"GOK",
-	"ADDR",
-	"SYMADDR",
-	"GOTADDR",
-	"TEXTSIZE",
-	"ANY",
-	"NCLASS",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/asmz.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/asmz.go
deleted file mode 100644
index c7b284b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/asmz.go
+++ /dev/null
@@ -1,4768 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/asmz.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/asmz.go:1
-// Based on cmd/internal/obj/ppc64/asm9.go.
-//
-//    Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//    Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//    Portions Copyright © 1997-1999 Vita Nuova Limited
-//    Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//    Portions Copyright © 2004,2006 Bruce Ellis
-//    Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//    Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//    Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"log"
-	"math"
-	"sort"
-)
-
-// instruction layout.
-const (
-	funcAlign = 16
-)
-
-type Optab struct {
-	as    obj.As // opcode
-	a1    uint8  // From
-	a2    uint8  // Reg
-	a3    uint8  // From3
-	a4    uint8  // To
-	type_ int8
-	param int16 // REGSP for auto variables
-}
-
-var optab = []Optab{
-	// instruction,  From,   Reg,    From3,  To, type, param
-	Optab{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0},
-	Optab{obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0},
-
-	// move register
-	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
-	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
-	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
-	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
-	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
-	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 1, 0},
-	Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
-
-	// load constant
-	Optab{AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
-	Optab{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
-	Optab{AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
-	Optab{AMOVD, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
-	Optab{AMOVW, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
-	Optab{AMOVWZ, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
-	Optab{AMOVB, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
-	Optab{AMOVBZ, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
-
-	// store constant
-	Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
-	Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
-	Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
-	Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
-	Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
-	Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
-	Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
-	Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
-	Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
-	Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
-	Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
-	Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
-	Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
-	Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
-	Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
-
-	// store
-	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
-	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
-	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
-	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
-	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
-
-	// load
-	Optab{AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVDBR, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVHBR, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
-	Optab{AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVDBR, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVHBR, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
-	Optab{AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
-	Optab{AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
-	Optab{AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
-	Optab{AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
-	Optab{AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
-
-	// interlocked load and op
-	Optab{ALAAG, C_REG, C_REG, C_NONE, C_LOREG, 99, 0},
-
-	// integer arithmetic
-	Optab{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 0},
-	Optab{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
-	Optab{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
-	Optab{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
-	Optab{AADD, C_LOREG, C_NONE, C_NONE, C_REG, 12, 0},
-	Optab{AADD, C_LAUTO, C_NONE, C_NONE, C_REG, 12, REGSP},
-	Optab{ASUB, C_LCON, C_REG, C_NONE, C_REG, 21, 0},
-	Optab{ASUB, C_LCON, C_NONE, C_NONE, C_REG, 21, 0},
-	Optab{ASUB, C_LOREG, C_NONE, C_NONE, C_REG, 12, 0},
-	Optab{ASUB, C_LAUTO, C_NONE, C_NONE, C_REG, 12, REGSP},
-	Optab{AMULHD, C_REG, C_NONE, C_NONE, C_REG, 4, 0},
-	Optab{AMULHD, C_REG, C_REG, C_NONE, C_REG, 4, 0},
-	Optab{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 0},
-	Optab{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
-	Optab{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 0},
-	Optab{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 0},
-	Optab{ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 0},
-	Optab{ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 0},
-
-	// integer logical
-	Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 0},
-	Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
-	Optab{AAND, C_LCON, C_NONE, C_NONE, C_REG, 23, 0},
-	Optab{AAND, C_LOREG, C_NONE, C_NONE, C_REG, 12, 0},
-	Optab{AAND, C_LAUTO, C_NONE, C_NONE, C_REG, 12, REGSP},
-	Optab{AANDW, C_REG, C_REG, C_NONE, C_REG, 6, 0},
-	Optab{AANDW, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
-	Optab{AANDW, C_LCON, C_NONE, C_NONE, C_REG, 24, 0},
-	Optab{AANDW, C_LOREG, C_NONE, C_NONE, C_REG, 12, 0},
-	Optab{AANDW, C_LAUTO, C_NONE, C_NONE, C_REG, 12, REGSP},
-	Optab{ASLD, C_REG, C_NONE, C_NONE, C_REG, 7, 0},
-	Optab{ASLD, C_REG, C_REG, C_NONE, C_REG, 7, 0},
-	Optab{ASLD, C_SCON, C_REG, C_NONE, C_REG, 7, 0},
-	Optab{ASLD, C_SCON, C_NONE, C_NONE, C_REG, 7, 0},
-
-	// compare and swap
-	Optab{ACSG, C_REG, C_REG, C_NONE, C_SOREG, 79, 0},
-
-	// floating point
-	Optab{AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 0},
-	Optab{AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 0},
-	Optab{AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 0},
-	Optab{AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 0},
-	Optab{AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 0},
-	Optab{AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 0},
-	Optab{AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 0},
-	Optab{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, REGSP},
-	Optab{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 0},
-	Optab{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 0},
-	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
-	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 0},
-	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 0},
-	Optab{AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 67, 0},
-	Optab{ACEFBRA, C_REG, C_NONE, C_NONE, C_FREG, 82, 0},
-	Optab{ACFEBRA, C_FREG, C_NONE, C_NONE, C_REG, 83, 0},
-	Optab{AFIEBR, C_SCON, C_FREG, C_NONE, C_FREG, 48, 0},
-
-	// load symbol address (plus offset)
-	Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_REG, 19, 0},
-	Optab{AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 93, 0},
-	Optab{AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 94, 0},
-	Optab{AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 95, 0},
-
-	// system call
-	Optab{ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 0},
-	Optab{ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 0},
-
-	// branch
-	Optab{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 0},
-	Optab{ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 0},
-	Optab{ABC, C_SCON, C_REG, C_NONE, C_LBRA, 16, 0},
-	Optab{ABR, C_NONE, C_NONE, C_NONE, C_REG, 18, 0},
-	Optab{ABR, C_REG, C_NONE, C_NONE, C_REG, 18, 0},
-	Optab{ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 0},
-	Optab{ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 0},
-	Optab{ACMPBEQ, C_REG, C_REG, C_NONE, C_SBRA, 89, 0},
-	Optab{ACMPBEQ, C_REG, C_NONE, C_ADDCON, C_SBRA, 90, 0},
-	Optab{ACMPBEQ, C_REG, C_NONE, C_SCON, C_SBRA, 90, 0},
-	Optab{ACMPUBEQ, C_REG, C_REG, C_NONE, C_SBRA, 89, 0},
-	Optab{ACMPUBEQ, C_REG, C_NONE, C_ANDCON, C_SBRA, 90, 0},
-
-	// move on condition
-	Optab{AMOVDEQ, C_REG, C_NONE, C_NONE, C_REG, 17, 0},
-
-	// find leftmost one
-	Optab{AFLOGR, C_REG, C_NONE, C_NONE, C_REG, 8, 0},
-
-	// compare
-	Optab{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 0},
-	Optab{ACMP, C_REG, C_NONE, C_NONE, C_LCON, 71, 0},
-	Optab{ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 0},
-	Optab{ACMPU, C_REG, C_NONE, C_NONE, C_LCON, 71, 0},
-	Optab{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 0},
-	Optab{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 0},
-
-	// 32-bit access registers
-	Optab{AMOVW, C_AREG, C_NONE, C_NONE, C_REG, 68, 0},
-	Optab{AMOVWZ, C_AREG, C_NONE, C_NONE, C_REG, 68, 0},
-	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_AREG, 69, 0},
-	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_AREG, 69, 0},
-
-	// macros
-	Optab{ACLEAR, C_LCON, C_NONE, C_NONE, C_LOREG, 96, 0},
-	Optab{ACLEAR, C_LCON, C_NONE, C_NONE, C_LAUTO, 96, REGSP},
-
-	// load/store multiple
-	Optab{ASTMG, C_REG, C_REG, C_NONE, C_LOREG, 97, 0},
-	Optab{ASTMG, C_REG, C_REG, C_NONE, C_LAUTO, 97, REGSP},
-	Optab{ALMG, C_LOREG, C_REG, C_NONE, C_REG, 98, 0},
-	Optab{ALMG, C_LAUTO, C_REG, C_NONE, C_REG, 98, REGSP},
-
-	// bytes
-	Optab{ABYTE, C_SCON, C_NONE, C_NONE, C_NONE, 40, 0},
-	Optab{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 0},
-	Optab{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 0},
-	Optab{ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 0},
-
-	// fast synchronization
-	Optab{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 81, 0},
-
-	// store clock
-	Optab{ASTCK, C_NONE, C_NONE, C_NONE, C_SAUTO, 88, REGSP},
-	Optab{ASTCK, C_NONE, C_NONE, C_NONE, C_SOREG, 88, 0},
-
-	// storage and storage
-	Optab{AMVC, C_LOREG, C_NONE, C_SCON, C_LOREG, 84, 0},
-	Optab{AMVC, C_LOREG, C_NONE, C_SCON, C_LAUTO, 84, REGSP},
-	Optab{AMVC, C_LAUTO, C_NONE, C_SCON, C_LAUTO, 84, REGSP},
-
-	// address
-	Optab{ALARL, C_LCON, C_NONE, C_NONE, C_REG, 85, 0},
-	Optab{ALARL, C_SYMADDR, C_NONE, C_NONE, C_REG, 85, 0},
-	Optab{ALA, C_SOREG, C_NONE, C_NONE, C_REG, 86, 0},
-	Optab{ALA, C_SAUTO, C_NONE, C_NONE, C_REG, 86, REGSP},
-	Optab{AEXRL, C_SYMADDR, C_NONE, C_NONE, C_REG, 87, 0},
-
-	// misc
-	Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 0},
-	Optab{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0},
-	Optab{obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0},
-	Optab{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0},
-	Optab{obj.ANOP, C_SAUTO, C_NONE, C_NONE, C_NONE, 0, 0},
-
-	// vector instructions
-
-	// VRX store
-	Optab{AVST, C_VREG, C_NONE, C_NONE, C_SOREG, 100, 0},
-	Optab{AVST, C_VREG, C_NONE, C_NONE, C_SAUTO, 100, REGSP},
-	Optab{AVSTEG, C_VREG, C_NONE, C_SCON, C_SOREG, 100, 0},
-	Optab{AVSTEG, C_VREG, C_NONE, C_SCON, C_SAUTO, 100, REGSP},
-
-	// VRX load
-	Optab{AVL, C_SOREG, C_NONE, C_NONE, C_VREG, 101, 0},
-	Optab{AVL, C_SAUTO, C_NONE, C_NONE, C_VREG, 101, REGSP},
-	Optab{AVLEG, C_SOREG, C_NONE, C_SCON, C_VREG, 101, 0},
-	Optab{AVLEG, C_SAUTO, C_NONE, C_SCON, C_VREG, 101, REGSP},
-
-	// VRV scatter
-	Optab{AVSCEG, C_VREG, C_NONE, C_SCON, C_SOREG, 102, 0},
-	Optab{AVSCEG, C_VREG, C_NONE, C_SCON, C_SAUTO, 102, REGSP},
-
-	// VRV gather
-	Optab{AVGEG, C_SOREG, C_NONE, C_SCON, C_VREG, 103, 0},
-	Optab{AVGEG, C_SAUTO, C_NONE, C_SCON, C_VREG, 103, REGSP},
-
-	// VRS element shift/rotate and load gr to/from vr element
-	Optab{AVESLG, C_SCON, C_VREG, C_NONE, C_VREG, 104, 0},
-	Optab{AVESLG, C_REG, C_VREG, C_NONE, C_VREG, 104, 0},
-	Optab{AVESLG, C_SCON, C_NONE, C_NONE, C_VREG, 104, 0},
-	Optab{AVESLG, C_REG, C_NONE, C_NONE, C_VREG, 104, 0},
-	Optab{AVLGVG, C_SCON, C_VREG, C_NONE, C_REG, 104, 0},
-	Optab{AVLGVG, C_REG, C_VREG, C_NONE, C_REG, 104, 0},
-	Optab{AVLVGG, C_SCON, C_REG, C_NONE, C_VREG, 104, 0},
-	Optab{AVLVGG, C_REG, C_REG, C_NONE, C_VREG, 104, 0},
-
-	// VRS store multiple
-	Optab{AVSTM, C_VREG, C_VREG, C_NONE, C_SOREG, 105, 0},
-	Optab{AVSTM, C_VREG, C_VREG, C_NONE, C_SAUTO, 105, REGSP},
-
-	// VRS load multiple
-	Optab{AVLM, C_SOREG, C_VREG, C_NONE, C_VREG, 106, 0},
-	Optab{AVLM, C_SAUTO, C_VREG, C_NONE, C_VREG, 106, REGSP},
-
-	// VRS store with length
-	Optab{AVSTL, C_VREG, C_NONE, C_REG, C_SOREG, 107, 0},
-	Optab{AVSTL, C_VREG, C_NONE, C_REG, C_SAUTO, 107, REGSP},
-
-	// VRS load with length
-	Optab{AVLL, C_SOREG, C_NONE, C_REG, C_VREG, 108, 0},
-	Optab{AVLL, C_SAUTO, C_NONE, C_REG, C_VREG, 108, REGSP},
-
-	// VRI-a
-	Optab{AVGBM, C_ANDCON, C_NONE, C_NONE, C_VREG, 109, 0},
-	Optab{AVZERO, C_NONE, C_NONE, C_NONE, C_VREG, 109, 0},
-	Optab{AVREPIG, C_ADDCON, C_NONE, C_NONE, C_VREG, 109, 0},
-	Optab{AVREPIG, C_SCON, C_NONE, C_NONE, C_VREG, 109, 0},
-	Optab{AVLEIG, C_ADDCON, C_NONE, C_SCON, C_VREG, 109, 0},
-	Optab{AVLEIG, C_SCON, C_NONE, C_SCON, C_VREG, 109, 0},
-
-	// VRI-b generate mask
-	Optab{AVGMG, C_SCON, C_NONE, C_SCON, C_VREG, 110, 0},
-
-	// VRI-c replicate
-	Optab{AVREPG, C_UCON, C_VREG, C_NONE, C_VREG, 111, 0},
-
-	// VRI-d element rotate and insert under mask and
-	// shift left double by byte
-	Optab{AVERIMG, C_VREG, C_VREG, C_SCON, C_VREG, 112, 0},
-	Optab{AVSLDB, C_VREG, C_VREG, C_SCON, C_VREG, 112, 0},
-
-	// VRI-d fp test data class immediate
-	Optab{AVFTCIDB, C_SCON, C_VREG, C_NONE, C_VREG, 113, 0},
-
-	// VRR-a load reg
-	Optab{AVLR, C_VREG, C_NONE, C_NONE, C_VREG, 114, 0},
-
-	// VRR-a compare
-	Optab{AVECG, C_VREG, C_NONE, C_NONE, C_VREG, 115, 0},
-
-	// VRR-b
-	Optab{AVCEQG, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
-	Optab{AVFAEF, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
-	Optab{AVPKSG, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
-
-	// VRR-c
-	Optab{AVAQ, C_VREG, C_VREG, C_NONE, C_VREG, 118, 0},
-	Optab{AVAQ, C_VREG, C_NONE, C_NONE, C_VREG, 118, 0},
-	Optab{AVNOT, C_VREG, C_NONE, C_NONE, C_VREG, 118, 0},
-	Optab{AVPDI, C_VREG, C_VREG, C_SCON, C_VREG, 123, 0},
-
-	// VRR-c shifts
-	Optab{AVERLLVG, C_VREG, C_VREG, C_NONE, C_VREG, 119, 0},
-	Optab{AVERLLVG, C_VREG, C_NONE, C_NONE, C_VREG, 119, 0},
-
-	// VRR-d
-	//             2       3       1       4
-	Optab{AVACQ, C_VREG, C_VREG, C_VREG, C_VREG, 120, 0},
-
-	// VRR-e
-	Optab{AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 121, 0},
-
-	// VRR-f
-	Optab{AVLVGP, C_REG, C_REG, C_NONE, C_VREG, 122, 0},
-}
-
-var oprange [ALAST & obj.AMask][]Optab
-
-var xcmp [C_NCLASS][C_NCLASS]bool
-
-func spanz(ctxt *obj.Link, cursym *obj.LSym) {
-	p := cursym.Text
-	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
-		return
-	}
-	ctxt.Cursym = cursym
-	ctxt.Autosize = int32(p.To.Offset)
-
-	if oprange[AORW&obj.AMask] == nil {
-		buildop(ctxt)
-	}
-
-	buffer := make([]byte, 0)
-	changed := true
-	loop := 0
-	for changed {
-		if loop > 10 {
-			ctxt.Diag("stuck in spanz loop")
-			break
-		}
-		changed = false
-		buffer = buffer[:0]
-		ctxt.Cursym.R = make([]obj.Reloc, 0)
-		for p := cursym.Text; p != nil; p = p.Link {
-			pc := int64(len(buffer))
-			if pc != p.Pc {
-				changed = true
-			}
-			p.Pc = pc
-			ctxt.Pc = p.Pc
-			ctxt.Curp = p
-			asmout(ctxt, &buffer)
-			if pc == int64(len(buffer)) {
-				switch p.As {
-				case obj.ANOP, obj.AFUNCDATA, obj.APCDATA, obj.ATEXT:
-					// ok
-				default:
-					ctxt.Diag("zero-width instruction\n%v", p)
-				}
-			}
-		}
-		loop++
-	}
-
-	cursym.Size = int64(len(buffer))
-	if cursym.Size%funcAlign != 0 {
-		cursym.Size += funcAlign - (cursym.Size % funcAlign)
-	}
-	cursym.Grow(cursym.Size)
-	copy(cursym.P, buffer)
-}
-
-func isint32(v int64) bool {
-	return int64(int32(v)) == v
-}
-
-func isuint32(v uint64) bool {
-	return uint64(uint32(v)) == v
-}
-
-func aclass(ctxt *obj.Link, a *obj.Addr) int {
-	switch a.Type {
-	case obj.TYPE_NONE:
-		return C_NONE
-
-	case obj.TYPE_REG:
-		if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
-			return C_REG
-		}
-		if REG_F0 <= a.Reg && a.Reg <= REG_F15 {
-			return C_FREG
-		}
-		if REG_AR0 <= a.Reg && a.Reg <= REG_AR15 {
-			return C_AREG
-		}
-		if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
-			return C_VREG
-		}
-		return C_GOK
-
-	case obj.TYPE_MEM:
-		switch a.Name {
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			if a.Sym == nil {
-				// must have a symbol
-				break
-			}
-			ctxt.Instoffset = a.Offset
-			if a.Sym.Type == obj.STLSBSS {
-				if ctxt.Flag_shared {
-					return C_TLS_IE // initial exec model
-				}
-				return C_TLS_LE // local exec model
-			}
-			return C_ADDR
-
-		case obj.NAME_GOTREF:
-			return C_GOTADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SAUTO
-			}
-			return C_LAUTO
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SAUTO
-			}
-			return C_LAUTO
-
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if ctxt.Instoffset == 0 {
-				return C_ZOREG
-			}
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SOREG
-			}
-			return C_LOREG
-		}
-
-		return C_GOK
-
-	case obj.TYPE_TEXTSIZE:
-		return C_TEXTSIZE
-
-	case obj.TYPE_FCONST:
-		if f64, ok := a.Val.(float64); ok && math.Float64bits(f64) == 0 {
-			return C_ZCON
-		}
-		ctxt.Diag("cannot handle the floating point constant %v", a.Val)
-
-	case obj.TYPE_CONST,
-		obj.TYPE_ADDR:
-		switch a.Name {
-		case obj.NAME_NONE:
-			ctxt.Instoffset = a.Offset
-			if a.Reg != 0 {
-				if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
-					return C_SACON
-				}
-				if isint32(ctxt.Instoffset) {
-					return C_LACON
-				}
-				return C_DACON
-			}
-			goto consize
-
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			s := a.Sym
-			if s == nil {
-				break
-			}
-			ctxt.Instoffset = a.Offset
-			if s.Type == obj.SCONST {
-				goto consize
-			}
-
-			return C_SYMADDR
-
-		case obj.NAME_AUTO:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SACON
-			}
-			return C_LACON
-
-		case obj.NAME_PARAM:
-			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
-			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
-				return C_SACON
-			}
-			return C_LACON
-		}
-
-		return C_GOK
-
-	consize:
-		if ctxt.Instoffset == 0 {
-			return C_ZCON
-		}
-		if ctxt.Instoffset >= 0 {
-			if ctxt.Instoffset <= 0x7fff {
-				return C_SCON
-			}
-			if ctxt.Instoffset <= 0xffff {
-				return C_ANDCON
-			}
-			if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
-				return C_UCON
-			}
-			if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
-				return C_LCON
-			}
-			return C_DCON
-		}
-
-		if ctxt.Instoffset >= -0x8000 {
-			return C_ADDCON
-		}
-		if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
-			return C_UCON
-		}
-		if isint32(ctxt.Instoffset) {
-			return C_LCON
-		}
-		return C_DCON
-
-	case obj.TYPE_BRANCH:
-		return C_SBRA
-	}
-
-	return C_GOK
-}
-
-func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
-	a1 := int(p.Optab)
-	if a1 != 0 {
-		return &optab[a1-1]
-	}
-	a1 = int(p.From.Class)
-	if a1 == 0 {
-		a1 = aclass(ctxt, &p.From) + 1
-		p.From.Class = int8(a1)
-	}
-
-	a1--
-	a3 := C_NONE + 1
-	if p.From3 != nil {
-		a3 = int(p.From3.Class)
-		if a3 == 0 {
-			a3 = aclass(ctxt, p.From3) + 1
-			p.From3.Class = int8(a3)
-		}
-	}
-
-	a3--
-	a4 := int(p.To.Class)
-	if a4 == 0 {
-		a4 = aclass(ctxt, &p.To) + 1
-		p.To.Class = int8(a4)
-	}
-
-	a4--
-	a2 := C_NONE
-	if p.Reg != 0 {
-		if REG_R0 <= p.Reg && p.Reg <= REG_R15 {
-			a2 = C_REG
-		} else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
-			a2 = C_VREG
-		} else if REG_F0 <= p.Reg && p.Reg <= REG_F15 {
-			a2 = C_FREG
-		} else if REG_AR0 <= p.Reg && p.Reg <= REG_AR15 {
-			a2 = C_AREG
-		}
-	}
-
-	ops := oprange[p.As&obj.AMask]
-	c1 := &xcmp[a1]
-	c2 := &xcmp[a2]
-	c3 := &xcmp[a3]
-	c4 := &xcmp[a4]
-	for i := range ops {
-		op := &ops[i]
-		if (int(op.a2) == a2 || c2[op.a2]) && c4[op.a4] && c1[op.a1] && c3[op.a3] {
-			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
-			return op
-		}
-	}
-
-	// cannot find a case; abort
-	ctxt.Diag("illegal combination %v %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
-	ctxt.Diag("prog: %v\n", p)
-	return nil
-}
-
-func cmp(a int, b int) bool {
-	if a == b {
-		return true
-	}
-	switch a {
-	case C_DCON:
-		if b == C_LCON {
-			return true
-		}
-		fallthrough
-	case C_LCON:
-		if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
-			return true
-		}
-
-	case C_ADDCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_ANDCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_UCON:
-		if b == C_ZCON || b == C_SCON {
-			return true
-		}
-
-	case C_SCON:
-		if b == C_ZCON {
-			return true
-		}
-
-	case C_LACON:
-		if b == C_SACON {
-			return true
-		}
-
-	case C_LBRA:
-		if b == C_SBRA {
-			return true
-		}
-
-	case C_LAUTO:
-		if b == C_SAUTO {
-			return true
-		}
-
-	case C_LOREG:
-		if b == C_ZOREG || b == C_SOREG {
-			return true
-		}
-
-	case C_SOREG:
-		if b == C_ZOREG {
-			return true
-		}
-
-	case C_ANY:
-		return true
-	}
-
-	return false
-}
-
-type ocmp []Optab
-
-func (x ocmp) Len() int {
-	return len(x)
-}
-
-func (x ocmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x ocmp) Less(i, j int) bool {
-	p1 := &x[i]
-	p2 := &x[j]
-	n := int(p1.as) - int(p2.as)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a1) - int(p2.a1)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a2) - int(p2.a2)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a3) - int(p2.a3)
-	if n != 0 {
-		return n < 0
-	}
-	n = int(p1.a4) - int(p2.a4)
-	if n != 0 {
-		return n < 0
-	}
-	return false
-}
-func opset(a, b obj.As) {
-	oprange[a&obj.AMask] = oprange[b&obj.AMask]
-}
-
-func buildop(ctxt *obj.Link) {
-	for i := 0; i < C_NCLASS; i++ {
-		for n := 0; n < C_NCLASS; n++ {
-			if cmp(n, i) {
-				xcmp[i][n] = true
-			}
-		}
-	}
-	sort.Sort(ocmp(optab))
-	for i := 0; i < len(optab); i++ {
-		r := optab[i].as
-		start := i
-		for ; i+1 < len(optab); i++ {
-			if optab[i+1].as != r {
-				break
-			}
-		}
-		oprange[r&obj.AMask] = optab[start : i+1]
-
-		// opset() aliases optab ranges for similar instructions, to reduce the number of optabs in the array.
-		// oprange[] is used by oplook() to find the Optab entry that applies to a given Prog.
-		switch r {
-		case AADD:
-			opset(AADDC, r)
-			opset(AADDW, r)
-			opset(AMULLD, r)
-			opset(AMULLW, r)
-		case ADIVW:
-			opset(AADDE, r)
-			opset(ADIVD, r)
-			opset(ADIVDU, r)
-			opset(ADIVWU, r)
-			opset(AMODD, r)
-			opset(AMODDU, r)
-			opset(AMODW, r)
-			opset(AMODWU, r)
-		case AMULHD:
-			opset(AMULHDU, r)
-		case AMOVBZ:
-			opset(AMOVH, r)
-			opset(AMOVHZ, r)
-		case ALA:
-			opset(ALAY, r)
-		case AMVC:
-			opset(ACLC, r)
-			opset(AXC, r)
-			opset(AOC, r)
-			opset(ANC, r)
-		case ASTCK:
-			opset(ASTCKC, r)
-			opset(ASTCKE, r)
-			opset(ASTCKF, r)
-		case ALAAG:
-			opset(ALAA, r)
-			opset(ALAAL, r)
-			opset(ALAALG, r)
-			opset(ALAN, r)
-			opset(ALANG, r)
-			opset(ALAX, r)
-			opset(ALAXG, r)
-			opset(ALAO, r)
-			opset(ALAOG, r)
-		case ASTMG:
-			opset(ASTMY, r)
-		case ALMG:
-			opset(ALMY, r)
-		case ABEQ:
-			opset(ABGE, r)
-			opset(ABGT, r)
-			opset(ABLE, r)
-			opset(ABLT, r)
-			opset(ABNE, r)
-			opset(ABVC, r)
-			opset(ABVS, r)
-			opset(ABLEU, r)
-			opset(ABLTU, r)
-		case ABR:
-			opset(ABL, r)
-		case ABC:
-			opset(ABCL, r)
-		case AFABS:
-			opset(AFNABS, r)
-			opset(AFNEG, r)
-			opset(AFNEGS, r)
-			opset(ALEDBR, r)
-			opset(ALDEBR, r)
-			opset(AFSQRT, r)
-			opset(AFSQRTS, r)
-		case AFADD:
-			opset(AFADDS, r)
-			opset(AFDIV, r)
-			opset(AFDIVS, r)
-			opset(AFSUB, r)
-			opset(AFSUBS, r)
-		case AFMADD:
-			opset(AFMADDS, r)
-			opset(AFMSUB, r)
-			opset(AFMSUBS, r)
-			opset(AFNMADD, r)
-			opset(AFNMADDS, r)
-			opset(AFNMSUB, r)
-			opset(AFNMSUBS, r)
-		case AFMUL:
-			opset(AFMULS, r)
-		case AFCMPO:
-			opset(AFCMPU, r)
-			opset(ACEBR, r)
-		case AAND:
-			opset(AOR, r)
-			opset(AXOR, r)
-		case AANDW:
-			opset(AORW, r)
-			opset(AXORW, r)
-		case ASLD:
-			opset(ASRD, r)
-			opset(ASLW, r)
-			opset(ASRW, r)
-			opset(ASRAD, r)
-			opset(ASRAW, r)
-			opset(ARLL, r)
-			opset(ARLLG, r)
-		case ACSG:
-			opset(ACS, r)
-		case ASUB:
-			opset(ASUBC, r)
-			opset(ASUBE, r)
-			opset(ASUBW, r)
-		case ANEG:
-			opset(ANEGW, r)
-		case AFMOVD:
-			opset(AFMOVS, r)
-		case AMOVDBR:
-			opset(AMOVWBR, r)
-		case ACMP:
-			opset(ACMPW, r)
-		case ACMPU:
-			opset(ACMPWU, r)
-		case ACEFBRA:
-			opset(ACDFBRA, r)
-			opset(ACEGBRA, r)
-			opset(ACDGBRA, r)
-			opset(ACELFBR, r)
-			opset(ACDLFBR, r)
-			opset(ACELGBR, r)
-			opset(ACDLGBR, r)
-		case ACFEBRA:
-			opset(ACFDBRA, r)
-			opset(ACGEBRA, r)
-			opset(ACGDBRA, r)
-			opset(ACLFEBR, r)
-			opset(ACLFDBR, r)
-			opset(ACLGEBR, r)
-			opset(ACLGDBR, r)
-		case AFIEBR:
-			opset(AFIDBR, r)
-		case ACMPBEQ:
-			opset(ACMPBGE, r)
-			opset(ACMPBGT, r)
-			opset(ACMPBLE, r)
-			opset(ACMPBLT, r)
-			opset(ACMPBNE, r)
-		case ACMPUBEQ:
-			opset(ACMPUBGE, r)
-			opset(ACMPUBGT, r)
-			opset(ACMPUBLE, r)
-			opset(ACMPUBLT, r)
-			opset(ACMPUBNE, r)
-		case AMOVDEQ:
-			opset(AMOVDGE, r)
-			opset(AMOVDGT, r)
-			opset(AMOVDLE, r)
-			opset(AMOVDLT, r)
-			opset(AMOVDNE, r)
-		case AVL:
-			opset(AVLLEZB, r)
-			opset(AVLLEZH, r)
-			opset(AVLLEZF, r)
-			opset(AVLLEZG, r)
-			opset(AVLREPB, r)
-			opset(AVLREPH, r)
-			opset(AVLREPF, r)
-			opset(AVLREPG, r)
-		case AVLEG:
-			opset(AVLBB, r)
-			opset(AVLEB, r)
-			opset(AVLEH, r)
-			opset(AVLEF, r)
-			opset(AVLEG, r)
-			opset(AVLREP, r)
-		case AVSTEG:
-			opset(AVSTEB, r)
-			opset(AVSTEH, r)
-			opset(AVSTEF, r)
-		case AVSCEG:
-			opset(AVSCEF, r)
-		case AVGEG:
-			opset(AVGEF, r)
-		case AVESLG:
-			opset(AVESLB, r)
-			opset(AVESLH, r)
-			opset(AVESLF, r)
-			opset(AVERLLB, r)
-			opset(AVERLLH, r)
-			opset(AVERLLF, r)
-			opset(AVERLLG, r)
-			opset(AVESRAB, r)
-			opset(AVESRAH, r)
-			opset(AVESRAF, r)
-			opset(AVESRAG, r)
-			opset(AVESRLB, r)
-			opset(AVESRLH, r)
-			opset(AVESRLF, r)
-			opset(AVESRLG, r)
-		case AVLGVG:
-			opset(AVLGVB, r)
-			opset(AVLGVH, r)
-			opset(AVLGVF, r)
-		case AVLVGG:
-			opset(AVLVGB, r)
-			opset(AVLVGH, r)
-			opset(AVLVGF, r)
-		case AVZERO:
-			opset(AVONE, r)
-		case AVREPIG:
-			opset(AVREPIB, r)
-			opset(AVREPIH, r)
-			opset(AVREPIF, r)
-		case AVLEIG:
-			opset(AVLEIB, r)
-			opset(AVLEIH, r)
-			opset(AVLEIF, r)
-		case AVGMG:
-			opset(AVGMB, r)
-			opset(AVGMH, r)
-			opset(AVGMF, r)
-		case AVREPG:
-			opset(AVREPB, r)
-			opset(AVREPH, r)
-			opset(AVREPF, r)
-		case AVERIMG:
-			opset(AVERIMB, r)
-			opset(AVERIMH, r)
-			opset(AVERIMF, r)
-		case AVFTCIDB:
-			opset(AWFTCIDB, r)
-		case AVLR:
-			opset(AVUPHB, r)
-			opset(AVUPHH, r)
-			opset(AVUPHF, r)
-			opset(AVUPLHB, r)
-			opset(AVUPLHH, r)
-			opset(AVUPLHF, r)
-			opset(AVUPLB, r)
-			opset(AVUPLHW, r)
-			opset(AVUPLF, r)
-			opset(AVUPLLB, r)
-			opset(AVUPLLH, r)
-			opset(AVUPLLF, r)
-			opset(AVCLZB, r)
-			opset(AVCLZH, r)
-			opset(AVCLZF, r)
-			opset(AVCLZG, r)
-			opset(AVCTZB, r)
-			opset(AVCTZH, r)
-			opset(AVCTZF, r)
-			opset(AVCTZG, r)
-			opset(AVLDEB, r)
-			opset(AWLDEB, r)
-			opset(AVFLCDB, r)
-			opset(AWFLCDB, r)
-			opset(AVFLNDB, r)
-			opset(AWFLNDB, r)
-			opset(AVFLPDB, r)
-			opset(AWFLPDB, r)
-			opset(AVFSQDB, r)
-			opset(AWFSQDB, r)
-			opset(AVISTRB, r)
-			opset(AVISTRH, r)
-			opset(AVISTRF, r)
-			opset(AVISTRBS, r)
-			opset(AVISTRHS, r)
-			opset(AVISTRFS, r)
-			opset(AVLCB, r)
-			opset(AVLCH, r)
-			opset(AVLCF, r)
-			opset(AVLCG, r)
-			opset(AVLPB, r)
-			opset(AVLPH, r)
-			opset(AVLPF, r)
-			opset(AVLPG, r)
-			opset(AVPOPCT, r)
-			opset(AVSEGB, r)
-			opset(AVSEGH, r)
-			opset(AVSEGF, r)
-		case AVECG:
-			opset(AVECB, r)
-			opset(AVECH, r)
-			opset(AVECF, r)
-			opset(AVECLB, r)
-			opset(AVECLH, r)
-			opset(AVECLF, r)
-			opset(AVECLG, r)
-			opset(AWFCDB, r)
-			opset(AWFKDB, r)
-		case AVCEQG:
-			opset(AVCEQB, r)
-			opset(AVCEQH, r)
-			opset(AVCEQF, r)
-			opset(AVCEQBS, r)
-			opset(AVCEQHS, r)
-			opset(AVCEQFS, r)
-			opset(AVCEQGS, r)
-			opset(AVCHB, r)
-			opset(AVCHH, r)
-			opset(AVCHF, r)
-			opset(AVCHG, r)
-			opset(AVCHBS, r)
-			opset(AVCHHS, r)
-			opset(AVCHFS, r)
-			opset(AVCHGS, r)
-			opset(AVCHLB, r)
-			opset(AVCHLH, r)
-			opset(AVCHLF, r)
-			opset(AVCHLG, r)
-			opset(AVCHLBS, r)
-			opset(AVCHLHS, r)
-			opset(AVCHLFS, r)
-			opset(AVCHLGS, r)
-		case AVFAEF:
-			opset(AVFAEB, r)
-			opset(AVFAEH, r)
-			opset(AVFAEBS, r)
-			opset(AVFAEHS, r)
-			opset(AVFAEFS, r)
-			opset(AVFAEZB, r)
-			opset(AVFAEZH, r)
-			opset(AVFAEZF, r)
-			opset(AVFAEZBS, r)
-			opset(AVFAEZHS, r)
-			opset(AVFAEZFS, r)
-			opset(AVFEEB, r)
-			opset(AVFEEH, r)
-			opset(AVFEEF, r)
-			opset(AVFEEBS, r)
-			opset(AVFEEHS, r)
-			opset(AVFEEFS, r)
-			opset(AVFEEZB, r)
-			opset(AVFEEZH, r)
-			opset(AVFEEZF, r)
-			opset(AVFEEZBS, r)
-			opset(AVFEEZHS, r)
-			opset(AVFEEZFS, r)
-			opset(AVFENEB, r)
-			opset(AVFENEH, r)
-			opset(AVFENEF, r)
-			opset(AVFENEBS, r)
-			opset(AVFENEHS, r)
-			opset(AVFENEFS, r)
-			opset(AVFENEZB, r)
-			opset(AVFENEZH, r)
-			opset(AVFENEZF, r)
-			opset(AVFENEZBS, r)
-			opset(AVFENEZHS, r)
-			opset(AVFENEZFS, r)
-		case AVPKSG:
-			opset(AVPKSH, r)
-			opset(AVPKSF, r)
-			opset(AVPKSHS, r)
-			opset(AVPKSFS, r)
-			opset(AVPKSGS, r)
-			opset(AVPKLSH, r)
-			opset(AVPKLSF, r)
-			opset(AVPKLSG, r)
-			opset(AVPKLSHS, r)
-			opset(AVPKLSFS, r)
-			opset(AVPKLSGS, r)
-		case AVAQ:
-			opset(AVAB, r)
-			opset(AVAH, r)
-			opset(AVAF, r)
-			opset(AVAG, r)
-			opset(AVACCB, r)
-			opset(AVACCH, r)
-			opset(AVACCF, r)
-			opset(AVACCG, r)
-			opset(AVACCQ, r)
-			opset(AVN, r)
-			opset(AVNC, r)
-			opset(AVAVGB, r)
-			opset(AVAVGH, r)
-			opset(AVAVGF, r)
-			opset(AVAVGG, r)
-			opset(AVAVGLB, r)
-			opset(AVAVGLH, r)
-			opset(AVAVGLF, r)
-			opset(AVAVGLG, r)
-			opset(AVCKSM, r)
-			opset(AVX, r)
-			opset(AVFADB, r)
-			opset(AWFADB, r)
-			opset(AVFCEDB, r)
-			opset(AVFCEDBS, r)
-			opset(AWFCEDB, r)
-			opset(AWFCEDBS, r)
-			opset(AVFCHDB, r)
-			opset(AVFCHDBS, r)
-			opset(AWFCHDB, r)
-			opset(AWFCHDBS, r)
-			opset(AVFCHEDB, r)
-			opset(AVFCHEDBS, r)
-			opset(AWFCHEDB, r)
-			opset(AWFCHEDBS, r)
-			opset(AVFMDB, r)
-			opset(AWFMDB, r)
-			opset(AVGFMB, r)
-			opset(AVGFMH, r)
-			opset(AVGFMF, r)
-			opset(AVGFMG, r)
-			opset(AVMXB, r)
-			opset(AVMXH, r)
-			opset(AVMXF, r)
-			opset(AVMXG, r)
-			opset(AVMXLB, r)
-			opset(AVMXLH, r)
-			opset(AVMXLF, r)
-			opset(AVMXLG, r)
-			opset(AVMNB, r)
-			opset(AVMNH, r)
-			opset(AVMNF, r)
-			opset(AVMNG, r)
-			opset(AVMNLB, r)
-			opset(AVMNLH, r)
-			opset(AVMNLF, r)
-			opset(AVMNLG, r)
-			opset(AVMRHB, r)
-			opset(AVMRHH, r)
-			opset(AVMRHF, r)
-			opset(AVMRHG, r)
-			opset(AVMRLB, r)
-			opset(AVMRLH, r)
-			opset(AVMRLF, r)
-			opset(AVMRLG, r)
-			opset(AVMEB, r)
-			opset(AVMEH, r)
-			opset(AVMEF, r)
-			opset(AVMLEB, r)
-			opset(AVMLEH, r)
-			opset(AVMLEF, r)
-			opset(AVMOB, r)
-			opset(AVMOH, r)
-			opset(AVMOF, r)
-			opset(AVMLOB, r)
-			opset(AVMLOH, r)
-			opset(AVMLOF, r)
-			opset(AVMHB, r)
-			opset(AVMHH, r)
-			opset(AVMHF, r)
-			opset(AVMLHB, r)
-			opset(AVMLHH, r)
-			opset(AVMLHF, r)
-			opset(AVMLH, r)
-			opset(AVMLHW, r)
-			opset(AVMLF, r)
-			opset(AVNO, r)
-			opset(AVO, r)
-			opset(AVPKH, r)
-			opset(AVPKF, r)
-			opset(AVPKG, r)
-			opset(AVSUMGH, r)
-			opset(AVSUMGF, r)
-			opset(AVSUMQF, r)
-			opset(AVSUMQG, r)
-			opset(AVSUMB, r)
-			opset(AVSUMH, r)
-		case AVERLLVG:
-			opset(AVERLLVB, r)
-			opset(AVERLLVH, r)
-			opset(AVERLLVF, r)
-			opset(AVESLVB, r)
-			opset(AVESLVH, r)
-			opset(AVESLVF, r)
-			opset(AVESLVG, r)
-			opset(AVESRAVB, r)
-			opset(AVESRAVH, r)
-			opset(AVESRAVF, r)
-			opset(AVESRAVG, r)
-			opset(AVESRLVB, r)
-			opset(AVESRLVH, r)
-			opset(AVESRLVF, r)
-			opset(AVESRLVG, r)
-			opset(AVFDDB, r)
-			opset(AWFDDB, r)
-			opset(AVFSDB, r)
-			opset(AWFSDB, r)
-			opset(AVSL, r)
-			opset(AVSLB, r)
-			opset(AVSRA, r)
-			opset(AVSRAB, r)
-			opset(AVSRL, r)
-			opset(AVSRLB, r)
-			opset(AVSF, r)
-			opset(AVSG, r)
-			opset(AVSQ, r)
-			opset(AVSCBIB, r)
-			opset(AVSCBIH, r)
-			opset(AVSCBIF, r)
-			opset(AVSCBIG, r)
-			opset(AVSCBIQ, r)
-		case AVACQ:
-			opset(AVACCCQ, r)
-			opset(AVGFMAB, r)
-			opset(AVGFMAH, r)
-			opset(AVGFMAF, r)
-			opset(AVGFMAG, r)
-			opset(AVMALB, r)
-			opset(AVMALHW, r)
-			opset(AVMALF, r)
-			opset(AVMAHB, r)
-			opset(AVMAHH, r)
-			opset(AVMAHF, r)
-			opset(AVMALHB, r)
-			opset(AVMALHH, r)
-			opset(AVMALHF, r)
-			opset(AVMAEB, r)
-			opset(AVMAEH, r)
-			opset(AVMAEF, r)
-			opset(AVMALEB, r)
-			opset(AVMALEH, r)
-			opset(AVMALEF, r)
-			opset(AVMAOB, r)
-			opset(AVMAOH, r)
-			opset(AVMAOF, r)
-			opset(AVMALOB, r)
-			opset(AVMALOH, r)
-			opset(AVMALOF, r)
-			opset(AVSTRCB, r)
-			opset(AVSTRCH, r)
-			opset(AVSTRCF, r)
-			opset(AVSTRCBS, r)
-			opset(AVSTRCHS, r)
-			opset(AVSTRCFS, r)
-			opset(AVSTRCZB, r)
-			opset(AVSTRCZH, r)
-			opset(AVSTRCZF, r)
-			opset(AVSTRCZBS, r)
-			opset(AVSTRCZHS, r)
-			opset(AVSTRCZFS, r)
-			opset(AVSBCBIQ, r)
-			opset(AVSBIQ, r)
-		case AVSEL:
-			opset(AVFMADB, r)
-			opset(AWFMADB, r)
-			opset(AVFMSDB, r)
-			opset(AWFMSDB, r)
-			opset(AVPERM, r)
-		}
-	}
-}
-
-const (
-	op_A       uint32 = 0x5A00 // FORMAT_RX1        ADD (32)
-	op_AD      uint32 = 0x6A00 // FORMAT_RX1        ADD NORMALIZED (long HFP)
-	op_ADB     uint32 = 0xED1A // FORMAT_RXE        ADD (long BFP)
-	op_ADBR    uint32 = 0xB31A // FORMAT_RRE        ADD (long BFP)
-	op_ADR     uint32 = 0x2A00 // FORMAT_RR         ADD NORMALIZED (long HFP)
-	op_ADTR    uint32 = 0xB3D2 // FORMAT_RRF1       ADD (long DFP)
-	op_ADTRA   uint32 = 0xB3D2 // FORMAT_RRF1       ADD (long DFP)
-	op_AE      uint32 = 0x7A00 // FORMAT_RX1        ADD NORMALIZED (short HFP)
-	op_AEB     uint32 = 0xED0A // FORMAT_RXE        ADD (short BFP)
-	op_AEBR    uint32 = 0xB30A // FORMAT_RRE        ADD (short BFP)
-	op_AER     uint32 = 0x3A00 // FORMAT_RR         ADD NORMALIZED (short HFP)
-	op_AFI     uint32 = 0xC209 // FORMAT_RIL1       ADD IMMEDIATE (32)
-	op_AG      uint32 = 0xE308 // FORMAT_RXY1       ADD (64)
-	op_AGF     uint32 = 0xE318 // FORMAT_RXY1       ADD (64<-32)
-	op_AGFI    uint32 = 0xC208 // FORMAT_RIL1       ADD IMMEDIATE (64<-32)
-	op_AGFR    uint32 = 0xB918 // FORMAT_RRE        ADD (64<-32)
-	op_AGHI    uint32 = 0xA70B // FORMAT_RI1        ADD HALFWORD IMMEDIATE (64)
-	op_AGHIK   uint32 = 0xECD9 // FORMAT_RIE4       ADD IMMEDIATE (64<-16)
-	op_AGR     uint32 = 0xB908 // FORMAT_RRE        ADD (64)
-	op_AGRK    uint32 = 0xB9E8 // FORMAT_RRF1       ADD (64)
-	op_AGSI    uint32 = 0xEB7A // FORMAT_SIY        ADD IMMEDIATE (64<-8)
-	op_AH      uint32 = 0x4A00 // FORMAT_RX1        ADD HALFWORD
-	op_AHHHR   uint32 = 0xB9C8 // FORMAT_RRF1       ADD HIGH (32)
-	op_AHHLR   uint32 = 0xB9D8 // FORMAT_RRF1       ADD HIGH (32)
-	op_AHI     uint32 = 0xA70A // FORMAT_RI1        ADD HALFWORD IMMEDIATE (32)
-	op_AHIK    uint32 = 0xECD8 // FORMAT_RIE4       ADD IMMEDIATE (32<-16)
-	op_AHY     uint32 = 0xE37A // FORMAT_RXY1       ADD HALFWORD
-	op_AIH     uint32 = 0xCC08 // FORMAT_RIL1       ADD IMMEDIATE HIGH (32)
-	op_AL      uint32 = 0x5E00 // FORMAT_RX1        ADD LOGICAL (32)
-	op_ALC     uint32 = 0xE398 // FORMAT_RXY1       ADD LOGICAL WITH CARRY (32)
-	op_ALCG    uint32 = 0xE388 // FORMAT_RXY1       ADD LOGICAL WITH CARRY (64)
-	op_ALCGR   uint32 = 0xB988 // FORMAT_RRE        ADD LOGICAL WITH CARRY (64)
-	op_ALCR    uint32 = 0xB998 // FORMAT_RRE        ADD LOGICAL WITH CARRY (32)
-	op_ALFI    uint32 = 0xC20B // FORMAT_RIL1       ADD LOGICAL IMMEDIATE (32)
-	op_ALG     uint32 = 0xE30A // FORMAT_RXY1       ADD LOGICAL (64)
-	op_ALGF    uint32 = 0xE31A // FORMAT_RXY1       ADD LOGICAL (64<-32)
-	op_ALGFI   uint32 = 0xC20A // FORMAT_RIL1       ADD LOGICAL IMMEDIATE (64<-32)
-	op_ALGFR   uint32 = 0xB91A // FORMAT_RRE        ADD LOGICAL (64<-32)
-	op_ALGHSIK uint32 = 0xECDB // FORMAT_RIE4       ADD LOGICAL WITH SIGNED IMMEDIATE (64<-16)
-	op_ALGR    uint32 = 0xB90A // FORMAT_RRE        ADD LOGICAL (64)
-	op_ALGRK   uint32 = 0xB9EA // FORMAT_RRF1       ADD LOGICAL (64)
-	op_ALGSI   uint32 = 0xEB7E // FORMAT_SIY        ADD LOGICAL WITH SIGNED IMMEDIATE (64<-8)
-	op_ALHHHR  uint32 = 0xB9CA // FORMAT_RRF1       ADD LOGICAL HIGH (32)
-	op_ALHHLR  uint32 = 0xB9DA // FORMAT_RRF1       ADD LOGICAL HIGH (32)
-	op_ALHSIK  uint32 = 0xECDA // FORMAT_RIE4       ADD LOGICAL WITH SIGNED IMMEDIATE (32<-16)
-	op_ALR     uint32 = 0x1E00 // FORMAT_RR         ADD LOGICAL (32)
-	op_ALRK    uint32 = 0xB9FA // FORMAT_RRF1       ADD LOGICAL (32)
-	op_ALSI    uint32 = 0xEB6E // FORMAT_SIY        ADD LOGICAL WITH SIGNED IMMEDIATE (32<-8)
-	op_ALSIH   uint32 = 0xCC0A // FORMAT_RIL1       ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)
-	op_ALSIHN  uint32 = 0xCC0B // FORMAT_RIL1       ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)
-	op_ALY     uint32 = 0xE35E // FORMAT_RXY1       ADD LOGICAL (32)
-	op_AP      uint32 = 0xFA00 // FORMAT_SS2        ADD DECIMAL
-	op_AR      uint32 = 0x1A00 // FORMAT_RR         ADD (32)
-	op_ARK     uint32 = 0xB9F8 // FORMAT_RRF1       ADD (32)
-	op_ASI     uint32 = 0xEB6A // FORMAT_SIY        ADD IMMEDIATE (32<-8)
-	op_AU      uint32 = 0x7E00 // FORMAT_RX1        ADD UNNORMALIZED (short HFP)
-	op_AUR     uint32 = 0x3E00 // FORMAT_RR         ADD UNNORMALIZED (short HFP)
-	op_AW      uint32 = 0x6E00 // FORMAT_RX1        ADD UNNORMALIZED (long HFP)
-	op_AWR     uint32 = 0x2E00 // FORMAT_RR         ADD UNNORMALIZED (long HFP)
-	op_AXBR    uint32 = 0xB34A // FORMAT_RRE        ADD (extended BFP)
-	op_AXR     uint32 = 0x3600 // FORMAT_RR         ADD NORMALIZED (extended HFP)
-	op_AXTR    uint32 = 0xB3DA // FORMAT_RRF1       ADD (extended DFP)
-	op_AXTRA   uint32 = 0xB3DA // FORMAT_RRF1       ADD (extended DFP)
-	op_AY      uint32 = 0xE35A // FORMAT_RXY1       ADD (32)
-	op_BAKR    uint32 = 0xB240 // FORMAT_RRE        BRANCH AND STACK
-	op_BAL     uint32 = 0x4500 // FORMAT_RX1        BRANCH AND LINK
-	op_BALR    uint32 = 0x0500 // FORMAT_RR         BRANCH AND LINK
-	op_BAS     uint32 = 0x4D00 // FORMAT_RX1        BRANCH AND SAVE
-	op_BASR    uint32 = 0x0D00 // FORMAT_RR         BRANCH AND SAVE
-	op_BASSM   uint32 = 0x0C00 // FORMAT_RR         BRANCH AND SAVE AND SET MODE
-	op_BC      uint32 = 0x4700 // FORMAT_RX2        BRANCH ON CONDITION
-	op_BCR     uint32 = 0x0700 // FORMAT_RR         BRANCH ON CONDITION
-	op_BCT     uint32 = 0x4600 // FORMAT_RX1        BRANCH ON COUNT (32)
-	op_BCTG    uint32 = 0xE346 // FORMAT_RXY1       BRANCH ON COUNT (64)
-	op_BCTGR   uint32 = 0xB946 // FORMAT_RRE        BRANCH ON COUNT (64)
-	op_BCTR    uint32 = 0x0600 // FORMAT_RR         BRANCH ON COUNT (32)
-	op_BPP     uint32 = 0xC700 // FORMAT_SMI        BRANCH PREDICTION PRELOAD
-	op_BPRP    uint32 = 0xC500 // FORMAT_MII        BRANCH PREDICTION RELATIVE PRELOAD
-	op_BRAS    uint32 = 0xA705 // FORMAT_RI2        BRANCH RELATIVE AND SAVE
-	op_BRASL   uint32 = 0xC005 // FORMAT_RIL2       BRANCH RELATIVE AND SAVE LONG
-	op_BRC     uint32 = 0xA704 // FORMAT_RI3        BRANCH RELATIVE ON CONDITION
-	op_BRCL    uint32 = 0xC004 // FORMAT_RIL3       BRANCH RELATIVE ON CONDITION LONG
-	op_BRCT    uint32 = 0xA706 // FORMAT_RI2        BRANCH RELATIVE ON COUNT (32)
-	op_BRCTG   uint32 = 0xA707 // FORMAT_RI2        BRANCH RELATIVE ON COUNT (64)
-	op_BRCTH   uint32 = 0xCC06 // FORMAT_RIL2       BRANCH RELATIVE ON COUNT HIGH (32)
-	op_BRXH    uint32 = 0x8400 // FORMAT_RSI        BRANCH RELATIVE ON INDEX HIGH (32)
-	op_BRXHG   uint32 = 0xEC44 // FORMAT_RIE5       BRANCH RELATIVE ON INDEX HIGH (64)
-	op_BRXLE   uint32 = 0x8500 // FORMAT_RSI        BRANCH RELATIVE ON INDEX LOW OR EQ. (32)
-	op_BRXLG   uint32 = 0xEC45 // FORMAT_RIE5       BRANCH RELATIVE ON INDEX LOW OR EQ. (64)
-	op_BSA     uint32 = 0xB25A // FORMAT_RRE        BRANCH AND SET AUTHORITY
-	op_BSG     uint32 = 0xB258 // FORMAT_RRE        BRANCH IN SUBSPACE GROUP
-	op_BSM     uint32 = 0x0B00 // FORMAT_RR         BRANCH AND SET MODE
-	op_BXH     uint32 = 0x8600 // FORMAT_RS1        BRANCH ON INDEX HIGH (32)
-	op_BXHG    uint32 = 0xEB44 // FORMAT_RSY1       BRANCH ON INDEX HIGH (64)
-	op_BXLE    uint32 = 0x8700 // FORMAT_RS1        BRANCH ON INDEX LOW OR EQUAL (32)
-	op_BXLEG   uint32 = 0xEB45 // FORMAT_RSY1       BRANCH ON INDEX LOW OR EQUAL (64)
-	op_C       uint32 = 0x5900 // FORMAT_RX1        COMPARE (32)
-	op_CD      uint32 = 0x6900 // FORMAT_RX1        COMPARE (long HFP)
-	op_CDB     uint32 = 0xED19 // FORMAT_RXE        COMPARE (long BFP)
-	op_CDBR    uint32 = 0xB319 // FORMAT_RRE        COMPARE (long BFP)
-	op_CDFBR   uint32 = 0xB395 // FORMAT_RRE        CONVERT FROM FIXED (32 to long BFP)
-	op_CDFBRA  uint32 = 0xB395 // FORMAT_RRF5       CONVERT FROM FIXED (32 to long BFP)
-	op_CDFR    uint32 = 0xB3B5 // FORMAT_RRE        CONVERT FROM FIXED (32 to long HFP)
-	op_CDFTR   uint32 = 0xB951 // FORMAT_RRE        CONVERT FROM FIXED (32 to long DFP)
-	op_CDGBR   uint32 = 0xB3A5 // FORMAT_RRE        CONVERT FROM FIXED (64 to long BFP)
-	op_CDGBRA  uint32 = 0xB3A5 // FORMAT_RRF5       CONVERT FROM FIXED (64 to long BFP)
-	op_CDGR    uint32 = 0xB3C5 // FORMAT_RRE        CONVERT FROM FIXED (64 to long HFP)
-	op_CDGTR   uint32 = 0xB3F1 // FORMAT_RRE        CONVERT FROM FIXED (64 to long DFP)
-	op_CDGTRA  uint32 = 0xB3F1 // FORMAT_RRF5       CONVERT FROM FIXED (64 to long DFP)
-	op_CDLFBR  uint32 = 0xB391 // FORMAT_RRF5       CONVERT FROM LOGICAL (32 to long BFP)
-	op_CDLFTR  uint32 = 0xB953 // FORMAT_RRF5       CONVERT FROM LOGICAL (32 to long DFP)
-	op_CDLGBR  uint32 = 0xB3A1 // FORMAT_RRF5       CONVERT FROM LOGICAL (64 to long BFP)
-	op_CDLGTR  uint32 = 0xB952 // FORMAT_RRF5       CONVERT FROM LOGICAL (64 to long DFP)
-	op_CDR     uint32 = 0x2900 // FORMAT_RR         COMPARE (long HFP)
-	op_CDS     uint32 = 0xBB00 // FORMAT_RS1        COMPARE DOUBLE AND SWAP (32)
-	op_CDSG    uint32 = 0xEB3E // FORMAT_RSY1       COMPARE DOUBLE AND SWAP (64)
-	op_CDSTR   uint32 = 0xB3F3 // FORMAT_RRE        CONVERT FROM SIGNED PACKED (64 to long DFP)
-	op_CDSY    uint32 = 0xEB31 // FORMAT_RSY1       COMPARE DOUBLE AND SWAP (32)
-	op_CDTR    uint32 = 0xB3E4 // FORMAT_RRE        COMPARE (long DFP)
-	op_CDUTR   uint32 = 0xB3F2 // FORMAT_RRE        CONVERT FROM UNSIGNED PACKED (64 to long DFP)
-	op_CDZT    uint32 = 0xEDAA // FORMAT_RSL        CONVERT FROM ZONED (to long DFP)
-	op_CE      uint32 = 0x7900 // FORMAT_RX1        COMPARE (short HFP)
-	op_CEB     uint32 = 0xED09 // FORMAT_RXE        COMPARE (short BFP)
-	op_CEBR    uint32 = 0xB309 // FORMAT_RRE        COMPARE (short BFP)
-	op_CEDTR   uint32 = 0xB3F4 // FORMAT_RRE        COMPARE BIASED EXPONENT (long DFP)
-	op_CEFBR   uint32 = 0xB394 // FORMAT_RRE        CONVERT FROM FIXED (32 to short BFP)
-	op_CEFBRA  uint32 = 0xB394 // FORMAT_RRF5       CONVERT FROM FIXED (32 to short BFP)
-	op_CEFR    uint32 = 0xB3B4 // FORMAT_RRE        CONVERT FROM FIXED (32 to short HFP)
-	op_CEGBR   uint32 = 0xB3A4 // FORMAT_RRE        CONVERT FROM FIXED (64 to short BFP)
-	op_CEGBRA  uint32 = 0xB3A4 // FORMAT_RRF5       CONVERT FROM FIXED (64 to short BFP)
-	op_CEGR    uint32 = 0xB3C4 // FORMAT_RRE        CONVERT FROM FIXED (64 to short HFP)
-	op_CELFBR  uint32 = 0xB390 // FORMAT_RRF5       CONVERT FROM LOGICAL (32 to short BFP)
-	op_CELGBR  uint32 = 0xB3A0 // FORMAT_RRF5       CONVERT FROM LOGICAL (64 to short BFP)
-	op_CER     uint32 = 0x3900 // FORMAT_RR         COMPARE (short HFP)
-	op_CEXTR   uint32 = 0xB3FC // FORMAT_RRE        COMPARE BIASED EXPONENT (extended DFP)
-	op_CFC     uint32 = 0xB21A // FORMAT_S          COMPARE AND FORM CODEWORD
-	op_CFDBR   uint32 = 0xB399 // FORMAT_RRF5       CONVERT TO FIXED (long BFP to 32)
-	op_CFDBRA  uint32 = 0xB399 // FORMAT_RRF5       CONVERT TO FIXED (long BFP to 32)
-	op_CFDR    uint32 = 0xB3B9 // FORMAT_RRF5       CONVERT TO FIXED (long HFP to 32)
-	op_CFDTR   uint32 = 0xB941 // FORMAT_RRF5       CONVERT TO FIXED (long DFP to 32)
-	op_CFEBR   uint32 = 0xB398 // FORMAT_RRF5       CONVERT TO FIXED (short BFP to 32)
-	op_CFEBRA  uint32 = 0xB398 // FORMAT_RRF5       CONVERT TO FIXED (short BFP to 32)
-	op_CFER    uint32 = 0xB3B8 // FORMAT_RRF5       CONVERT TO FIXED (short HFP to 32)
-	op_CFI     uint32 = 0xC20D // FORMAT_RIL1       COMPARE IMMEDIATE (32)
-	op_CFXBR   uint32 = 0xB39A // FORMAT_RRF5       CONVERT TO FIXED (extended BFP to 32)
-	op_CFXBRA  uint32 = 0xB39A // FORMAT_RRF5       CONVERT TO FIXED (extended BFP to 32)
-	op_CFXR    uint32 = 0xB3BA // FORMAT_RRF5       CONVERT TO FIXED (extended HFP to 32)
-	op_CFXTR   uint32 = 0xB949 // FORMAT_RRF5       CONVERT TO FIXED (extended DFP to 32)
-	op_CG      uint32 = 0xE320 // FORMAT_RXY1       COMPARE (64)
-	op_CGDBR   uint32 = 0xB3A9 // FORMAT_RRF5       CONVERT TO FIXED (long BFP to 64)
-	op_CGDBRA  uint32 = 0xB3A9 // FORMAT_RRF5       CONVERT TO FIXED (long BFP to 64)
-	op_CGDR    uint32 = 0xB3C9 // FORMAT_RRF5       CONVERT TO FIXED (long HFP to 64)
-	op_CGDTR   uint32 = 0xB3E1 // FORMAT_RRF5       CONVERT TO FIXED (long DFP to 64)
-	op_CGDTRA  uint32 = 0xB3E1 // FORMAT_RRF5       CONVERT TO FIXED (long DFP to 64)
-	op_CGEBR   uint32 = 0xB3A8 // FORMAT_RRF5       CONVERT TO FIXED (short BFP to 64)
-	op_CGEBRA  uint32 = 0xB3A8 // FORMAT_RRF5       CONVERT TO FIXED (short BFP to 64)
-	op_CGER    uint32 = 0xB3C8 // FORMAT_RRF5       CONVERT TO FIXED (short HFP to 64)
-	op_CGF     uint32 = 0xE330 // FORMAT_RXY1       COMPARE (64<-32)
-	op_CGFI    uint32 = 0xC20C // FORMAT_RIL1       COMPARE IMMEDIATE (64<-32)
-	op_CGFR    uint32 = 0xB930 // FORMAT_RRE        COMPARE (64<-32)
-	op_CGFRL   uint32 = 0xC60C // FORMAT_RIL2       COMPARE RELATIVE LONG (64<-32)
-	op_CGH     uint32 = 0xE334 // FORMAT_RXY1       COMPARE HALFWORD (64<-16)
-	op_CGHI    uint32 = 0xA70F // FORMAT_RI1        COMPARE HALFWORD IMMEDIATE (64<-16)
-	op_CGHRL   uint32 = 0xC604 // FORMAT_RIL2       COMPARE HALFWORD RELATIVE LONG (64<-16)
-	op_CGHSI   uint32 = 0xE558 // FORMAT_SIL        COMPARE HALFWORD IMMEDIATE (64<-16)
-	op_CGIB    uint32 = 0xECFC // FORMAT_RIS        COMPARE IMMEDIATE AND BRANCH (64<-8)
-	op_CGIJ    uint32 = 0xEC7C // FORMAT_RIE3       COMPARE IMMEDIATE AND BRANCH RELATIVE (64<-8)
-	op_CGIT    uint32 = 0xEC70 // FORMAT_RIE1       COMPARE IMMEDIATE AND TRAP (64<-16)
-	op_CGR     uint32 = 0xB920 // FORMAT_RRE        COMPARE (64)
-	op_CGRB    uint32 = 0xECE4 // FORMAT_RRS        COMPARE AND BRANCH (64)
-	op_CGRJ    uint32 = 0xEC64 // FORMAT_RIE2       COMPARE AND BRANCH RELATIVE (64)
-	op_CGRL    uint32 = 0xC608 // FORMAT_RIL2       COMPARE RELATIVE LONG (64)
-	op_CGRT    uint32 = 0xB960 // FORMAT_RRF3       COMPARE AND TRAP (64)
-	op_CGXBR   uint32 = 0xB3AA // FORMAT_RRF5       CONVERT TO FIXED (extended BFP to 64)
-	op_CGXBRA  uint32 = 0xB3AA // FORMAT_RRF5       CONVERT TO FIXED (extended BFP to 64)
-	op_CGXR    uint32 = 0xB3CA // FORMAT_RRF5       CONVERT TO FIXED (extended HFP to 64)
-	op_CGXTR   uint32 = 0xB3E9 // FORMAT_RRF5       CONVERT TO FIXED (extended DFP to 64)
-	op_CGXTRA  uint32 = 0xB3E9 // FORMAT_RRF5       CONVERT TO FIXED (extended DFP to 64)
-	op_CH      uint32 = 0x4900 // FORMAT_RX1        COMPARE HALFWORD (32<-16)
-	op_CHF     uint32 = 0xE3CD // FORMAT_RXY1       COMPARE HIGH (32)
-	op_CHHR    uint32 = 0xB9CD // FORMAT_RRE        COMPARE HIGH (32)
-	op_CHHSI   uint32 = 0xE554 // FORMAT_SIL        COMPARE HALFWORD IMMEDIATE (16)
-	op_CHI     uint32 = 0xA70E // FORMAT_RI1        COMPARE HALFWORD IMMEDIATE (32<-16)
-	op_CHLR    uint32 = 0xB9DD // FORMAT_RRE        COMPARE HIGH (32)
-	op_CHRL    uint32 = 0xC605 // FORMAT_RIL2       COMPARE HALFWORD RELATIVE LONG (32<-16)
-	op_CHSI    uint32 = 0xE55C // FORMAT_SIL        COMPARE HALFWORD IMMEDIATE (32<-16)
-	op_CHY     uint32 = 0xE379 // FORMAT_RXY1       COMPARE HALFWORD (32<-16)
-	op_CIB     uint32 = 0xECFE // FORMAT_RIS        COMPARE IMMEDIATE AND BRANCH (32<-8)
-	op_CIH     uint32 = 0xCC0D // FORMAT_RIL1       COMPARE IMMEDIATE HIGH (32)
-	op_CIJ     uint32 = 0xEC7E // FORMAT_RIE3       COMPARE IMMEDIATE AND BRANCH RELATIVE (32<-8)
-	op_CIT     uint32 = 0xEC72 // FORMAT_RIE1       COMPARE IMMEDIATE AND TRAP (32<-16)
-	op_CKSM    uint32 = 0xB241 // FORMAT_RRE        CHECKSUM
-	op_CL      uint32 = 0x5500 // FORMAT_RX1        COMPARE LOGICAL (32)
-	op_CLC     uint32 = 0xD500 // FORMAT_SS1        COMPARE LOGICAL (character)
-	op_CLCL    uint32 = 0x0F00 // FORMAT_RR         COMPARE LOGICAL LONG
-	op_CLCLE   uint32 = 0xA900 // FORMAT_RS1        COMPARE LOGICAL LONG EXTENDED
-	op_CLCLU   uint32 = 0xEB8F // FORMAT_RSY1       COMPARE LOGICAL LONG UNICODE
-	op_CLFDBR  uint32 = 0xB39D // FORMAT_RRF5       CONVERT TO LOGICAL (long BFP to 32)
-	op_CLFDTR  uint32 = 0xB943 // FORMAT_RRF5       CONVERT TO LOGICAL (long DFP to 32)
-	op_CLFEBR  uint32 = 0xB39C // FORMAT_RRF5       CONVERT TO LOGICAL (short BFP to 32)
-	op_CLFHSI  uint32 = 0xE55D // FORMAT_SIL        COMPARE LOGICAL IMMEDIATE (32<-16)
-	op_CLFI    uint32 = 0xC20F // FORMAT_RIL1       COMPARE LOGICAL IMMEDIATE (32)
-	op_CLFIT   uint32 = 0xEC73 // FORMAT_RIE1       COMPARE LOGICAL IMMEDIATE AND TRAP (32<-16)
-	op_CLFXBR  uint32 = 0xB39E // FORMAT_RRF5       CONVERT TO LOGICAL (extended BFP to 32)
-	op_CLFXTR  uint32 = 0xB94B // FORMAT_RRF5       CONVERT TO LOGICAL (extended DFP to 32)
-	op_CLG     uint32 = 0xE321 // FORMAT_RXY1       COMPARE LOGICAL (64)
-	op_CLGDBR  uint32 = 0xB3AD // FORMAT_RRF5       CONVERT TO LOGICAL (long BFP to 64)
-	op_CLGDTR  uint32 = 0xB942 // FORMAT_RRF5       CONVERT TO LOGICAL (long DFP to 64)
-	op_CLGEBR  uint32 = 0xB3AC // FORMAT_RRF5       CONVERT TO LOGICAL (short BFP to 64)
-	op_CLGF    uint32 = 0xE331 // FORMAT_RXY1       COMPARE LOGICAL (64<-32)
-	op_CLGFI   uint32 = 0xC20E // FORMAT_RIL1       COMPARE LOGICAL IMMEDIATE (64<-32)
-	op_CLGFR   uint32 = 0xB931 // FORMAT_RRE        COMPARE LOGICAL (64<-32)
-	op_CLGFRL  uint32 = 0xC60E // FORMAT_RIL2       COMPARE LOGICAL RELATIVE LONG (64<-32)
-	op_CLGHRL  uint32 = 0xC606 // FORMAT_RIL2       COMPARE LOGICAL RELATIVE LONG (64<-16)
-	op_CLGHSI  uint32 = 0xE559 // FORMAT_SIL        COMPARE LOGICAL IMMEDIATE (64<-16)
-	op_CLGIB   uint32 = 0xECFD // FORMAT_RIS        COMPARE LOGICAL IMMEDIATE AND BRANCH (64<-8)
-	op_CLGIJ   uint32 = 0xEC7D // FORMAT_RIE3       COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE (64<-8)
-	op_CLGIT   uint32 = 0xEC71 // FORMAT_RIE1       COMPARE LOGICAL IMMEDIATE AND TRAP (64<-16)
-	op_CLGR    uint32 = 0xB921 // FORMAT_RRE        COMPARE LOGICAL (64)
-	op_CLGRB   uint32 = 0xECE5 // FORMAT_RRS        COMPARE LOGICAL AND BRANCH (64)
-	op_CLGRJ   uint32 = 0xEC65 // FORMAT_RIE2       COMPARE LOGICAL AND BRANCH RELATIVE (64)
-	op_CLGRL   uint32 = 0xC60A // FORMAT_RIL2       COMPARE LOGICAL RELATIVE LONG (64)
-	op_CLGRT   uint32 = 0xB961 // FORMAT_RRF3       COMPARE LOGICAL AND TRAP (64)
-	op_CLGT    uint32 = 0xEB2B // FORMAT_RSY2       COMPARE LOGICAL AND TRAP (64)
-	op_CLGXBR  uint32 = 0xB3AE // FORMAT_RRF5       CONVERT TO LOGICAL (extended BFP to 64)
-	op_CLGXTR  uint32 = 0xB94A // FORMAT_RRF5       CONVERT TO LOGICAL (extended DFP to 64)
-	op_CLHF    uint32 = 0xE3CF // FORMAT_RXY1       COMPARE LOGICAL HIGH (32)
-	op_CLHHR   uint32 = 0xB9CF // FORMAT_RRE        COMPARE LOGICAL HIGH (32)
-	op_CLHHSI  uint32 = 0xE555 // FORMAT_SIL        COMPARE LOGICAL IMMEDIATE (16)
-	op_CLHLR   uint32 = 0xB9DF // FORMAT_RRE        COMPARE LOGICAL HIGH (32)
-	op_CLHRL   uint32 = 0xC607 // FORMAT_RIL2       COMPARE LOGICAL RELATIVE LONG (32<-16)
-	op_CLI     uint32 = 0x9500 // FORMAT_SI         COMPARE LOGICAL (immediate)
-	op_CLIB    uint32 = 0xECFF // FORMAT_RIS        COMPARE LOGICAL IMMEDIATE AND BRANCH (32<-8)
-	op_CLIH    uint32 = 0xCC0F // FORMAT_RIL1       COMPARE LOGICAL IMMEDIATE HIGH (32)
-	op_CLIJ    uint32 = 0xEC7F // FORMAT_RIE3       COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE (32<-8)
-	op_CLIY    uint32 = 0xEB55 // FORMAT_SIY        COMPARE LOGICAL (immediate)
-	op_CLM     uint32 = 0xBD00 // FORMAT_RS2        COMPARE LOGICAL CHAR. UNDER MASK (low)
-	op_CLMH    uint32 = 0xEB20 // FORMAT_RSY2       COMPARE LOGICAL CHAR. UNDER MASK (high)
-	op_CLMY    uint32 = 0xEB21 // FORMAT_RSY2       COMPARE LOGICAL CHAR. UNDER MASK (low)
-	op_CLR     uint32 = 0x1500 // FORMAT_RR         COMPARE LOGICAL (32)
-	op_CLRB    uint32 = 0xECF7 // FORMAT_RRS        COMPARE LOGICAL AND BRANCH (32)
-	op_CLRJ    uint32 = 0xEC77 // FORMAT_RIE2       COMPARE LOGICAL AND BRANCH RELATIVE (32)
-	op_CLRL    uint32 = 0xC60F // FORMAT_RIL2       COMPARE LOGICAL RELATIVE LONG (32)
-	op_CLRT    uint32 = 0xB973 // FORMAT_RRF3       COMPARE LOGICAL AND TRAP (32)
-	op_CLST    uint32 = 0xB25D // FORMAT_RRE        COMPARE LOGICAL STRING
-	op_CLT     uint32 = 0xEB23 // FORMAT_RSY2       COMPARE LOGICAL AND TRAP (32)
-	op_CLY     uint32 = 0xE355 // FORMAT_RXY1       COMPARE LOGICAL (32)
-	op_CMPSC   uint32 = 0xB263 // FORMAT_RRE        COMPRESSION CALL
-	op_CP      uint32 = 0xF900 // FORMAT_SS2        COMPARE DECIMAL
-	op_CPSDR   uint32 = 0xB372 // FORMAT_RRF2       COPY SIGN (long)
-	op_CPYA    uint32 = 0xB24D // FORMAT_RRE        COPY ACCESS
-	op_CR      uint32 = 0x1900 // FORMAT_RR         COMPARE (32)
-	op_CRB     uint32 = 0xECF6 // FORMAT_RRS        COMPARE AND BRANCH (32)
-	op_CRDTE   uint32 = 0xB98F // FORMAT_RRF2       COMPARE AND REPLACE DAT TABLE ENTRY
-	op_CRJ     uint32 = 0xEC76 // FORMAT_RIE2       COMPARE AND BRANCH RELATIVE (32)
-	op_CRL     uint32 = 0xC60D // FORMAT_RIL2       COMPARE RELATIVE LONG (32)
-	op_CRT     uint32 = 0xB972 // FORMAT_RRF3       COMPARE AND TRAP (32)
-	op_CS      uint32 = 0xBA00 // FORMAT_RS1        COMPARE AND SWAP (32)
-	op_CSCH    uint32 = 0xB230 // FORMAT_S          CLEAR SUBCHANNEL
-	op_CSDTR   uint32 = 0xB3E3 // FORMAT_RRF4       CONVERT TO SIGNED PACKED (long DFP to 64)
-	op_CSG     uint32 = 0xEB30 // FORMAT_RSY1       COMPARE AND SWAP (64)
-	op_CSP     uint32 = 0xB250 // FORMAT_RRE        COMPARE AND SWAP AND PURGE
-	op_CSPG    uint32 = 0xB98A // FORMAT_RRE        COMPARE AND SWAP AND PURGE
-	op_CSST    uint32 = 0xC802 // FORMAT_SSF        COMPARE AND SWAP AND STORE
-	op_CSXTR   uint32 = 0xB3EB // FORMAT_RRF4       CONVERT TO SIGNED PACKED (extended DFP to 128)
-	op_CSY     uint32 = 0xEB14 // FORMAT_RSY1       COMPARE AND SWAP (32)
-	op_CU12    uint32 = 0xB2A7 // FORMAT_RRF3       CONVERT UTF-8 TO UTF-16
-	op_CU14    uint32 = 0xB9B0 // FORMAT_RRF3       CONVERT UTF-8 TO UTF-32
-	op_CU21    uint32 = 0xB2A6 // FORMAT_RRF3       CONVERT UTF-16 TO UTF-8
-	op_CU24    uint32 = 0xB9B1 // FORMAT_RRF3       CONVERT UTF-16 TO UTF-32
-	op_CU41    uint32 = 0xB9B2 // FORMAT_RRE        CONVERT UTF-32 TO UTF-8
-	op_CU42    uint32 = 0xB9B3 // FORMAT_RRE        CONVERT UTF-32 TO UTF-16
-	op_CUDTR   uint32 = 0xB3E2 // FORMAT_RRE        CONVERT TO UNSIGNED PACKED (long DFP to 64)
-	op_CUSE    uint32 = 0xB257 // FORMAT_RRE        COMPARE UNTIL SUBSTRING EQUAL
-	op_CUTFU   uint32 = 0xB2A7 // FORMAT_RRF3       CONVERT UTF-8 TO UNICODE
-	op_CUUTF   uint32 = 0xB2A6 // FORMAT_RRF3       CONVERT UNICODE TO UTF-8
-	op_CUXTR   uint32 = 0xB3EA // FORMAT_RRE        CONVERT TO UNSIGNED PACKED (extended DFP to 128)
-	op_CVB     uint32 = 0x4F00 // FORMAT_RX1        CONVERT TO BINARY (32)
-	op_CVBG    uint32 = 0xE30E // FORMAT_RXY1       CONVERT TO BINARY (64)
-	op_CVBY    uint32 = 0xE306 // FORMAT_RXY1       CONVERT TO BINARY (32)
-	op_CVD     uint32 = 0x4E00 // FORMAT_RX1        CONVERT TO DECIMAL (32)
-	op_CVDG    uint32 = 0xE32E // FORMAT_RXY1       CONVERT TO DECIMAL (64)
-	op_CVDY    uint32 = 0xE326 // FORMAT_RXY1       CONVERT TO DECIMAL (32)
-	op_CXBR    uint32 = 0xB349 // FORMAT_RRE        COMPARE (extended BFP)
-	op_CXFBR   uint32 = 0xB396 // FORMAT_RRE        CONVERT FROM FIXED (32 to extended BFP)
-	op_CXFBRA  uint32 = 0xB396 // FORMAT_RRF5       CONVERT FROM FIXED (32 to extended BFP)
-	op_CXFR    uint32 = 0xB3B6 // FORMAT_RRE        CONVERT FROM FIXED (32 to extended HFP)
-	op_CXFTR   uint32 = 0xB959 // FORMAT_RRE        CONVERT FROM FIXED (32 to extended DFP)
-	op_CXGBR   uint32 = 0xB3A6 // FORMAT_RRE        CONVERT FROM FIXED (64 to extended BFP)
-	op_CXGBRA  uint32 = 0xB3A6 // FORMAT_RRF5       CONVERT FROM FIXED (64 to extended BFP)
-	op_CXGR    uint32 = 0xB3C6 // FORMAT_RRE        CONVERT FROM FIXED (64 to extended HFP)
-	op_CXGTR   uint32 = 0xB3F9 // FORMAT_RRE        CONVERT FROM FIXED (64 to extended DFP)
-	op_CXGTRA  uint32 = 0xB3F9 // FORMAT_RRF5       CONVERT FROM FIXED (64 to extended DFP)
-	op_CXLFBR  uint32 = 0xB392 // FORMAT_RRF5       CONVERT FROM LOGICAL (32 to extended BFP)
-	op_CXLFTR  uint32 = 0xB95B // FORMAT_RRF5       CONVERT FROM LOGICAL (32 to extended DFP)
-	op_CXLGBR  uint32 = 0xB3A2 // FORMAT_RRF5       CONVERT FROM LOGICAL (64 to extended BFP)
-	op_CXLGTR  uint32 = 0xB95A // FORMAT_RRF5       CONVERT FROM LOGICAL (64 to extended DFP)
-	op_CXR     uint32 = 0xB369 // FORMAT_RRE        COMPARE (extended HFP)
-	op_CXSTR   uint32 = 0xB3FB // FORMAT_RRE        CONVERT FROM SIGNED PACKED (128 to extended DFP)
-	op_CXTR    uint32 = 0xB3EC // FORMAT_RRE        COMPARE (extended DFP)
-	op_CXUTR   uint32 = 0xB3FA // FORMAT_RRE        CONVERT FROM UNSIGNED PACKED (128 to ext. DFP)
-	op_CXZT    uint32 = 0xEDAB // FORMAT_RSL        CONVERT FROM ZONED (to extended DFP)
-	op_CY      uint32 = 0xE359 // FORMAT_RXY1       COMPARE (32)
-	op_CZDT    uint32 = 0xEDA8 // FORMAT_RSL        CONVERT TO ZONED (from long DFP)
-	op_CZXT    uint32 = 0xEDA9 // FORMAT_RSL        CONVERT TO ZONED (from extended DFP)
-	op_D       uint32 = 0x5D00 // FORMAT_RX1        DIVIDE (32<-64)
-	op_DD      uint32 = 0x6D00 // FORMAT_RX1        DIVIDE (long HFP)
-	op_DDB     uint32 = 0xED1D // FORMAT_RXE        DIVIDE (long BFP)
-	op_DDBR    uint32 = 0xB31D // FORMAT_RRE        DIVIDE (long BFP)
-	op_DDR     uint32 = 0x2D00 // FORMAT_RR         DIVIDE (long HFP)
-	op_DDTR    uint32 = 0xB3D1 // FORMAT_RRF1       DIVIDE (long DFP)
-	op_DDTRA   uint32 = 0xB3D1 // FORMAT_RRF1       DIVIDE (long DFP)
-	op_DE      uint32 = 0x7D00 // FORMAT_RX1        DIVIDE (short HFP)
-	op_DEB     uint32 = 0xED0D // FORMAT_RXE        DIVIDE (short BFP)
-	op_DEBR    uint32 = 0xB30D // FORMAT_RRE        DIVIDE (short BFP)
-	op_DER     uint32 = 0x3D00 // FORMAT_RR         DIVIDE (short HFP)
-	op_DIDBR   uint32 = 0xB35B // FORMAT_RRF2       DIVIDE TO INTEGER (long BFP)
-	op_DIEBR   uint32 = 0xB353 // FORMAT_RRF2       DIVIDE TO INTEGER (short BFP)
-	op_DL      uint32 = 0xE397 // FORMAT_RXY1       DIVIDE LOGICAL (32<-64)
-	op_DLG     uint32 = 0xE387 // FORMAT_RXY1       DIVIDE LOGICAL (64<-128)
-	op_DLGR    uint32 = 0xB987 // FORMAT_RRE        DIVIDE LOGICAL (64<-128)
-	op_DLR     uint32 = 0xB997 // FORMAT_RRE        DIVIDE LOGICAL (32<-64)
-	op_DP      uint32 = 0xFD00 // FORMAT_SS2        DIVIDE DECIMAL
-	op_DR      uint32 = 0x1D00 // FORMAT_RR         DIVIDE (32<-64)
-	op_DSG     uint32 = 0xE30D // FORMAT_RXY1       DIVIDE SINGLE (64)
-	op_DSGF    uint32 = 0xE31D // FORMAT_RXY1       DIVIDE SINGLE (64<-32)
-	op_DSGFR   uint32 = 0xB91D // FORMAT_RRE        DIVIDE SINGLE (64<-32)
-	op_DSGR    uint32 = 0xB90D // FORMAT_RRE        DIVIDE SINGLE (64)
-	op_DXBR    uint32 = 0xB34D // FORMAT_RRE        DIVIDE (extended BFP)
-	op_DXR     uint32 = 0xB22D // FORMAT_RRE        DIVIDE (extended HFP)
-	op_DXTR    uint32 = 0xB3D9 // FORMAT_RRF1       DIVIDE (extended DFP)
-	op_DXTRA   uint32 = 0xB3D9 // FORMAT_RRF1       DIVIDE (extended DFP)
-	op_EAR     uint32 = 0xB24F // FORMAT_RRE        EXTRACT ACCESS
-	op_ECAG    uint32 = 0xEB4C // FORMAT_RSY1       EXTRACT CACHE ATTRIBUTE
-	op_ECTG    uint32 = 0xC801 // FORMAT_SSF        EXTRACT CPU TIME
-	op_ED      uint32 = 0xDE00 // FORMAT_SS1        EDIT
-	op_EDMK    uint32 = 0xDF00 // FORMAT_SS1        EDIT AND MARK
-	op_EEDTR   uint32 = 0xB3E5 // FORMAT_RRE        EXTRACT BIASED EXPONENT (long DFP to 64)
-	op_EEXTR   uint32 = 0xB3ED // FORMAT_RRE        EXTRACT BIASED EXPONENT (extended DFP to 64)
-	op_EFPC    uint32 = 0xB38C // FORMAT_RRE        EXTRACT FPC
-	op_EPAIR   uint32 = 0xB99A // FORMAT_RRE        EXTRACT PRIMARY ASN AND INSTANCE
-	op_EPAR    uint32 = 0xB226 // FORMAT_RRE        EXTRACT PRIMARY ASN
-	op_EPSW    uint32 = 0xB98D // FORMAT_RRE        EXTRACT PSW
-	op_EREG    uint32 = 0xB249 // FORMAT_RRE        EXTRACT STACKED REGISTERS (32)
-	op_EREGG   uint32 = 0xB90E // FORMAT_RRE        EXTRACT STACKED REGISTERS (64)
-	op_ESAIR   uint32 = 0xB99B // FORMAT_RRE        EXTRACT SECONDARY ASN AND INSTANCE
-	op_ESAR    uint32 = 0xB227 // FORMAT_RRE        EXTRACT SECONDARY ASN
-	op_ESDTR   uint32 = 0xB3E7 // FORMAT_RRE        EXTRACT SIGNIFICANCE (long DFP)
-	op_ESEA    uint32 = 0xB99D // FORMAT_RRE        EXTRACT AND SET EXTENDED AUTHORITY
-	op_ESTA    uint32 = 0xB24A // FORMAT_RRE        EXTRACT STACKED STATE
-	op_ESXTR   uint32 = 0xB3EF // FORMAT_RRE        EXTRACT SIGNIFICANCE (extended DFP)
-	op_ETND    uint32 = 0xB2EC // FORMAT_RRE        EXTRACT TRANSACTION NESTING DEPTH
-	op_EX      uint32 = 0x4400 // FORMAT_RX1        EXECUTE
-	op_EXRL    uint32 = 0xC600 // FORMAT_RIL2       EXECUTE RELATIVE LONG
-	op_FIDBR   uint32 = 0xB35F // FORMAT_RRF5       LOAD FP INTEGER (long BFP)
-	op_FIDBRA  uint32 = 0xB35F // FORMAT_RRF5       LOAD FP INTEGER (long BFP)
-	op_FIDR    uint32 = 0xB37F // FORMAT_RRE        LOAD FP INTEGER (long HFP)
-	op_FIDTR   uint32 = 0xB3D7 // FORMAT_RRF5       LOAD FP INTEGER (long DFP)
-	op_FIEBR   uint32 = 0xB357 // FORMAT_RRF5       LOAD FP INTEGER (short BFP)
-	op_FIEBRA  uint32 = 0xB357 // FORMAT_RRF5       LOAD FP INTEGER (short BFP)
-	op_FIER    uint32 = 0xB377 // FORMAT_RRE        LOAD FP INTEGER (short HFP)
-	op_FIXBR   uint32 = 0xB347 // FORMAT_RRF5       LOAD FP INTEGER (extended BFP)
-	op_FIXBRA  uint32 = 0xB347 // FORMAT_RRF5       LOAD FP INTEGER (extended BFP)
-	op_FIXR    uint32 = 0xB367 // FORMAT_RRE        LOAD FP INTEGER (extended HFP)
-	op_FIXTR   uint32 = 0xB3DF // FORMAT_RRF5       LOAD FP INTEGER (extended DFP)
-	op_FLOGR   uint32 = 0xB983 // FORMAT_RRE        FIND LEFTMOST ONE
-	op_HDR     uint32 = 0x2400 // FORMAT_RR         HALVE (long HFP)
-	op_HER     uint32 = 0x3400 // FORMAT_RR         HALVE (short HFP)
-	op_HSCH    uint32 = 0xB231 // FORMAT_S          HALT SUBCHANNEL
-	op_IAC     uint32 = 0xB224 // FORMAT_RRE        INSERT ADDRESS SPACE CONTROL
-	op_IC      uint32 = 0x4300 // FORMAT_RX1        INSERT CHARACTER
-	op_ICM     uint32 = 0xBF00 // FORMAT_RS2        INSERT CHARACTERS UNDER MASK (low)
-	op_ICMH    uint32 = 0xEB80 // FORMAT_RSY2       INSERT CHARACTERS UNDER MASK (high)
-	op_ICMY    uint32 = 0xEB81 // FORMAT_RSY2       INSERT CHARACTERS UNDER MASK (low)
-	op_ICY     uint32 = 0xE373 // FORMAT_RXY1       INSERT CHARACTER
-	op_IDTE    uint32 = 0xB98E // FORMAT_RRF2       INVALIDATE DAT TABLE ENTRY
-	op_IEDTR   uint32 = 0xB3F6 // FORMAT_RRF2       INSERT BIASED EXPONENT (64 to long DFP)
-	op_IEXTR   uint32 = 0xB3FE // FORMAT_RRF2       INSERT BIASED EXPONENT (64 to extended DFP)
-	op_IIHF    uint32 = 0xC008 // FORMAT_RIL1       INSERT IMMEDIATE (high)
-	op_IIHH    uint32 = 0xA500 // FORMAT_RI1        INSERT IMMEDIATE (high high)
-	op_IIHL    uint32 = 0xA501 // FORMAT_RI1        INSERT IMMEDIATE (high low)
-	op_IILF    uint32 = 0xC009 // FORMAT_RIL1       INSERT IMMEDIATE (low)
-	op_IILH    uint32 = 0xA502 // FORMAT_RI1        INSERT IMMEDIATE (low high)
-	op_IILL    uint32 = 0xA503 // FORMAT_RI1        INSERT IMMEDIATE (low low)
-	op_IPK     uint32 = 0xB20B // FORMAT_S          INSERT PSW KEY
-	op_IPM     uint32 = 0xB222 // FORMAT_RRE        INSERT PROGRAM MASK
-	op_IPTE    uint32 = 0xB221 // FORMAT_RRF1       INVALIDATE PAGE TABLE ENTRY
-	op_ISKE    uint32 = 0xB229 // FORMAT_RRE        INSERT STORAGE KEY EXTENDED
-	op_IVSK    uint32 = 0xB223 // FORMAT_RRE        INSERT VIRTUAL STORAGE KEY
-	op_KDB     uint32 = 0xED18 // FORMAT_RXE        COMPARE AND SIGNAL (long BFP)
-	op_KDBR    uint32 = 0xB318 // FORMAT_RRE        COMPARE AND SIGNAL (long BFP)
-	op_KDTR    uint32 = 0xB3E0 // FORMAT_RRE        COMPARE AND SIGNAL (long DFP)
-	op_KEB     uint32 = 0xED08 // FORMAT_RXE        COMPARE AND SIGNAL (short BFP)
-	op_KEBR    uint32 = 0xB308 // FORMAT_RRE        COMPARE AND SIGNAL (short BFP)
-	op_KIMD    uint32 = 0xB93E // FORMAT_RRE        COMPUTE INTERMEDIATE MESSAGE DIGEST
-	op_KLMD    uint32 = 0xB93F // FORMAT_RRE        COMPUTE LAST MESSAGE DIGEST
-	op_KM      uint32 = 0xB92E // FORMAT_RRE        CIPHER MESSAGE
-	op_KMAC    uint32 = 0xB91E // FORMAT_RRE        COMPUTE MESSAGE AUTHENTICATION CODE
-	op_KMC     uint32 = 0xB92F // FORMAT_RRE        CIPHER MESSAGE WITH CHAINING
-	op_KMCTR   uint32 = 0xB92D // FORMAT_RRF2       CIPHER MESSAGE WITH COUNTER
-	op_KMF     uint32 = 0xB92A // FORMAT_RRE        CIPHER MESSAGE WITH CFB
-	op_KMO     uint32 = 0xB92B // FORMAT_RRE        CIPHER MESSAGE WITH OFB
-	op_KXBR    uint32 = 0xB348 // FORMAT_RRE        COMPARE AND SIGNAL (extended BFP)
-	op_KXTR    uint32 = 0xB3E8 // FORMAT_RRE        COMPARE AND SIGNAL (extended DFP)
-	op_L       uint32 = 0x5800 // FORMAT_RX1        LOAD (32)
-	op_LA      uint32 = 0x4100 // FORMAT_RX1        LOAD ADDRESS
-	op_LAA     uint32 = 0xEBF8 // FORMAT_RSY1       LOAD AND ADD (32)
-	op_LAAG    uint32 = 0xEBE8 // FORMAT_RSY1       LOAD AND ADD (64)
-	op_LAAL    uint32 = 0xEBFA // FORMAT_RSY1       LOAD AND ADD LOGICAL (32)
-	op_LAALG   uint32 = 0xEBEA // FORMAT_RSY1       LOAD AND ADD LOGICAL (64)
-	op_LAE     uint32 = 0x5100 // FORMAT_RX1        LOAD ADDRESS EXTENDED
-	op_LAEY    uint32 = 0xE375 // FORMAT_RXY1       LOAD ADDRESS EXTENDED
-	op_LAM     uint32 = 0x9A00 // FORMAT_RS1        LOAD ACCESS MULTIPLE
-	op_LAMY    uint32 = 0xEB9A // FORMAT_RSY1       LOAD ACCESS MULTIPLE
-	op_LAN     uint32 = 0xEBF4 // FORMAT_RSY1       LOAD AND AND (32)
-	op_LANG    uint32 = 0xEBE4 // FORMAT_RSY1       LOAD AND AND (64)
-	op_LAO     uint32 = 0xEBF6 // FORMAT_RSY1       LOAD AND OR (32)
-	op_LAOG    uint32 = 0xEBE6 // FORMAT_RSY1       LOAD AND OR (64)
-	op_LARL    uint32 = 0xC000 // FORMAT_RIL2       LOAD ADDRESS RELATIVE LONG
-	op_LASP    uint32 = 0xE500 // FORMAT_SSE        LOAD ADDRESS SPACE PARAMETERS
-	op_LAT     uint32 = 0xE39F // FORMAT_RXY1       LOAD AND TRAP (32L<-32)
-	op_LAX     uint32 = 0xEBF7 // FORMAT_RSY1       LOAD AND EXCLUSIVE OR (32)
-	op_LAXG    uint32 = 0xEBE7 // FORMAT_RSY1       LOAD AND EXCLUSIVE OR (64)
-	op_LAY     uint32 = 0xE371 // FORMAT_RXY1       LOAD ADDRESS
-	op_LB      uint32 = 0xE376 // FORMAT_RXY1       LOAD BYTE (32)
-	op_LBH     uint32 = 0xE3C0 // FORMAT_RXY1       LOAD BYTE HIGH (32<-8)
-	op_LBR     uint32 = 0xB926 // FORMAT_RRE        LOAD BYTE (32)
-	op_LCDBR   uint32 = 0xB313 // FORMAT_RRE        LOAD COMPLEMENT (long BFP)
-	op_LCDFR   uint32 = 0xB373 // FORMAT_RRE        LOAD COMPLEMENT (long)
-	op_LCDR    uint32 = 0x2300 // FORMAT_RR         LOAD COMPLEMENT (long HFP)
-	op_LCEBR   uint32 = 0xB303 // FORMAT_RRE        LOAD COMPLEMENT (short BFP)
-	op_LCER    uint32 = 0x3300 // FORMAT_RR         LOAD COMPLEMENT (short HFP)
-	op_LCGFR   uint32 = 0xB913 // FORMAT_RRE        LOAD COMPLEMENT (64<-32)
-	op_LCGR    uint32 = 0xB903 // FORMAT_RRE        LOAD COMPLEMENT (64)
-	op_LCR     uint32 = 0x1300 // FORMAT_RR         LOAD COMPLEMENT (32)
-	op_LCTL    uint32 = 0xB700 // FORMAT_RS1        LOAD CONTROL (32)
-	op_LCTLG   uint32 = 0xEB2F // FORMAT_RSY1       LOAD CONTROL (64)
-	op_LCXBR   uint32 = 0xB343 // FORMAT_RRE        LOAD COMPLEMENT (extended BFP)
-	op_LCXR    uint32 = 0xB363 // FORMAT_RRE        LOAD COMPLEMENT (extended HFP)
-	op_LD      uint32 = 0x6800 // FORMAT_RX1        LOAD (long)
-	op_LDE     uint32 = 0xED24 // FORMAT_RXE        LOAD LENGTHENED (short to long HFP)
-	op_LDEB    uint32 = 0xED04 // FORMAT_RXE        LOAD LENGTHENED (short to long BFP)
-	op_LDEBR   uint32 = 0xB304 // FORMAT_RRE        LOAD LENGTHENED (short to long BFP)
-	op_LDER    uint32 = 0xB324 // FORMAT_RRE        LOAD LENGTHENED (short to long HFP)
-	op_LDETR   uint32 = 0xB3D4 // FORMAT_RRF4       LOAD LENGTHENED (short to long DFP)
-	op_LDGR    uint32 = 0xB3C1 // FORMAT_RRE        LOAD FPR FROM GR (64 to long)
-	op_LDR     uint32 = 0x2800 // FORMAT_RR         LOAD (long)
-	op_LDXBR   uint32 = 0xB345 // FORMAT_RRE        LOAD ROUNDED (extended to long BFP)
-	op_LDXBRA  uint32 = 0xB345 // FORMAT_RRF5       LOAD ROUNDED (extended to long BFP)
-	op_LDXR    uint32 = 0x2500 // FORMAT_RR         LOAD ROUNDED (extended to long HFP)
-	op_LDXTR   uint32 = 0xB3DD // FORMAT_RRF5       LOAD ROUNDED (extended to long DFP)
-	op_LDY     uint32 = 0xED65 // FORMAT_RXY1       LOAD (long)
-	op_LE      uint32 = 0x7800 // FORMAT_RX1        LOAD (short)
-	op_LEDBR   uint32 = 0xB344 // FORMAT_RRE        LOAD ROUNDED (long to short BFP)
-	op_LEDBRA  uint32 = 0xB344 // FORMAT_RRF5       LOAD ROUNDED (long to short BFP)
-	op_LEDR    uint32 = 0x3500 // FORMAT_RR         LOAD ROUNDED (long to short HFP)
-	op_LEDTR   uint32 = 0xB3D5 // FORMAT_RRF5       LOAD ROUNDED (long to short DFP)
-	op_LER     uint32 = 0x3800 // FORMAT_RR         LOAD (short)
-	op_LEXBR   uint32 = 0xB346 // FORMAT_RRE        LOAD ROUNDED (extended to short BFP)
-	op_LEXBRA  uint32 = 0xB346 // FORMAT_RRF5       LOAD ROUNDED (extended to short BFP)
-	op_LEXR    uint32 = 0xB366 // FORMAT_RRE        LOAD ROUNDED (extended to short HFP)
-	op_LEY     uint32 = 0xED64 // FORMAT_RXY1       LOAD (short)
-	op_LFAS    uint32 = 0xB2BD // FORMAT_S          LOAD FPC AND SIGNAL
-	op_LFH     uint32 = 0xE3CA // FORMAT_RXY1       LOAD HIGH (32)
-	op_LFHAT   uint32 = 0xE3C8 // FORMAT_RXY1       LOAD HIGH AND TRAP (32H<-32)
-	op_LFPC    uint32 = 0xB29D // FORMAT_S          LOAD FPC
-	op_LG      uint32 = 0xE304 // FORMAT_RXY1       LOAD (64)
-	op_LGAT    uint32 = 0xE385 // FORMAT_RXY1       LOAD AND TRAP (64)
-	op_LGB     uint32 = 0xE377 // FORMAT_RXY1       LOAD BYTE (64)
-	op_LGBR    uint32 = 0xB906 // FORMAT_RRE        LOAD BYTE (64)
-	op_LGDR    uint32 = 0xB3CD // FORMAT_RRE        LOAD GR FROM FPR (long to 64)
-	op_LGF     uint32 = 0xE314 // FORMAT_RXY1       LOAD (64<-32)
-	op_LGFI    uint32 = 0xC001 // FORMAT_RIL1       LOAD IMMEDIATE (64<-32)
-	op_LGFR    uint32 = 0xB914 // FORMAT_RRE        LOAD (64<-32)
-	op_LGFRL   uint32 = 0xC40C // FORMAT_RIL2       LOAD RELATIVE LONG (64<-32)
-	op_LGH     uint32 = 0xE315 // FORMAT_RXY1       LOAD HALFWORD (64)
-	op_LGHI    uint32 = 0xA709 // FORMAT_RI1        LOAD HALFWORD IMMEDIATE (64)
-	op_LGHR    uint32 = 0xB907 // FORMAT_RRE        LOAD HALFWORD (64)
-	op_LGHRL   uint32 = 0xC404 // FORMAT_RIL2       LOAD HALFWORD RELATIVE LONG (64<-16)
-	op_LGR     uint32 = 0xB904 // FORMAT_RRE        LOAD (64)
-	op_LGRL    uint32 = 0xC408 // FORMAT_RIL2       LOAD RELATIVE LONG (64)
-	op_LH      uint32 = 0x4800 // FORMAT_RX1        LOAD HALFWORD (32)
-	op_LHH     uint32 = 0xE3C4 // FORMAT_RXY1       LOAD HALFWORD HIGH (32<-16)
-	op_LHI     uint32 = 0xA708 // FORMAT_RI1        LOAD HALFWORD IMMEDIATE (32)
-	op_LHR     uint32 = 0xB927 // FORMAT_RRE        LOAD HALFWORD (32)
-	op_LHRL    uint32 = 0xC405 // FORMAT_RIL2       LOAD HALFWORD RELATIVE LONG (32<-16)
-	op_LHY     uint32 = 0xE378 // FORMAT_RXY1       LOAD HALFWORD (32)
-	op_LLC     uint32 = 0xE394 // FORMAT_RXY1       LOAD LOGICAL CHARACTER (32)
-	op_LLCH    uint32 = 0xE3C2 // FORMAT_RXY1       LOAD LOGICAL CHARACTER HIGH (32<-8)
-	op_LLCR    uint32 = 0xB994 // FORMAT_RRE        LOAD LOGICAL CHARACTER (32)
-	op_LLGC    uint32 = 0xE390 // FORMAT_RXY1       LOAD LOGICAL CHARACTER (64)
-	op_LLGCR   uint32 = 0xB984 // FORMAT_RRE        LOAD LOGICAL CHARACTER (64)
-	op_LLGF    uint32 = 0xE316 // FORMAT_RXY1       LOAD LOGICAL (64<-32)
-	op_LLGFAT  uint32 = 0xE39D // FORMAT_RXY1       LOAD LOGICAL AND TRAP (64<-32)
-	op_LLGFR   uint32 = 0xB916 // FORMAT_RRE        LOAD LOGICAL (64<-32)
-	op_LLGFRL  uint32 = 0xC40E // FORMAT_RIL2       LOAD LOGICAL RELATIVE LONG (64<-32)
-	op_LLGH    uint32 = 0xE391 // FORMAT_RXY1       LOAD LOGICAL HALFWORD (64)
-	op_LLGHR   uint32 = 0xB985 // FORMAT_RRE        LOAD LOGICAL HALFWORD (64)
-	op_LLGHRL  uint32 = 0xC406 // FORMAT_RIL2       LOAD LOGICAL HALFWORD RELATIVE LONG (64<-16)
-	op_LLGT    uint32 = 0xE317 // FORMAT_RXY1       LOAD LOGICAL THIRTY ONE BITS
-	op_LLGTAT  uint32 = 0xE39C // FORMAT_RXY1       LOAD LOGICAL THIRTY ONE BITS AND TRAP (64<-31)
-	op_LLGTR   uint32 = 0xB917 // FORMAT_RRE        LOAD LOGICAL THIRTY ONE BITS
-	op_LLH     uint32 = 0xE395 // FORMAT_RXY1       LOAD LOGICAL HALFWORD (32)
-	op_LLHH    uint32 = 0xE3C6 // FORMAT_RXY1       LOAD LOGICAL HALFWORD HIGH (32<-16)
-	op_LLHR    uint32 = 0xB995 // FORMAT_RRE        LOAD LOGICAL HALFWORD (32)
-	op_LLHRL   uint32 = 0xC402 // FORMAT_RIL2       LOAD LOGICAL HALFWORD RELATIVE LONG (32<-16)
-	op_LLIHF   uint32 = 0xC00E // FORMAT_RIL1       LOAD LOGICAL IMMEDIATE (high)
-	op_LLIHH   uint32 = 0xA50C // FORMAT_RI1        LOAD LOGICAL IMMEDIATE (high high)
-	op_LLIHL   uint32 = 0xA50D // FORMAT_RI1        LOAD LOGICAL IMMEDIATE (high low)
-	op_LLILF   uint32 = 0xC00F // FORMAT_RIL1       LOAD LOGICAL IMMEDIATE (low)
-	op_LLILH   uint32 = 0xA50E // FORMAT_RI1        LOAD LOGICAL IMMEDIATE (low high)
-	op_LLILL   uint32 = 0xA50F // FORMAT_RI1        LOAD LOGICAL IMMEDIATE (low low)
-	op_LM      uint32 = 0x9800 // FORMAT_RS1        LOAD MULTIPLE (32)
-	op_LMD     uint32 = 0xEF00 // FORMAT_SS5        LOAD MULTIPLE DISJOINT
-	op_LMG     uint32 = 0xEB04 // FORMAT_RSY1       LOAD MULTIPLE (64)
-	op_LMH     uint32 = 0xEB96 // FORMAT_RSY1       LOAD MULTIPLE HIGH
-	op_LMY     uint32 = 0xEB98 // FORMAT_RSY1       LOAD MULTIPLE (32)
-	op_LNDBR   uint32 = 0xB311 // FORMAT_RRE        LOAD NEGATIVE (long BFP)
-	op_LNDFR   uint32 = 0xB371 // FORMAT_RRE        LOAD NEGATIVE (long)
-	op_LNDR    uint32 = 0x2100 // FORMAT_RR         LOAD NEGATIVE (long HFP)
-	op_LNEBR   uint32 = 0xB301 // FORMAT_RRE        LOAD NEGATIVE (short BFP)
-	op_LNER    uint32 = 0x3100 // FORMAT_RR         LOAD NEGATIVE (short HFP)
-	op_LNGFR   uint32 = 0xB911 // FORMAT_RRE        LOAD NEGATIVE (64<-32)
-	op_LNGR    uint32 = 0xB901 // FORMAT_RRE        LOAD NEGATIVE (64)
-	op_LNR     uint32 = 0x1100 // FORMAT_RR         LOAD NEGATIVE (32)
-	op_LNXBR   uint32 = 0xB341 // FORMAT_RRE        LOAD NEGATIVE (extended BFP)
-	op_LNXR    uint32 = 0xB361 // FORMAT_RRE        LOAD NEGATIVE (extended HFP)
-	op_LOC     uint32 = 0xEBF2 // FORMAT_RSY2       LOAD ON CONDITION (32)
-	op_LOCG    uint32 = 0xEBE2 // FORMAT_RSY2       LOAD ON CONDITION (64)
-	op_LOCGR   uint32 = 0xB9E2 // FORMAT_RRF3       LOAD ON CONDITION (64)
-	op_LOCR    uint32 = 0xB9F2 // FORMAT_RRF3       LOAD ON CONDITION (32)
-	op_LPD     uint32 = 0xC804 // FORMAT_SSF        LOAD PAIR DISJOINT (32)
-	op_LPDBR   uint32 = 0xB310 // FORMAT_RRE        LOAD POSITIVE (long BFP)
-	op_LPDFR   uint32 = 0xB370 // FORMAT_RRE        LOAD POSITIVE (long)
-	op_LPDG    uint32 = 0xC805 // FORMAT_SSF        LOAD PAIR DISJOINT (64)
-	op_LPDR    uint32 = 0x2000 // FORMAT_RR         LOAD POSITIVE (long HFP)
-	op_LPEBR   uint32 = 0xB300 // FORMAT_RRE        LOAD POSITIVE (short BFP)
-	op_LPER    uint32 = 0x3000 // FORMAT_RR         LOAD POSITIVE (short HFP)
-	op_LPGFR   uint32 = 0xB910 // FORMAT_RRE        LOAD POSITIVE (64<-32)
-	op_LPGR    uint32 = 0xB900 // FORMAT_RRE        LOAD POSITIVE (64)
-	op_LPQ     uint32 = 0xE38F // FORMAT_RXY1       LOAD PAIR FROM QUADWORD
-	op_LPR     uint32 = 0x1000 // FORMAT_RR         LOAD POSITIVE (32)
-	op_LPSW    uint32 = 0x8200 // FORMAT_S          LOAD PSW
-	op_LPSWE   uint32 = 0xB2B2 // FORMAT_S          LOAD PSW EXTENDED
-	op_LPTEA   uint32 = 0xB9AA // FORMAT_RRF2       LOAD PAGE TABLE ENTRY ADDRESS
-	op_LPXBR   uint32 = 0xB340 // FORMAT_RRE        LOAD POSITIVE (extended BFP)
-	op_LPXR    uint32 = 0xB360 // FORMAT_RRE        LOAD POSITIVE (extended HFP)
-	op_LR      uint32 = 0x1800 // FORMAT_RR         LOAD (32)
-	op_LRA     uint32 = 0xB100 // FORMAT_RX1        LOAD REAL ADDRESS (32)
-	op_LRAG    uint32 = 0xE303 // FORMAT_RXY1       LOAD REAL ADDRESS (64)
-	op_LRAY    uint32 = 0xE313 // FORMAT_RXY1       LOAD REAL ADDRESS (32)
-	op_LRDR    uint32 = 0x2500 // FORMAT_RR         LOAD ROUNDED (extended to long HFP)
-	op_LRER    uint32 = 0x3500 // FORMAT_RR         LOAD ROUNDED (long to short HFP)
-	op_LRL     uint32 = 0xC40D // FORMAT_RIL2       LOAD RELATIVE LONG (32)
-	op_LRV     uint32 = 0xE31E // FORMAT_RXY1       LOAD REVERSED (32)
-	op_LRVG    uint32 = 0xE30F // FORMAT_RXY1       LOAD REVERSED (64)
-	op_LRVGR   uint32 = 0xB90F // FORMAT_RRE        LOAD REVERSED (64)
-	op_LRVH    uint32 = 0xE31F // FORMAT_RXY1       LOAD REVERSED (16)
-	op_LRVR    uint32 = 0xB91F // FORMAT_RRE        LOAD REVERSED (32)
-	op_LT      uint32 = 0xE312 // FORMAT_RXY1       LOAD AND TEST (32)
-	op_LTDBR   uint32 = 0xB312 // FORMAT_RRE        LOAD AND TEST (long BFP)
-	op_LTDR    uint32 = 0x2200 // FORMAT_RR         LOAD AND TEST (long HFP)
-	op_LTDTR   uint32 = 0xB3D6 // FORMAT_RRE        LOAD AND TEST (long DFP)
-	op_LTEBR   uint32 = 0xB302 // FORMAT_RRE        LOAD AND TEST (short BFP)
-	op_LTER    uint32 = 0x3200 // FORMAT_RR         LOAD AND TEST (short HFP)
-	op_LTG     uint32 = 0xE302 // FORMAT_RXY1       LOAD AND TEST (64)
-	op_LTGF    uint32 = 0xE332 // FORMAT_RXY1       LOAD AND TEST (64<-32)
-	op_LTGFR   uint32 = 0xB912 // FORMAT_RRE        LOAD AND TEST (64<-32)
-	op_LTGR    uint32 = 0xB902 // FORMAT_RRE        LOAD AND TEST (64)
-	op_LTR     uint32 = 0x1200 // FORMAT_RR         LOAD AND TEST (32)
-	op_LTXBR   uint32 = 0xB342 // FORMAT_RRE        LOAD AND TEST (extended BFP)
-	op_LTXR    uint32 = 0xB362 // FORMAT_RRE        LOAD AND TEST (extended HFP)
-	op_LTXTR   uint32 = 0xB3DE // FORMAT_RRE        LOAD AND TEST (extended DFP)
-	op_LURA    uint32 = 0xB24B // FORMAT_RRE        LOAD USING REAL ADDRESS (32)
-	op_LURAG   uint32 = 0xB905 // FORMAT_RRE        LOAD USING REAL ADDRESS (64)
-	op_LXD     uint32 = 0xED25 // FORMAT_RXE        LOAD LENGTHENED (long to extended HFP)
-	op_LXDB    uint32 = 0xED05 // FORMAT_RXE        LOAD LENGTHENED (long to extended BFP)
-	op_LXDBR   uint32 = 0xB305 // FORMAT_RRE        LOAD LENGTHENED (long to extended BFP)
-	op_LXDR    uint32 = 0xB325 // FORMAT_RRE        LOAD LENGTHENED (long to extended HFP)
-	op_LXDTR   uint32 = 0xB3DC // FORMAT_RRF4       LOAD LENGTHENED (long to extended DFP)
-	op_LXE     uint32 = 0xED26 // FORMAT_RXE        LOAD LENGTHENED (short to extended HFP)
-	op_LXEB    uint32 = 0xED06 // FORMAT_RXE        LOAD LENGTHENED (short to extended BFP)
-	op_LXEBR   uint32 = 0xB306 // FORMAT_RRE        LOAD LENGTHENED (short to extended BFP)
-	op_LXER    uint32 = 0xB326 // FORMAT_RRE        LOAD LENGTHENED (short to extended HFP)
-	op_LXR     uint32 = 0xB365 // FORMAT_RRE        LOAD (extended)
-	op_LY      uint32 = 0xE358 // FORMAT_RXY1       LOAD (32)
-	op_LZDR    uint32 = 0xB375 // FORMAT_RRE        LOAD ZERO (long)
-	op_LZER    uint32 = 0xB374 // FORMAT_RRE        LOAD ZERO (short)
-	op_LZXR    uint32 = 0xB376 // FORMAT_RRE        LOAD ZERO (extended)
-	op_M       uint32 = 0x5C00 // FORMAT_RX1        MULTIPLY (64<-32)
-	op_MAD     uint32 = 0xED3E // FORMAT_RXF        MULTIPLY AND ADD (long HFP)
-	op_MADB    uint32 = 0xED1E // FORMAT_RXF        MULTIPLY AND ADD (long BFP)
-	op_MADBR   uint32 = 0xB31E // FORMAT_RRD        MULTIPLY AND ADD (long BFP)
-	op_MADR    uint32 = 0xB33E // FORMAT_RRD        MULTIPLY AND ADD (long HFP)
-	op_MAE     uint32 = 0xED2E // FORMAT_RXF        MULTIPLY AND ADD (short HFP)
-	op_MAEB    uint32 = 0xED0E // FORMAT_RXF        MULTIPLY AND ADD (short BFP)
-	op_MAEBR   uint32 = 0xB30E // FORMAT_RRD        MULTIPLY AND ADD (short BFP)
-	op_MAER    uint32 = 0xB32E // FORMAT_RRD        MULTIPLY AND ADD (short HFP)
-	op_MAY     uint32 = 0xED3A // FORMAT_RXF        MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)
-	op_MAYH    uint32 = 0xED3C // FORMAT_RXF        MULTIPLY AND ADD UNNRM. (long to ext. high HFP)
-	op_MAYHR   uint32 = 0xB33C // FORMAT_RRD        MULTIPLY AND ADD UNNRM. (long to ext. high HFP)
-	op_MAYL    uint32 = 0xED38 // FORMAT_RXF        MULTIPLY AND ADD UNNRM. (long to ext. low HFP)
-	op_MAYLR   uint32 = 0xB338 // FORMAT_RRD        MULTIPLY AND ADD UNNRM. (long to ext. low HFP)
-	op_MAYR    uint32 = 0xB33A // FORMAT_RRD        MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)
-	op_MC      uint32 = 0xAF00 // FORMAT_SI         MONITOR CALL
-	op_MD      uint32 = 0x6C00 // FORMAT_RX1        MULTIPLY (long HFP)
-	op_MDB     uint32 = 0xED1C // FORMAT_RXE        MULTIPLY (long BFP)
-	op_MDBR    uint32 = 0xB31C // FORMAT_RRE        MULTIPLY (long BFP)
-	op_MDE     uint32 = 0x7C00 // FORMAT_RX1        MULTIPLY (short to long HFP)
-	op_MDEB    uint32 = 0xED0C // FORMAT_RXE        MULTIPLY (short to long BFP)
-	op_MDEBR   uint32 = 0xB30C // FORMAT_RRE        MULTIPLY (short to long BFP)
-	op_MDER    uint32 = 0x3C00 // FORMAT_RR         MULTIPLY (short to long HFP)
-	op_MDR     uint32 = 0x2C00 // FORMAT_RR         MULTIPLY (long HFP)
-	op_MDTR    uint32 = 0xB3D0 // FORMAT_RRF1       MULTIPLY (long DFP)
-	op_MDTRA   uint32 = 0xB3D0 // FORMAT_RRF1       MULTIPLY (long DFP)
-	op_ME      uint32 = 0x7C00 // FORMAT_RX1        MULTIPLY (short to long HFP)
-	op_MEE     uint32 = 0xED37 // FORMAT_RXE        MULTIPLY (short HFP)
-	op_MEEB    uint32 = 0xED17 // FORMAT_RXE        MULTIPLY (short BFP)
-	op_MEEBR   uint32 = 0xB317 // FORMAT_RRE        MULTIPLY (short BFP)
-	op_MEER    uint32 = 0xB337 // FORMAT_RRE        MULTIPLY (short HFP)
-	op_MER     uint32 = 0x3C00 // FORMAT_RR         MULTIPLY (short to long HFP)
-	op_MFY     uint32 = 0xE35C // FORMAT_RXY1       MULTIPLY (64<-32)
-	op_MGHI    uint32 = 0xA70D // FORMAT_RI1        MULTIPLY HALFWORD IMMEDIATE (64)
-	op_MH      uint32 = 0x4C00 // FORMAT_RX1        MULTIPLY HALFWORD (32)
-	op_MHI     uint32 = 0xA70C // FORMAT_RI1        MULTIPLY HALFWORD IMMEDIATE (32)
-	op_MHY     uint32 = 0xE37C // FORMAT_RXY1       MULTIPLY HALFWORD (32)
-	op_ML      uint32 = 0xE396 // FORMAT_RXY1       MULTIPLY LOGICAL (64<-32)
-	op_MLG     uint32 = 0xE386 // FORMAT_RXY1       MULTIPLY LOGICAL (128<-64)
-	op_MLGR    uint32 = 0xB986 // FORMAT_RRE        MULTIPLY LOGICAL (128<-64)
-	op_MLR     uint32 = 0xB996 // FORMAT_RRE        MULTIPLY LOGICAL (64<-32)
-	op_MP      uint32 = 0xFC00 // FORMAT_SS2        MULTIPLY DECIMAL
-	op_MR      uint32 = 0x1C00 // FORMAT_RR         MULTIPLY (64<-32)
-	op_MS      uint32 = 0x7100 // FORMAT_RX1        MULTIPLY SINGLE (32)
-	op_MSCH    uint32 = 0xB232 // FORMAT_S          MODIFY SUBCHANNEL
-	op_MSD     uint32 = 0xED3F // FORMAT_RXF        MULTIPLY AND SUBTRACT (long HFP)
-	op_MSDB    uint32 = 0xED1F // FORMAT_RXF        MULTIPLY AND SUBTRACT (long BFP)
-	op_MSDBR   uint32 = 0xB31F // FORMAT_RRD        MULTIPLY AND SUBTRACT (long BFP)
-	op_MSDR    uint32 = 0xB33F // FORMAT_RRD        MULTIPLY AND SUBTRACT (long HFP)
-	op_MSE     uint32 = 0xED2F // FORMAT_RXF        MULTIPLY AND SUBTRACT (short HFP)
-	op_MSEB    uint32 = 0xED0F // FORMAT_RXF        MULTIPLY AND SUBTRACT (short BFP)
-	op_MSEBR   uint32 = 0xB30F // FORMAT_RRD        MULTIPLY AND SUBTRACT (short BFP)
-	op_MSER    uint32 = 0xB32F // FORMAT_RRD        MULTIPLY AND SUBTRACT (short HFP)
-	op_MSFI    uint32 = 0xC201 // FORMAT_RIL1       MULTIPLY SINGLE IMMEDIATE (32)
-	op_MSG     uint32 = 0xE30C // FORMAT_RXY1       MULTIPLY SINGLE (64)
-	op_MSGF    uint32 = 0xE31C // FORMAT_RXY1       MULTIPLY SINGLE (64<-32)
-	op_MSGFI   uint32 = 0xC200 // FORMAT_RIL1       MULTIPLY SINGLE IMMEDIATE (64<-32)
-	op_MSGFR   uint32 = 0xB91C // FORMAT_RRE        MULTIPLY SINGLE (64<-32)
-	op_MSGR    uint32 = 0xB90C // FORMAT_RRE        MULTIPLY SINGLE (64)
-	op_MSR     uint32 = 0xB252 // FORMAT_RRE        MULTIPLY SINGLE (32)
-	op_MSTA    uint32 = 0xB247 // FORMAT_RRE        MODIFY STACKED STATE
-	op_MSY     uint32 = 0xE351 // FORMAT_RXY1       MULTIPLY SINGLE (32)
-	op_MVC     uint32 = 0xD200 // FORMAT_SS1        MOVE (character)
-	op_MVCDK   uint32 = 0xE50F // FORMAT_SSE        MOVE WITH DESTINATION KEY
-	op_MVCIN   uint32 = 0xE800 // FORMAT_SS1        MOVE INVERSE
-	op_MVCK    uint32 = 0xD900 // FORMAT_SS4        MOVE WITH KEY
-	op_MVCL    uint32 = 0x0E00 // FORMAT_RR         MOVE LONG
-	op_MVCLE   uint32 = 0xA800 // FORMAT_RS1        MOVE LONG EXTENDED
-	op_MVCLU   uint32 = 0xEB8E // FORMAT_RSY1       MOVE LONG UNICODE
-	op_MVCOS   uint32 = 0xC800 // FORMAT_SSF        MOVE WITH OPTIONAL SPECIFICATIONS
-	op_MVCP    uint32 = 0xDA00 // FORMAT_SS4        MOVE TO PRIMARY
-	op_MVCS    uint32 = 0xDB00 // FORMAT_SS4        MOVE TO SECONDARY
-	op_MVCSK   uint32 = 0xE50E // FORMAT_SSE        MOVE WITH SOURCE KEY
-	op_MVGHI   uint32 = 0xE548 // FORMAT_SIL        MOVE (64<-16)
-	op_MVHHI   uint32 = 0xE544 // FORMAT_SIL        MOVE (16<-16)
-	op_MVHI    uint32 = 0xE54C // FORMAT_SIL        MOVE (32<-16)
-	op_MVI     uint32 = 0x9200 // FORMAT_SI         MOVE (immediate)
-	op_MVIY    uint32 = 0xEB52 // FORMAT_SIY        MOVE (immediate)
-	op_MVN     uint32 = 0xD100 // FORMAT_SS1        MOVE NUMERICS
-	op_MVO     uint32 = 0xF100 // FORMAT_SS2        MOVE WITH OFFSET
-	op_MVPG    uint32 = 0xB254 // FORMAT_RRE        MOVE PAGE
-	op_MVST    uint32 = 0xB255 // FORMAT_RRE        MOVE STRING
-	op_MVZ     uint32 = 0xD300 // FORMAT_SS1        MOVE ZONES
-	op_MXBR    uint32 = 0xB34C // FORMAT_RRE        MULTIPLY (extended BFP)
-	op_MXD     uint32 = 0x6700 // FORMAT_RX1        MULTIPLY (long to extended HFP)
-	op_MXDB    uint32 = 0xED07 // FORMAT_RXE        MULTIPLY (long to extended BFP)
-	op_MXDBR   uint32 = 0xB307 // FORMAT_RRE        MULTIPLY (long to extended BFP)
-	op_MXDR    uint32 = 0x2700 // FORMAT_RR         MULTIPLY (long to extended HFP)
-	op_MXR     uint32 = 0x2600 // FORMAT_RR         MULTIPLY (extended HFP)
-	op_MXTR    uint32 = 0xB3D8 // FORMAT_RRF1       MULTIPLY (extended DFP)
-	op_MXTRA   uint32 = 0xB3D8 // FORMAT_RRF1       MULTIPLY (extended DFP)
-	op_MY      uint32 = 0xED3B // FORMAT_RXF        MULTIPLY UNNORMALIZED (long to ext. HFP)
-	op_MYH     uint32 = 0xED3D // FORMAT_RXF        MULTIPLY UNNORM. (long to ext. high HFP)
-	op_MYHR    uint32 = 0xB33D // FORMAT_RRD        MULTIPLY UNNORM. (long to ext. high HFP)
-	op_MYL     uint32 = 0xED39 // FORMAT_RXF        MULTIPLY UNNORM. (long to ext. low HFP)
-	op_MYLR    uint32 = 0xB339 // FORMAT_RRD        MULTIPLY UNNORM. (long to ext. low HFP)
-	op_MYR     uint32 = 0xB33B // FORMAT_RRD        MULTIPLY UNNORMALIZED (long to ext. HFP)
-	op_N       uint32 = 0x5400 // FORMAT_RX1        AND (32)
-	op_NC      uint32 = 0xD400 // FORMAT_SS1        AND (character)
-	op_NG      uint32 = 0xE380 // FORMAT_RXY1       AND (64)
-	op_NGR     uint32 = 0xB980 // FORMAT_RRE        AND (64)
-	op_NGRK    uint32 = 0xB9E4 // FORMAT_RRF1       AND (64)
-	op_NI      uint32 = 0x9400 // FORMAT_SI         AND (immediate)
-	op_NIAI    uint32 = 0xB2FA // FORMAT_IE         NEXT INSTRUCTION ACCESS INTENT
-	op_NIHF    uint32 = 0xC00A // FORMAT_RIL1       AND IMMEDIATE (high)
-	op_NIHH    uint32 = 0xA504 // FORMAT_RI1        AND IMMEDIATE (high high)
-	op_NIHL    uint32 = 0xA505 // FORMAT_RI1        AND IMMEDIATE (high low)
-	op_NILF    uint32 = 0xC00B // FORMAT_RIL1       AND IMMEDIATE (low)
-	op_NILH    uint32 = 0xA506 // FORMAT_RI1        AND IMMEDIATE (low high)
-	op_NILL    uint32 = 0xA507 // FORMAT_RI1        AND IMMEDIATE (low low)
-	op_NIY     uint32 = 0xEB54 // FORMAT_SIY        AND (immediate)
-	op_NR      uint32 = 0x1400 // FORMAT_RR         AND (32)
-	op_NRK     uint32 = 0xB9F4 // FORMAT_RRF1       AND (32)
-	op_NTSTG   uint32 = 0xE325 // FORMAT_RXY1       NONTRANSACTIONAL STORE
-	op_NY      uint32 = 0xE354 // FORMAT_RXY1       AND (32)
-	op_O       uint32 = 0x5600 // FORMAT_RX1        OR (32)
-	op_OC      uint32 = 0xD600 // FORMAT_SS1        OR (character)
-	op_OG      uint32 = 0xE381 // FORMAT_RXY1       OR (64)
-	op_OGR     uint32 = 0xB981 // FORMAT_RRE        OR (64)
-	op_OGRK    uint32 = 0xB9E6 // FORMAT_RRF1       OR (64)
-	op_OI      uint32 = 0x9600 // FORMAT_SI         OR (immediate)
-	op_OIHF    uint32 = 0xC00C // FORMAT_RIL1       OR IMMEDIATE (high)
-	op_OIHH    uint32 = 0xA508 // FORMAT_RI1        OR IMMEDIATE (high high)
-	op_OIHL    uint32 = 0xA509 // FORMAT_RI1        OR IMMEDIATE (high low)
-	op_OILF    uint32 = 0xC00D // FORMAT_RIL1       OR IMMEDIATE (low)
-	op_OILH    uint32 = 0xA50A // FORMAT_RI1        OR IMMEDIATE (low high)
-	op_OILL    uint32 = 0xA50B // FORMAT_RI1        OR IMMEDIATE (low low)
-	op_OIY     uint32 = 0xEB56 // FORMAT_SIY        OR (immediate)
-	op_OR      uint32 = 0x1600 // FORMAT_RR         OR (32)
-	op_ORK     uint32 = 0xB9F6 // FORMAT_RRF1       OR (32)
-	op_OY      uint32 = 0xE356 // FORMAT_RXY1       OR (32)
-	op_PACK    uint32 = 0xF200 // FORMAT_SS2        PACK
-	op_PALB    uint32 = 0xB248 // FORMAT_RRE        PURGE ALB
-	op_PC      uint32 = 0xB218 // FORMAT_S          PROGRAM CALL
-	op_PCC     uint32 = 0xB92C // FORMAT_RRE        PERFORM CRYPTOGRAPHIC COMPUTATION
-	op_PCKMO   uint32 = 0xB928 // FORMAT_RRE        PERFORM CRYPTOGRAPHIC KEY MGMT. OPERATIONS
-	op_PFD     uint32 = 0xE336 // FORMAT_RXY2       PREFETCH DATA
-	op_PFDRL   uint32 = 0xC602 // FORMAT_RIL3       PREFETCH DATA RELATIVE LONG
-	op_PFMF    uint32 = 0xB9AF // FORMAT_RRE        PERFORM FRAME MANAGEMENT FUNCTION
-	op_PFPO    uint32 = 0x010A // FORMAT_E          PERFORM FLOATING-POINT OPERATION
-	op_PGIN    uint32 = 0xB22E // FORMAT_RRE        PAGE IN
-	op_PGOUT   uint32 = 0xB22F // FORMAT_RRE        PAGE OUT
-	op_PKA     uint32 = 0xE900 // FORMAT_SS6        PACK ASCII
-	op_PKU     uint32 = 0xE100 // FORMAT_SS6        PACK UNICODE
-	op_PLO     uint32 = 0xEE00 // FORMAT_SS5        PERFORM LOCKED OPERATION
-	op_POPCNT  uint32 = 0xB9E1 // FORMAT_RRE        POPULATION COUNT
-	op_PPA     uint32 = 0xB2E8 // FORMAT_RRF3       PERFORM PROCESSOR ASSIST
-	op_PR      uint32 = 0x0101 // FORMAT_E          PROGRAM RETURN
-	op_PT      uint32 = 0xB228 // FORMAT_RRE        PROGRAM TRANSFER
-	op_PTF     uint32 = 0xB9A2 // FORMAT_RRE        PERFORM TOPOLOGY FUNCTION
-	op_PTFF    uint32 = 0x0104 // FORMAT_E          PERFORM TIMING FACILITY FUNCTION
-	op_PTI     uint32 = 0xB99E // FORMAT_RRE        PROGRAM TRANSFER WITH INSTANCE
-	op_PTLB    uint32 = 0xB20D // FORMAT_S          PURGE TLB
-	op_QADTR   uint32 = 0xB3F5 // FORMAT_RRF2       QUANTIZE (long DFP)
-	op_QAXTR   uint32 = 0xB3FD // FORMAT_RRF2       QUANTIZE (extended DFP)
-	op_RCHP    uint32 = 0xB23B // FORMAT_S          RESET CHANNEL PATH
-	op_RISBG   uint32 = 0xEC55 // FORMAT_RIE6       ROTATE THEN INSERT SELECTED BITS
-	op_RISBGN  uint32 = 0xEC59 // FORMAT_RIE6       ROTATE THEN INSERT SELECTED BITS
-	op_RISBHG  uint32 = 0xEC5D // FORMAT_RIE6       ROTATE THEN INSERT SELECTED BITS HIGH
-	op_RISBLG  uint32 = 0xEC51 // FORMAT_RIE6       ROTATE THEN INSERT SELECTED BITS LOW
-	op_RLL     uint32 = 0xEB1D // FORMAT_RSY1       ROTATE LEFT SINGLE LOGICAL (32)
-	op_RLLG    uint32 = 0xEB1C // FORMAT_RSY1       ROTATE LEFT SINGLE LOGICAL (64)
-	op_RNSBG   uint32 = 0xEC54 // FORMAT_RIE6       ROTATE THEN AND SELECTED BITS
-	op_ROSBG   uint32 = 0xEC56 // FORMAT_RIE6       ROTATE THEN OR SELECTED BITS
-	op_RP      uint32 = 0xB277 // FORMAT_S          RESUME PROGRAM
-	op_RRBE    uint32 = 0xB22A // FORMAT_RRE        RESET REFERENCE BIT EXTENDED
-	op_RRBM    uint32 = 0xB9AE // FORMAT_RRE        RESET REFERENCE BITS MULTIPLE
-	op_RRDTR   uint32 = 0xB3F7 // FORMAT_RRF2       REROUND (long DFP)
-	op_RRXTR   uint32 = 0xB3FF // FORMAT_RRF2       REROUND (extended DFP)
-	op_RSCH    uint32 = 0xB238 // FORMAT_S          RESUME SUBCHANNEL
-	op_RXSBG   uint32 = 0xEC57 // FORMAT_RIE6       ROTATE THEN EXCLUSIVE OR SELECTED BITS
-	op_S       uint32 = 0x5B00 // FORMAT_RX1        SUBTRACT (32)
-	op_SAC     uint32 = 0xB219 // FORMAT_S          SET ADDRESS SPACE CONTROL
-	op_SACF    uint32 = 0xB279 // FORMAT_S          SET ADDRESS SPACE CONTROL FAST
-	op_SAL     uint32 = 0xB237 // FORMAT_S          SET ADDRESS LIMIT
-	op_SAM24   uint32 = 0x010C // FORMAT_E          SET ADDRESSING MODE (24)
-	op_SAM31   uint32 = 0x010D // FORMAT_E          SET ADDRESSING MODE (31)
-	op_SAM64   uint32 = 0x010E // FORMAT_E          SET ADDRESSING MODE (64)
-	op_SAR     uint32 = 0xB24E // FORMAT_RRE        SET ACCESS
-	op_SCHM    uint32 = 0xB23C // FORMAT_S          SET CHANNEL MONITOR
-	op_SCK     uint32 = 0xB204 // FORMAT_S          SET CLOCK
-	op_SCKC    uint32 = 0xB206 // FORMAT_S          SET CLOCK COMPARATOR
-	op_SCKPF   uint32 = 0x0107 // FORMAT_E          SET CLOCK PROGRAMMABLE FIELD
-	op_SD      uint32 = 0x6B00 // FORMAT_RX1        SUBTRACT NORMALIZED (long HFP)
-	op_SDB     uint32 = 0xED1B // FORMAT_RXE        SUBTRACT (long BFP)
-	op_SDBR    uint32 = 0xB31B // FORMAT_RRE        SUBTRACT (long BFP)
-	op_SDR     uint32 = 0x2B00 // FORMAT_RR         SUBTRACT NORMALIZED (long HFP)
-	op_SDTR    uint32 = 0xB3D3 // FORMAT_RRF1       SUBTRACT (long DFP)
-	op_SDTRA   uint32 = 0xB3D3 // FORMAT_RRF1       SUBTRACT (long DFP)
-	op_SE      uint32 = 0x7B00 // FORMAT_RX1        SUBTRACT NORMALIZED (short HFP)
-	op_SEB     uint32 = 0xED0B // FORMAT_RXE        SUBTRACT (short BFP)
-	op_SEBR    uint32 = 0xB30B // FORMAT_RRE        SUBTRACT (short BFP)
-	op_SER     uint32 = 0x3B00 // FORMAT_RR         SUBTRACT NORMALIZED (short HFP)
-	op_SFASR   uint32 = 0xB385 // FORMAT_RRE        SET FPC AND SIGNAL
-	op_SFPC    uint32 = 0xB384 // FORMAT_RRE        SET FPC
-	op_SG      uint32 = 0xE309 // FORMAT_RXY1       SUBTRACT (64)
-	op_SGF     uint32 = 0xE319 // FORMAT_RXY1       SUBTRACT (64<-32)
-	op_SGFR    uint32 = 0xB919 // FORMAT_RRE        SUBTRACT (64<-32)
-	op_SGR     uint32 = 0xB909 // FORMAT_RRE        SUBTRACT (64)
-	op_SGRK    uint32 = 0xB9E9 // FORMAT_RRF1       SUBTRACT (64)
-	op_SH      uint32 = 0x4B00 // FORMAT_RX1        SUBTRACT HALFWORD
-	op_SHHHR   uint32 = 0xB9C9 // FORMAT_RRF1       SUBTRACT HIGH (32)
-	op_SHHLR   uint32 = 0xB9D9 // FORMAT_RRF1       SUBTRACT HIGH (32)
-	op_SHY     uint32 = 0xE37B // FORMAT_RXY1       SUBTRACT HALFWORD
-	op_SIGP    uint32 = 0xAE00 // FORMAT_RS1        SIGNAL PROCESSOR
-	op_SL      uint32 = 0x5F00 // FORMAT_RX1        SUBTRACT LOGICAL (32)
-	op_SLA     uint32 = 0x8B00 // FORMAT_RS1        SHIFT LEFT SINGLE (32)
-	op_SLAG    uint32 = 0xEB0B // FORMAT_RSY1       SHIFT LEFT SINGLE (64)
-	op_SLAK    uint32 = 0xEBDD // FORMAT_RSY1       SHIFT LEFT SINGLE (32)
-	op_SLB     uint32 = 0xE399 // FORMAT_RXY1       SUBTRACT LOGICAL WITH BORROW (32)
-	op_SLBG    uint32 = 0xE389 // FORMAT_RXY1       SUBTRACT LOGICAL WITH BORROW (64)
-	op_SLBGR   uint32 = 0xB989 // FORMAT_RRE        SUBTRACT LOGICAL WITH BORROW (64)
-	op_SLBR    uint32 = 0xB999 // FORMAT_RRE        SUBTRACT LOGICAL WITH BORROW (32)
-	op_SLDA    uint32 = 0x8F00 // FORMAT_RS1        SHIFT LEFT DOUBLE
-	op_SLDL    uint32 = 0x8D00 // FORMAT_RS1        SHIFT LEFT DOUBLE LOGICAL
-	op_SLDT    uint32 = 0xED40 // FORMAT_RXF        SHIFT SIGNIFICAND LEFT (long DFP)
-	op_SLFI    uint32 = 0xC205 // FORMAT_RIL1       SUBTRACT LOGICAL IMMEDIATE (32)
-	op_SLG     uint32 = 0xE30B // FORMAT_RXY1       SUBTRACT LOGICAL (64)
-	op_SLGF    uint32 = 0xE31B // FORMAT_RXY1       SUBTRACT LOGICAL (64<-32)
-	op_SLGFI   uint32 = 0xC204 // FORMAT_RIL1       SUBTRACT LOGICAL IMMEDIATE (64<-32)
-	op_SLGFR   uint32 = 0xB91B // FORMAT_RRE        SUBTRACT LOGICAL (64<-32)
-	op_SLGR    uint32 = 0xB90B // FORMAT_RRE        SUBTRACT LOGICAL (64)
-	op_SLGRK   uint32 = 0xB9EB // FORMAT_RRF1       SUBTRACT LOGICAL (64)
-	op_SLHHHR  uint32 = 0xB9CB // FORMAT_RRF1       SUBTRACT LOGICAL HIGH (32)
-	op_SLHHLR  uint32 = 0xB9DB // FORMAT_RRF1       SUBTRACT LOGICAL HIGH (32)
-	op_SLL     uint32 = 0x8900 // FORMAT_RS1        SHIFT LEFT SINGLE LOGICAL (32)
-	op_SLLG    uint32 = 0xEB0D // FORMAT_RSY1       SHIFT LEFT SINGLE LOGICAL (64)
-	op_SLLK    uint32 = 0xEBDF // FORMAT_RSY1       SHIFT LEFT SINGLE LOGICAL (32)
-	op_SLR     uint32 = 0x1F00 // FORMAT_RR         SUBTRACT LOGICAL (32)
-	op_SLRK    uint32 = 0xB9FB // FORMAT_RRF1       SUBTRACT LOGICAL (32)
-	op_SLXT    uint32 = 0xED48 // FORMAT_RXF        SHIFT SIGNIFICAND LEFT (extended DFP)
-	op_SLY     uint32 = 0xE35F // FORMAT_RXY1       SUBTRACT LOGICAL (32)
-	op_SP      uint32 = 0xFB00 // FORMAT_SS2        SUBTRACT DECIMAL
-	op_SPKA    uint32 = 0xB20A // FORMAT_S          SET PSW KEY FROM ADDRESS
-	op_SPM     uint32 = 0x0400 // FORMAT_RR         SET PROGRAM MASK
-	op_SPT     uint32 = 0xB208 // FORMAT_S          SET CPU TIMER
-	op_SPX     uint32 = 0xB210 // FORMAT_S          SET PREFIX
-	op_SQD     uint32 = 0xED35 // FORMAT_RXE        SQUARE ROOT (long HFP)
-	op_SQDB    uint32 = 0xED15 // FORMAT_RXE        SQUARE ROOT (long BFP)
-	op_SQDBR   uint32 = 0xB315 // FORMAT_RRE        SQUARE ROOT (long BFP)
-	op_SQDR    uint32 = 0xB244 // FORMAT_RRE        SQUARE ROOT (long HFP)
-	op_SQE     uint32 = 0xED34 // FORMAT_RXE        SQUARE ROOT (short HFP)
-	op_SQEB    uint32 = 0xED14 // FORMAT_RXE        SQUARE ROOT (short BFP)
-	op_SQEBR   uint32 = 0xB314 // FORMAT_RRE        SQUARE ROOT (short BFP)
-	op_SQER    uint32 = 0xB245 // FORMAT_RRE        SQUARE ROOT (short HFP)
-	op_SQXBR   uint32 = 0xB316 // FORMAT_RRE        SQUARE ROOT (extended BFP)
-	op_SQXR    uint32 = 0xB336 // FORMAT_RRE        SQUARE ROOT (extended HFP)
-	op_SR      uint32 = 0x1B00 // FORMAT_RR         SUBTRACT (32)
-	op_SRA     uint32 = 0x8A00 // FORMAT_RS1        SHIFT RIGHT SINGLE (32)
-	op_SRAG    uint32 = 0xEB0A // FORMAT_RSY1       SHIFT RIGHT SINGLE (64)
-	op_SRAK    uint32 = 0xEBDC // FORMAT_RSY1       SHIFT RIGHT SINGLE (32)
-	op_SRDA    uint32 = 0x8E00 // FORMAT_RS1        SHIFT RIGHT DOUBLE
-	op_SRDL    uint32 = 0x8C00 // FORMAT_RS1        SHIFT RIGHT DOUBLE LOGICAL
-	op_SRDT    uint32 = 0xED41 // FORMAT_RXF        SHIFT SIGNIFICAND RIGHT (long DFP)
-	op_SRK     uint32 = 0xB9F9 // FORMAT_RRF1       SUBTRACT (32)
-	op_SRL     uint32 = 0x8800 // FORMAT_RS1        SHIFT RIGHT SINGLE LOGICAL (32)
-	op_SRLG    uint32 = 0xEB0C // FORMAT_RSY1       SHIFT RIGHT SINGLE LOGICAL (64)
-	op_SRLK    uint32 = 0xEBDE // FORMAT_RSY1       SHIFT RIGHT SINGLE LOGICAL (32)
-	op_SRNM    uint32 = 0xB299 // FORMAT_S          SET BFP ROUNDING MODE (2 bit)
-	op_SRNMB   uint32 = 0xB2B8 // FORMAT_S          SET BFP ROUNDING MODE (3 bit)
-	op_SRNMT   uint32 = 0xB2B9 // FORMAT_S          SET DFP ROUNDING MODE
-	op_SRP     uint32 = 0xF000 // FORMAT_SS3        SHIFT AND ROUND DECIMAL
-	op_SRST    uint32 = 0xB25E // FORMAT_RRE        SEARCH STRING
-	op_SRSTU   uint32 = 0xB9BE // FORMAT_RRE        SEARCH STRING UNICODE
-	op_SRXT    uint32 = 0xED49 // FORMAT_RXF        SHIFT SIGNIFICAND RIGHT (extended DFP)
-	op_SSAIR   uint32 = 0xB99F // FORMAT_RRE        SET SECONDARY ASN WITH INSTANCE
-	op_SSAR    uint32 = 0xB225 // FORMAT_RRE        SET SECONDARY ASN
-	op_SSCH    uint32 = 0xB233 // FORMAT_S          START SUBCHANNEL
-	op_SSKE    uint32 = 0xB22B // FORMAT_RRF3       SET STORAGE KEY EXTENDED
-	op_SSM     uint32 = 0x8000 // FORMAT_S          SET SYSTEM MASK
-	op_ST      uint32 = 0x5000 // FORMAT_RX1        STORE (32)
-	op_STAM    uint32 = 0x9B00 // FORMAT_RS1        STORE ACCESS MULTIPLE
-	op_STAMY   uint32 = 0xEB9B // FORMAT_RSY1       STORE ACCESS MULTIPLE
-	op_STAP    uint32 = 0xB212 // FORMAT_S          STORE CPU ADDRESS
-	op_STC     uint32 = 0x4200 // FORMAT_RX1        STORE CHARACTER
-	op_STCH    uint32 = 0xE3C3 // FORMAT_RXY1       STORE CHARACTER HIGH (8)
-	op_STCK    uint32 = 0xB205 // FORMAT_S          STORE CLOCK
-	op_STCKC   uint32 = 0xB207 // FORMAT_S          STORE CLOCK COMPARATOR
-	op_STCKE   uint32 = 0xB278 // FORMAT_S          STORE CLOCK EXTENDED
-	op_STCKF   uint32 = 0xB27C // FORMAT_S          STORE CLOCK FAST
-	op_STCM    uint32 = 0xBE00 // FORMAT_RS2        STORE CHARACTERS UNDER MASK (low)
-	op_STCMH   uint32 = 0xEB2C // FORMAT_RSY2       STORE CHARACTERS UNDER MASK (high)
-	op_STCMY   uint32 = 0xEB2D // FORMAT_RSY2       STORE CHARACTERS UNDER MASK (low)
-	op_STCPS   uint32 = 0xB23A // FORMAT_S          STORE CHANNEL PATH STATUS
-	op_STCRW   uint32 = 0xB239 // FORMAT_S          STORE CHANNEL REPORT WORD
-	op_STCTG   uint32 = 0xEB25 // FORMAT_RSY1       STORE CONTROL (64)
-	op_STCTL   uint32 = 0xB600 // FORMAT_RS1        STORE CONTROL (32)
-	op_STCY    uint32 = 0xE372 // FORMAT_RXY1       STORE CHARACTER
-	op_STD     uint32 = 0x6000 // FORMAT_RX1        STORE (long)
-	op_STDY    uint32 = 0xED67 // FORMAT_RXY1       STORE (long)
-	op_STE     uint32 = 0x7000 // FORMAT_RX1        STORE (short)
-	op_STEY    uint32 = 0xED66 // FORMAT_RXY1       STORE (short)
-	op_STFH    uint32 = 0xE3CB // FORMAT_RXY1       STORE HIGH (32)
-	op_STFL    uint32 = 0xB2B1 // FORMAT_S          STORE FACILITY LIST
-	op_STFLE   uint32 = 0xB2B0 // FORMAT_S          STORE FACILITY LIST EXTENDED
-	op_STFPC   uint32 = 0xB29C // FORMAT_S          STORE FPC
-	op_STG     uint32 = 0xE324 // FORMAT_RXY1       STORE (64)
-	op_STGRL   uint32 = 0xC40B // FORMAT_RIL2       STORE RELATIVE LONG (64)
-	op_STH     uint32 = 0x4000 // FORMAT_RX1        STORE HALFWORD
-	op_STHH    uint32 = 0xE3C7 // FORMAT_RXY1       STORE HALFWORD HIGH (16)
-	op_STHRL   uint32 = 0xC407 // FORMAT_RIL2       STORE HALFWORD RELATIVE LONG
-	op_STHY    uint32 = 0xE370 // FORMAT_RXY1       STORE HALFWORD
-	op_STIDP   uint32 = 0xB202 // FORMAT_S          STORE CPU ID
-	op_STM     uint32 = 0x9000 // FORMAT_RS1        STORE MULTIPLE (32)
-	op_STMG    uint32 = 0xEB24 // FORMAT_RSY1       STORE MULTIPLE (64)
-	op_STMH    uint32 = 0xEB26 // FORMAT_RSY1       STORE MULTIPLE HIGH
-	op_STMY    uint32 = 0xEB90 // FORMAT_RSY1       STORE MULTIPLE (32)
-	op_STNSM   uint32 = 0xAC00 // FORMAT_SI         STORE THEN AND SYSTEM MASK
-	op_STOC    uint32 = 0xEBF3 // FORMAT_RSY2       STORE ON CONDITION (32)
-	op_STOCG   uint32 = 0xEBE3 // FORMAT_RSY2       STORE ON CONDITION (64)
-	op_STOSM   uint32 = 0xAD00 // FORMAT_SI         STORE THEN OR SYSTEM MASK
-	op_STPQ    uint32 = 0xE38E // FORMAT_RXY1       STORE PAIR TO QUADWORD
-	op_STPT    uint32 = 0xB209 // FORMAT_S          STORE CPU TIMER
-	op_STPX    uint32 = 0xB211 // FORMAT_S          STORE PREFIX
-	op_STRAG   uint32 = 0xE502 // FORMAT_SSE        STORE REAL ADDRESS
-	op_STRL    uint32 = 0xC40F // FORMAT_RIL2       STORE RELATIVE LONG (32)
-	op_STRV    uint32 = 0xE33E // FORMAT_RXY1       STORE REVERSED (32)
-	op_STRVG   uint32 = 0xE32F // FORMAT_RXY1       STORE REVERSED (64)
-	op_STRVH   uint32 = 0xE33F // FORMAT_RXY1       STORE REVERSED (16)
-	op_STSCH   uint32 = 0xB234 // FORMAT_S          STORE SUBCHANNEL
-	op_STSI    uint32 = 0xB27D // FORMAT_S          STORE SYSTEM INFORMATION
-	op_STURA   uint32 = 0xB246 // FORMAT_RRE        STORE USING REAL ADDRESS (32)
-	op_STURG   uint32 = 0xB925 // FORMAT_RRE        STORE USING REAL ADDRESS (64)
-	op_STY     uint32 = 0xE350 // FORMAT_RXY1       STORE (32)
-	op_SU      uint32 = 0x7F00 // FORMAT_RX1        SUBTRACT UNNORMALIZED (short HFP)
-	op_SUR     uint32 = 0x3F00 // FORMAT_RR         SUBTRACT UNNORMALIZED (short HFP)
-	op_SVC     uint32 = 0x0A00 // FORMAT_I          SUPERVISOR CALL
-	op_SW      uint32 = 0x6F00 // FORMAT_RX1        SUBTRACT UNNORMALIZED (long HFP)
-	op_SWR     uint32 = 0x2F00 // FORMAT_RR         SUBTRACT UNNORMALIZED (long HFP)
-	op_SXBR    uint32 = 0xB34B // FORMAT_RRE        SUBTRACT (extended BFP)
-	op_SXR     uint32 = 0x3700 // FORMAT_RR         SUBTRACT NORMALIZED (extended HFP)
-	op_SXTR    uint32 = 0xB3DB // FORMAT_RRF1       SUBTRACT (extended DFP)
-	op_SXTRA   uint32 = 0xB3DB // FORMAT_RRF1       SUBTRACT (extended DFP)
-	op_SY      uint32 = 0xE35B // FORMAT_RXY1       SUBTRACT (32)
-	op_TABORT  uint32 = 0xB2FC // FORMAT_S          TRANSACTION ABORT
-	op_TAM     uint32 = 0x010B // FORMAT_E          TEST ADDRESSING MODE
-	op_TAR     uint32 = 0xB24C // FORMAT_RRE        TEST ACCESS
-	op_TB      uint32 = 0xB22C // FORMAT_RRE        TEST BLOCK
-	op_TBDR    uint32 = 0xB351 // FORMAT_RRF5       CONVERT HFP TO BFP (long)
-	op_TBEDR   uint32 = 0xB350 // FORMAT_RRF5       CONVERT HFP TO BFP (long to short)
-	op_TBEGIN  uint32 = 0xE560 // FORMAT_SIL        TRANSACTION BEGIN
-	op_TBEGINC uint32 = 0xE561 // FORMAT_SIL        TRANSACTION BEGIN
-	op_TCDB    uint32 = 0xED11 // FORMAT_RXE        TEST DATA CLASS (long BFP)
-	op_TCEB    uint32 = 0xED10 // FORMAT_RXE        TEST DATA CLASS (short BFP)
-	op_TCXB    uint32 = 0xED12 // FORMAT_RXE        TEST DATA CLASS (extended BFP)
-	op_TDCDT   uint32 = 0xED54 // FORMAT_RXE        TEST DATA CLASS (long DFP)
-	op_TDCET   uint32 = 0xED50 // FORMAT_RXE        TEST DATA CLASS (short DFP)
-	op_TDCXT   uint32 = 0xED58 // FORMAT_RXE        TEST DATA CLASS (extended DFP)
-	op_TDGDT   uint32 = 0xED55 // FORMAT_RXE        TEST DATA GROUP (long DFP)
-	op_TDGET   uint32 = 0xED51 // FORMAT_RXE        TEST DATA GROUP (short DFP)
-	op_TDGXT   uint32 = 0xED59 // FORMAT_RXE        TEST DATA GROUP (extended DFP)
-	op_TEND    uint32 = 0xB2F8 // FORMAT_S          TRANSACTION END
-	op_THDER   uint32 = 0xB358 // FORMAT_RRE        CONVERT BFP TO HFP (short to long)
-	op_THDR    uint32 = 0xB359 // FORMAT_RRE        CONVERT BFP TO HFP (long)
-	op_TM      uint32 = 0x9100 // FORMAT_SI         TEST UNDER MASK
-	op_TMH     uint32 = 0xA700 // FORMAT_RI1        TEST UNDER MASK HIGH
-	op_TMHH    uint32 = 0xA702 // FORMAT_RI1        TEST UNDER MASK (high high)
-	op_TMHL    uint32 = 0xA703 // FORMAT_RI1        TEST UNDER MASK (high low)
-	op_TML     uint32 = 0xA701 // FORMAT_RI1        TEST UNDER MASK LOW
-	op_TMLH    uint32 = 0xA700 // FORMAT_RI1        TEST UNDER MASK (low high)
-	op_TMLL    uint32 = 0xA701 // FORMAT_RI1        TEST UNDER MASK (low low)
-	op_TMY     uint32 = 0xEB51 // FORMAT_SIY        TEST UNDER MASK
-	op_TP      uint32 = 0xEBC0 // FORMAT_RSL        TEST DECIMAL
-	op_TPI     uint32 = 0xB236 // FORMAT_S          TEST PENDING INTERRUPTION
-	op_TPROT   uint32 = 0xE501 // FORMAT_SSE        TEST PROTECTION
-	op_TR      uint32 = 0xDC00 // FORMAT_SS1        TRANSLATE
-	op_TRACE   uint32 = 0x9900 // FORMAT_RS1        TRACE (32)
-	op_TRACG   uint32 = 0xEB0F // FORMAT_RSY1       TRACE (64)
-	op_TRAP2   uint32 = 0x01FF // FORMAT_E          TRAP
-	op_TRAP4   uint32 = 0xB2FF // FORMAT_S          TRAP
-	op_TRE     uint32 = 0xB2A5 // FORMAT_RRE        TRANSLATE EXTENDED
-	op_TROO    uint32 = 0xB993 // FORMAT_RRF3       TRANSLATE ONE TO ONE
-	op_TROT    uint32 = 0xB992 // FORMAT_RRF3       TRANSLATE ONE TO TWO
-	op_TRT     uint32 = 0xDD00 // FORMAT_SS1        TRANSLATE AND TEST
-	op_TRTE    uint32 = 0xB9BF // FORMAT_RRF3       TRANSLATE AND TEST EXTENDED
-	op_TRTO    uint32 = 0xB991 // FORMAT_RRF3       TRANSLATE TWO TO ONE
-	op_TRTR    uint32 = 0xD000 // FORMAT_SS1        TRANSLATE AND TEST REVERSE
-	op_TRTRE   uint32 = 0xB9BD // FORMAT_RRF3       TRANSLATE AND TEST REVERSE EXTENDED
-	op_TRTT    uint32 = 0xB990 // FORMAT_RRF3       TRANSLATE TWO TO TWO
-	op_TS      uint32 = 0x9300 // FORMAT_S          TEST AND SET
-	op_TSCH    uint32 = 0xB235 // FORMAT_S          TEST SUBCHANNEL
-	op_UNPK    uint32 = 0xF300 // FORMAT_SS2        UNPACK
-	op_UNPKA   uint32 = 0xEA00 // FORMAT_SS1        UNPACK ASCII
-	op_UNPKU   uint32 = 0xE200 // FORMAT_SS1        UNPACK UNICODE
-	op_UPT     uint32 = 0x0102 // FORMAT_E          UPDATE TREE
-	op_X       uint32 = 0x5700 // FORMAT_RX1        EXCLUSIVE OR (32)
-	op_XC      uint32 = 0xD700 // FORMAT_SS1        EXCLUSIVE OR (character)
-	op_XG      uint32 = 0xE382 // FORMAT_RXY1       EXCLUSIVE OR (64)
-	op_XGR     uint32 = 0xB982 // FORMAT_RRE        EXCLUSIVE OR (64)
-	op_XGRK    uint32 = 0xB9E7 // FORMAT_RRF1       EXCLUSIVE OR (64)
-	op_XI      uint32 = 0x9700 // FORMAT_SI         EXCLUSIVE OR (immediate)
-	op_XIHF    uint32 = 0xC006 // FORMAT_RIL1       EXCLUSIVE OR IMMEDIATE (high)
-	op_XILF    uint32 = 0xC007 // FORMAT_RIL1       EXCLUSIVE OR IMMEDIATE (low)
-	op_XIY     uint32 = 0xEB57 // FORMAT_SIY        EXCLUSIVE OR (immediate)
-	op_XR      uint32 = 0x1700 // FORMAT_RR         EXCLUSIVE OR (32)
-	op_XRK     uint32 = 0xB9F7 // FORMAT_RRF1       EXCLUSIVE OR (32)
-	op_XSCH    uint32 = 0xB276 // FORMAT_S          CANCEL SUBCHANNEL
-	op_XY      uint32 = 0xE357 // FORMAT_RXY1       EXCLUSIVE OR (32)
-	op_ZAP     uint32 = 0xF800 // FORMAT_SS2        ZERO AND ADD
-
-	// added in z13
-	op_CXPT   uint32 = 0xEDAF // 	RSL-b	CONVERT FROM PACKED (to extended DFP)
-	op_CDPT   uint32 = 0xEDAE // 	RSL-b	CONVERT FROM PACKED (to long DFP)
-	op_CPXT   uint32 = 0xEDAD // 	RSL-b	CONVERT TO PACKED (from extended DFP)
-	op_CPDT   uint32 = 0xEDAC // 	RSL-b	CONVERT TO PACKED (from long DFP)
-	op_LZRF   uint32 = 0xE33B // 	RXY-a	LOAD AND ZERO RIGHTMOST BYTE (32)
-	op_LZRG   uint32 = 0xE32A // 	RXY-a	LOAD AND ZERO RIGHTMOST BYTE (64)
-	op_LCCB   uint32 = 0xE727 // 	RXE	LOAD COUNT TO BLOCK BOUNDARY
-	op_LOCHHI uint32 = 0xEC4E // 	RIE-g	LOAD HALFWORD HIGH IMMEDIATE ON CONDITION (32←16)
-	op_LOCHI  uint32 = 0xEC42 // 	RIE-g	LOAD HALFWORD IMMEDIATE ON CONDITION (32←16)
-	op_LOCGHI uint32 = 0xEC46 // 	RIE-g	LOAD HALFWORD IMMEDIATE ON CONDITION (64←16)
-	op_LOCFH  uint32 = 0xEBE0 // 	RSY-b	LOAD HIGH ON CONDITION (32)
-	op_LOCFHR uint32 = 0xB9E0 // 	RRF-c	LOAD HIGH ON CONDITION (32)
-	op_LLZRGF uint32 = 0xE33A // 	RXY-a	LOAD LOGICAL AND ZERO RIGHTMOST BYTE (64←32)
-	op_STOCFH uint32 = 0xEBE1 // 	RSY-b	STORE HIGH ON CONDITION
-	op_VA     uint32 = 0xE7F3 // 	VRR-c	VECTOR ADD
-	op_VACC   uint32 = 0xE7F1 // 	VRR-c	VECTOR ADD COMPUTE CARRY
-	op_VAC    uint32 = 0xE7BB // 	VRR-d	VECTOR ADD WITH CARRY
-	op_VACCC  uint32 = 0xE7B9 // 	VRR-d	VECTOR ADD WITH CARRY COMPUTE CARRY
-	op_VN     uint32 = 0xE768 // 	VRR-c	VECTOR AND
-	op_VNC    uint32 = 0xE769 // 	VRR-c	VECTOR AND WITH COMPLEMENT
-	op_VAVG   uint32 = 0xE7F2 // 	VRR-c	VECTOR AVERAGE
-	op_VAVGL  uint32 = 0xE7F0 // 	VRR-c	VECTOR AVERAGE LOGICAL
-	op_VCKSM  uint32 = 0xE766 // 	VRR-c	VECTOR CHECKSUM
-	op_VCEQ   uint32 = 0xE7F8 // 	VRR-b	VECTOR COMPARE EQUAL
-	op_VCH    uint32 = 0xE7FB // 	VRR-b	VECTOR COMPARE HIGH
-	op_VCHL   uint32 = 0xE7F9 // 	VRR-b	VECTOR COMPARE HIGH LOGICAL
-	op_VCLZ   uint32 = 0xE753 // 	VRR-a	VECTOR COUNT LEADING ZEROS
-	op_VCTZ   uint32 = 0xE752 // 	VRR-a	VECTOR COUNT TRAILING ZEROS
-	op_VEC    uint32 = 0xE7DB // 	VRR-a	VECTOR ELEMENT COMPARE
-	op_VECL   uint32 = 0xE7D9 // 	VRR-a	VECTOR ELEMENT COMPARE LOGICAL
-	op_VERIM  uint32 = 0xE772 // 	VRI-d	VECTOR ELEMENT ROTATE AND INSERT UNDER MASK
-	op_VERLL  uint32 = 0xE733 // 	VRS-a	VECTOR ELEMENT ROTATE LEFT LOGICAL
-	op_VERLLV uint32 = 0xE773 // 	VRR-c	VECTOR ELEMENT ROTATE LEFT LOGICAL
-	op_VESLV  uint32 = 0xE770 // 	VRR-c	VECTOR ELEMENT SHIFT LEFT
-	op_VESL   uint32 = 0xE730 // 	VRS-a	VECTOR ELEMENT SHIFT LEFT
-	op_VESRA  uint32 = 0xE73A // 	VRS-a	VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
-	op_VESRAV uint32 = 0xE77A // 	VRR-c	VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
-	op_VESRL  uint32 = 0xE738 // 	VRS-a	VECTOR ELEMENT SHIFT RIGHT LOGICAL
-	op_VESRLV uint32 = 0xE778 // 	VRR-c	VECTOR ELEMENT SHIFT RIGHT LOGICAL
-	op_VX     uint32 = 0xE76D // 	VRR-c	VECTOR EXCLUSIVE OR
-	op_VFAE   uint32 = 0xE782 // 	VRR-b	VECTOR FIND ANY ELEMENT EQUAL
-	op_VFEE   uint32 = 0xE780 // 	VRR-b	VECTOR FIND ELEMENT EQUAL
-	op_VFENE  uint32 = 0xE781 // 	VRR-b	VECTOR FIND ELEMENT NOT EQUAL
-	op_VFA    uint32 = 0xE7E3 // 	VRR-c	VECTOR FP ADD
-	op_WFK    uint32 = 0xE7CA // 	VRR-a	VECTOR FP COMPARE AND SIGNAL SCALAR
-	op_VFCE   uint32 = 0xE7E8 // 	VRR-c	VECTOR FP COMPARE EQUAL
-	op_VFCH   uint32 = 0xE7EB // 	VRR-c	VECTOR FP COMPARE HIGH
-	op_VFCHE  uint32 = 0xE7EA // 	VRR-c	VECTOR FP COMPARE HIGH OR EQUAL
-	op_WFC    uint32 = 0xE7CB // 	VRR-a	VECTOR FP COMPARE SCALAR
-	op_VCDG   uint32 = 0xE7C3 // 	VRR-a	VECTOR FP CONVERT FROM FIXED 64-BIT
-	op_VCDLG  uint32 = 0xE7C1 // 	VRR-a	VECTOR FP CONVERT FROM LOGICAL 64-BIT
-	op_VCGD   uint32 = 0xE7C2 // 	VRR-a	VECTOR FP CONVERT TO FIXED 64-BIT
-	op_VCLGD  uint32 = 0xE7C0 // 	VRR-a	VECTOR FP CONVERT TO LOGICAL 64-BIT
-	op_VFD    uint32 = 0xE7E5 // 	VRR-c	VECTOR FP DIVIDE
-	op_VLDE   uint32 = 0xE7C4 // 	VRR-a	VECTOR FP LOAD LENGTHENED
-	op_VLED   uint32 = 0xE7C5 // 	VRR-a	VECTOR FP LOAD ROUNDED
-	op_VFM    uint32 = 0xE7E7 // 	VRR-c	VECTOR FP MULTIPLY
-	op_VFMA   uint32 = 0xE78F // 	VRR-e	VECTOR FP MULTIPLY AND ADD
-	op_VFMS   uint32 = 0xE78E // 	VRR-e	VECTOR FP MULTIPLY AND SUBTRACT
-	op_VFPSO  uint32 = 0xE7CC // 	VRR-a	VECTOR FP PERFORM SIGN OPERATION
-	op_VFSQ   uint32 = 0xE7CE // 	VRR-a	VECTOR FP SQUARE ROOT
-	op_VFS    uint32 = 0xE7E2 // 	VRR-c	VECTOR FP SUBTRACT
-	op_VFTCI  uint32 = 0xE74A // 	VRI-e	VECTOR FP TEST DATA CLASS IMMEDIATE
-	op_VGFM   uint32 = 0xE7B4 // 	VRR-c	VECTOR GALOIS FIELD MULTIPLY SUM
-	op_VGFMA  uint32 = 0xE7BC // 	VRR-d	VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
-	op_VGEF   uint32 = 0xE713 // 	VRV	VECTOR GATHER ELEMENT (32)
-	op_VGEG   uint32 = 0xE712 // 	VRV	VECTOR GATHER ELEMENT (64)
-	op_VGBM   uint32 = 0xE744 // 	VRI-a	VECTOR GENERATE BYTE MASK
-	op_VGM    uint32 = 0xE746 // 	VRI-b	VECTOR GENERATE MASK
-	op_VISTR  uint32 = 0xE75C // 	VRR-a	VECTOR ISOLATE STRING
-	op_VL     uint32 = 0xE706 // 	VRX	VECTOR LOAD
-	op_VLR    uint32 = 0xE756 // 	VRR-a	VECTOR LOAD
-	op_VLREP  uint32 = 0xE705 // 	VRX	VECTOR LOAD AND REPLICATE
-	op_VLC    uint32 = 0xE7DE // 	VRR-a	VECTOR LOAD COMPLEMENT
-	op_VLEH   uint32 = 0xE701 // 	VRX	VECTOR LOAD ELEMENT (16)
-	op_VLEF   uint32 = 0xE703 // 	VRX	VECTOR LOAD ELEMENT (32)
-	op_VLEG   uint32 = 0xE702 // 	VRX	VECTOR LOAD ELEMENT (64)
-	op_VLEB   uint32 = 0xE700 // 	VRX	VECTOR LOAD ELEMENT (8)
-	op_VLEIH  uint32 = 0xE741 // 	VRI-a	VECTOR LOAD ELEMENT IMMEDIATE (16)
-	op_VLEIF  uint32 = 0xE743 // 	VRI-a	VECTOR LOAD ELEMENT IMMEDIATE (32)
-	op_VLEIG  uint32 = 0xE742 // 	VRI-a	VECTOR LOAD ELEMENT IMMEDIATE (64)
-	op_VLEIB  uint32 = 0xE740 // 	VRI-a	VECTOR LOAD ELEMENT IMMEDIATE (8)
-	op_VFI    uint32 = 0xE7C7 // 	VRR-a	VECTOR LOAD FP INTEGER
-	op_VLGV   uint32 = 0xE721 // 	VRS-c	VECTOR LOAD GR FROM VR ELEMENT
-	op_VLLEZ  uint32 = 0xE704 // 	VRX	VECTOR LOAD LOGICAL ELEMENT AND ZERO
-	op_VLM    uint32 = 0xE736 // 	VRS-a	VECTOR LOAD MULTIPLE
-	op_VLP    uint32 = 0xE7DF // 	VRR-a	VECTOR LOAD POSITIVE
-	op_VLBB   uint32 = 0xE707 // 	VRX	VECTOR LOAD TO BLOCK BOUNDARY
-	op_VLVG   uint32 = 0xE722 // 	VRS-b	VECTOR LOAD VR ELEMENT FROM GR
-	op_VLVGP  uint32 = 0xE762 // 	VRR-f	VECTOR LOAD VR FROM GRS DISJOINT
-	op_VLL    uint32 = 0xE737 // 	VRS-b	VECTOR LOAD WITH LENGTH
-	op_VMX    uint32 = 0xE7FF // 	VRR-c	VECTOR MAXIMUM
-	op_VMXL   uint32 = 0xE7FD // 	VRR-c	VECTOR MAXIMUM LOGICAL
-	op_VMRH   uint32 = 0xE761 // 	VRR-c	VECTOR MERGE HIGH
-	op_VMRL   uint32 = 0xE760 // 	VRR-c	VECTOR MERGE LOW
-	op_VMN    uint32 = 0xE7FE // 	VRR-c	VECTOR MINIMUM
-	op_VMNL   uint32 = 0xE7FC // 	VRR-c	VECTOR MINIMUM LOGICAL
-	op_VMAE   uint32 = 0xE7AE // 	VRR-d	VECTOR MULTIPLY AND ADD EVEN
-	op_VMAH   uint32 = 0xE7AB // 	VRR-d	VECTOR MULTIPLY AND ADD HIGH
-	op_VMALE  uint32 = 0xE7AC // 	VRR-d	VECTOR MULTIPLY AND ADD LOGICAL EVEN
-	op_VMALH  uint32 = 0xE7A9 // 	VRR-d	VECTOR MULTIPLY AND ADD LOGICAL HIGH
-	op_VMALO  uint32 = 0xE7AD // 	VRR-d	VECTOR MULTIPLY AND ADD LOGICAL ODD
-	op_VMAL   uint32 = 0xE7AA // 	VRR-d	VECTOR MULTIPLY AND ADD LOW
-	op_VMAO   uint32 = 0xE7AF // 	VRR-d	VECTOR MULTIPLY AND ADD ODD
-	op_VME    uint32 = 0xE7A6 // 	VRR-c	VECTOR MULTIPLY EVEN
-	op_VMH    uint32 = 0xE7A3 // 	VRR-c	VECTOR MULTIPLY HIGH
-	op_VMLE   uint32 = 0xE7A4 // 	VRR-c	VECTOR MULTIPLY EVEN LOGICAL
-	op_VMLH   uint32 = 0xE7A1 // 	VRR-c	VECTOR MULTIPLY HIGH LOGICAL
-	op_VMLO   uint32 = 0xE7A5 // 	VRR-c	VECTOR MULTIPLY ODD LOGICAL
-	op_VML    uint32 = 0xE7A2 // 	VRR-c	VECTOR MULTIPLY LOW
-	op_VMO    uint32 = 0xE7A7 // 	VRR-c	VECTOR MULTIPLY ODD
-	op_VNO    uint32 = 0xE76B // 	VRR-c	VECTOR NOR
-	op_VO     uint32 = 0xE76A // 	VRR-c	VECTOR OR
-	op_VPK    uint32 = 0xE794 // 	VRR-c	VECTOR PACK
-	op_VPKLS  uint32 = 0xE795 // 	VRR-b	VECTOR PACK LOGICAL SATURATE
-	op_VPKS   uint32 = 0xE797 // 	VRR-b	VECTOR PACK SATURATE
-	op_VPERM  uint32 = 0xE78C // 	VRR-e	VECTOR PERMUTE
-	op_VPDI   uint32 = 0xE784 // 	VRR-c	VECTOR PERMUTE DOUBLEWORD IMMEDIATE
-	op_VPOPCT uint32 = 0xE750 // 	VRR-a	VECTOR POPULATION COUNT
-	op_VREP   uint32 = 0xE74D // 	VRI-c	VECTOR REPLICATE
-	op_VREPI  uint32 = 0xE745 // 	VRI-a	VECTOR REPLICATE IMMEDIATE
-	op_VSCEF  uint32 = 0xE71B // 	VRV	VECTOR SCATTER ELEMENT (32)
-	op_VSCEG  uint32 = 0xE71A // 	VRV	VECTOR SCATTER ELEMENT (64)
-	op_VSEL   uint32 = 0xE78D // 	VRR-e	VECTOR SELECT
-	op_VSL    uint32 = 0xE774 // 	VRR-c	VECTOR SHIFT LEFT
-	op_VSLB   uint32 = 0xE775 // 	VRR-c	VECTOR SHIFT LEFT BY BYTE
-	op_VSLDB  uint32 = 0xE777 // 	VRI-d	VECTOR SHIFT LEFT DOUBLE BY BYTE
-	op_VSRA   uint32 = 0xE77E // 	VRR-c	VECTOR SHIFT RIGHT ARITHMETIC
-	op_VSRAB  uint32 = 0xE77F // 	VRR-c	VECTOR SHIFT RIGHT ARITHMETIC BY BYTE
-	op_VSRL   uint32 = 0xE77C // 	VRR-c	VECTOR SHIFT RIGHT LOGICAL
-	op_VSRLB  uint32 = 0xE77D // 	VRR-c	VECTOR SHIFT RIGHT LOGICAL BY BYTE
-	op_VSEG   uint32 = 0xE75F // 	VRR-a	VECTOR SIGN EXTEND TO DOUBLEWORD
-	op_VST    uint32 = 0xE70E // 	VRX	VECTOR STORE
-	op_VSTEH  uint32 = 0xE709 // 	VRX	VECTOR STORE ELEMENT (16)
-	op_VSTEF  uint32 = 0xE70B // 	VRX	VECTOR STORE ELEMENT (32)
-	op_VSTEG  uint32 = 0xE70A // 	VRX	VECTOR STORE ELEMENT (64)
-	op_VSTEB  uint32 = 0xE708 // 	VRX	VECTOR STORE ELEMENT (8)
-	op_VSTM   uint32 = 0xE73E // 	VRS-a	VECTOR STORE MULTIPLE
-	op_VSTL   uint32 = 0xE73F // 	VRS-b	VECTOR STORE WITH LENGTH
-	op_VSTRC  uint32 = 0xE78A // 	VRR-d	VECTOR STRING RANGE COMPARE
-	op_VS     uint32 = 0xE7F7 // 	VRR-c	VECTOR SUBTRACT
-	op_VSCBI  uint32 = 0xE7F5 // 	VRR-c	VECTOR SUBTRACT COMPUTE BORROW INDICATION
-	op_VSBCBI uint32 = 0xE7BD // 	VRR-d	VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION
-	op_VSBI   uint32 = 0xE7BF // 	VRR-d	VECTOR SUBTRACT WITH BORROW INDICATION
-	op_VSUMG  uint32 = 0xE765 // 	VRR-c	VECTOR SUM ACROSS DOUBLEWORD
-	op_VSUMQ  uint32 = 0xE767 // 	VRR-c	VECTOR SUM ACROSS QUADWORD
-	op_VSUM   uint32 = 0xE764 // 	VRR-c	VECTOR SUM ACROSS WORD
-	op_VTM    uint32 = 0xE7D8 // 	VRR-a	VECTOR TEST UNDER MASK
-	op_VUPH   uint32 = 0xE7D7 // 	VRR-a	VECTOR UNPACK HIGH
-	op_VUPLH  uint32 = 0xE7D5 // 	VRR-a	VECTOR UNPACK LOGICAL HIGH
-	op_VUPLL  uint32 = 0xE7D4 // 	VRR-a	VECTOR UNPACK LOGICAL LOW
-	op_VUPL   uint32 = 0xE7D6 // 	VRR-a	VECTOR UNPACK LOW
-)
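
The vector opcodes above are two-byte extended opcodes; the encoders later in the file emit the high byte first and the low byte as the final byte of the instruction. A minimal standalone illustration (not part of the deleted file):

package main

import "fmt"

func main() {
	// op_VL from the table above; the zVR* encoders write uint8(op>>8) as
	// the first byte and uint8(op) as the last byte of the instruction.
	const opVL uint32 = 0xE706
	fmt.Printf("first byte %#x, last byte %#x\n", uint8(opVL>>8), uint8(opVL))
}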
-
-func oclass(a *obj.Addr) int {
-	return int(a.Class) - 1
-}
-
-// Add a relocation for the immediate in a RIL style instruction.
-// The addend will be adjusted as required.
-func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
-	if sym == nil {
-		ctxt.Diag("require symbol to apply relocation")
-	}
-	offset := int64(2) // relocation offset from start of instruction
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc + offset)
-	rel.Siz = 4
-	rel.Sym = sym
-	rel.Add = add + offset + int64(rel.Siz)
-	rel.Type = obj.R_PCRELDBL
-	return rel
-}
-
-func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Reloc {
-	if sym == nil {
-		ctxt.Diag("require symbol to apply relocation")
-	}
-	offset += int64(2) // relocation offset from start of instruction
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc + offset)
-	rel.Siz = 4
-	rel.Sym = sym
-	rel.Add = add + offset + int64(rel.Siz)
-	rel.Type = obj.R_PCRELDBL
-	return rel
-}
-
-// Add a CALL relocation for the immediate in a RIL style instruction.
-// The addend will be adjusted as required.
-func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
-	if sym == nil {
-		ctxt.Diag("require symbol to apply relocation")
-	}
-	offset := int64(2) // relocation offset from start of instruction
-	rel := obj.Addrel(ctxt.Cursym)
-	rel.Off = int32(ctxt.Pc + offset)
-	rel.Siz = 4
-	rel.Sym = sym
-	rel.Add = add + offset + int64(rel.Siz)
-	rel.Type = obj.R_CALL
-	return rel
-}
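
The three relocation helpers above share the same placement arithmetic. A standalone sketch of just that arithmetic (assuming, as the comments above state, a RIL-style instruction whose 32-bit immediate begins at byte 2):

package main

import "fmt"

// rilRelocPlacement mirrors the arithmetic in addrilreloc above: the 32-bit
// immediate of a RIL-format instruction starts 2 bytes into the instruction,
// so the relocation is recorded at pc+2 and the addend absorbs that offset
// plus the 4-byte size of the immediate.
func rilRelocPlacement(pc, add int64) (off, addend int64) {
	const immOffset = 2
	const immSize = 4
	return pc + immOffset, add + immOffset + immSize
}

func main() {
	off, addend := rilRelocPlacement(0x100, 0)
	fmt.Printf("reloc at %#x, addend %d\n", off, addend) // reloc at 0x102, addend 6
}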
-
-func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 {
-	switch p.As {
-	case ABEQ, ACMPBEQ, ACMPUBEQ, AMOVDEQ:
-		return 0x8
-	case ABGE, ACMPBGE, ACMPUBGE, AMOVDGE:
-		return 0xA
-	case ABGT, ACMPBGT, ACMPUBGT, AMOVDGT:
-		return 0x2
-	case ABLE, ACMPBLE, ACMPUBLE, AMOVDLE:
-		return 0xC
-	case ABLT, ACMPBLT, ACMPUBLT, AMOVDLT:
-		return 0x4
-	case ABNE, ACMPBNE, ACMPUBNE, AMOVDNE:
-		return 0x7
-	case ABLEU: // LE or unordered
-		return 0xD
-	case ABLTU: // LT or unordered
-		return 0x5
-	case ABVC:
-		return 0x0 // needs extra instruction
-	case ABVS:
-		return 0x1 // unordered
-	}
-	ctxt.Diag("unknown conditional branch %v", p.As)
-	return 0xF
-}
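
As a reading aid for the masks above (an interpretation, not text from the deleted file): each bit of the 4-bit mask selects one condition code, with 0x8 for CC0 (equal), 0x4 for CC1 (low), 0x2 for CC2 (high) and 0x1 for CC3 (overflow or unordered). A small sketch:

package main

import "fmt"

// Condition-code selector bits as used in the BRC/BRCL mask field.
const (
	ccEqual     = 0x8 // CC0
	ccLow       = 0x4 // CC1
	ccHigh      = 0x2 // CC2
	ccUnordered = 0x1 // CC3 (overflow for integer ops, unordered for FP compares)
)

func main() {
	fmt.Printf("BNE  -> %#x\n", ccLow|ccHigh|ccUnordered)   // 0x7, as returned for ABNE above
	fmt.Printf("BGE  -> %#x\n", ccEqual|ccHigh)             // 0xA
	fmt.Printf("BLEU -> %#x\n", ccEqual|ccLow|ccUnordered)  // 0xD
}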
-
-func asmout(ctxt *obj.Link, asm *[]byte) {
-	p := ctxt.Curp
-	o := oplook(ctxt, p)
-	ctxt.Printp = p
-
-	switch o.type_ {
-	default:
-		ctxt.Diag("unknown type %d", o.type_)
-
-	case 0: // PSEUDO OPS
-		break
-
-	case 1: // mov reg reg
-		switch p.As {
-		default:
-			ctxt.Diag("unhandled operation: %v", p.As)
-		case AMOVD:
-			zRRE(op_LGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		// sign extend
-		case AMOVW:
-			zRRE(op_LGFR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		case AMOVH:
-			zRRE(op_LGHR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		case AMOVB:
-			zRRE(op_LGBR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		// zero extend
-		case AMOVWZ:
-			zRRE(op_LLGFR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		case AMOVHZ:
-			zRRE(op_LLGHR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		case AMOVBZ:
-			zRRE(op_LLGCR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		// reverse bytes
-		case AMOVDBR:
-			zRRE(op_LRVGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		case AMOVWBR:
-			zRRE(op_LRVR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		// floating point
-		case AFMOVD, AFMOVS:
-			zRR(op_LDR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		}
-
-	case 2: // arithmetic op reg [reg] reg
-		r := p.Reg
-		if r == 0 {
-			r = p.To.Reg
-		}
-
-		var opcode uint32
-
-		switch p.As {
-		default:
-			ctxt.Diag("invalid opcode")
-		case AADD:
-			opcode = op_AGRK
-		case AADDC:
-			opcode = op_ALGRK
-		case AADDE:
-			opcode = op_ALCGR
-		case AADDW:
-			opcode = op_ARK
-		case AMULLW:
-			opcode = op_MSGFR
-		case AMULLD:
-			opcode = op_MSGR
-		case ADIVW, AMODW:
-			opcode = op_DSGFR
-		case ADIVWU, AMODWU:
-			opcode = op_DLR
-		case ADIVD, AMODD:
-			opcode = op_DSGR
-		case ADIVDU, AMODDU:
-			opcode = op_DLGR
-		case AFADD:
-			opcode = op_ADBR
-		case AFADDS:
-			opcode = op_AEBR
-		case AFSUB:
-			opcode = op_SDBR
-		case AFSUBS:
-			opcode = op_SEBR
-		case AFDIV:
-			opcode = op_DDBR
-		case AFDIVS:
-			opcode = op_DEBR
-		}
-
-		switch p.As {
-		default:
-
-		case AADD, AADDC, AADDW:
-			if p.As == AADDW && r == p.To.Reg {
-				zRR(op_AR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else {
-				zRRF(opcode, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
-			}
-
-		case AADDE, AMULLW, AMULLD:
-			if r == p.To.Reg {
-				zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else if p.From.Reg == p.To.Reg {
-				zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
-			} else {
-				zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
-				zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			}
-
-		case ADIVW, ADIVWU, ADIVD, ADIVDU:
-			if p.As == ADIVWU || p.As == ADIVDU {
-				zRI(op_LGHI, REGTMP, 0, asm)
-			}
-			zRRE(op_LGR, REGTMP2, uint32(r), asm)
-			zRRE(opcode, REGTMP, uint32(p.From.Reg), asm)
-			zRRE(op_LGR, uint32(p.To.Reg), REGTMP2, asm)
-
-		case AMODW, AMODWU, AMODD, AMODDU:
-			if p.As == AMODWU || p.As == AMODDU {
-				zRI(op_LGHI, REGTMP, 0, asm)
-			}
-			zRRE(op_LGR, REGTMP2, uint32(r), asm)
-			zRRE(opcode, REGTMP, uint32(p.From.Reg), asm)
-			zRRE(op_LGR, uint32(p.To.Reg), REGTMP, asm)
-
-		case AFADD, AFADDS:
-			if r == p.To.Reg {
-				zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else if p.From.Reg == p.To.Reg {
-				zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
-			} else {
-				zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
-				zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			}
-
-		case AFSUB, AFSUBS, AFDIV, AFDIVS:
-			if r == p.To.Reg {
-				zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else if p.From.Reg == p.To.Reg {
-				zRRE(op_LGDR, REGTMP, uint32(r), asm)
-				zRRE(opcode, uint32(r), uint32(p.From.Reg), asm)
-				zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
-				zRRE(op_LDGR, uint32(r), REGTMP, asm)
-			} else {
-				zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
-				zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			}
-
-		}
-
-	case 3: // mov $constant reg
-		v := vregoff(ctxt, &p.From)
-		switch p.As {
-		case AMOVBZ:
-			v = int64(uint8(v))
-		case AMOVHZ:
-			v = int64(uint16(v))
-		case AMOVWZ:
-			v = int64(uint32(v))
-		case AMOVB:
-			v = int64(int8(v))
-		case AMOVH:
-			v = int64(int16(v))
-		case AMOVW:
-			v = int64(int32(v))
-		}
-		if int64(int16(v)) == v {
-			zRI(op_LGHI, uint32(p.To.Reg), uint32(v), asm)
-		} else if v&0xffff0000 == v {
-			zRI(op_LLILH, uint32(p.To.Reg), uint32(v>>16), asm)
-		} else if v&0xffff00000000 == v {
-			zRI(op_LLIHL, uint32(p.To.Reg), uint32(v>>32), asm)
-		} else if uint64(v)&0xffff000000000000 == uint64(v) {
-			zRI(op_LLIHH, uint32(p.To.Reg), uint32(v>>48), asm)
-		} else if int64(int32(v)) == v {
-			zRIL(_a, op_LGFI, uint32(p.To.Reg), uint32(v), asm)
-		} else if int64(uint32(v)) == v {
-			zRIL(_a, op_LLILF, uint32(p.To.Reg), uint32(v), asm)
-		} else if uint64(v)&0xffffffff00000000 == uint64(v) {
-			zRIL(_a, op_LLIHF, uint32(p.To.Reg), uint32(v>>32), asm)
-		} else {
-			zRIL(_a, op_LLILF, uint32(p.To.Reg), uint32(v), asm)
-			zRIL(_a, op_IIHF, uint32(p.To.Reg), uint32(v>>32), asm)
-		}
-
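Case 3 above picks the shortest load-immediate form for the constant. A self-contained sketch of the same selection order (mnemonics taken from the opcode table, for illustration only):

package main

import "fmt"

// loadConstForm reports which load-immediate form case 3 above would pick
// for a 64-bit constant, in the same priority order.
func loadConstForm(v int64) string {
	switch {
	case int64(int16(v)) == v:
		return "LGHI" // 16-bit signed immediate
	case v&0xffff0000 == v:
		return "LLILH" // bits 16-31 only
	case v&0xffff00000000 == v:
		return "LLIHL" // bits 32-47 only
	case uint64(v)&0xffff000000000000 == uint64(v):
		return "LLIHH" // bits 48-63 only
	case int64(int32(v)) == v:
		return "LGFI" // 32-bit signed immediate
	case int64(uint32(v)) == v:
		return "LLILF" // 32-bit unsigned immediate
	case uint64(v)&0xffffffff00000000 == uint64(v):
		return "LLIHF" // high 32 bits only
	default:
		return "LLILF+IIHF" // two instructions
	}
}

func main() {
	for _, v := range []int64{1, -1, 0x12340000, 0x123456789} {
		fmt.Printf("%#x -> %s\n", v, loadConstForm(v))
	}
}
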
-	case 4: // multiply high (a*b)>>64
-		r := p.Reg
-		if r == 0 {
-			r = p.To.Reg
-		}
-		zRRE(op_LGR, REGTMP2, uint32(r), asm)
-		zRRE(op_MLGR, REGTMP, uint32(p.From.Reg), asm)
-		switch p.As {
-		case AMULHDU:
-			// Unsigned: move result into correct register.
-			zRRE(op_LGR, uint32(p.To.Reg), REGTMP, asm)
-		case AMULHD:
-			// Signed: need to convert result.
-			// See Hacker's Delight 8-3.
-			zRSY(op_SRAG, REGTMP2, uint32(p.From.Reg), 0, 63, asm)
-			zRRE(op_NGR, REGTMP2, uint32(r), asm)
-			zRRE(op_SGR, REGTMP, REGTMP2, asm)
-			zRSY(op_SRAG, REGTMP2, uint32(r), 0, 63, asm)
-			zRRE(op_NGR, REGTMP2, uint32(p.From.Reg), asm)
-			zRRF(op_SGRK, REGTMP2, 0, uint32(p.To.Reg), REGTMP, asm)
-		}
-
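The AMULHD sequence above applies the identity from Hacker's Delight 8-3: the signed high word is the unsigned high word minus each operand whenever the other operand is negative. A standalone check using math/bits (not the deleted code):

package main

import (
	"fmt"
	"math/bits"
)

// mulHighSigned computes the upper 64 bits of the signed 128-bit product,
// applying the same correction as the AMULHD sequence above:
// signed-high = unsigned-high - (x<0 ? y : 0) - (y<0 ? x : 0).
func mulHighSigned(x, y int64) int64 {
	hi, _ := bits.Mul64(uint64(x), uint64(y))
	t := int64(hi)
	t -= (x >> 63) & y // subtract y when x is negative
	t -= (y >> 63) & x // subtract x when y is negative
	return t
}

func main() {
	fmt.Println(mulHighSigned(-2, 3))        // -1 (high word of -6)
	fmt.Println(mulHighSigned(1<<40, 1<<40)) // 65536 (2^80 >> 64)
}
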
-	case 5: // syscall
-		zI(op_SVC, 0, asm)
-
-	case 6: // logical op reg [reg] reg
-		var oprr, oprre, oprrf uint32
-		switch p.As {
-		case AAND:
-			oprre = op_NGR
-			oprrf = op_NGRK
-		case AANDW:
-			oprr = op_NR
-			oprrf = op_NRK
-		case AOR:
-			oprre = op_OGR
-			oprrf = op_OGRK
-		case AORW:
-			oprr = op_OR
-			oprrf = op_ORK
-		case AXOR:
-			oprre = op_XGR
-			oprrf = op_XGRK
-		case AXORW:
-			oprr = op_XR
-			oprrf = op_XRK
-		}
-		if p.Reg == 0 {
-			if oprr != 0 {
-				zRR(oprr, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else {
-				zRRE(oprre, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			}
-		} else {
-			zRRF(oprrf, uint32(p.Reg), 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		}
-
-	case 7: // shift/rotate reg [reg] reg
-		d2 := vregoff(ctxt, &p.From)
-		b2 := p.From.Reg
-		r3 := p.Reg
-		if r3 == 0 {
-			r3 = p.To.Reg
-		}
-		r1 := p.To.Reg
-		var opcode uint32
-		switch p.As {
-		default:
-		case ASLD:
-			opcode = op_SLLG
-		case ASRD:
-			opcode = op_SRLG
-		case ASLW:
-			opcode = op_SLLK
-		case ASRW:
-			opcode = op_SRLK
-		case ARLL:
-			opcode = op_RLL
-		case ARLLG:
-			opcode = op_RLLG
-		case ASRAW:
-			opcode = op_SRAK
-		case ASRAD:
-			opcode = op_SRAG
-		}
-		zRSY(opcode, uint32(r1), uint32(r3), uint32(b2), uint32(d2), asm)
-
-	case 8: // find leftmost one
-		if p.To.Reg&1 != 0 {
-			ctxt.Diag("target must be an even-numbered register")
-		}
-		// FLOGR also writes a mask to p.To.Reg+1.
-		zRRE(op_FLOGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-
-	case 10: // subtract reg [reg] reg
-		r := int(p.Reg)
-
-		switch p.As {
-		default:
-		case ASUB:
-			if r == 0 {
-				zRRE(op_SGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else {
-				zRRF(op_SGRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
-			}
-		case ASUBC:
-			if r == 0 {
-				zRRE(op_SLGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else {
-				zRRF(op_SLGRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
-			}
-		case ASUBE:
-			if r == 0 {
-				r = int(p.To.Reg)
-			}
-			if r == int(p.To.Reg) {
-				zRRE(op_SLBGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else if p.From.Reg == p.To.Reg {
-				zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
-				zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
-				zRRE(op_SLBGR, uint32(p.To.Reg), REGTMP, asm)
-			} else {
-				zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
-				zRRE(op_SLBGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			}
-		case ASUBW:
-			if r == 0 {
-				zRR(op_SR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-			} else {
-				zRRF(op_SRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
-			}
-		}
-
-	case 11: // br/bl
-		v := int32(0)
-
-		if p.Pcond != nil {
-			v = int32((p.Pcond.Pc - p.Pc) >> 1)
-		}
-
-		if p.As == ABR && p.To.Sym == nil && int32(int16(v)) == v {
-			zRI(op_BRC, 0xF, uint32(v), asm)
-		} else {
-			if p.As == ABL {
-				zRIL(_b, op_BRASL, uint32(REG_LR), uint32(v), asm)
-			} else {
-				zRIL(_c, op_BRCL, 0xF, uint32(v), asm)
-			}
-			if p.To.Sym != nil {
-				addcallreloc(ctxt, p.To.Sym, p.To.Offset)
-			}
-		}
-
-	case 12: // arithmetic/logical op mem reg
-		r1 := p.To.Reg
-		d2 := vregoff(ctxt, &p.From)
-		b2 := p.From.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		x2 := p.From.Index
-		if -DISP20/2 > d2 || d2 >= DISP20/2 {
-			zRIL(_a, op_LGFI, REGTMP, uint32(d2), asm)
-			if x2 != 0 {
-				zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
-			}
-			x2 = REGTMP
-			d2 = 0
-		}
-		var opx, opxy uint32
-		switch p.As {
-		case AADD:
-			opxy = op_AG
-		case AADDC:
-			opxy = op_ALG
-		case AADDW:
-			opx = op_A
-			opxy = op_AY
-		case AMULLW:
-			opx = op_MS
-			opxy = op_MSY
-		case AMULLD:
-			opxy = op_MSG
-		case ASUB:
-			opxy = op_SG
-		case ASUBC:
-			opxy = op_SLG
-		case ASUBE:
-			opxy = op_SLBG
-		case ASUBW:
-			opx = op_S
-			opxy = op_SY
-		case AAND:
-			opxy = op_NG
-		case AANDW:
-			opx = op_N
-			opxy = op_NY
-		case AOR:
-			opxy = op_OG
-		case AORW:
-			opx = op_O
-			opxy = op_OY
-		case AXOR:
-			opxy = op_XG
-		case AXORW:
-			opx = op_X
-			opxy = op_XY
-		}
-		if opx != 0 && 0 <= d2 && d2 < DISP12 {
-			zRX(opx, uint32(r1), uint32(x2), uint32(b2), uint32(d2), asm)
-		} else {
-			zRXY(opxy, uint32(r1), uint32(x2), uint32(b2), uint32(d2), asm)
-		}
-
-	case 15: // br/bl (reg)
-		r := p.To.Reg
-		if p.As == ABCL || p.As == ABL {
-			zRR(op_BASR, uint32(REG_LR), uint32(r), asm)
-		} else {
-			zRR(op_BCR, 0xF, uint32(r), asm)
-		}
-
-	case 16: // conditional branch
-		v := int32(0)
-		if p.Pcond != nil {
-			v = int32((p.Pcond.Pc - p.Pc) >> 1)
-		}
-		mask := branchMask(ctxt, p)
-		if p.To.Sym == nil && int32(int16(v)) == v {
-			zRI(op_BRC, mask, uint32(v), asm)
-		} else {
-			zRIL(_c, op_BRCL, mask, uint32(v), asm)
-		}
-		if p.To.Sym != nil {
-			addrilreloc(ctxt, p.To.Sym, p.To.Offset)
-		}
-
-	case 17: // move on condition
-		m3 := branchMask(ctxt, p)
-		zRRF(op_LOCGR, m3, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-
-	case 18: // br/bl reg
-		if p.As == ABL {
-			zRR(op_BASR, uint32(REG_LR), uint32(p.To.Reg), asm)
-		} else {
-			zRR(op_BCR, 0xF, uint32(p.To.Reg), asm)
-		}
-
-	case 19: // mov $sym+n(SB) reg
-		d := vregoff(ctxt, &p.From)
-		zRIL(_b, op_LARL, uint32(p.To.Reg), 0, asm)
-		if d&1 != 0 {
-			zRX(op_LA, uint32(p.To.Reg), uint32(p.To.Reg), 0, 1, asm)
-			d -= 1
-		}
-		addrilreloc(ctxt, p.From.Sym, d)
-
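LARL encodes a halfword-scaled, PC-relative immediate and can therefore only materialize even offsets; case 19 above (and cases 73-75 below) peel off an odd final byte and add it back with LA. A tiny sketch of that split (illustrative):

package main

import "fmt"

// splitOddOffset returns the even part of the offset that LARL can
// materialize directly and the 0/1 byte that is added back with LA,
// mirroring the d&1 handling in case 19 above.
func splitOddOffset(d int64) (larlPart, laDisp int64) {
	return d &^ 1, d & 1
}

func main() {
	fmt.Println(splitOddOffset(0x1001)) // 4096 1
}
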
-	case 21: // subtract $constant [reg] reg
-		v := vregoff(ctxt, &p.From)
-		r := p.Reg
-		if r == 0 {
-			r = p.To.Reg
-		}
-		switch p.As {
-		case ASUB:
-			zRIL(_a, op_LGFI, uint32(REGTMP), uint32(v), asm)
-			zRRF(op_SLGRK, uint32(REGTMP), 0, uint32(p.To.Reg), uint32(r), asm)
-		case ASUBC:
-			if r != p.To.Reg {
-				zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
-			}
-			zRIL(_a, op_SLGFI, uint32(p.To.Reg), uint32(v), asm)
-		case ASUBW:
-			if r != p.To.Reg {
-				zRR(op_LR, uint32(p.To.Reg), uint32(r), asm)
-			}
-			zRIL(_a, op_SLFI, uint32(p.To.Reg), uint32(v), asm)
-		}
-
-	case 22: // add/multiply $constant [reg] reg
-		v := vregoff(ctxt, &p.From)
-		r := p.Reg
-		if r == 0 {
-			r = p.To.Reg
-		}
-		var opri, opril, oprie uint32
-		switch p.As {
-		case AADD:
-			opri = op_AGHI
-			opril = op_AGFI
-			oprie = op_AGHIK
-		case AADDC:
-			opril = op_ALGFI
-			oprie = op_ALGHSIK
-		case AADDW:
-			opri = op_AHI
-			opril = op_AFI
-			oprie = op_AHIK
-		case AMULLW:
-			opri = op_MHI
-			opril = op_MSFI
-		case AMULLD:
-			opri = op_MGHI
-			opril = op_MSGFI
-		}
-		if r != p.To.Reg && (oprie == 0 || int64(int16(v)) != v) {
-			switch p.As {
-			case AADD, AADDC, AMULLD:
-				zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
-			case AADDW, AMULLW:
-				zRR(op_LR, uint32(p.To.Reg), uint32(r), asm)
-			}
-			r = p.To.Reg
-		}
-		if r == p.To.Reg {
-			if opri != 0 && int64(int16(v)) == v {
-				zRI(opri, uint32(p.To.Reg), uint32(v), asm)
-			} else {
-				zRIL(_a, opril, uint32(p.To.Reg), uint32(v), asm)
-			}
-		} else {
-			zRIE(_d, oprie, uint32(p.To.Reg), uint32(r), uint32(v), 0, 0, 0, 0, asm)
-		}
-
-	case 23: // 64-bit logical op $constant reg
-		// TODO(mundaym): merge with case 24.
-		v := vregoff(ctxt, &p.From)
-		switch p.As {
-		default:
-			ctxt.Diag("%v is not supported", p)
-		case AAND:
-			if v >= 0 { // needs zero extend
-				zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
-				zRRE(op_NGR, uint32(p.To.Reg), REGTMP, asm)
-			} else if int64(int16(v)) == v {
-				zRI(op_NILL, uint32(p.To.Reg), uint32(v), asm)
-			} else { // NILF ANDs only the low 32 bits of the register with uint32(v)
-				zRIL(_a, op_NILF, uint32(p.To.Reg), uint32(v), asm)
-			}
-		case AOR:
-			if int64(uint32(v)) != v { // needs sign extend
-				zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
-				zRRE(op_OGR, uint32(p.To.Reg), REGTMP, asm)
-			} else if int64(uint16(v)) == v {
-				zRI(op_OILL, uint32(p.To.Reg), uint32(v), asm)
-			} else {
-				zRIL(_a, op_OILF, uint32(p.To.Reg), uint32(v), asm)
-			}
-		case AXOR:
-			if int64(uint32(v)) != v { // needs sign extend
-				zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
-				zRRE(op_XGR, uint32(p.To.Reg), REGTMP, asm)
-			} else {
-				zRIL(_a, op_XILF, uint32(p.To.Reg), uint32(v), asm)
-			}
-		}
-
-	case 24: // 32-bit logical op $constant reg
-		v := vregoff(ctxt, &p.From)
-		switch p.As {
-		case AANDW:
-			if uint32(v&0xffff0000) == 0xffff0000 {
-				zRI(op_NILL, uint32(p.To.Reg), uint32(v), asm)
-			} else if uint32(v&0x0000ffff) == 0x0000ffff {
-				zRI(op_NILH, uint32(p.To.Reg), uint32(v)>>16, asm)
-			} else {
-				zRIL(_a, op_NILF, uint32(p.To.Reg), uint32(v), asm)
-			}
-		case AORW:
-			if uint32(v&0xffff0000) == 0 {
-				zRI(op_OILL, uint32(p.To.Reg), uint32(v), asm)
-			} else if uint32(v&0x0000ffff) == 0 {
-				zRI(op_OILH, uint32(p.To.Reg), uint32(v)>>16, asm)
-			} else {
-				zRIL(_a, op_OILF, uint32(p.To.Reg), uint32(v), asm)
-			}
-		case AXORW:
-			zRIL(_a, op_XILF, uint32(p.To.Reg), uint32(v), asm)
-		}
-
-	case 26: // MOVD $offset(base)(index), reg
-		v := regoff(ctxt, &p.From)
-		r := p.From.Reg
-		if r == 0 {
-			r = o.param
-		}
-		i := p.From.Index
-		if v >= 0 && v < DISP12 {
-			zRX(op_LA, uint32(p.To.Reg), uint32(r), uint32(i), uint32(v), asm)
-		} else if v >= -DISP20/2 && v < DISP20/2 {
-			zRXY(op_LAY, uint32(p.To.Reg), uint32(r), uint32(i), uint32(v), asm)
-		} else {
-			zRIL(_a, op_LGFI, REGTMP, uint32(v), asm)
-			zRX(op_LA, uint32(p.To.Reg), uint32(r), REGTMP, uint32(i), asm)
-		}
-
-	case 31: // dword
-		wd := uint64(vregoff(ctxt, &p.From))
-		*asm = append(*asm,
-			uint8(wd>>56),
-			uint8(wd>>48),
-			uint8(wd>>40),
-			uint8(wd>>32),
-			uint8(wd>>24),
-			uint8(wd>>16),
-			uint8(wd>>8),
-			uint8(wd))
-
-	case 32: // fmul freg [freg] freg
-		r := int(p.Reg)
-		if r == 0 {
-			r = int(p.To.Reg)
-		}
-
-		var opcode uint32
-
-		switch p.As {
-		default:
-			ctxt.Diag("invalid opcode")
-		case AFMUL:
-			opcode = op_MDBR
-		case AFMULS:
-			opcode = op_MEEBR
-		}
-
-		if r == int(p.To.Reg) {
-			zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		} else if p.From.Reg == p.To.Reg {
-			zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
-		} else {
-			zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
-			zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-		}
-
-	case 33: // float op [freg] freg
-		r := p.From.Reg
-		if oclass(&p.From) == C_NONE {
-			r = p.To.Reg
-		}
-		var opcode uint32
-		switch p.As {
-		default:
-		case AFABS:
-			opcode = op_LPDBR
-		case AFNABS:
-			opcode = op_LNDBR
-		case AFNEG:
-			opcode = op_LCDFR
-		case AFNEGS:
-			opcode = op_LCEBR
-		case ALEDBR:
-			opcode = op_LEDBR
-		case ALDEBR:
-			opcode = op_LDEBR
-		case AFSQRT:
-			opcode = op_SQDBR
-		case AFSQRTS:
-			opcode = op_SQEBR
-		}
-		zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
-
-	case 34: // float multiply-add freg freg freg freg
-		var opcode uint32
-
-		switch p.As {
-		default:
-			ctxt.Diag("invalid opcode")
-		case AFMADD:
-			opcode = op_MADBR
-		case AFMADDS:
-			opcode = op_MAEBR
-		case AFMSUB:
-			opcode = op_MSDBR
-		case AFMSUBS:
-			opcode = op_MSEBR
-		case AFNMADD:
-			opcode = op_MADBR
-		case AFNMADDS:
-			opcode = op_MAEBR
-		case AFNMSUB:
-			opcode = op_MSDBR
-		case AFNMSUBS:
-			opcode = op_MSEBR
-		}
-
-		zRR(op_LDR, uint32(p.To.Reg), uint32(p.Reg), asm)
-		zRRD(opcode, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From3.Reg), asm)
-
-		if p.As == AFNMADD || p.As == AFNMADDS || p.As == AFNMSUB || p.As == AFNMSUBS {
-			zRRE(op_LCDFR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
-		}
-
-	case 35: // mov reg mem (no relocation)
-		d2 := regoff(ctxt, &p.To)
-		b2 := p.To.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		x2 := p.To.Index
-		if d2 < -DISP20/2 || d2 >= DISP20/2 {
-			zRIL(_a, op_LGFI, REGTMP, uint32(d2), asm)
-			if x2 != 0 {
-				zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
-			}
-			x2 = REGTMP
-			d2 = 0
-		}
-		zRXY(zopstore(ctxt, p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
-
-	case 36: // mov mem reg (no relocation)
-		d2 := regoff(ctxt, &p.From)
-		b2 := p.From.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		x2 := p.From.Index
-		if d2 < -DISP20/2 || d2 >= DISP20/2 {
-			zRIL(_a, op_LGFI, REGTMP, uint32(d2), asm)
-			if x2 != 0 {
-				zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
-			}
-			x2 = REGTMP
-			d2 = 0
-		}
-		zRXY(zopload(ctxt, p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
-
-	case 40: // word/byte
-		wd := uint32(regoff(ctxt, &p.From))
-		if p.As == AWORD { //WORD
-			*asm = append(*asm, uint8(wd>>24), uint8(wd>>16), uint8(wd>>8), uint8(wd))
-		} else { //BYTE
-			*asm = append(*asm, uint8(wd))
-		}
-
-	case 47: // negate [reg] reg
-		r := p.From.Reg
-		if r == 0 {
-			r = p.To.Reg
-		}
-		switch p.As {
-		case ANEG:
-			zRRE(op_LCGR, uint32(p.To.Reg), uint32(r), asm)
-		case ANEGW:
-			zRRE(op_LCGFR, uint32(p.To.Reg), uint32(r), asm)
-		}
-
-	case 48: // floating-point round to integer
-		m3 := vregoff(ctxt, &p.From)
-		if 0 > m3 || m3 > 7 {
-			ctxt.Diag("mask (%v) must be in the range [0, 7]", m3)
-		}
-		var opcode uint32
-		switch p.As {
-		case AFIEBR:
-			opcode = op_FIEBR
-		case AFIDBR:
-			opcode = op_FIDBR
-		}
-		zRRF(opcode, uint32(m3), 0, uint32(p.To.Reg), uint32(p.Reg), asm)
-
-	case 67: // fmov $0 freg
-		var opcode uint32
-		switch p.As {
-		case AFMOVS:
-			opcode = op_LZER
-		case AFMOVD:
-			opcode = op_LZDR
-		}
-		zRRE(opcode, uint32(p.To.Reg), 0, asm)
-
-	case 68: // movw areg reg
-		zRRE(op_EAR, uint32(p.To.Reg), uint32(p.From.Reg-REG_AR0), asm)
-
-	case 69: // movw reg areg
-		zRRE(op_SAR, uint32(p.To.Reg-REG_AR0), uint32(p.From.Reg), asm)
-
-	case 70: // cmp reg reg
-		if p.As == ACMPW || p.As == ACMPWU {
-			zRR(zoprr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
-		} else {
-			zRRE(zoprre(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
-		}
-
-	case 71: // cmp reg $constant
-		v := vregoff(ctxt, &p.To)
-		switch p.As {
-		case ACMP, ACMPW:
-			if int64(int32(v)) != v {
-				ctxt.Diag("%v overflows an int32", v)
-			}
-		case ACMPU, ACMPWU:
-			if int64(uint32(v)) != v {
-				ctxt.Diag("%v overflows a uint32", v)
-			}
-		}
-		if p.As == ACMP && int64(int16(v)) == v {
-			zRI(op_CGHI, uint32(p.From.Reg), uint32(v), asm)
-		} else if p.As == ACMPW && int64(int16(v)) == v {
-			zRI(op_CHI, uint32(p.From.Reg), uint32(v), asm)
-		} else {
-			zRIL(_a, zopril(ctxt, p.As), uint32(p.From.Reg), uint32(v), asm)
-		}
-
-	case 72: // mov $constant mem
-		v := regoff(ctxt, &p.From)
-		d := regoff(ctxt, &p.To)
-		r := p.To.Reg
-		x := p.To.Index
-		if r == 0 {
-			r = o.param
-		}
-		if int32(int16(v)) == v && x == 0 {
-			if d < 0 || d >= DISP12 {
-				if r == REGTMP || r == REGTMP2 {
-					zRIL(_a, op_AGFI, uint32(r), uint32(d), asm)
-				} else {
-					zRIL(_a, op_LGFI, REGTMP, uint32(d), asm)
-					zRRE(op_AGR, REGTMP, uint32(r), asm)
-					r = REGTMP
-				}
-				d = 0
-			}
-			var opcode uint32
-			switch p.As {
-			case AMOVD:
-				opcode = op_MVGHI
-			case AMOVW, AMOVWZ:
-				opcode = op_MVHI
-			case AMOVH, AMOVHZ:
-				opcode = op_MVHHI
-			case AMOVB, AMOVBZ:
-				opcode = op_MVI
-			}
-			if opcode == op_MVI {
-				zSI(opcode, uint32(v), uint32(r), uint32(d), asm)
-			} else {
-				zSIL(opcode, uint32(r), uint32(d), uint32(v), asm)
-			}
-		} else {
-			zRIL(_a, op_LGFI, REGTMP2, uint32(v), asm)
-			if d < -DISP20/2 || d >= DISP20/2 {
-				if r == REGTMP {
-					zRIL(_a, op_AGFI, REGTMP, uint32(d), asm)
-				} else {
-					zRIL(_a, op_LGFI, REGTMP, uint32(d), asm)
-					if x != 0 {
-						zRRE(op_AGR, REGTMP, uint32(x), asm)
-					}
-					x = REGTMP
-				}
-				d = 0
-			}
-			zRXY(zopstore(ctxt, p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm)
-		}
-
-	case 73: // mov $constant addr (including relocation)
-		v := regoff(ctxt, &p.From)
-		d := regoff(ctxt, &p.To)
-		a := uint32(0)
-		if d&1 != 0 {
-			d -= 1
-			a = 1
-		}
-		zRIL(_b, op_LARL, REGTMP, uint32(d), asm)
-		addrilreloc(ctxt, p.To.Sym, int64(d))
-		if int32(int16(v)) == v {
-			var opcode uint32
-			switch p.As {
-			case AMOVD:
-				opcode = op_MVGHI
-			case AMOVW, AMOVWZ:
-				opcode = op_MVHI
-			case AMOVH, AMOVHZ:
-				opcode = op_MVHHI
-			case AMOVB, AMOVBZ:
-				opcode = op_MVI
-			}
-			if opcode == op_MVI {
-				zSI(opcode, uint32(v), REGTMP, a, asm)
-			} else {
-				zSIL(opcode, REGTMP, a, uint32(v), asm)
-			}
-		} else {
-			zRIL(_a, op_LGFI, REGTMP2, uint32(v), asm)
-			zRXY(zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm)
-		}
-
-	case 74: // mov reg addr (including relocation)
-		i2 := regoff(ctxt, &p.To)
-		switch p.As {
-		case AMOVD:
-			zRIL(_b, op_STGRL, uint32(p.From.Reg), 0, asm)
-		case AMOVW, AMOVWZ: // The zero extension doesn't affect store instructions
-			zRIL(_b, op_STRL, uint32(p.From.Reg), 0, asm)
-		case AMOVH, AMOVHZ: // The zero extension doesn't affect store instructions
-			zRIL(_b, op_STHRL, uint32(p.From.Reg), 0, asm)
-		case AMOVB, AMOVBZ: // The zero extension doesn't affect store instructions
-			zRIL(_b, op_LARL, REGTMP, 0, asm)
-			adj := uint32(0) // adjustment needed for odd addresses
-			if i2&1 != 0 {
-				i2 -= 1
-				adj = 1
-			}
-			zRX(op_STC, uint32(p.From.Reg), 0, REGTMP, adj, asm)
-		case AFMOVD:
-			zRIL(_b, op_LARL, REGTMP, 0, asm)
-			zRX(op_STD, uint32(p.From.Reg), 0, REGTMP, 0, asm)
-		case AFMOVS:
-			zRIL(_b, op_LARL, REGTMP, 0, asm)
-			zRX(op_STE, uint32(p.From.Reg), 0, REGTMP, 0, asm)
-		}
-		addrilreloc(ctxt, p.To.Sym, int64(i2))
-
-	case 75: // mov addr reg (including relocation)
-		i2 := regoff(ctxt, &p.From)
-		switch p.As {
-		case AMOVD:
-			if i2&1 != 0 {
-				zRIL(_b, op_LARL, REGTMP, 0, asm)
-				zRXY(op_LG, uint32(p.To.Reg), REGTMP, 0, 1, asm)
-				i2 -= 1
-			} else {
-				zRIL(_b, op_LGRL, uint32(p.To.Reg), 0, asm)
-			}
-		case AMOVW:
-			zRIL(_b, op_LGFRL, uint32(p.To.Reg), 0, asm)
-		case AMOVWZ:
-			zRIL(_b, op_LLGFRL, uint32(p.To.Reg), 0, asm)
-		case AMOVH:
-			zRIL(_b, op_LGHRL, uint32(p.To.Reg), 0, asm)
-		case AMOVHZ:
-			zRIL(_b, op_LLGHRL, uint32(p.To.Reg), 0, asm)
-		case AMOVB, AMOVBZ:
-			zRIL(_b, op_LARL, REGTMP, 0, asm)
-			adj := uint32(0) // adjustment needed for odd addresses
-			if i2&1 != 0 {
-				i2 -= 1
-				adj = 1
-			}
-			switch p.As {
-			case AMOVB:
-				zRXY(op_LGB, uint32(p.To.Reg), 0, REGTMP, adj, asm)
-			case AMOVBZ:
-				zRXY(op_LLGC, uint32(p.To.Reg), 0, REGTMP, adj, asm)
-			}
-		case AFMOVD:
-			zRIL(_a, op_LARL, REGTMP, 0, asm)
-			zRX(op_LD, uint32(p.To.Reg), 0, REGTMP, 0, asm)
-		case AFMOVS:
-			zRIL(_a, op_LARL, REGTMP, 0, asm)
-			zRX(op_LE, uint32(p.To.Reg), 0, REGTMP, 0, asm)
-		}
-		addrilreloc(ctxt, p.From.Sym, int64(i2))
-
-	case 77: // syscall $constant
-		if p.From.Offset > 255 || p.From.Offset < 1 {
-			ctxt.Diag("illegal system call; system call number out of range: %v", p)
-			zE(op_TRAP2, asm) // trap always
-		} else {
-			zI(op_SVC, uint32(p.From.Offset), asm)
-		}
-
-	case 78: // undef
-		// "An instruction consisting entirely of binary 0s is guaranteed
-		// always to be an illegal instruction."
-		*asm = append(*asm, 0, 0, 0, 0)
-
-	case 79: // compare and swap reg reg reg
-		v := regoff(ctxt, &p.To)
-		if v < 0 {
-			v = 0
-		}
-		if p.As == ACS {
-			zRS(op_CS, uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg), uint32(v), asm)
-		} else if p.As == ACSG {
-			zRSY(op_CSG, uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg), uint32(v), asm)
-		}
-
-	case 81: // sync
-		zRR(op_BCR, 0xE, 0, asm)
-
-	case 82: // fixed to float conversion
-		var opcode uint32
-		switch p.As {
-		default:
-			log.Fatalf("unexpected opcode %v", p.As)
-		case ACEFBRA:
-			opcode = op_CEFBRA
-		case ACDFBRA:
-			opcode = op_CDFBRA
-		case ACEGBRA:
-			opcode = op_CEGBRA
-		case ACDGBRA:
-			opcode = op_CDGBRA
-		case ACELFBR:
-			opcode = op_CELFBR
-		case ACDLFBR:
-			opcode = op_CDLFBR
-		case ACELGBR:
-			opcode = op_CELGBR
-		case ACDLGBR:
-			opcode = op_CDLGBR
-		}
-		// set immediate operand M3 to 0 to use the default BFP rounding mode
-		// (usually round to nearest, ties to even)
-		// TODO(mundaym): should this be fixed at round to nearest, ties to even?
-		// M4 is reserved and must be 0
-		zRRF(opcode, 0, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-
-	case 83: // float to fixed conversion
-		var opcode uint32
-		switch p.As {
-		default:
-			log.Fatalf("unexpected opcode %v", p.As)
-		case ACFEBRA:
-			opcode = op_CFEBRA
-		case ACFDBRA:
-			opcode = op_CFDBRA
-		case ACGEBRA:
-			opcode = op_CGEBRA
-		case ACGDBRA:
-			opcode = op_CGDBRA
-		case ACLFEBR:
-			opcode = op_CLFEBR
-		case ACLFDBR:
-			opcode = op_CLFDBR
-		case ACLGEBR:
-			opcode = op_CLGEBR
-		case ACLGDBR:
-			opcode = op_CLGDBR
-		}
-		// set immediate operand M3 to 5 for rounding toward zero (required by Go spec)
-		// M4 is reserved and must be 0
-		zRRF(opcode, 5, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
-
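The fixed M3 value of 5 matches Go's conversion semantics, which a one-liner can confirm (standalone, not the deleted code):

package main

import "fmt"

func main() {
	// Go float-to-integer conversion truncates toward zero, which is why
	// case 83 hard-codes rounding mask 5 (round toward zero) instead of
	// using the current FPC rounding mode.
	x, y := -1.7, 1.7
	fmt.Println(int64(x), int64(y)) // -1 1
}
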
-	case 84: // storage-and-storage operations $length mem mem (length in From3)
-		l := regoff(ctxt, p.From3)
-		if l < 1 || l > 256 {
-			ctxt.Diag("number of bytes (%v) not in range [1,256]", l)
-		}
-		if p.From.Index != 0 || p.To.Index != 0 {
-			ctxt.Diag("cannot use index reg")
-		}
-		b1 := p.To.Reg
-		b2 := p.From.Reg
-		if b1 == 0 {
-			b1 = o.param
-		}
-		if b2 == 0 {
-			b2 = o.param
-		}
-		d1 := regoff(ctxt, &p.To)
-		d2 := regoff(ctxt, &p.From)
-		if d1 < 0 || d1 >= DISP12 {
-			if b2 == REGTMP {
-				ctxt.Diag("REGTMP conflict")
-			}
-			if b1 != REGTMP {
-				zRRE(op_LGR, REGTMP, uint32(b1), asm)
-			}
-			zRIL(_a, op_AGFI, REGTMP, uint32(d1), asm)
-			if d1 == d2 && b1 == b2 {
-				d2 = 0
-				b2 = REGTMP
-			}
-			d1 = 0
-			b1 = REGTMP
-		}
-		if d2 < 0 || d2 >= DISP12 {
-			if b1 == REGTMP2 {
-				ctxt.Diag("REGTMP2 conflict")
-			}
-			if b2 != REGTMP2 {
-				zRRE(op_LGR, REGTMP2, uint32(b2), asm)
-			}
-			zRIL(_a, op_AGFI, REGTMP2, uint32(d2), asm)
-			d2 = 0
-			b2 = REGTMP2
-		}
-		var opcode uint32
-		switch p.As {
-		default:
-			ctxt.Diag("unexpected opcode %v", p.As)
-		case AMVC:
-			opcode = op_MVC
-		case ACLC:
-			opcode = op_CLC
-			// swap operand order for CLC so that it matches CMP
-			b1, b2 = b2, b1
-			d1, d2 = d2, d1
-		case AXC:
-			opcode = op_XC
-		case AOC:
-			opcode = op_OC
-		case ANC:
-			opcode = op_NC
-		}
-		zSS(_a, opcode, uint32(l-1), 0, uint32(b1), uint32(d1), uint32(b2), uint32(d2), asm)
-
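The SS-format length field holds length-1, which is why case 84 checks the range [1,256] and passes l-1 to zSS. A trivial standalone check:

package main

import "fmt"

func main() {
	// The 8-bit L field of an SS-format instruction stores length-1, so the
	// supported 1..256 byte range maps onto the encodable values 0..255.
	for _, l := range []int{1, 256} {
		fmt.Printf("%d bytes -> L field %d\n", l, l-1)
	}
}
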
-	case 85: // load address relative long
-		v := regoff(ctxt, &p.From)
-		if p.From.Sym == nil {
-			if (v & 1) != 0 {
-				ctxt.Diag("cannot use LARL with odd offset: %v", v)
-			}
-		} else {
-			addrilreloc(ctxt, p.From.Sym, int64(v))
-			v = 0
-		}
-		zRIL(_b, op_LARL, uint32(p.To.Reg), uint32(v>>1), asm)
-
-	case 86: // load address
-		d := vregoff(ctxt, &p.From)
-		x := p.From.Index
-		b := p.From.Reg
-		if b == 0 {
-			b = o.param
-		}
-		switch p.As {
-		case ALA:
-			zRX(op_LA, uint32(p.To.Reg), uint32(x), uint32(b), uint32(d), asm)
-		case ALAY:
-			zRXY(op_LAY, uint32(p.To.Reg), uint32(x), uint32(b), uint32(d), asm)
-		}
-
-	case 87: // execute relative long
-		v := vregoff(ctxt, &p.From)
-		if p.From.Sym == nil {
-			if v&1 != 0 {
-				ctxt.Diag("cannot use EXRL with odd offset: %v", v)
-			}
-		} else {
-			addrilreloc(ctxt, p.From.Sym, v)
-			v = 0
-		}
-		zRIL(_b, op_EXRL, uint32(p.To.Reg), uint32(v>>1), asm)
-
-	case 88: // store clock
-		var opcode uint32
-		switch p.As {
-		case ASTCK:
-			opcode = op_STCK
-		case ASTCKC:
-			opcode = op_STCKC
-		case ASTCKE:
-			opcode = op_STCKE
-		case ASTCKF:
-			opcode = op_STCKF
-		}
-		v := vregoff(ctxt, &p.To)
-		r := int(p.To.Reg)
-		if r == 0 {
-			r = int(o.param)
-		}
-		zS(opcode, uint32(r), uint32(v), asm)
-
-	case 89: // compare and branch reg reg
-		var v int32
-		if p.Pcond != nil {
-			v = int32((p.Pcond.Pc - p.Pc) >> 1)
-		}
-		var opcode, opcode2 uint32
-		switch p.As {
-		case ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE:
-			opcode = op_CGRJ
-			opcode2 = op_CGR
-		case ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE:
-			opcode = op_CLGRJ
-			opcode2 = op_CLGR
-		}
-		mask := branchMask(ctxt, p)
-		if int32(int16(v)) != v {
-			zRRE(opcode2, uint32(p.From.Reg), uint32(p.Reg), asm)
-			zRIL(_c, op_BRCL, mask, uint32(v-sizeRRE/2), asm)
-		} else {
-			zRIE(_b, opcode, uint32(p.From.Reg), uint32(p.Reg), uint32(v), 0, 0, mask, 0, asm)
-		}
-
-	case 90: // compare and branch reg $constant
-		var v int32
-		if p.Pcond != nil {
-			v = int32((p.Pcond.Pc - p.Pc) >> 1)
-		}
-		var opcode, opcode2 uint32
-		switch p.As {
-		case ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE:
-			opcode = op_CGIJ
-			opcode2 = op_CGFI
-		case ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE:
-			opcode = op_CLGIJ
-			opcode2 = op_CLGFI
-		}
-		mask := branchMask(ctxt, p)
-		if int32(int16(v)) != v {
-			zRIL(_a, opcode2, uint32(p.From.Reg), uint32(regoff(ctxt, p.From3)), asm)
-			zRIL(_c, op_BRCL, mask, uint32(v-sizeRIL/2), asm)
-		} else {
-			zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(regoff(ctxt, p.From3)), asm)
-		}
-
-	case 93: // GOT lookup
-		v := vregoff(ctxt, &p.To)
-		if v != 0 {
-			ctxt.Diag("invalid offset against GOT slot %v", p)
-		}
-		zRIL(_b, op_LGRL, uint32(p.To.Reg), 0, asm)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + 2)
-		rel.Siz = 4
-		rel.Sym = p.From.Sym
-		rel.Type = obj.R_GOTPCREL
-		rel.Add = 2 + int64(rel.Siz)
-
-	case 94: // TLS local exec model
-		zRIL(_b, op_LARL, REGTMP, (sizeRIL+sizeRXY+sizeRI)>>1, asm)
-		zRXY(op_LG, uint32(p.To.Reg), REGTMP, 0, 0, asm)
-		zRI(op_BRC, 0xF, (sizeRI+8)>>1, asm)
-		*asm = append(*asm, 0, 0, 0, 0, 0, 0, 0, 0)
-		rel := obj.Addrel(ctxt.Cursym)
-		rel.Off = int32(ctxt.Pc + sizeRIL + sizeRXY + sizeRI)
-		rel.Siz = 8
-		rel.Sym = p.From.Sym
-		rel.Type = obj.R_TLS_LE
-		rel.Add = 0
-
-	case 95: // TLS initial exec model
-		// Assembly                   | Relocation symbol    | Done Here?
-		// --------------------------------------------------------------
-		// ear  %r11, %a0             |                      |
-		// sllg %r11, %r11, 32        |                      |
-		// ear  %r11, %a1             |                      |
-		// larl %r10, <var>@indntpoff | R_390_TLS_IEENT      | Y
-		// lg   %r10, 0(%r10)         | R_390_TLS_LOAD (tag) | Y
-		// la   %r10, 0(%r10, %r11)   |                      |
-		// --------------------------------------------------------------
-
-		// R_390_TLS_IEENT
-		zRIL(_b, op_LARL, REGTMP, 0, asm)
-		ieent := obj.Addrel(ctxt.Cursym)
-		ieent.Off = int32(ctxt.Pc + 2)
-		ieent.Siz = 4
-		ieent.Sym = p.From.Sym
-		ieent.Type = obj.R_TLS_IE
-		ieent.Add = 2 + int64(ieent.Siz)
-
-		// R_390_TLS_LOAD
-		zRXY(op_LGF, uint32(p.To.Reg), REGTMP, 0, 0, asm)
-		// TODO(mundaym): add R_390_TLS_LOAD relocation here
-		// not strictly required but might allow the linker to optimize
-
-	case 96: // clear macro
-		length := vregoff(ctxt, &p.From)
-		offset := vregoff(ctxt, &p.To)
-		reg := p.To.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		if length <= 0 {
-			ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length)
-		}
-		for length > 0 {
-			if offset < 0 || offset >= DISP12 {
-				if offset >= -DISP20/2 && offset < DISP20/2 {
-					zRXY(op_LAY, REGTMP, uint32(reg), 0, uint32(offset), asm)
-				} else {
-					if reg != REGTMP {
-						zRRE(op_LGR, REGTMP, uint32(reg), asm)
-					}
-					zRIL(_a, op_AGFI, REGTMP, uint32(offset), asm)
-				}
-				reg = REGTMP
-				offset = 0
-			}
-			size := length
-			if size > 256 {
-				size = 256
-			}
-
-			switch size {
-			case 1:
-				zSI(op_MVI, 0, uint32(reg), uint32(offset), asm)
-			case 2:
-				zSIL(op_MVHHI, uint32(reg), uint32(offset), 0, asm)
-			case 4:
-				zSIL(op_MVHI, uint32(reg), uint32(offset), 0, asm)
-			case 8:
-				zSIL(op_MVGHI, uint32(reg), uint32(offset), 0, asm)
-			default:
-				zSS(_a, op_XC, uint32(size-1), 0, uint32(reg), uint32(offset), uint32(reg), uint32(offset), asm)
-			}
-
-			length -= size
-			offset += size
-		}
-
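The CLEAR loop above splits the region into chunks of at most 256 bytes, storing zero directly for the small power-of-two sizes and using XC (a range XORed with itself) otherwise. A standalone sketch of just the chunking decision (illustrative):

package main

import "fmt"

// clearPlan lists the (instruction, size) pairs the clear macro above would
// emit for a region of the given length, ignoring displacement handling.
func clearPlan(length int64) []string {
	var plan []string
	for length > 0 {
		size := length
		if size > 256 {
			size = 256
		}
		var insn string
		switch size {
		case 1:
			insn = "MVI"
		case 2:
			insn = "MVHHI"
		case 4:
			insn = "MVHI"
		case 8:
			insn = "MVGHI"
		default:
			insn = "XC"
		}
		plan = append(plan, fmt.Sprintf("%s(%d)", insn, size))
		length -= size
	}
	return plan
}

func main() {
	fmt.Println(clearPlan(260)) // [XC(256) MVHI(4)]
}
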
-	case 97: // store multiple
-		rstart := p.From.Reg
-		rend := p.Reg
-		offset := regoff(ctxt, &p.To)
-		reg := p.To.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		if offset < -DISP20/2 || offset >= DISP20/2 {
-			if reg != REGTMP {
-				zRRE(op_LGR, REGTMP, uint32(reg), asm)
-			}
-			zRIL(_a, op_AGFI, REGTMP, uint32(offset), asm)
-			reg = REGTMP
-			offset = 0
-		}
-		switch p.As {
-		case ASTMY:
-			if offset >= 0 && offset < DISP12 {
-				zRS(op_STM, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
-			} else {
-				zRSY(op_STMY, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
-			}
-		case ASTMG:
-			zRSY(op_STMG, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
-		}
-
-	case 98: // load multiple
-		rstart := p.Reg
-		rend := p.To.Reg
-		offset := regoff(ctxt, &p.From)
-		reg := p.From.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		if offset < -DISP20/2 || offset >= DISP20/2 {
-			if reg != REGTMP {
-				zRRE(op_LGR, REGTMP, uint32(reg), asm)
-			}
-			zRIL(_a, op_AGFI, REGTMP, uint32(offset), asm)
-			reg = REGTMP
-			offset = 0
-		}
-		switch p.As {
-		case ALMY:
-			if offset >= 0 && offset < DISP12 {
-				zRS(op_LM, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
-			} else {
-				zRSY(op_LMY, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
-			}
-		case ALMG:
-			zRSY(op_LMG, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
-		}
-
-	case 99: // interlocked load and op
-		if p.To.Index != 0 {
-			ctxt.Diag("cannot use indexed address")
-		}
-		offset := regoff(ctxt, &p.To)
-		if offset < -DISP20/2 || offset >= DISP20/2 {
-			ctxt.Diag("%v does not fit into 20-bit signed integer", offset)
-		}
-		var opcode uint32
-		switch p.As {
-		case ALAA:
-			opcode = op_LAA
-		case ALAAG:
-			opcode = op_LAAG
-		case ALAAL:
-			opcode = op_LAAL
-		case ALAALG:
-			opcode = op_LAALG
-		case ALAN:
-			opcode = op_LAN
-		case ALANG:
-			opcode = op_LANG
-		case ALAX:
-			opcode = op_LAX
-		case ALAXG:
-			opcode = op_LAXG
-		case ALAO:
-			opcode = op_LAO
-		case ALAOG:
-			opcode = op_LAOG
-		}
-		zRSY(opcode, uint32(p.Reg), uint32(p.From.Reg), uint32(p.To.Reg), uint32(offset), asm)
-
-	case 100: // VRX STORE
-		op, m3, _ := vop(p.As)
-		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
-		}
-		b2 := p.To.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		d2 := uint32(vregoff(ctxt, &p.To))
-		zVRX(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
-
-	case 101: // VRX LOAD
-		op, m3, _ := vop(p.As)
-		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
-		}
-		b2 := p.From.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		d2 := uint32(vregoff(ctxt, &p.From))
-		zVRX(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
-
-	case 102: // VRV SCATTER
-		op, m3, _ := vop(p.As)
-		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
-		}
-		b2 := p.To.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		d2 := uint32(vregoff(ctxt, &p.To))
-		zVRV(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
-
-	case 103: // VRV GATHER
-		op, m3, _ := vop(p.As)
-		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
-		}
-		b2 := p.From.Reg
-		if b2 == 0 {
-			b2 = o.param
-		}
-		d2 := uint32(vregoff(ctxt, &p.From))
-		zVRV(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
-
-	case 104: // VRS SHIFT/ROTATE and LOAD GR FROM VR ELEMENT
-		op, m4, _ := vop(p.As)
-		fr := p.Reg
-		if fr == 0 {
-			fr = p.To.Reg
-		}
-		bits := uint32(vregoff(ctxt, &p.From))
-		zVRS(op, uint32(p.To.Reg), uint32(fr), uint32(p.From.Reg), bits, m4, asm)
-
-	case 105: // VRS STORE MULTIPLE
-		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.To))
-		reg := p.To.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		zVRS(op, uint32(p.From.Reg), uint32(p.Reg), uint32(reg), offset, 0, asm)
-
-	case 106: // VRS LOAD MULTIPLE
-		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.From))
-		reg := p.From.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		zVRS(op, uint32(p.Reg), uint32(p.To.Reg), uint32(reg), offset, 0, asm)
-
-	case 107: // VRS STORE WITH LENGTH
-		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.To))
-		reg := p.To.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		zVRS(op, uint32(p.From.Reg), uint32(p.From3.Reg), uint32(reg), offset, 0, asm)
-
-	case 108: // VRS LOAD WITH LENGTH
-		op, _, _ := vop(p.As)
-		offset := uint32(vregoff(ctxt, &p.From))
-		reg := p.From.Reg
-		if reg == 0 {
-			reg = o.param
-		}
-		zVRS(op, uint32(p.To.Reg), uint32(p.From3.Reg), uint32(reg), offset, 0, asm)
-
-	case 109: // VRI-a
-		op, _, _ := vop(p.As)
-		i2 := uint32(vregoff(ctxt, &p.From))
-		switch p.As {
-		case AVZERO:
-			i2 = 0
-		case AVONE:
-			i2 = 0xffff
-		}
-		m3 := uint32(0)
-		if p.From3 != nil {
-			m3 = uint32(vregoff(ctxt, p.From3))
-		}
-		zVRIa(op, uint32(p.To.Reg), i2, m3, asm)
-
-	case 110: // VRI-b
-		op, m4, _ := vop(p.As)
-		i2 := uint32(vregoff(ctxt, p.From3))
-		i3 := uint32(vregoff(ctxt, &p.From))
-		zVRIb(op, uint32(p.To.Reg), i2, i3, m4, asm)
-
-	case 111: // VRI-c
-		op, m4, _ := vop(p.As)
-		i2 := uint32(vregoff(ctxt, &p.From))
-		zVRIc(op, uint32(p.To.Reg), uint32(p.Reg), i2, m4, asm)
-
-	case 112: // VRI-d
-		op, m5, _ := vop(p.As)
-		i4 := uint32(vregoff(ctxt, p.From3))
-		zVRId(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), i4, m5, asm)
-
-	case 113: // VRI-e
-		op, m4, _ := vop(p.As)
-		m5 := singleElementMask(p.As)
-		i3 := uint32(vregoff(ctxt, &p.From))
-		zVRIe(op, uint32(p.To.Reg), uint32(p.Reg), i3, m5, m4, asm)
-
-	case 114: // VRR-a
-		op, m3, m5 := vop(p.As)
-		m4 := singleElementMask(p.As)
-		zVRRa(op, uint32(p.To.Reg), uint32(p.From.Reg), m5, m4, m3, asm)
-
-	case 115: // VRR-a COMPARE
-		op, m3, m5 := vop(p.As)
-		m4 := singleElementMask(p.As)
-		zVRRa(op, uint32(p.From.Reg), uint32(p.To.Reg), m5, m4, m3, asm)
-
-	case 116: // VRR-a
-
-	case 117: // VRR-b
-		op, m4, m5 := vop(p.As)
-		zVRRb(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), m5, m4, asm)
-
-	case 118: // VRR-c
-		op, m4, m6 := vop(p.As)
-		m5 := singleElementMask(p.As)
-		v3 := p.Reg
-		if v3 == 0 {
-			v3 = p.To.Reg
-		}
-		zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(v3), m6, m5, m4, asm)
-
-	case 119: // VRR-c SHIFT/ROTATE/DIVIDE/SUB (rhs value on the left, like SLD, DIV etc.)
-		op, m4, m6 := vop(p.As)
-		m5 := singleElementMask(p.As)
-		v2 := p.Reg
-		if v2 == 0 {
-			v2 = p.To.Reg
-		}
-		zVRRc(op, uint32(p.To.Reg), uint32(v2), uint32(p.From.Reg), m6, m5, m4, asm)
-
-	case 120: // VRR-d
-		op, m6, _ := vop(p.As)
-		m5 := singleElementMask(p.As)
-		v1 := uint32(p.To.Reg)
-		v2 := uint32(p.From3.Reg)
-		v3 := uint32(p.From.Reg)
-		v4 := uint32(p.Reg)
-		zVRRd(op, v1, v2, v3, m6, m5, v4, asm)
-
-	case 121: // VRR-e
-		op, m6, _ := vop(p.As)
-		m5 := singleElementMask(p.As)
-		v1 := uint32(p.To.Reg)
-		v2 := uint32(p.From3.Reg)
-		v3 := uint32(p.From.Reg)
-		v4 := uint32(p.Reg)
-		zVRRe(op, v1, v2, v3, m6, m5, v4, asm)
-
-	case 122: // VRR-f LOAD VRS FROM GRS DISJOINT
-		op, _, _ := vop(p.As)
-		zVRRf(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), asm)
-
-	case 123: // VPDI $m4, V2, V3, V1
-		op, _, _ := vop(p.As)
-		m4 := regoff(ctxt, p.From3)
-		zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), 0, 0, uint32(m4), asm)
-	}
-}
-
-func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
-	ctxt.Instoffset = 0
-	if a != nil {
-		aclass(ctxt, a)
-	}
-	return ctxt.Instoffset
-}
-
-func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
-	return int32(vregoff(ctxt, a))
-}
-
-// zopload returns the RXY op for the given load
-func zopload(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	// fixed point load
-	case AMOVD:
-		return op_LG
-	case AMOVW:
-		return op_LGF
-	case AMOVWZ:
-		return op_LLGF
-	case AMOVH:
-		return op_LGH
-	case AMOVHZ:
-		return op_LLGH
-	case AMOVB:
-		return op_LGB
-	case AMOVBZ:
-		return op_LLGC
-
-	// floating point load
-	case AFMOVD:
-		return op_LDY
-	case AFMOVS:
-		return op_LEY
-
-	// byte reversed load
-	case AMOVDBR:
-		return op_LRVG
-	case AMOVWBR:
-		return op_LRV
-	case AMOVHBR:
-		return op_LRVH
-	}
-
-	ctxt.Diag("unknown load opcode %v", a)
-	return 0
-}
-
-// zopstore returns the RXY op for the given store
-func zopstore(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	// fixed point store
-	case AMOVD:
-		return op_STG
-	case AMOVW, AMOVWZ:
-		return op_STY
-	case AMOVH, AMOVHZ:
-		return op_STHY
-	case AMOVB, AMOVBZ:
-		return op_STCY
-
-	// floating point store
-	case AFMOVD:
-		return op_STDY
-	case AFMOVS:
-		return op_STEY
-
-	// byte reversed store
-	case AMOVDBR:
-		return op_STRVG
-	case AMOVWBR:
-		return op_STRV
-	case AMOVHBR:
-		return op_STRVH
-	}
-
-	ctxt.Diag("unknown store opcode %v", a)
-	return 0
-}
-
-// zoprre returns the RRE op for the given a
-func zoprre(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ACMP:
-		return op_CGR
-	case ACMPU:
-		return op_CLGR
-	case AFCMPO: //ordered
-		return op_KDBR
-	case AFCMPU: //unordered
-		return op_CDBR
-	case ACEBR:
-		return op_CEBR
-	}
-	ctxt.Diag("unknown rre opcode %v", a)
-	return 0
-}
-
-// zoprr returns the RR op for the given a
-func zoprr(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ACMPW:
-		return op_CR
-	case ACMPWU:
-		return op_CLR
-	}
-	ctxt.Diag("unknown rr opcode %v", a)
-	return 0
-}
-
-// zopril returns the RIL op for the given a
-func zopril(ctxt *obj.Link, a obj.As) uint32 {
-	switch a {
-	case ACMP:
-		return op_CGFI
-	case ACMPU:
-		return op_CLGFI
-	case ACMPW:
-		return op_CFI
-	case ACMPWU:
-		return op_CLFI
-	}
-	ctxt.Diag("unknown ril opcode %v", a)
-	return 0
-}
-
-// z instruction sizes
-const (
-	sizeE    = 2
-	sizeI    = 2
-	sizeIE   = 4
-	sizeMII  = 6
-	sizeRI   = 4
-	sizeRI1  = 4
-	sizeRI2  = 4
-	sizeRI3  = 4
-	sizeRIE  = 6
-	sizeRIE1 = 6
-	sizeRIE2 = 6
-	sizeRIE3 = 6
-	sizeRIE4 = 6
-	sizeRIE5 = 6
-	sizeRIE6 = 6
-	sizeRIL  = 6
-	sizeRIL1 = 6
-	sizeRIL2 = 6
-	sizeRIL3 = 6
-	sizeRIS  = 6
-	sizeRR   = 2
-	sizeRRD  = 4
-	sizeRRE  = 4
-	sizeRRF  = 4
-	sizeRRF1 = 4
-	sizeRRF2 = 4
-	sizeRRF3 = 4
-	sizeRRF4 = 4
-	sizeRRF5 = 4
-	sizeRRR  = 2
-	sizeRRS  = 6
-	sizeRS   = 4
-	sizeRS1  = 4
-	sizeRS2  = 4
-	sizeRSI  = 4
-	sizeRSL  = 6
-	sizeRSY  = 6
-	sizeRSY1 = 6
-	sizeRSY2 = 6
-	sizeRX   = 4
-	sizeRX1  = 4
-	sizeRX2  = 4
-	sizeRXE  = 6
-	sizeRXF  = 6
-	sizeRXY  = 6
-	sizeRXY1 = 6
-	sizeRXY2 = 6
-	sizeS    = 4
-	sizeSI   = 4
-	sizeSIL  = 6
-	sizeSIY  = 6
-	sizeSMI  = 6
-	sizeSS   = 6
-	sizeSS1  = 6
-	sizeSS2  = 6
-	sizeSS3  = 6
-	sizeSS4  = 6
-	sizeSS5  = 6
-	sizeSS6  = 6
-	sizeSSE  = 6
-	sizeSSF  = 6
-)
-
-// instruction format variations
-type form int
-
-const (
-	_a form = iota
-	_b
-	_c
-	_d
-	_e
-	_f
-)
-
-func zE(op uint32, asm *[]byte) {
-	*asm = append(*asm, uint8(op>>8), uint8(op))
-}
-
-func zI(op, i1 uint32, asm *[]byte) {
-	*asm = append(*asm, uint8(op>>8), uint8(i1))
-}
-
-func zMII(op, m1, ri2, ri3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(m1)<<4)|uint8((ri2>>8)&0x0F),
-		uint8(ri2),
-		uint8(ri3>>16),
-		uint8(ri3>>8),
-		uint8(ri3))
-}
-
-func zRI(op, r1_m1, i2_ri2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1_m1)<<4)|(uint8(op)&0x0F),
-		uint8(i2_ri2>>8),
-		uint8(i2_ri2))
-}
-
-// Expected argument values for the instruction formats.
-//
-// Format    a1  a2   a3  a4  a5  a6  a7
-// ------------------------------------
-// a         r1,  0,  i2,  0,  0, m3,  0
-// b         r1, r2, ri4,  0,  0, m3,  0
-// c         r1, m3, ri4,  0,  0,  0, i2
-// d         r1, r3,  i2,  0,  0,  0,  0
-// e         r1, r3, ri2,  0,  0,  0,  0
-// f         r1, r2,   0, i3, i4,  0, i5
-// g         r1, m3,  i2,  0,  0,  0,  0
-func zRIE(f form, op, r1, r2_m3_r3, i2_ri4_ri2, i3, i4, m3, i2_i5 uint32, asm *[]byte) {
-	*asm = append(*asm, uint8(op>>8), uint8(r1)<<4|uint8(r2_m3_r3&0x0F))
-
-	switch f {
-	default:
-		*asm = append(*asm, uint8(i2_ri4_ri2>>8), uint8(i2_ri4_ri2))
-	case _f:
-		*asm = append(*asm, uint8(i3), uint8(i4))
-	}
-
-	switch f {
-	case _a, _b:
-		*asm = append(*asm, uint8(m3)<<4)
-	default:
-		*asm = append(*asm, uint8(i2_i5))
-	}
-
-	*asm = append(*asm, uint8(op))
-}
-
-func zRIL(f form, op, r1_m1, i2_ri2 uint32, asm *[]byte) {
-	if f == _a || f == _b {
-		r1_m1 = r1_m1 - obj.RBaseS390X // this is a register base
-	}
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1_m1)<<4)|(uint8(op)&0x0F),
-		uint8(i2_ri2>>24),
-		uint8(i2_ri2>>16),
-		uint8(i2_ri2>>8),
-		uint8(i2_ri2))
-}
-
-func zRIS(op, r1, m3, b4, d4, i2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1)<<4)|uint8(m3&0x0F),
-		(uint8(b4)<<4)|(uint8(d4>>8)&0x0F),
-		uint8(d4),
-		uint8(i2),
-		uint8(op))
-}
-
-func zRR(op, r1, r2 uint32, asm *[]byte) {
-	*asm = append(*asm, uint8(op>>8), (uint8(r1)<<4)|uint8(r2&0x0F))
-}
-
-func zRRD(op, r1, r3, r2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(op),
-		uint8(r1)<<4,
-		(uint8(r3)<<4)|uint8(r2&0x0F))
-}
-
-func zRRE(op, r1, r2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(op),
-		0,
-		(uint8(r1)<<4)|uint8(r2&0x0F))
-}
-
-func zRRF(op, r3_m3, m4, r1, r2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(op),
-		(uint8(r3_m3)<<4)|uint8(m4&0x0F),
-		(uint8(r1)<<4)|uint8(r2&0x0F))
-}
-
-func zRRS(op, r1, r2, b4, d4, m3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1)<<4)|uint8(r2&0x0F),
-		(uint8(b4)<<4)|uint8((d4>>8)&0x0F),
-		uint8(d4),
-		uint8(m3)<<4,
-		uint8(op))
-}
-
-func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1)<<4)|uint8(r3_m3&0x0F),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2))
-}
-
-func zRSI(op, r1, r3, ri2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1)<<4)|uint8(r3&0x0F),
-		uint8(ri2>>8),
-		uint8(ri2))
-}
-
-func zRSL(op, l1, b2, d2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(l1),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2),
-		uint8(op))
-}
-
-func zRSY(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
-	dl2 := uint16(d2) & 0x0FFF
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1)<<4)|uint8(r3_m3&0x0F),
-		(uint8(b2)<<4)|(uint8(dl2>>8)&0x0F),
-		uint8(dl2),
-		uint8(d2>>12),
-		uint8(op))
-}
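
zRSY above (and zRXY and zSIY below) split a signed 20-bit displacement into a 12-bit low field (DL) and an 8-bit high field (DH). A standalone sketch of the split (illustrative):

package main

import "fmt"

// splitDisp20 splits a signed 20-bit displacement the same way zRSY above
// does: the low 12 bits go in the DL field, the next 8 bits in the DH field.
func splitDisp20(d int32) (dl uint16, dh uint8) {
	return uint16(d) & 0x0FFF, uint8(d >> 12)
}

func main() {
	dl, dh := splitDisp20(-8)             // two's complement: 0xFFFF8
	fmt.Printf("DL=%#x DH=%#x\n", dl, dh) // DL=0xff8 DH=0xff
}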
-
-func zRX(op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1_m1)<<4)|uint8(x2&0x0F),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2))
-}
-
-func zRXE(op, r1, x2, b2, d2, m3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1)<<4)|uint8(x2&0x0F),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2),
-		uint8(m3)<<4,
-		uint8(op))
-}
-
-func zRXF(op, r3, x2, b2, d2, m1 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r3)<<4)|uint8(x2&0x0F),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2),
-		uint8(m1)<<4,
-		uint8(op))
-}
-
-func zRXY(op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
-	dl2 := uint16(d2) & 0x0FFF
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r1_m1)<<4)|uint8(x2&0x0F),
-		(uint8(b2)<<4)|(uint8(dl2>>8)&0x0F),
-		uint8(dl2),
-		uint8(d2>>12),
-		uint8(op))
-}
-
-func zS(op, b2, d2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(op),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2))
-}
-
-func zSI(op, i2, b1, d1 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(i2),
-		(uint8(b1)<<4)|uint8((d1>>8)&0x0F),
-		uint8(d1))
-}
-
-func zSIL(op, b1, d1, i2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(op),
-		(uint8(b1)<<4)|uint8((d1>>8)&0x0F),
-		uint8(d1),
-		uint8(i2>>8),
-		uint8(i2))
-}
-
-func zSIY(op, i2, b1, d1 uint32, asm *[]byte) {
-	dl1 := uint16(d1) & 0x0FFF
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(i2),
-		(uint8(b1)<<4)|(uint8(dl1>>8)&0x0F),
-		uint8(dl1),
-		uint8(d1>>12),
-		uint8(op))
-}
-
-func zSMI(op, m1, b3, d3, ri2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(m1)<<4,
-		(uint8(b3)<<4)|uint8((d3>>8)&0x0F),
-		uint8(d3),
-		uint8(ri2>>8),
-		uint8(ri2))
-}
-
-// Expected argument values for the instruction formats.
-//
-// Format    a1  a2  a3  a4  a5  a6
-// -------------------------------
-// a         l1,  0, b1, d1, b2, d2
-// b         l1, l2, b1, d1, b2, d2
-// c         l1, i3, b1, d1, b2, d2
-// d         r1, r3, b1, d1, b2, d2
-// e         r1, r3, b2, d2, b4, d4
-// f          0, l2, b1, d1, b2, d2
-func zSS(f form, op, l1_r1, l2_i3_r3, b1_b2, d1_d2, b2_b4, d2_d4 uint32, asm *[]byte) {
-	*asm = append(*asm, uint8(op>>8))
-
-	switch f {
-	case _a:
-		*asm = append(*asm, uint8(l1_r1))
-	case _b, _c, _d, _e:
-		*asm = append(*asm, (uint8(l1_r1)<<4)|uint8(l2_i3_r3&0x0F))
-	case _f:
-		*asm = append(*asm, uint8(l2_i3_r3))
-	}
-
-	*asm = append(*asm,
-		(uint8(b1_b2)<<4)|uint8((d1_d2>>8)&0x0F),
-		uint8(d1_d2),
-		(uint8(b2_b4)<<4)|uint8((d2_d4>>8)&0x0F),
-		uint8(d2_d4))
-}
-
-func zSSE(op, b1, d1, b2, d2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(op),
-		(uint8(b1)<<4)|uint8((d1>>8)&0x0F),
-		uint8(d1),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2))
-}
-
-func zSSF(op, r3, b1, d1, b2, d2 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(r3)<<4)|(uint8(op)&0x0F),
-		(uint8(b1)<<4)|uint8((d1>>8)&0x0F),
-		uint8(d1),
-		(uint8(b2)<<4)|uint8((d2>>8)&0x0F),
-		uint8(d2))
-}
-
-func rxb(va, vb, vc, vd uint32) uint8 {
-	mask := uint8(0)
-	if va >= REG_V16 && va <= REG_V31 {
-		mask |= 0x8
-	}
-	if vb >= REG_V16 && vb <= REG_V31 {
-		mask |= 0x4
-	}
-	if vc >= REG_V16 && vc <= REG_V31 {
-		mask |= 0x2
-	}
-	if vd >= REG_V16 && vd <= REG_V31 {
-		mask |= 0x1
-	}
-	return mask
-}
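
The register fields in the vector formats are four bits wide, so rxb above collects the fifth bit of each vector register number (V16-V31) into the RXB byte. A standalone illustration using plain 0-31 register numbers in place of the REG_V constants:

package main

import "fmt"

// rxbBits mirrors rxb above with plain 0-31 register numbers: each operand
// whose number is 16 or higher contributes one bit to the RXB byte, while
// the low four bits of the number go in the instruction's register field.
func rxbBits(v1, v2, v3, v4 int) uint8 {
	m := uint8(0)
	if v1 >= 16 {
		m |= 0x8
	}
	if v2 >= 16 {
		m |= 0x4
	}
	if v3 >= 16 {
		m |= 0x2
	}
	if v4 >= 16 {
		m |= 0x1
	}
	return m
}

func main() {
	// V17 used as the first operand: field value 1, RXB bit 0x8 set.
	fmt.Printf("field=%d rxb=%#x\n", 17&0xf, rxbBits(17, 3, 0, 0))
}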
-
-func zVRX(op, v1, x2, b2, d2, m3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(x2)&0xf),
-		(uint8(b2)<<4)|(uint8(d2>>8)&0xf),
-		uint8(d2),
-		(uint8(m3)<<4)|rxb(v1, 0, 0, 0),
-		uint8(op))
-}
-
-func zVRV(op, v1, v2, b2, d2, m3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		(uint8(b2)<<4)|(uint8(d2>>8)&0xf),
-		uint8(d2),
-		(uint8(m3)<<4)|rxb(v1, v2, 0, 0),
-		uint8(op))
-}
-
-func zVRS(op, v1, v3_r3, b2, d2, m4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v3_r3)&0xf),
-		(uint8(b2)<<4)|(uint8(d2>>8)&0xf),
-		uint8(d2),
-		(uint8(m4)<<4)|rxb(v1, v3_r3, 0, 0),
-		uint8(op))
-}
-
-func zVRRa(op, v1, v2, m5, m4, m3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		0,
-		(uint8(m5)<<4)|(uint8(m4)&0xf),
-		(uint8(m3)<<4)|rxb(v1, v2, 0, 0),
-		uint8(op))
-}
-
-func zVRRb(op, v1, v2, v3, m5, m4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		uint8(v3)<<4,
-		uint8(m5)<<4,
-		(uint8(m4)<<4)|rxb(v1, v2, v3, 0),
-		uint8(op))
-}
-
-func zVRRc(op, v1, v2, v3, m6, m5, m4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		uint8(v3)<<4,
-		(uint8(m6)<<4)|(uint8(m5)&0xf),
-		(uint8(m4)<<4)|rxb(v1, v2, v3, 0),
-		uint8(op))
-}
-
-func zVRRd(op, v1, v2, v3, m5, m6, v4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		(uint8(v3)<<4)|(uint8(m5)&0xf),
-		uint8(m6)<<4,
-		(uint8(v4)<<4)|rxb(v1, v2, v3, v4),
-		uint8(op))
-}
-
-func zVRRe(op, v1, v2, v3, m6, m5, v4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		(uint8(v3)<<4)|(uint8(m6)&0xf),
-		uint8(m5),
-		(uint8(v4)<<4)|rxb(v1, v2, v3, v4),
-		uint8(op))
-}
-
-func zVRRf(op, v1, r2, r3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(r2)&0xf),
-		uint8(r3)<<4,
-		0,
-		rxb(v1, 0, 0, 0),
-		uint8(op))
-}
-
-func zVRIa(op, v1, i2, m3 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(v1)<<4,
-		uint8(i2>>8),
-		uint8(i2),
-		(uint8(m3)<<4)|rxb(v1, 0, 0, 0),
-		uint8(op))
-}
-
-func zVRIb(op, v1, i2, i3, m4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		uint8(v1)<<4,
-		uint8(i2),
-		uint8(i3),
-		(uint8(m4)<<4)|rxb(v1, 0, 0, 0),
-		uint8(op))
-}
-
-func zVRIc(op, v1, v3, i2, m4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v3)&0xf),
-		uint8(i2>>8),
-		uint8(i2),
-		(uint8(m4)<<4)|rxb(v1, v3, 0, 0),
-		uint8(op))
-}
-
-func zVRId(op, v1, v2, v3, i4, m5 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		uint8(v3)<<4,
-		uint8(i4),
-		(uint8(m5)<<4)|rxb(v1, v2, v3, 0),
-		uint8(op))
-}
-
-func zVRIe(op, v1, v2, i3, m5, m4 uint32, asm *[]byte) {
-	*asm = append(*asm,
-		uint8(op>>8),
-		(uint8(v1)<<4)|(uint8(v2)&0xf),
-		uint8(i3>>4),
-		(uint8(i3)<<4)|(uint8(m5)&0xf),
-		(uint8(m4)<<4)|rxb(v1, v2, 0, 0),
-		uint8(op))
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/listz.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/listz.go
deleted file mode 100644
index 23ae9fe..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/listz.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/listz.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/listz.go:1
-// Based on cmd/internal/obj/ppc64/list9.go.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-func init() {
-	obj.RegisterRegister(obj.RBaseS390X, REG_R0+1024, Rconv)
-	obj.RegisterOpcode(obj.ABaseS390X, Anames)
-}
-
-func Rconv(r int) string {
-	if r == 0 {
-		return "NONE"
-	}
-	if r == REGG {
-		// Special case.
-		return "g"
-	}
-	if REG_R0 <= r && r <= REG_R15 {
-		return fmt.Sprintf("R%d", r-REG_R0)
-	}
-	if REG_F0 <= r && r <= REG_F15 {
-		return fmt.Sprintf("F%d", r-REG_F0)
-	}
-	if REG_AR0 <= r && r <= REG_AR15 {
-		return fmt.Sprintf("AR%d", r-REG_AR0)
-	}
-	if REG_V0 <= r && r <= REG_V31 {
-		return fmt.Sprintf("V%d", r-REG_V0)
-	}
-	return fmt.Sprintf("Rgok(%d)", r-obj.RBaseS390X)
-}
-
-func DRconv(a int) string {
-	s := "C_??"
-	if a >= C_NONE && a <= C_NCLASS {
-		s = cnamesz[a]
-	}
-	var fp string
-	fp += s
-	return fp
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/objz.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/objz.go
deleted file mode 100644
index f40614e..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/objz.go
+++ /dev/null
@@ -1,1031 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/objz.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/objz.go:1
-// Based on cmd/internal/obj/ppc64/obj9.go.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"math"
-)
-
-func progedit(ctxt *obj.Link, p *obj.Prog) {
-	p.From.Class = 0
-	p.To.Class = 0
-
-	// Rewrite BR/BL to symbol as TYPE_BRANCH.
-	switch p.As {
-	case ABR,
-		ABL,
-		obj.ARET,
-		obj.ADUFFZERO,
-		obj.ADUFFCOPY:
-		if p.To.Sym != nil {
-			p.To.Type = obj.TYPE_BRANCH
-		}
-	}
-
-	// Rewrite float constants to values stored in memory unless they are +0.
-	switch p.As {
-	case AFMOVS:
-		if p.From.Type == obj.TYPE_FCONST {
-			f32 := float32(p.From.Val.(float64))
-			i32 := math.Float32bits(f32)
-			if i32 == 0 { // +0
-				break
-			}
-			literal := fmt.Sprintf("$f32.%08x", i32)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 4
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-	case AFMOVD:
-		if p.From.Type == obj.TYPE_FCONST {
-			i64 := math.Float64bits(p.From.Val.(float64))
-			if i64 == 0 { // +0
-				break
-			}
-			literal := fmt.Sprintf("$f64.%016x", i64)
-			s := obj.Linklookup(ctxt, literal, 0)
-			s.Size = 8
-			p.From.Type = obj.TYPE_MEM
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Offset = 0
-		}
-
-		// put constants not loadable by LOAD IMMEDIATE into memory
-	case AMOVD:
-		if p.From.Type == obj.TYPE_CONST {
-			val := p.From.Offset
-			if int64(int32(val)) != val &&
-				int64(uint32(val)) != val &&
-				int64(uint64(val)&(0xffffffff<<32)) != val {
-				literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
-				s := obj.Linklookup(ctxt, literal, 0)
-				s.Size = 8
-				p.From.Type = obj.TYPE_MEM
-				p.From.Sym = s
-				p.From.Sym.Set(obj.AttrLocal, true)
-				p.From.Name = obj.NAME_EXTERN
-				p.From.Offset = 0
-			}
-		}
-	}
-
-	// Rewrite SUB constants into ADD.
-	switch p.As {
-	case ASUBC:
-		if p.From.Type == obj.TYPE_CONST && isint32(-p.From.Offset) {
-			p.From.Offset = -p.From.Offset
-			p.As = AADDC
-		}
-
-	case ASUB:
-		if p.From.Type == obj.TYPE_CONST && isint32(-p.From.Offset) {
-			p.From.Offset = -p.From.Offset
-			p.As = AADD
-		}
-	}
-
-	if ctxt.Flag_dynlink {
-		rewriteToUseGot(ctxt, p)
-	}
-}
-
-// Rewrite p, if necessary, to access global data via the global offset table.
-func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
-	// At the moment EXRL instructions are not emitted by the compiler and only reference local symbols in
-	// assembly code.
-	if p.As == AEXRL {
-		return
-	}
-
-	// We only care about global data: NAME_EXTERN means a global
-	// symbol in the Go sense, and p.Sym.Local is true for a few
-	// internally defined symbols.
-	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		// MOVD $sym, Rx becomes MOVD sym@GOT, Rx
-		// MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx
-		if p.To.Type != obj.TYPE_REG || p.As != AMOVD {
-			ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		q := p
-		if p.From.Offset != 0 {
-			q = obj.Appendp(ctxt, p)
-			q.As = AADD
-			q.From.Type = obj.TYPE_CONST
-			q.From.Offset = p.From.Offset
-			q.To = p.To
-			p.From.Offset = 0
-		}
-	}
-	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	var source *obj.Addr
-	// MOVD sym, Ry becomes MOVD sym@GOT, REGTMP; MOVD (REGTMP), Ry
-	// MOVD Ry, sym becomes MOVD sym@GOT, REGTMP; MOVD Ry, (REGTMP)
-	// An addition may be inserted between the two MOVs if there is an offset.
-	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
-		}
-		source = &p.From
-	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-		source = &p.To
-	} else {
-		return
-	}
-	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
-		return
-	}
-	if source.Sym.Type == obj.STLSBSS {
-		return
-	}
-	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	p1 := obj.Appendp(ctxt, p)
-	p2 := obj.Appendp(ctxt, p1)
-
-	p1.As = AMOVD
-	p1.From.Type = obj.TYPE_MEM
-	p1.From.Sym = source.Sym
-	p1.From.Name = obj.NAME_GOTREF
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = REGTMP
-
-	p2.As = p.As
-	p2.From = p.From
-	p2.To = p.To
-	if p.From.Name == obj.NAME_EXTERN {
-		p2.From.Reg = REGTMP
-		p2.From.Name = obj.NAME_NONE
-		p2.From.Sym = nil
-	} else if p.To.Name == obj.NAME_EXTERN {
-		p2.To.Reg = REGTMP
-		p2.To.Name = obj.NAME_NONE
-		p2.To.Sym = nil
-	} else {
-		return
-	}
-	obj.Nopout(p)
-}
-
-func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
-	// TODO(minux): add morestack short-cuts with small fixed frame-size.
-	ctxt.Cursym = cursym
-
-	if cursym.Text == nil || cursym.Text.Link == nil {
-		return
-	}
-
-	p := cursym.Text
-	textstksiz := p.To.Offset
-	if textstksiz == -8 {
-		// Compatibility hack.
-		p.From3.Offset |= obj.NOFRAME
-		textstksiz = 0
-	}
-	if textstksiz%8 != 0 {
-		ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
-	}
-	if p.From3.Offset&obj.NOFRAME != 0 {
-		if textstksiz != 0 {
-			ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
-		}
-	}
-
-	cursym.Args = p.To.Val.(int32)
-	cursym.Locals = int32(textstksiz)
-
-	/*
-	 * find leaf subroutines
-	 * strip NOPs
-	 * expand RET
-	 * expand BECOME pseudo
-	 */
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f noops\n", obj.Cputime())
-	}
-
-	var q *obj.Prog
-	var q1 *obj.Prog
-	for p := cursym.Text; p != nil; p = p.Link {
-		switch p.As {
-		/* too hard, just leave alone */
-		case obj.ATEXT:
-			q = p
-
-			p.Mark |= LABEL | LEAF | SYNC
-			if p.Link != nil {
-				p.Link.Mark |= LABEL
-			}
-
-		case ASYNC,
-			AWORD:
-			q = p
-			p.Mark |= LABEL | SYNC
-			continue
-
-		case AMOVW, AMOVWZ, AMOVD:
-			q = p
-			if p.From.Reg >= REG_RESERVED || p.To.Reg >= REG_RESERVED {
-				p.Mark |= LABEL | SYNC
-			}
-			continue
-
-		case AFABS,
-			AFADD,
-			AFDIV,
-			AFMADD,
-			AFMOVD,
-			AFMOVS,
-			AFMSUB,
-			AFMUL,
-			AFNABS,
-			AFNEG,
-			AFNMADD,
-			AFNMSUB,
-			ALEDBR,
-			ALDEBR,
-			AFSUB:
-			q = p
-
-			p.Mark |= FLOAT
-			continue
-
-		case ABL,
-			ABCL,
-			obj.ADUFFZERO,
-			obj.ADUFFCOPY:
-			cursym.Text.Mark &^= LEAF
-			fallthrough
-
-		case ABC,
-			ABEQ,
-			ABGE,
-			ABGT,
-			ABLE,
-			ABLT,
-			ABLEU,
-			ABLTU,
-			ABNE,
-			ABR,
-			ABVC,
-			ABVS,
-			ACMPBEQ,
-			ACMPBGE,
-			ACMPBGT,
-			ACMPBLE,
-			ACMPBLT,
-			ACMPBNE,
-			ACMPUBEQ,
-			ACMPUBGE,
-			ACMPUBGT,
-			ACMPUBLE,
-			ACMPUBLT,
-			ACMPUBNE:
-			p.Mark |= BRANCH
-			q = p
-			q1 = p.Pcond
-			if q1 != nil {
-				for q1.As == obj.ANOP {
-					q1 = q1.Link
-					p.Pcond = q1
-				}
-
-				if q1.Mark&LEAF == 0 {
-					q1.Mark |= LABEL
-				}
-			} else {
-				p.Mark |= LABEL
-			}
-			q1 = p.Link
-			if q1 != nil {
-				q1.Mark |= LABEL
-			}
-			continue
-
-		case AFCMPO, AFCMPU:
-			q = p
-			p.Mark |= FCMP | FLOAT
-			continue
-
-		case obj.ARET:
-			q = p
-			if p.Link != nil {
-				p.Link.Mark |= LABEL
-			}
-			continue
-
-		case obj.ANOP:
-			q1 = p.Link
-			q.Link = q1 /* q is non-nop */
-			q1.Mark |= p.Mark
-			continue
-
-		default:
-			q = p
-			continue
-		}
-	}
-
-	autosize := int32(0)
-	var p1 *obj.Prog
-	var p2 *obj.Prog
-	var pLast *obj.Prog
-	var pPre *obj.Prog
-	var pPreempt *obj.Prog
-	wasSplit := false
-	for p := cursym.Text; p != nil; p = p.Link {
-		pLast = p
-		switch p.As {
-		case obj.ATEXT:
-			autosize = int32(textstksiz)
-
-			if p.Mark&LEAF != 0 && autosize == 0 {
-				// A leaf function with no locals has no frame.
-				p.From3.Offset |= obj.NOFRAME
-			}
-
-			if p.From3.Offset&obj.NOFRAME == 0 {
-				// If there is a stack frame at all, it includes
-				// space to save the LR.
-				autosize += int32(ctxt.FixedFrameSize())
-			}
-
-			if p.Mark&LEAF != 0 && autosize < obj.StackSmall {
-				// A leaf function with a small stack can be marked
-				// NOSPLIT, avoiding a stack check.
-				p.From3.Offset |= obj.NOSPLIT
-			}
-
-			p.To.Offset = int64(autosize)
-
-			q = p
-
-			if p.From3.Offset&obj.NOSPLIT == 0 {
-				p, pPreempt = stacksplitPre(ctxt, p, autosize) // emit pre part of split check
-				pPre = p
-				wasSplit = true //need post part of split
-			}
-
-			if autosize != 0 {
-				// Make sure to save link register for non-empty frame, even if
-				// it is a leaf function, so that traceback works.
-				// Store link register before decrementing SP, so if a signal comes
-				// during the execution of the function prologue, the traceback
-				// code will not see a half-updated stack frame.
-				q = obj.Appendp(ctxt, p)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_LR
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REGSP
-				q.To.Offset = int64(-autosize)
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_ADDR
-				q.From.Offset = int64(-autosize)
-				q.From.Reg = REGSP // not actually needed - REGSP is assumed if no reg is provided
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGSP
-				q.Spadj = autosize
-			} else if cursym.Text.Mark&LEAF == 0 {
-				// A very few functions that do not return to their caller
-				// (e.g. gogo) are not identified as leaves but still have
-				// no frame.
-				cursym.Text.Mark |= LEAF
-			}
-
-			if cursym.Text.Mark&LEAF != 0 {
-				cursym.Set(obj.AttrLeaf, true)
-				break
-			}
-
-			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
-				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
-				//
-				//	MOVD g_panic(g), R3
-				//	CMP R3, $0
-				//	BEQ end
-				//	MOVD panic_argp(R3), R4
-				//	ADD $(autosize+8), R1, R5
-				//	CMP R4, R5
-				//	BNE end
-				//	ADD $8, R1, R6
-				//	MOVD R6, panic_argp(R3)
-				// end:
-				//	NOP
-				//
-				// The NOP is needed to give the jumps somewhere to land.
-				// It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes.
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REGG
-				q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R3
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ACMP
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R3
-				q.To.Type = obj.TYPE_CONST
-				q.To.Offset = 0
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABEQ
-				q.To.Type = obj.TYPE_BRANCH
-				p1 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_MEM
-				q.From.Reg = REG_R3
-				q.From.Offset = 0 // Panic.argp
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R4
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(autosize) + ctxt.FixedFrameSize()
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R5
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ACMP
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R4
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R5
-
-				q = obj.Appendp(ctxt, q)
-				q.As = ABNE
-				q.To.Type = obj.TYPE_BRANCH
-				p2 = q
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = ctxt.FixedFrameSize()
-				q.Reg = REGSP
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_R6
-
-				q = obj.Appendp(ctxt, q)
-				q.As = AMOVD
-				q.From.Type = obj.TYPE_REG
-				q.From.Reg = REG_R6
-				q.To.Type = obj.TYPE_MEM
-				q.To.Reg = REG_R3
-				q.To.Offset = 0 // Panic.argp
-
-				q = obj.Appendp(ctxt, q)
-
-				q.As = obj.ANOP
-				p1.Pcond = q
-				p2.Pcond = q
-			}
-
-		case obj.ARET:
-			if p.From.Type == obj.TYPE_CONST {
-				ctxt.Diag("using BECOME (%v) is not supported!", p)
-				break
-			}
-
-			retTarget := p.To.Sym
-
-			if cursym.Text.Mark&LEAF != 0 {
-				if autosize == 0 {
-					p.As = ABR
-					p.From = obj.Addr{}
-					if retTarget == nil {
-						p.To.Type = obj.TYPE_REG
-						p.To.Reg = REG_LR
-					} else {
-						p.To.Type = obj.TYPE_BRANCH
-						p.To.Sym = retTarget
-					}
-					p.Mark |= BRANCH
-					break
-				}
-
-				p.As = AADD
-				p.From.Type = obj.TYPE_CONST
-				p.From.Offset = int64(autosize)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REGSP
-				p.Spadj = -autosize
-
-				q = obj.Appendp(ctxt, p)
-				q.As = ABR
-				q.From = obj.Addr{}
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_LR
-				q.Mark |= BRANCH
-				q.Spadj = autosize
-				break
-			}
-
-			p.As = AMOVD
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = REGSP
-			p.From.Offset = 0
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_LR
-
-			q = p
-
-			if autosize != 0 {
-				q = obj.Appendp(ctxt, q)
-				q.As = AADD
-				q.From.Type = obj.TYPE_CONST
-				q.From.Offset = int64(autosize)
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REGSP
-				q.Spadj = -autosize
-			}
-
-			q = obj.Appendp(ctxt, q)
-			q.As = ABR
-			q.From = obj.Addr{}
-			if retTarget == nil {
-				q.To.Type = obj.TYPE_REG
-				q.To.Reg = REG_LR
-			} else {
-				q.To.Type = obj.TYPE_BRANCH
-				q.To.Sym = retTarget
-			}
-			q.Mark |= BRANCH
-			q.Spadj = autosize
-
-		case AADD:
-			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
-				p.Spadj = int32(-p.From.Offset)
-			}
-		}
-	}
-	if wasSplit {
-		pLast = stacksplitPost(ctxt, pLast, pPre, pPreempt, autosize) // emit post part of split check
-	}
-}
-
-/*
-// instruction scheduling
-	if(debug['Q'] == 0)
-		return;
-
-	curtext = nil;
-	q = nil;	// p - 1
-	q1 = firstp;	// top of block
-	o = 0;		// count of instructions
-	for(p = firstp; p != nil; p = p1) {
-		p1 = p->link;
-		o++;
-		if(p->mark & NOSCHED){
-			if(q1 != p){
-				sched(q1, q);
-			}
-			for(; p != nil; p = p->link){
-				if(!(p->mark & NOSCHED))
-					break;
-				q = p;
-			}
-			p1 = p;
-			q1 = p;
-			o = 0;
-			continue;
-		}
-		if(p->mark & (LABEL|SYNC)) {
-			if(q1 != p)
-				sched(q1, q);
-			q1 = p;
-			o = 1;
-		}
-		if(p->mark & (BRANCH|SYNC)) {
-			sched(q1, p);
-			q1 = p1;
-			o = 0;
-		}
-		if(o >= NSCHED) {
-			sched(q1, p);
-			q1 = p1;
-			o = 0;
-		}
-		q = p;
-	}
-*/
-func stacksplitPre(ctxt *obj.Link, p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) {
-	var q *obj.Prog
-
-	// MOVD	g_stackguard(g), R3
-	p = obj.Appendp(ctxt, p)
-
-	p.As = AMOVD
-	p.From.Type = obj.TYPE_MEM
-	p.From.Reg = REGG
-	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-	if ctxt.Cursym.CFunc() {
-		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-	}
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R3
-
-	q = nil
-	if framesize <= obj.StackSmall {
-		// small stack: SP < stackguard
-		//	CMP	stackguard, SP
-
-		//p.To.Type = obj.TYPE_REG
-		//p.To.Reg = REGSP
-
-		// q1: BLT	done
-
-		p = obj.Appendp(ctxt, p)
-		//q1 = p
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.Reg = REGSP
-		p.As = ACMPUBGE
-		p.To.Type = obj.TYPE_BRANCH
-		//p = obj.Appendp(ctxt, p)
-
-		//p.As = ACMPU
-		//p.From.Type = obj.TYPE_REG
-		//p.From.Reg = REG_R3
-		//p.To.Type = obj.TYPE_REG
-		//p.To.Reg = REGSP
-
-		//p = obj.Appendp(ctxt, p)
-		//p.As = ABGE
-		//p.To.Type = obj.TYPE_BRANCH
-
-	} else if framesize <= obj.StackBig {
-		// large stack: SP-framesize < stackguard-StackSmall
-		//	ADD $-framesize, SP, R4
-		//	CMP stackguard, R4
-		p = obj.Appendp(ctxt, p)
-
-		p.As = AADD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(-framesize)
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-
-		p = obj.Appendp(ctxt, p)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.Reg = REG_R4
-		p.As = ACMPUBGE
-		p.To.Type = obj.TYPE_BRANCH
-
-	} else {
-		// Such a large stack we need to protect against wraparound.
-		// If SP is close to zero:
-		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
-		// The +StackGuard on both sides is required to keep the left side positive:
-		// SP is allowed to be slightly below stackguard. See stack.h.
-		//
-		// Preemption sets stackguard to StackPreempt, a very large value.
-		// That breaks the math above, so we have to check for that explicitly.
-		//	// stackguard is R3
-		//	CMP	R3, $StackPreempt
-		//	BEQ	label-of-call-to-morestack
-		//	ADD	$StackGuard, SP, R4
-		//	SUB	R3, R4
-		//	MOVD	$(framesize+(StackGuard-StackSmall)), TEMP
-		//	CMPUBGE	TEMP, R4
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ACMP
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = obj.StackPreempt
-
-		p = obj.Appendp(ctxt, p)
-		q = p
-		p.As = ABEQ
-		p.To.Type = obj.TYPE_BRANCH
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AADD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = obj.StackGuard
-		p.Reg = REGSP
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ASUB
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_R3
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_R4
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVD
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REGTMP
-
-		p = obj.Appendp(ctxt, p)
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REGTMP
-		p.Reg = REG_R4
-		p.As = ACMPUBGE
-		p.To.Type = obj.TYPE_BRANCH
-	}
-
-	return p, q
-}
-
-func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog, framesize int32) *obj.Prog {
-	// Now we are at the end of the function, but logically
-	// we are still in function prologue. We need to fix the
-	// SP data and PCDATA.
-	spfix := obj.Appendp(ctxt, p)
-	spfix.As = obj.ANOP
-	spfix.Spadj = -framesize
-
-	pcdata := obj.Appendp(ctxt, spfix)
-	pcdata.Lineno = ctxt.Cursym.Text.Lineno
-	pcdata.Mode = ctxt.Cursym.Text.Mode
-	pcdata.As = obj.APCDATA
-	pcdata.From.Type = obj.TYPE_CONST
-	pcdata.From.Offset = obj.PCDATA_StackMapIndex
-	pcdata.To.Type = obj.TYPE_CONST
-	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry
-
-	// MOVD	LR, R5
-	p = obj.Appendp(ctxt, pcdata)
-	pPre.Pcond = p
-	p.As = AMOVD
-	p.From.Type = obj.TYPE_REG
-	p.From.Reg = REG_LR
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_R5
-	if pPreempt != nil {
-		pPreempt.Pcond = p
-	}
-
-	// BL	runtime.morestack(SB)
-	p = obj.Appendp(ctxt, p)
-
-	p.As = ABL
-	p.To.Type = obj.TYPE_BRANCH
-	if ctxt.Cursym.CFunc() {
-		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
-	} else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
-		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
-	} else {
-		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
-	}
-
-	// BR	start
-	p = obj.Appendp(ctxt, p)
-
-	p.As = ABR
-	p.To.Type = obj.TYPE_BRANCH
-	p.Pcond = ctxt.Cursym.Text.Link
-	return p
-}
-
-var pc_cnt int64
-
-func follow(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	pc_cnt = 0
-	firstp := ctxt.NewProg()
-	lastp := firstp
-	xfol(ctxt, s.Text, &lastp)
-	lastp.Link = nil
-	s.Text = firstp.Link
-}
-
-func relinv(a obj.As) obj.As {
-	switch a {
-	case ABEQ:
-		return ABNE
-	case ABNE:
-		return ABEQ
-
-	case ABGE:
-		return ABLT
-	case ABLT:
-		return ABGE
-
-	case ABGT:
-		return ABLE
-	case ABLE:
-		return ABGT
-
-	case ABVC:
-		return ABVS
-	case ABVS:
-		return ABVC
-	}
-
-	return 0
-}
-
-func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
-	var q *obj.Prog
-	var r *obj.Prog
-	var b obj.As
-
-	for p != nil {
-		a := p.As
-		if a == ABR {
-			q = p.Pcond
-			if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
-				p.Mark |= FOLL
-				(*last).Link = p
-				*last = p
-				(*last).Pc = pc_cnt
-				pc_cnt += 1
-				p = p.Link
-				xfol(ctxt, p, last)
-				p = q
-				if p != nil && p.Mark&FOLL == 0 {
-					continue
-				}
-				return
-			}
-
-			if q != nil {
-				p.Mark |= FOLL
-				p = q
-				if p.Mark&FOLL == 0 {
-					continue
-				}
-			}
-		}
-
-		if p.Mark&FOLL != 0 {
-			q = p
-			for i := 0; i < 4; i, q = i+1, q.Link {
-				if q == *last || (q.Mark&NOSCHED != 0) {
-					break
-				}
-				b = 0 /* set */
-				a = q.As
-				if a == obj.ANOP {
-					i--
-					continue
-				}
-				if a != ABR && a != obj.ARET {
-					if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
-						continue
-					}
-					b = relinv(a)
-					if b == 0 {
-						continue
-					}
-				}
-
-				for {
-					r = ctxt.NewProg()
-					*r = *p
-					if r.Mark&FOLL == 0 {
-						fmt.Printf("can't happen 1\n")
-					}
-					r.Mark |= FOLL
-					if p != q {
-						p = p.Link
-						(*last).Link = r
-						*last = r
-						(*last).Pc = pc_cnt
-						pc_cnt += 1
-						continue
-					}
-
-					(*last).Link = r
-					*last = r
-					(*last).Pc = pc_cnt
-					pc_cnt += 1
-					if a == ABR || a == obj.ARET {
-						return
-					}
-					r.As = b
-					r.Pcond = p.Link
-					r.Link = p.Pcond
-					if r.Link.Mark&FOLL == 0 {
-						xfol(ctxt, r.Link, last)
-					}
-					if r.Pcond.Mark&FOLL == 0 {
-						fmt.Printf("can't happen 2\n")
-					}
-					return
-				}
-			}
-
-			a = ABR
-			q = ctxt.NewProg()
-			q.As = a
-			q.Lineno = p.Lineno
-			q.To.Type = obj.TYPE_BRANCH
-			q.To.Offset = p.Pc
-			q.Pcond = p
-			p = q
-		}
-
-		p.Mark |= FOLL
-		(*last).Link = p
-		*last = p
-		(*last).Pc = pc_cnt
-		pc_cnt += 1
-
-		if a == ABR || a == obj.ARET {
-			if p.Mark&NOSCHED != 0 {
-				p = p.Link
-				continue
-			}
-
-			return
-		}
-
-		if p.Pcond != nil {
-			if a != ABL && p.Link != nil {
-				xfol(ctxt, p.Link, last)
-				p = p.Pcond
-				if p == nil || (p.Mark&FOLL != 0) {
-					return
-				}
-				continue
-			}
-		}
-
-		p = p.Link
-	}
-}
-
-var unaryDst = map[obj.As]bool{
-	ASTCK:  true,
-	ASTCKC: true,
-	ASTCKE: true,
-	ASTCKF: true,
-	ANEG:   true,
-	ANEGW:  true,
-	AVONE:  true,
-	AVZERO: true,
-}
-
-var Links390x = obj.LinkArch{
-	Arch:       sys.ArchS390X,
-	Preprocess: preprocess,
-	Assemble:   spanz,
-	Follow:     follow,
-	Progedit:   progedit,
-	UnaryDst:   unaryDst,
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/vector.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/vector.go
deleted file mode 100644
index d22b272..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/s390x/vector.go
+++ /dev/null
@@ -1,1064 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/vector.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/s390x/vector.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package s390x
-
-import (
-	"bootstrap/cmd/internal/obj"
-)
-
-// This file contains utility functions for use when
-// assembling vector instructions.
-
-// vop returns the opcode, element size and condition
-// setting for the given (possibly extended) mnemonic.
-func vop(as obj.As) (opcode, es, cs uint32) {
-	switch as {
-	default:
-		return 0, 0, 0
-	case AVA:
-		return op_VA, 0, 0
-	case AVAB:
-		return op_VA, 0, 0
-	case AVAH:
-		return op_VA, 1, 0
-	case AVAF:
-		return op_VA, 2, 0
-	case AVAG:
-		return op_VA, 3, 0
-	case AVAQ:
-		return op_VA, 4, 0
-	case AVACC:
-		return op_VACC, 0, 0
-	case AVACCB:
-		return op_VACC, 0, 0
-	case AVACCH:
-		return op_VACC, 1, 0
-	case AVACCF:
-		return op_VACC, 2, 0
-	case AVACCG:
-		return op_VACC, 3, 0
-	case AVACCQ:
-		return op_VACC, 4, 0
-	case AVAC:
-		return op_VAC, 0, 0
-	case AVACQ:
-		return op_VAC, 4, 0
-	case AVACCC:
-		return op_VACCC, 0, 0
-	case AVACCCQ:
-		return op_VACCC, 4, 0
-	case AVN:
-		return op_VN, 0, 0
-	case AVNC:
-		return op_VNC, 0, 0
-	case AVAVG:
-		return op_VAVG, 0, 0
-	case AVAVGB:
-		return op_VAVG, 0, 0
-	case AVAVGH:
-		return op_VAVG, 1, 0
-	case AVAVGF:
-		return op_VAVG, 2, 0
-	case AVAVGG:
-		return op_VAVG, 3, 0
-	case AVAVGL:
-		return op_VAVGL, 0, 0
-	case AVAVGLB:
-		return op_VAVGL, 0, 0
-	case AVAVGLH:
-		return op_VAVGL, 1, 0
-	case AVAVGLF:
-		return op_VAVGL, 2, 0
-	case AVAVGLG:
-		return op_VAVGL, 3, 0
-	case AVCKSM:
-		return op_VCKSM, 0, 0
-	case AVCEQ:
-		return op_VCEQ, 0, 0
-	case AVCEQB:
-		return op_VCEQ, 0, 0
-	case AVCEQH:
-		return op_VCEQ, 1, 0
-	case AVCEQF:
-		return op_VCEQ, 2, 0
-	case AVCEQG:
-		return op_VCEQ, 3, 0
-	case AVCEQBS:
-		return op_VCEQ, 0, 1
-	case AVCEQHS:
-		return op_VCEQ, 1, 1
-	case AVCEQFS:
-		return op_VCEQ, 2, 1
-	case AVCEQGS:
-		return op_VCEQ, 3, 1
-	case AVCH:
-		return op_VCH, 0, 0
-	case AVCHB:
-		return op_VCH, 0, 0
-	case AVCHH:
-		return op_VCH, 1, 0
-	case AVCHF:
-		return op_VCH, 2, 0
-	case AVCHG:
-		return op_VCH, 3, 0
-	case AVCHBS:
-		return op_VCH, 0, 1
-	case AVCHHS:
-		return op_VCH, 1, 1
-	case AVCHFS:
-		return op_VCH, 2, 1
-	case AVCHGS:
-		return op_VCH, 3, 1
-	case AVCHL:
-		return op_VCHL, 0, 0
-	case AVCHLB:
-		return op_VCHL, 0, 0
-	case AVCHLH:
-		return op_VCHL, 1, 0
-	case AVCHLF:
-		return op_VCHL, 2, 0
-	case AVCHLG:
-		return op_VCHL, 3, 0
-	case AVCHLBS:
-		return op_VCHL, 0, 1
-	case AVCHLHS:
-		return op_VCHL, 1, 1
-	case AVCHLFS:
-		return op_VCHL, 2, 1
-	case AVCHLGS:
-		return op_VCHL, 3, 1
-	case AVCLZ:
-		return op_VCLZ, 0, 0
-	case AVCLZB:
-		return op_VCLZ, 0, 0
-	case AVCLZH:
-		return op_VCLZ, 1, 0
-	case AVCLZF:
-		return op_VCLZ, 2, 0
-	case AVCLZG:
-		return op_VCLZ, 3, 0
-	case AVCTZ:
-		return op_VCTZ, 0, 0
-	case AVCTZB:
-		return op_VCTZ, 0, 0
-	case AVCTZH:
-		return op_VCTZ, 1, 0
-	case AVCTZF:
-		return op_VCTZ, 2, 0
-	case AVCTZG:
-		return op_VCTZ, 3, 0
-	case AVEC:
-		return op_VEC, 0, 0
-	case AVECB:
-		return op_VEC, 0, 0
-	case AVECH:
-		return op_VEC, 1, 0
-	case AVECF:
-		return op_VEC, 2, 0
-	case AVECG:
-		return op_VEC, 3, 0
-	case AVECL:
-		return op_VECL, 0, 0
-	case AVECLB:
-		return op_VECL, 0, 0
-	case AVECLH:
-		return op_VECL, 1, 0
-	case AVECLF:
-		return op_VECL, 2, 0
-	case AVECLG:
-		return op_VECL, 3, 0
-	case AVERIM:
-		return op_VERIM, 0, 0
-	case AVERIMB:
-		return op_VERIM, 0, 0
-	case AVERIMH:
-		return op_VERIM, 1, 0
-	case AVERIMF:
-		return op_VERIM, 2, 0
-	case AVERIMG:
-		return op_VERIM, 3, 0
-	case AVERLL:
-		return op_VERLL, 0, 0
-	case AVERLLB:
-		return op_VERLL, 0, 0
-	case AVERLLH:
-		return op_VERLL, 1, 0
-	case AVERLLF:
-		return op_VERLL, 2, 0
-	case AVERLLG:
-		return op_VERLL, 3, 0
-	case AVERLLV:
-		return op_VERLLV, 0, 0
-	case AVERLLVB:
-		return op_VERLLV, 0, 0
-	case AVERLLVH:
-		return op_VERLLV, 1, 0
-	case AVERLLVF:
-		return op_VERLLV, 2, 0
-	case AVERLLVG:
-		return op_VERLLV, 3, 0
-	case AVESLV:
-		return op_VESLV, 0, 0
-	case AVESLVB:
-		return op_VESLV, 0, 0
-	case AVESLVH:
-		return op_VESLV, 1, 0
-	case AVESLVF:
-		return op_VESLV, 2, 0
-	case AVESLVG:
-		return op_VESLV, 3, 0
-	case AVESL:
-		return op_VESL, 0, 0
-	case AVESLB:
-		return op_VESL, 0, 0
-	case AVESLH:
-		return op_VESL, 1, 0
-	case AVESLF:
-		return op_VESL, 2, 0
-	case AVESLG:
-		return op_VESL, 3, 0
-	case AVESRA:
-		return op_VESRA, 0, 0
-	case AVESRAB:
-		return op_VESRA, 0, 0
-	case AVESRAH:
-		return op_VESRA, 1, 0
-	case AVESRAF:
-		return op_VESRA, 2, 0
-	case AVESRAG:
-		return op_VESRA, 3, 0
-	case AVESRAV:
-		return op_VESRAV, 0, 0
-	case AVESRAVB:
-		return op_VESRAV, 0, 0
-	case AVESRAVH:
-		return op_VESRAV, 1, 0
-	case AVESRAVF:
-		return op_VESRAV, 2, 0
-	case AVESRAVG:
-		return op_VESRAV, 3, 0
-	case AVESRL:
-		return op_VESRL, 0, 0
-	case AVESRLB:
-		return op_VESRL, 0, 0
-	case AVESRLH:
-		return op_VESRL, 1, 0
-	case AVESRLF:
-		return op_VESRL, 2, 0
-	case AVESRLG:
-		return op_VESRL, 3, 0
-	case AVESRLV:
-		return op_VESRLV, 0, 0
-	case AVESRLVB:
-		return op_VESRLV, 0, 0
-	case AVESRLVH:
-		return op_VESRLV, 1, 0
-	case AVESRLVF:
-		return op_VESRLV, 2, 0
-	case AVESRLVG:
-		return op_VESRLV, 3, 0
-	case AVX:
-		return op_VX, 0, 0
-	case AVFAE:
-		return op_VFAE, 0, 0
-	case AVFAEB:
-		return op_VFAE, 0, 0
-	case AVFAEH:
-		return op_VFAE, 1, 0
-	case AVFAEF:
-		return op_VFAE, 2, 0
-	case AVFAEBS:
-		return op_VFAE, 0, 1
-	case AVFAEHS:
-		return op_VFAE, 1, 1
-	case AVFAEFS:
-		return op_VFAE, 2, 1
-	case AVFAEZB:
-		return op_VFAE, 0, 2
-	case AVFAEZH:
-		return op_VFAE, 1, 2
-	case AVFAEZF:
-		return op_VFAE, 2, 2
-	case AVFAEZBS:
-		return op_VFAE, 0, 3
-	case AVFAEZHS:
-		return op_VFAE, 1, 3
-	case AVFAEZFS:
-		return op_VFAE, 2, 3
-	case AVFEE:
-		return op_VFEE, 0, 0
-	case AVFEEB:
-		return op_VFEE, 0, 0
-	case AVFEEH:
-		return op_VFEE, 1, 0
-	case AVFEEF:
-		return op_VFEE, 2, 0
-	case AVFEEBS:
-		return op_VFEE, 0, 1
-	case AVFEEHS:
-		return op_VFEE, 1, 1
-	case AVFEEFS:
-		return op_VFEE, 2, 1
-	case AVFEEZB:
-		return op_VFEE, 0, 2
-	case AVFEEZH:
-		return op_VFEE, 1, 2
-	case AVFEEZF:
-		return op_VFEE, 2, 2
-	case AVFEEZBS:
-		return op_VFEE, 0, 3
-	case AVFEEZHS:
-		return op_VFEE, 1, 3
-	case AVFEEZFS:
-		return op_VFEE, 2, 3
-	case AVFENE:
-		return op_VFENE, 0, 0
-	case AVFENEB:
-		return op_VFENE, 0, 0
-	case AVFENEH:
-		return op_VFENE, 1, 0
-	case AVFENEF:
-		return op_VFENE, 2, 0
-	case AVFENEBS:
-		return op_VFENE, 0, 1
-	case AVFENEHS:
-		return op_VFENE, 1, 1
-	case AVFENEFS:
-		return op_VFENE, 2, 1
-	case AVFENEZB:
-		return op_VFENE, 0, 2
-	case AVFENEZH:
-		return op_VFENE, 1, 2
-	case AVFENEZF:
-		return op_VFENE, 2, 2
-	case AVFENEZBS:
-		return op_VFENE, 0, 3
-	case AVFENEZHS:
-		return op_VFENE, 1, 3
-	case AVFENEZFS:
-		return op_VFENE, 2, 3
-	case AVFA:
-		return op_VFA, 0, 0
-	case AVFADB:
-		return op_VFA, 3, 0
-	case AWFADB:
-		return op_VFA, 3, 0
-	case AWFK:
-		return op_WFK, 0, 0
-	case AWFKDB:
-		return op_WFK, 3, 0
-	case AVFCE:
-		return op_VFCE, 0, 0
-	case AVFCEDB:
-		return op_VFCE, 3, 0
-	case AVFCEDBS:
-		return op_VFCE, 3, 1
-	case AWFCEDB:
-		return op_VFCE, 3, 0
-	case AWFCEDBS:
-		return op_VFCE, 3, 1
-	case AVFCH:
-		return op_VFCH, 0, 0
-	case AVFCHDB:
-		return op_VFCH, 3, 0
-	case AVFCHDBS:
-		return op_VFCH, 3, 1
-	case AWFCHDB:
-		return op_VFCH, 3, 0
-	case AWFCHDBS:
-		return op_VFCH, 3, 1
-	case AVFCHE:
-		return op_VFCHE, 0, 0
-	case AVFCHEDB:
-		return op_VFCHE, 3, 0
-	case AVFCHEDBS:
-		return op_VFCHE, 3, 1
-	case AWFCHEDB:
-		return op_VFCHE, 3, 0
-	case AWFCHEDBS:
-		return op_VFCHE, 3, 1
-	case AWFC:
-		return op_WFC, 0, 0
-	case AWFCDB:
-		return op_WFC, 3, 0
-	case AVCDG:
-		return op_VCDG, 0, 0
-	case AVCDGB:
-		return op_VCDG, 3, 0
-	case AWCDGB:
-		return op_VCDG, 3, 0
-	case AVCDLG:
-		return op_VCDLG, 0, 0
-	case AVCDLGB:
-		return op_VCDLG, 3, 0
-	case AWCDLGB:
-		return op_VCDLG, 3, 0
-	case AVCGD:
-		return op_VCGD, 0, 0
-	case AVCGDB:
-		return op_VCGD, 3, 0
-	case AWCGDB:
-		return op_VCGD, 3, 0
-	case AVCLGD:
-		return op_VCLGD, 0, 0
-	case AVCLGDB:
-		return op_VCLGD, 3, 0
-	case AWCLGDB:
-		return op_VCLGD, 3, 0
-	case AVFD:
-		return op_VFD, 0, 0
-	case AVFDDB:
-		return op_VFD, 3, 0
-	case AWFDDB:
-		return op_VFD, 3, 0
-	case AVLDE:
-		return op_VLDE, 0, 0
-	case AVLDEB:
-		return op_VLDE, 2, 0
-	case AWLDEB:
-		return op_VLDE, 2, 0
-	case AVLED:
-		return op_VLED, 0, 0
-	case AVLEDB:
-		return op_VLED, 3, 0
-	case AWLEDB:
-		return op_VLED, 3, 0
-	case AVFM:
-		return op_VFM, 0, 0
-	case AVFMDB:
-		return op_VFM, 3, 0
-	case AWFMDB:
-		return op_VFM, 3, 0
-	case AVFMA:
-		return op_VFMA, 0, 0
-	case AVFMADB:
-		return op_VFMA, 3, 0
-	case AWFMADB:
-		return op_VFMA, 3, 0
-	case AVFMS:
-		return op_VFMS, 0, 0
-	case AVFMSDB:
-		return op_VFMS, 3, 0
-	case AWFMSDB:
-		return op_VFMS, 3, 0
-	case AVFPSO:
-		return op_VFPSO, 0, 0
-	case AVFPSODB:
-		return op_VFPSO, 3, 0
-	case AWFPSODB:
-		return op_VFPSO, 3, 0
-	case AVFLCDB:
-		return op_VFPSO, 3, 0
-	case AWFLCDB:
-		return op_VFPSO, 3, 0
-	case AVFLNDB:
-		return op_VFPSO, 3, 1
-	case AWFLNDB:
-		return op_VFPSO, 3, 1
-	case AVFLPDB:
-		return op_VFPSO, 3, 2
-	case AWFLPDB:
-		return op_VFPSO, 3, 2
-	case AVFSQ:
-		return op_VFSQ, 0, 0
-	case AVFSQDB:
-		return op_VFSQ, 3, 0
-	case AWFSQDB:
-		return op_VFSQ, 3, 0
-	case AVFS:
-		return op_VFS, 0, 0
-	case AVFSDB:
-		return op_VFS, 3, 0
-	case AWFSDB:
-		return op_VFS, 3, 0
-	case AVFTCI:
-		return op_VFTCI, 0, 0
-	case AVFTCIDB:
-		return op_VFTCI, 3, 0
-	case AWFTCIDB:
-		return op_VFTCI, 3, 0
-	case AVGFM:
-		return op_VGFM, 0, 0
-	case AVGFMB:
-		return op_VGFM, 0, 0
-	case AVGFMH:
-		return op_VGFM, 1, 0
-	case AVGFMF:
-		return op_VGFM, 2, 0
-	case AVGFMG:
-		return op_VGFM, 3, 0
-	case AVGFMA:
-		return op_VGFMA, 0, 0
-	case AVGFMAB:
-		return op_VGFMA, 0, 0
-	case AVGFMAH:
-		return op_VGFMA, 1, 0
-	case AVGFMAF:
-		return op_VGFMA, 2, 0
-	case AVGFMAG:
-		return op_VGFMA, 3, 0
-	case AVGEF:
-		return op_VGEF, 0, 0
-	case AVGEG:
-		return op_VGEG, 0, 0
-	case AVGBM:
-		return op_VGBM, 0, 0
-	case AVZERO:
-		return op_VGBM, 0, 0
-	case AVONE:
-		return op_VGBM, 0, 0
-	case AVGM:
-		return op_VGM, 0, 0
-	case AVGMB:
-		return op_VGM, 0, 0
-	case AVGMH:
-		return op_VGM, 1, 0
-	case AVGMF:
-		return op_VGM, 2, 0
-	case AVGMG:
-		return op_VGM, 3, 0
-	case AVISTR:
-		return op_VISTR, 0, 0
-	case AVISTRB:
-		return op_VISTR, 0, 0
-	case AVISTRH:
-		return op_VISTR, 1, 0
-	case AVISTRF:
-		return op_VISTR, 2, 0
-	case AVISTRBS:
-		return op_VISTR, 0, 1
-	case AVISTRHS:
-		return op_VISTR, 1, 1
-	case AVISTRFS:
-		return op_VISTR, 2, 1
-	case AVL:
-		return op_VL, 0, 0
-	case AVLR:
-		return op_VLR, 0, 0
-	case AVLREP:
-		return op_VLREP, 0, 0
-	case AVLREPB:
-		return op_VLREP, 0, 0
-	case AVLREPH:
-		return op_VLREP, 1, 0
-	case AVLREPF:
-		return op_VLREP, 2, 0
-	case AVLREPG:
-		return op_VLREP, 3, 0
-	case AVLC:
-		return op_VLC, 0, 0
-	case AVLCB:
-		return op_VLC, 0, 0
-	case AVLCH:
-		return op_VLC, 1, 0
-	case AVLCF:
-		return op_VLC, 2, 0
-	case AVLCG:
-		return op_VLC, 3, 0
-	case AVLEH:
-		return op_VLEH, 0, 0
-	case AVLEF:
-		return op_VLEF, 0, 0
-	case AVLEG:
-		return op_VLEG, 0, 0
-	case AVLEB:
-		return op_VLEB, 0, 0
-	case AVLEIH:
-		return op_VLEIH, 0, 0
-	case AVLEIF:
-		return op_VLEIF, 0, 0
-	case AVLEIG:
-		return op_VLEIG, 0, 0
-	case AVLEIB:
-		return op_VLEIB, 0, 0
-	case AVFI:
-		return op_VFI, 0, 0
-	case AVFIDB:
-		return op_VFI, 3, 0
-	case AWFIDB:
-		return op_VFI, 3, 0
-	case AVLGV:
-		return op_VLGV, 0, 0
-	case AVLGVB:
-		return op_VLGV, 0, 0
-	case AVLGVH:
-		return op_VLGV, 1, 0
-	case AVLGVF:
-		return op_VLGV, 2, 0
-	case AVLGVG:
-		return op_VLGV, 3, 0
-	case AVLLEZ:
-		return op_VLLEZ, 0, 0
-	case AVLLEZB:
-		return op_VLLEZ, 0, 0
-	case AVLLEZH:
-		return op_VLLEZ, 1, 0
-	case AVLLEZF:
-		return op_VLLEZ, 2, 0
-	case AVLLEZG:
-		return op_VLLEZ, 3, 0
-	case AVLM:
-		return op_VLM, 0, 0
-	case AVLP:
-		return op_VLP, 0, 0
-	case AVLPB:
-		return op_VLP, 0, 0
-	case AVLPH:
-		return op_VLP, 1, 0
-	case AVLPF:
-		return op_VLP, 2, 0
-	case AVLPG:
-		return op_VLP, 3, 0
-	case AVLBB:
-		return op_VLBB, 0, 0
-	case AVLVG:
-		return op_VLVG, 0, 0
-	case AVLVGB:
-		return op_VLVG, 0, 0
-	case AVLVGH:
-		return op_VLVG, 1, 0
-	case AVLVGF:
-		return op_VLVG, 2, 0
-	case AVLVGG:
-		return op_VLVG, 3, 0
-	case AVLVGP:
-		return op_VLVGP, 0, 0
-	case AVLL:
-		return op_VLL, 0, 0
-	case AVMX:
-		return op_VMX, 0, 0
-	case AVMXB:
-		return op_VMX, 0, 0
-	case AVMXH:
-		return op_VMX, 1, 0
-	case AVMXF:
-		return op_VMX, 2, 0
-	case AVMXG:
-		return op_VMX, 3, 0
-	case AVMXL:
-		return op_VMXL, 0, 0
-	case AVMXLB:
-		return op_VMXL, 0, 0
-	case AVMXLH:
-		return op_VMXL, 1, 0
-	case AVMXLF:
-		return op_VMXL, 2, 0
-	case AVMXLG:
-		return op_VMXL, 3, 0
-	case AVMRH:
-		return op_VMRH, 0, 0
-	case AVMRHB:
-		return op_VMRH, 0, 0
-	case AVMRHH:
-		return op_VMRH, 1, 0
-	case AVMRHF:
-		return op_VMRH, 2, 0
-	case AVMRHG:
-		return op_VMRH, 3, 0
-	case AVMRL:
-		return op_VMRL, 0, 0
-	case AVMRLB:
-		return op_VMRL, 0, 0
-	case AVMRLH:
-		return op_VMRL, 1, 0
-	case AVMRLF:
-		return op_VMRL, 2, 0
-	case AVMRLG:
-		return op_VMRL, 3, 0
-	case AVMN:
-		return op_VMN, 0, 0
-	case AVMNB:
-		return op_VMN, 0, 0
-	case AVMNH:
-		return op_VMN, 1, 0
-	case AVMNF:
-		return op_VMN, 2, 0
-	case AVMNG:
-		return op_VMN, 3, 0
-	case AVMNL:
-		return op_VMNL, 0, 0
-	case AVMNLB:
-		return op_VMNL, 0, 0
-	case AVMNLH:
-		return op_VMNL, 1, 0
-	case AVMNLF:
-		return op_VMNL, 2, 0
-	case AVMNLG:
-		return op_VMNL, 3, 0
-	case AVMAE:
-		return op_VMAE, 0, 0
-	case AVMAEB:
-		return op_VMAE, 0, 0
-	case AVMAEH:
-		return op_VMAE, 1, 0
-	case AVMAEF:
-		return op_VMAE, 2, 0
-	case AVMAH:
-		return op_VMAH, 0, 0
-	case AVMAHB:
-		return op_VMAH, 0, 0
-	case AVMAHH:
-		return op_VMAH, 1, 0
-	case AVMAHF:
-		return op_VMAH, 2, 0
-	case AVMALE:
-		return op_VMALE, 0, 0
-	case AVMALEB:
-		return op_VMALE, 0, 0
-	case AVMALEH:
-		return op_VMALE, 1, 0
-	case AVMALEF:
-		return op_VMALE, 2, 0
-	case AVMALH:
-		return op_VMALH, 0, 0
-	case AVMALHB:
-		return op_VMALH, 0, 0
-	case AVMALHH:
-		return op_VMALH, 1, 0
-	case AVMALHF:
-		return op_VMALH, 2, 0
-	case AVMALO:
-		return op_VMALO, 0, 0
-	case AVMALOB:
-		return op_VMALO, 0, 0
-	case AVMALOH:
-		return op_VMALO, 1, 0
-	case AVMALOF:
-		return op_VMALO, 2, 0
-	case AVMAL:
-		return op_VMAL, 0, 0
-	case AVMALB:
-		return op_VMAL, 0, 0
-	case AVMALHW:
-		return op_VMAL, 1, 0
-	case AVMALF:
-		return op_VMAL, 2, 0
-	case AVMAO:
-		return op_VMAO, 0, 0
-	case AVMAOB:
-		return op_VMAO, 0, 0
-	case AVMAOH:
-		return op_VMAO, 1, 0
-	case AVMAOF:
-		return op_VMAO, 2, 0
-	case AVME:
-		return op_VME, 0, 0
-	case AVMEB:
-		return op_VME, 0, 0
-	case AVMEH:
-		return op_VME, 1, 0
-	case AVMEF:
-		return op_VME, 2, 0
-	case AVMH:
-		return op_VMH, 0, 0
-	case AVMHB:
-		return op_VMH, 0, 0
-	case AVMHH:
-		return op_VMH, 1, 0
-	case AVMHF:
-		return op_VMH, 2, 0
-	case AVMLE:
-		return op_VMLE, 0, 0
-	case AVMLEB:
-		return op_VMLE, 0, 0
-	case AVMLEH:
-		return op_VMLE, 1, 0
-	case AVMLEF:
-		return op_VMLE, 2, 0
-	case AVMLH:
-		return op_VMLH, 0, 0
-	case AVMLHB:
-		return op_VMLH, 0, 0
-	case AVMLHH:
-		return op_VMLH, 1, 0
-	case AVMLHF:
-		return op_VMLH, 2, 0
-	case AVMLO:
-		return op_VMLO, 0, 0
-	case AVMLOB:
-		return op_VMLO, 0, 0
-	case AVMLOH:
-		return op_VMLO, 1, 0
-	case AVMLOF:
-		return op_VMLO, 2, 0
-	case AVML:
-		return op_VML, 0, 0
-	case AVMLB:
-		return op_VML, 0, 0
-	case AVMLHW:
-		return op_VML, 1, 0
-	case AVMLF:
-		return op_VML, 2, 0
-	case AVMO:
-		return op_VMO, 0, 0
-	case AVMOB:
-		return op_VMO, 0, 0
-	case AVMOH:
-		return op_VMO, 1, 0
-	case AVMOF:
-		return op_VMO, 2, 0
-	case AVNO:
-		return op_VNO, 0, 0
-	case AVNOT:
-		return op_VNO, 0, 0
-	case AVO:
-		return op_VO, 0, 0
-	case AVPK:
-		return op_VPK, 0, 0
-	case AVPKH:
-		return op_VPK, 1, 0
-	case AVPKF:
-		return op_VPK, 2, 0
-	case AVPKG:
-		return op_VPK, 3, 0
-	case AVPKLS:
-		return op_VPKLS, 0, 0
-	case AVPKLSH:
-		return op_VPKLS, 1, 0
-	case AVPKLSF:
-		return op_VPKLS, 2, 0
-	case AVPKLSG:
-		return op_VPKLS, 3, 0
-	case AVPKLSHS:
-		return op_VPKLS, 1, 1
-	case AVPKLSFS:
-		return op_VPKLS, 2, 1
-	case AVPKLSGS:
-		return op_VPKLS, 3, 1
-	case AVPKS:
-		return op_VPKS, 0, 0
-	case AVPKSH:
-		return op_VPKS, 1, 0
-	case AVPKSF:
-		return op_VPKS, 2, 0
-	case AVPKSG:
-		return op_VPKS, 3, 0
-	case AVPKSHS:
-		return op_VPKS, 1, 1
-	case AVPKSFS:
-		return op_VPKS, 2, 1
-	case AVPKSGS:
-		return op_VPKS, 3, 1
-	case AVPERM:
-		return op_VPERM, 0, 0
-	case AVPDI:
-		return op_VPDI, 0, 0
-	case AVPOPCT:
-		return op_VPOPCT, 0, 0
-	case AVREP:
-		return op_VREP, 0, 0
-	case AVREPB:
-		return op_VREP, 0, 0
-	case AVREPH:
-		return op_VREP, 1, 0
-	case AVREPF:
-		return op_VREP, 2, 0
-	case AVREPG:
-		return op_VREP, 3, 0
-	case AVREPI:
-		return op_VREPI, 0, 0
-	case AVREPIB:
-		return op_VREPI, 0, 0
-	case AVREPIH:
-		return op_VREPI, 1, 0
-	case AVREPIF:
-		return op_VREPI, 2, 0
-	case AVREPIG:
-		return op_VREPI, 3, 0
-	case AVSCEF:
-		return op_VSCEF, 0, 0
-	case AVSCEG:
-		return op_VSCEG, 0, 0
-	case AVSEL:
-		return op_VSEL, 0, 0
-	case AVSL:
-		return op_VSL, 0, 0
-	case AVSLB:
-		return op_VSLB, 0, 0
-	case AVSLDB:
-		return op_VSLDB, 0, 0
-	case AVSRA:
-		return op_VSRA, 0, 0
-	case AVSRAB:
-		return op_VSRAB, 0, 0
-	case AVSRL:
-		return op_VSRL, 0, 0
-	case AVSRLB:
-		return op_VSRLB, 0, 0
-	case AVSEG:
-		return op_VSEG, 0, 0
-	case AVSEGB:
-		return op_VSEG, 0, 0
-	case AVSEGH:
-		return op_VSEG, 1, 0
-	case AVSEGF:
-		return op_VSEG, 2, 0
-	case AVST:
-		return op_VST, 0, 0
-	case AVSTEH:
-		return op_VSTEH, 0, 0
-	case AVSTEF:
-		return op_VSTEF, 0, 0
-	case AVSTEG:
-		return op_VSTEG, 0, 0
-	case AVSTEB:
-		return op_VSTEB, 0, 0
-	case AVSTM:
-		return op_VSTM, 0, 0
-	case AVSTL:
-		return op_VSTL, 0, 0
-	case AVSTRC:
-		return op_VSTRC, 0, 0
-	case AVSTRCB:
-		return op_VSTRC, 0, 0
-	case AVSTRCH:
-		return op_VSTRC, 1, 0
-	case AVSTRCF:
-		return op_VSTRC, 2, 0
-	case AVSTRCBS:
-		return op_VSTRC, 0, 1
-	case AVSTRCHS:
-		return op_VSTRC, 1, 1
-	case AVSTRCFS:
-		return op_VSTRC, 2, 1
-	case AVSTRCZB:
-		return op_VSTRC, 0, 2
-	case AVSTRCZH:
-		return op_VSTRC, 1, 2
-	case AVSTRCZF:
-		return op_VSTRC, 2, 2
-	case AVSTRCZBS:
-		return op_VSTRC, 0, 3
-	case AVSTRCZHS:
-		return op_VSTRC, 1, 3
-	case AVSTRCZFS:
-		return op_VSTRC, 2, 3
-	case AVS:
-		return op_VS, 0, 0
-	case AVSB:
-		return op_VS, 0, 0
-	case AVSH:
-		return op_VS, 1, 0
-	case AVSF:
-		return op_VS, 2, 0
-	case AVSG:
-		return op_VS, 3, 0
-	case AVSQ:
-		return op_VS, 4, 0
-	case AVSCBI:
-		return op_VSCBI, 0, 0
-	case AVSCBIB:
-		return op_VSCBI, 0, 0
-	case AVSCBIH:
-		return op_VSCBI, 1, 0
-	case AVSCBIF:
-		return op_VSCBI, 2, 0
-	case AVSCBIG:
-		return op_VSCBI, 3, 0
-	case AVSCBIQ:
-		return op_VSCBI, 4, 0
-	case AVSBCBI:
-		return op_VSBCBI, 0, 0
-	case AVSBCBIQ:
-		return op_VSBCBI, 4, 0
-	case AVSBI:
-		return op_VSBI, 0, 0
-	case AVSBIQ:
-		return op_VSBI, 4, 0
-	case AVSUMG:
-		return op_VSUMG, 0, 0
-	case AVSUMGH:
-		return op_VSUMG, 1, 0
-	case AVSUMGF:
-		return op_VSUMG, 2, 0
-	case AVSUMQ:
-		return op_VSUMQ, 0, 0
-	case AVSUMQF:
-		return op_VSUMQ, 1, 0
-	case AVSUMQG:
-		return op_VSUMQ, 2, 0
-	case AVSUM:
-		return op_VSUM, 0, 0
-	case AVSUMB:
-		return op_VSUM, 0, 0
-	case AVSUMH:
-		return op_VSUM, 1, 0
-	case AVTM:
-		return op_VTM, 0, 0
-	case AVUPH:
-		return op_VUPH, 0, 0
-	case AVUPHB:
-		return op_VUPH, 0, 0
-	case AVUPHH:
-		return op_VUPH, 1, 0
-	case AVUPHF:
-		return op_VUPH, 2, 0
-	case AVUPLH:
-		return op_VUPLH, 0, 0
-	case AVUPLHB:
-		return op_VUPLH, 0, 0
-	case AVUPLHH:
-		return op_VUPLH, 1, 0
-	case AVUPLHF:
-		return op_VUPLH, 2, 0
-	case AVUPLL:
-		return op_VUPLL, 0, 0
-	case AVUPLLB:
-		return op_VUPLL, 0, 0
-	case AVUPLLH:
-		return op_VUPLL, 1, 0
-	case AVUPLLF:
-		return op_VUPLL, 2, 0
-	case AVUPL:
-		return op_VUPL, 0, 0
-	case AVUPLB:
-		return op_VUPL, 0, 0
-	case AVUPLHW:
-		return op_VUPL, 1, 0
-	case AVUPLF:
-		return op_VUPL, 2, 0
-	}
-}
-
-// singleElementMask returns the single element mask bits required for the
-// given instruction.
-func singleElementMask(as obj.As) uint32 {
-	switch as {
-	case AWFADB,
-		AWFK,
-		AWFKDB,
-		AWFCEDB,
-		AWFCEDBS,
-		AWFCHDB,
-		AWFCHDBS,
-		AWFCHEDB,
-		AWFCHEDBS,
-		AWFC,
-		AWFCDB,
-		AWCDGB,
-		AWCDLGB,
-		AWCGDB,
-		AWCLGDB,
-		AWFDDB,
-		AWLDEB,
-		AWLEDB,
-		AWFMDB,
-		AWFMADB,
-		AWFMSDB,
-		AWFPSODB,
-		AWFLCDB,
-		AWFLNDB,
-		AWFLPDB,
-		AWFSQDB,
-		AWFSDB,
-		AWFTCIDB,
-		AWFIDB:
-		return 8
-	}
-	return 0
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/sizeof_test.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/sizeof_test.go
deleted file mode 100644
index 424d9b6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/sizeof_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/sizeof_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/sizeof_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !nacl
-
-package obj
-
-import (
-	"reflect"
-	"testing"
-	"unsafe"
-)
-
-// Assert that the size of important structures do not change unexpectedly.
-
-func TestSizeof(t *testing.T) {
-	const _64bit = unsafe.Sizeof(uintptr(0)) == 8
-
-	var tests = []struct {
-		val    interface{} // type as a value
-		_32bit uintptr     // size on 32bit platforms
-		_64bit uintptr     // size on 64bit platforms
-	}{
-		{Addr{}, 40, 64},
-		{LSym{}, 76, 128},
-		{Prog{}, 144, 224},
-	}
-
-	for _, tt := range tests {
-		want := tt._32bit
-		if _64bit {
-			want = tt._64bit
-		}
-		got := reflect.TypeOf(tt.val).Size()
-		if want != got {
-			t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/stack.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/stack.go
deleted file mode 100644
index b75d339..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/stack.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/stack.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/stack.go:1
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-// For the linkers. Must match Go definitions.
-// TODO(rsc): Share Go definitions with linkers directly.
-
-const (
-	STACKSYSTEM = 0
-	StackSystem = STACKSYSTEM
-	StackBig    = 4096
-	StackGuard  = 880*stackGuardMultiplier + StackSystem
-	StackSmall  = 128
-	StackLimit  = StackGuard - StackSystem - StackSmall
-)
-
-const (
-	StackPreempt = -1314 // 0xfff...fade
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/stringer.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/stringer.go
deleted file mode 100644
index d33fbf2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/stringer.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/stringer.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/stringer.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// This is a mini version of the stringer tool customized for the Anames table
-// in the architecture support for obj.
-// This version just generates the slice of strings, not the String method.
-
-package main
-
-import (
-	"bufio"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"regexp"
-	"strings"
-)
-
-var (
-	input  = flag.String("i", "", "input file name")
-	output = flag.String("o", "", "output file name")
-	pkg    = flag.String("p", "", "package name")
-)
-
-var Are = regexp.MustCompile(`^\tA([A-Z0-9]+)`)
-
-func main() {
-	flag.Parse()
-	if *input == "" || *output == "" || *pkg == "" {
-		flag.Usage()
-		os.Exit(2)
-	}
-	in, err := os.Open(*input)
-	if err != nil {
-		log.Fatal(err)
-	}
-	fd, err := os.Create(*output)
-	if err != nil {
-		log.Fatal(err)
-	}
-	out := bufio.NewWriter(fd)
-	defer out.Flush()
-	var on = false
-	s := bufio.NewScanner(in)
-	first := true
-	for s.Scan() {
-		line := s.Text()
-		if !on {
-			// First relevant line contains "= obj.ABase".
-			// If we find it, delete the = so we don't stop immediately.
-			const prefix = "= obj.ABase"
-			index := strings.Index(line, prefix)
-			if index < 0 {
-				continue
-			}
-			// It's on. Start with the header.
-			fmt.Fprintf(out, header, *input, *output, *pkg, *pkg)
-			on = true
-			line = line[:index]
-		}
-		// Strip comments so their text won't defeat our heuristic.
-		index := strings.Index(line, "//")
-		if index > 0 {
-			line = line[:index]
-		}
-		index = strings.Index(line, "/*")
-		if index > 0 {
-			line = line[:index]
-		}
-		// Termination condition: Any line with an = changes the sequence,
-		// so stop there, and stop at a closing brace.
-		if strings.HasPrefix(line, "}") || strings.ContainsRune(line, '=') {
-			break
-		}
-		sub := Are.FindStringSubmatch(line)
-		if len(sub) < 2 {
-			continue
-		}
-		if first {
-			fmt.Fprintf(out, "\tobj.A_ARCHSPECIFIC: %q,\n", sub[1])
-			first = false
-		} else {
-			fmt.Fprintf(out, "\t%q,\n", sub[1])
-		}
-	}
-	fmt.Fprintln(out, "}")
-	if s.Err() != nil {
-		log.Fatal(err)
-	}
-}
-
-const header = `// Generated by stringer -i %s -o %s -p %s
-// Do not edit.
-
-package %s
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-`
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/sym.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/sym.go
deleted file mode 100644
index 87a26e3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/sym.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/sym.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/sym.go:1
-// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package obj
-
-import (
-	"log"
-	"os"
-	"path/filepath"
-)
-
-func Linknew(arch *LinkArch) *Link {
-	ctxt := new(Link)
-	ctxt.Hash = make(map[SymVer]*LSym)
-	ctxt.Arch = arch
-	ctxt.Version = HistVersion
-
-	var buf string
-	buf, _ = os.Getwd()
-	if buf == "" {
-		buf = "/???"
-	}
-	buf = filepath.ToSlash(buf)
-	ctxt.Pathname = buf
-
-	ctxt.LineHist.GOROOT = GOROOT
-	ctxt.LineHist.Dir = ctxt.Pathname
-
-	ctxt.Headtype.Set(GOOS)
-	if ctxt.Headtype < 0 {
-		log.Fatalf("unknown goos %s", GOOS)
-	}
-
-	ctxt.Flag_optimize = true
-	ctxt.Framepointer_enabled = Framepointer_enabled(GOOS, arch.Name)
-	return ctxt
-}
-
-func Linklookup(ctxt *Link, name string, v int) *LSym {
-	s := ctxt.Hash[SymVer{name, v}]
-	if s != nil {
-		return s
-	}
-
-	s = &LSym{
-		Name:    name,
-		Type:    0,
-		Version: int16(v),
-		Size:    0,
-	}
-	ctxt.Hash[SymVer{name, v}] = s
-	return s
-}
-
-func Linksymfmt(s *LSym) string {
-	if s == nil {
-		return "<nil>"
-	}
-	return s.Name
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/symkind_string.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/symkind_string.go
deleted file mode 100644
index 617db93..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/symkind_string.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/symkind_string.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/symkind_string.go:1
-// Code generated by "stringer -type=SymKind"; DO NOT EDIT
-
-package obj
-
-import "fmt"
-
-const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASBSSSNOPTRBSSSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILESFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFO"
-
-var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 271, 280, 287, 292, 304, 316, 333, 350, 355, 364, 370, 380, 388, 398, 408}
-
-func (i SymKind) String() string {
-	if i < 0 || i >= SymKind(len(_SymKind_index)-1) {
-		return fmt.Sprintf("SymKind(%d)", i)
-	}
-	return _SymKind_name[_SymKind_index[i]:_SymKind_index[i+1]]
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/textflag.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/textflag.go
deleted file mode 100644
index 9dfe034..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/textflag.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/textflag.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/textflag.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file defines flags attached to various functions
-// and data objects. The compilers, assemblers, and linker must
-// all agree on these values.
-
-package obj
-
-const (
-	// Don't profile the marked routine.
-	//
-	// Deprecated: Not implemented, do not use.
-	NOPROF = 1
-
-	// It is ok for the linker to get multiple of these symbols. It will
-	// pick one of the duplicates to use.
-	DUPOK = 2
-
-	// Don't insert stack check preamble.
-	NOSPLIT = 4
-
-	// Put this data in a read-only section.
-	RODATA = 8
-
-	// This data contains no pointers.
-	NOPTR = 16
-
-	// This is a wrapper function and should not count as disabling 'recover'.
-	WRAPPER = 32
-
-	// This function uses its incoming context register.
-	NEEDCTXT = 64
-
-	// When passed to ggloblsym, causes Local to be set to true on the LSym it creates.
-	LOCAL = 128
-
-	// Allocate a word of thread local storage and store the offset from the
-	// thread local base to the thread local storage in this variable.
-	TLSBSS = 256
-
-	// Do not insert instructions to allocate a stack frame for this function.
-	// Only valid on functions that declare a frame size of 0.
-	// TODO(mwhudson): only implemented for ppc64x at present.
-	NOFRAME = 512
-
-	// Function can call reflect.Type.Method or reflect.Type.MethodByName.
-	REFLECTMETHOD = 1024
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/typekind.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/typekind.go
deleted file mode 100644
index 0e63ab8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/typekind.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/typekind.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/typekind.go:1
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-// Must match runtime and reflect.
-// Included by cmd/gc.
-
-const (
-	KindBool = 1 + iota
-	KindInt
-	KindInt8
-	KindInt16
-	KindInt32
-	KindInt64
-	KindUint
-	KindUint8
-	KindUint16
-	KindUint32
-	KindUint64
-	KindUintptr
-	KindFloat32
-	KindFloat64
-	KindComplex64
-	KindComplex128
-	KindArray
-	KindChan
-	KindFunc
-	KindInterface
-	KindMap
-	KindPtr
-	KindSlice
-	KindString
-	KindStruct
-	KindUnsafePointer
-	KindDirectIface = 1 << 5
-	KindGCProg      = 1 << 6
-	KindNoPointers  = 1 << 7
-	KindMask        = (1 << 5) - 1
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/util.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/util.go
deleted file mode 100644
index b015757..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/util.go
+++ /dev/null
@@ -1,502 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/util.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/util.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-	"os"
-	"strings"
-	"time"
-)
-
-const REG_NONE = 0
-
-var start time.Time
-
-func Cputime() float64 {
-	if start.IsZero() {
-		start = time.Now()
-	}
-	return time.Since(start).Seconds()
-}
-
-func envOr(key, value string) string {
-	if x := os.Getenv(key); x != "" {
-		return x
-	}
-	return value
-}
-
-var (
-	GOROOT  = envOr("GOROOT", defaultGOROOT)
-	GOARCH  = envOr("GOARCH", defaultGOARCH)
-	GOOS    = envOr("GOOS", defaultGOOS)
-	GO386   = envOr("GO386", defaultGO386)
-	GOARM   = goarm()
-	Version = version
-)
-
-func goarm() int {
-	switch v := envOr("GOARM", defaultGOARM); v {
-	case "5":
-		return 5
-	case "6":
-		return 6
-	case "7":
-		return 7
-	}
-	// Fail here, rather than validate at multiple call sites.
-	log.Fatalf("Invalid GOARM value. Must be 5, 6, or 7.")
-	panic("unreachable")
-}
-
-func Getgoextlinkenabled() string {
-	return envOr("GO_EXTLINK_ENABLED", defaultGO_EXTLINK_ENABLED)
-}
-
-func (p *Prog) Line() string {
-	return p.Ctxt.LineHist.LineString(int(p.Lineno))
-}
-
-var armCondCode = []string{
-	".EQ",
-	".NE",
-	".CS",
-	".CC",
-	".MI",
-	".PL",
-	".VS",
-	".VC",
-	".HI",
-	".LS",
-	".GE",
-	".LT",
-	".GT",
-	".LE",
-	"",
-	".NV",
-}
-
-/* ARM scond byte */
-const (
-	C_SCOND     = (1 << 4) - 1
-	C_SBIT      = 1 << 4
-	C_PBIT      = 1 << 5
-	C_WBIT      = 1 << 6
-	C_FBIT      = 1 << 7
-	C_UBIT      = 1 << 7
-	C_SCOND_XOR = 14
-)
-
-// CConv formats ARM condition codes.
-func CConv(s uint8) string {
-	if s == 0 {
-		return ""
-	}
-	sc := armCondCode[(s&C_SCOND)^C_SCOND_XOR]
-	if s&C_SBIT != 0 {
-		sc += ".S"
-	}
-	if s&C_PBIT != 0 {
-		sc += ".P"
-	}
-	if s&C_WBIT != 0 {
-		sc += ".W"
-	}
-	if s&C_UBIT != 0 { /* ambiguous with FBIT */
-		sc += ".U"
-	}
-	return sc
-}
-
-func (p *Prog) String() string {
-	if p == nil {
-		return "<nil Prog>"
-	}
-
-	if p.Ctxt == nil {
-		return "<Prog without ctxt>"
-	}
-
-	sc := CConv(p.Scond)
-
-	var buf bytes.Buffer
-
-	fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), p.As, sc)
-	sep := "\t"
-	quadOpAmd64 := p.RegTo2 == -1
-	if quadOpAmd64 {
-		fmt.Fprintf(&buf, "%s$%d", sep, p.From3.Offset)
-		sep = ", "
-	}
-	if p.From.Type != TYPE_NONE {
-		fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.From))
-		sep = ", "
-	}
-	if p.Reg != REG_NONE {
-		// Should not happen but might as well show it if it does.
-		fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.Reg)))
-		sep = ", "
-	}
-	if p.From3Type() != TYPE_NONE {
-		if p.From3.Type == TYPE_CONST && p.As == ATEXT {
-			// Special case - omit $.
-			fmt.Fprintf(&buf, "%s%d", sep, p.From3.Offset)
-		} else if quadOpAmd64 {
-			fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.From3.Reg)))
-		} else {
-			fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, p.From3))
-		}
-		sep = ", "
-	}
-	if p.To.Type != TYPE_NONE {
-		fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.To))
-	}
-	if p.RegTo2 != REG_NONE && !quadOpAmd64 {
-		fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.RegTo2)))
-	}
-	return buf.String()
-}
-
-func (ctxt *Link) NewProg() *Prog {
-	var p *Prog
-	if i := ctxt.allocIdx; i < len(ctxt.progs) {
-		p = &ctxt.progs[i]
-		ctxt.allocIdx = i + 1
-	} else {
-		p = new(Prog) // should be the only call to this; all others should use ctxt.NewProg
-	}
-	p.Ctxt = ctxt
-	return p
-}
-func (ctxt *Link) freeProgs() {
-	s := ctxt.progs[:ctxt.allocIdx]
-	for i := range s {
-		s[i] = Prog{}
-	}
-	ctxt.allocIdx = 0
-}
-
-func (ctxt *Link) Line(n int) string {
-	return ctxt.LineHist.LineString(n)
-}
-
-func Getcallerpc(interface{}) uintptr {
-	return 1
-}
-
-func (ctxt *Link) Dconv(a *Addr) string {
-	return Dconv(nil, a)
-}
-
-func Dconv(p *Prog, a *Addr) string {
-	var str string
-
-	switch a.Type {
-	default:
-		str = fmt.Sprintf("type=%d", a.Type)
-
-	case TYPE_NONE:
-		str = ""
-		if a.Name != NAME_NONE || a.Reg != 0 || a.Sym != nil {
-			str = fmt.Sprintf("%v(%v)(NONE)", Mconv(a), Rconv(int(a.Reg)))
-		}
-
-	case TYPE_REG:
-		// TODO(rsc): This special case is for x86 instructions like
-		//	PINSRQ	CX,$1,X6
-		// where the $1 is included in the p->to Addr.
-		// Move into a new field.
-		if a.Offset != 0 {
-			str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(int(a.Reg)))
-			break
-		}
-
-		str = Rconv(int(a.Reg))
-		if a.Name != NAME_NONE || a.Sym != nil {
-			str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg)))
-		}
-
-	case TYPE_BRANCH:
-		if a.Sym != nil {
-			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
-		} else if p != nil && p.Pcond != nil {
-			str = fmt.Sprint(p.Pcond.Pc)
-		} else if a.Val != nil {
-			str = fmt.Sprint(a.Val.(*Prog).Pc)
-		} else {
-			str = fmt.Sprintf("%d(PC)", a.Offset)
-		}
-
-	case TYPE_INDIR:
-		str = fmt.Sprintf("*%s", Mconv(a))
-
-	case TYPE_MEM:
-		str = Mconv(a)
-		if a.Index != REG_NONE {
-			str += fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
-		}
-
-	case TYPE_CONST:
-		if a.Reg != 0 {
-			str = fmt.Sprintf("$%v(%v)", Mconv(a), Rconv(int(a.Reg)))
-		} else {
-			str = fmt.Sprintf("$%v", Mconv(a))
-		}
-
-	case TYPE_TEXTSIZE:
-		if a.Val.(int32) == ArgsSizeUnknown {
-			str = fmt.Sprintf("$%d", a.Offset)
-		} else {
-			str = fmt.Sprintf("$%d-%d", a.Offset, a.Val.(int32))
-		}
-
-	case TYPE_FCONST:
-		str = fmt.Sprintf("%.17g", a.Val.(float64))
-		// Make sure 1 prints as 1.0
-		if !strings.ContainsAny(str, ".e") {
-			str += ".0"
-		}
-		str = fmt.Sprintf("$(%s)", str)
-
-	case TYPE_SCONST:
-		str = fmt.Sprintf("$%q", a.Val.(string))
-
-	case TYPE_ADDR:
-		str = fmt.Sprintf("$%s", Mconv(a))
-
-	case TYPE_SHIFT:
-		v := int(a.Offset)
-		ops := "<<>>->@>"
-		switch GOARCH {
-		case "arm":
-			op := ops[((v>>5)&3)<<1:]
-			if v&(1<<4) != 0 {
-				str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
-			} else {
-				str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
-			}
-			if a.Reg != 0 {
-				str += fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
-			}
-		case "arm64":
-			op := ops[((v>>22)&3)<<1:]
-			str = fmt.Sprintf("R%d%c%c%d", (v>>16)&31, op[0], op[1], (v>>10)&63)
-		default:
-			panic("TYPE_SHIFT is not supported on " + GOARCH)
-		}
-
-	case TYPE_REGREG:
-		str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
-
-	case TYPE_REGREG2:
-		str = fmt.Sprintf("%v, %v", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
-
-	case TYPE_REGLIST:
-		str = regListConv(int(a.Offset))
-	}
-
-	return str
-}
-
-func Mconv(a *Addr) string {
-	var str string
-
-	switch a.Name {
-	default:
-		str = fmt.Sprintf("name=%d", a.Name)
-
-	case NAME_NONE:
-		switch {
-		case a.Reg == REG_NONE:
-			str = fmt.Sprint(a.Offset)
-		case a.Offset == 0:
-			str = fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
-		case a.Offset != 0:
-			str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(int(a.Reg)))
-		}
-
-	case NAME_EXTERN:
-		if a.Sym != nil {
-			str = fmt.Sprintf("%s%s(SB)", a.Sym.Name, offConv(a.Offset))
-		} else {
-			str = fmt.Sprintf("%s(SB)", offConv(a.Offset))
-		}
-
-	case NAME_GOTREF:
-		if a.Sym != nil {
-			str = fmt.Sprintf("%s%s@GOT(SB)", a.Sym.Name, offConv(a.Offset))
-		} else {
-			str = fmt.Sprintf("%s@GOT(SB)", offConv(a.Offset))
-		}
-
-	case NAME_STATIC:
-		if a.Sym != nil {
-			str = fmt.Sprintf("%s<>%s(SB)", a.Sym.Name, offConv(a.Offset))
-		} else {
-			str = fmt.Sprintf("<>%s(SB)", offConv(a.Offset))
-		}
-
-	case NAME_AUTO:
-		if a.Sym != nil {
-			str = fmt.Sprintf("%s%s(SP)", a.Sym.Name, offConv(a.Offset))
-		} else {
-			str = fmt.Sprintf("%s(SP)", offConv(a.Offset))
-		}
-
-	case NAME_PARAM:
-		if a.Sym != nil {
-			str = fmt.Sprintf("%s%s(FP)", a.Sym.Name, offConv(a.Offset))
-		} else {
-			str = fmt.Sprintf("%s(FP)", offConv(a.Offset))
-		}
-	}
-	return str
-}
-
-func offConv(off int64) string {
-	if off == 0 {
-		return ""
-	}
-	return fmt.Sprintf("%+d", off)
-}
-
-type regSet struct {
-	lo    int
-	hi    int
-	Rconv func(int) string
-}
-
-// Few enough architectures that a linear scan is fastest.
-// Not even worth sorting.
-var regSpace []regSet
-
-/*
-	Each architecture defines a register space as a unique
-	integer range.
-	Here is the list of architectures and the base of their register spaces.
-*/
-
-const (
-	// Because of masking operations in the encodings, each register
-	// space should start at 0 modulo some power of 2.
-	RBase386   = 1 * 1024
-	RBaseAMD64 = 2 * 1024
-	RBaseARM   = 3 * 1024
-	RBasePPC64 = 4 * 1024  // range [4k, 8k)
-	RBaseARM64 = 8 * 1024  // range [8k, 13k)
-	RBaseMIPS  = 13 * 1024 // range [13k, 14k)
-	RBaseS390X = 14 * 1024 // range [14k, 15k)
-)
-
-// RegisterRegister binds a pretty-printer (Rconv) for register
-// numbers to a given register number range. Lo is inclusive,
-// hi exclusive (valid registers are lo through hi-1).
-func RegisterRegister(lo, hi int, Rconv func(int) string) {
-	regSpace = append(regSpace, regSet{lo, hi, Rconv})
-}
-
-func Rconv(reg int) string {
-	if reg == REG_NONE {
-		return "NONE"
-	}
-	for i := range regSpace {
-		rs := &regSpace[i]
-		if rs.lo <= reg && reg < rs.hi {
-			return rs.Rconv(reg)
-		}
-	}
-	return fmt.Sprintf("R???%d", reg)
-}
-
-func regListConv(list int) string {
-	str := ""
-
-	for i := 0; i < 16; i++ { // TODO: 16 is ARM-specific.
-		if list&(1<<uint(i)) != 0 {
-			if str == "" {
-				str += "["
-			} else {
-				str += ","
-			}
-			// This is ARM-specific; R10 is g.
-			if i == 10 {
-				str += "g"
-			} else {
-				str += fmt.Sprintf("R%d", i)
-			}
-		}
-	}
-
-	str += "]"
-	return str
-}
-
-type opSet struct {
-	lo    As
-	names []string
-}
-
-// Not even worth sorting
-var aSpace []opSet
-
-// RegisterOpcode binds a list of instruction names
-// to a given instruction number range.
-func RegisterOpcode(lo As, Anames []string) {
-	if len(Anames) > AllowedOpCodes {
-		panic(fmt.Sprintf("too many instructions, have %d max %d", len(Anames), AllowedOpCodes))
-	}
-	aSpace = append(aSpace, opSet{lo, Anames})
-}
-
-func (a As) String() string {
-	if 0 <= a && int(a) < len(Anames) {
-		return Anames[a]
-	}
-	for i := range aSpace {
-		as := &aSpace[i]
-		if as.lo <= a && int(a-as.lo) < len(as.names) {
-			return as.names[a-as.lo]
-		}
-	}
-	return fmt.Sprintf("A???%d", a)
-}
-
-var Anames = []string{
-	"XXX",
-	"CALL",
-	"DUFFCOPY",
-	"DUFFZERO",
-	"END",
-	"FUNCDATA",
-	"JMP",
-	"NOP",
-	"PCDATA",
-	"RET",
-	"TEXT",
-	"TYPE",
-	"UNDEF",
-	"USEFIELD",
-	"VARDEF",
-	"VARKILL",
-	"VARLIVE",
-}
-
-func Bool2int(b bool) int {
-	// The compiler currently only optimizes this form.
-	// See issue 6011.
-	var i int
-	if b {
-		i = 1
-	} else {
-		i = 0
-	}
-	return i
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/a.out.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/a.out.go
deleted file mode 100644
index d0dcf92..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/a.out.go
+++ /dev/null
@@ -1,1012 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/a.out.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/a.out.go:1
-// Inferno utils/6c/6.out.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/6.out.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import "bootstrap/cmd/internal/obj"
-
-//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p x86
-
-const (
-	/* mark flags */
-	DONE          = 1 << iota
-	PRESERVEFLAGS // not allowed to clobber flags
-)
-
-/*
- *	amd64
- */
-const (
-	AAAA = obj.ABaseAMD64 + obj.A_ARCHSPECIFIC + iota
-	AAAD
-	AAAM
-	AAAS
-	AADCB
-	AADCL
-	AADCW
-	AADDB
-	AADDL
-	AADDW
-	AADJSP
-	AANDB
-	AANDL
-	AANDW
-	AARPL
-	ABOUNDL
-	ABOUNDW
-	ABSFL
-	ABSFW
-	ABSRL
-	ABSRW
-	ABTL
-	ABTW
-	ABTCL
-	ABTCW
-	ABTRL
-	ABTRW
-	ABTSL
-	ABTSW
-	ABYTE
-	ACLC
-	ACLD
-	ACLI
-	ACLTS
-	ACMC
-	ACMPB
-	ACMPL
-	ACMPW
-	ACMPSB
-	ACMPSL
-	ACMPSW
-	ADAA
-	ADAS
-	ADECB
-	ADECL
-	ADECQ
-	ADECW
-	ADIVB
-	ADIVL
-	ADIVW
-	AENTER
-	AHADDPD
-	AHADDPS
-	AHLT
-	AHSUBPD
-	AHSUBPS
-	AIDIVB
-	AIDIVL
-	AIDIVW
-	AIMULB
-	AIMULL
-	AIMULW
-	AINB
-	AINL
-	AINW
-	AINCB
-	AINCL
-	AINCQ
-	AINCW
-	AINSB
-	AINSL
-	AINSW
-	AINT
-	AINTO
-	AIRETL
-	AIRETW
-	AJCC // >= unsigned
-	AJCS // < unsigned
-	AJCXZL
-	AJEQ // == (zero)
-	AJGE // >= signed
-	AJGT // > signed
-	AJHI // > unsigned
-	AJLE // <= signed
-	AJLS // <= unsigned
-	AJLT // < signed
-	AJMI // sign bit set (negative)
-	AJNE // != (nonzero)
-	AJOC // overflow clear
-	AJOS // overflow set
-	AJPC // parity clear
-	AJPL // sign bit clear (positive)
-	AJPS // parity set
-	ALAHF
-	ALARL
-	ALARW
-	ALEAL
-	ALEAW
-	ALEAVEL
-	ALEAVEW
-	ALOCK
-	ALODSB
-	ALODSL
-	ALODSW
-	ALONG
-	ALOOP
-	ALOOPEQ
-	ALOOPNE
-	ALSLL
-	ALSLW
-	AMOVB
-	AMOVL
-	AMOVW
-	AMOVBLSX
-	AMOVBLZX
-	AMOVBQSX
-	AMOVBQZX
-	AMOVBWSX
-	AMOVBWZX
-	AMOVWLSX
-	AMOVWLZX
-	AMOVWQSX
-	AMOVWQZX
-	AMOVSB
-	AMOVSL
-	AMOVSW
-	AMULB
-	AMULL
-	AMULW
-	ANEGB
-	ANEGL
-	ANEGW
-	ANOTB
-	ANOTL
-	ANOTW
-	AORB
-	AORL
-	AORW
-	AOUTB
-	AOUTL
-	AOUTW
-	AOUTSB
-	AOUTSL
-	AOUTSW
-	APAUSE
-	APOPAL
-	APOPAW
-	APOPCNTW
-	APOPCNTL
-	APOPCNTQ
-	APOPFL
-	APOPFW
-	APOPL
-	APOPW
-	APUSHAL
-	APUSHAW
-	APUSHFL
-	APUSHFW
-	APUSHL
-	APUSHW
-	ARCLB
-	ARCLL
-	ARCLW
-	ARCRB
-	ARCRL
-	ARCRW
-	AREP
-	AREPN
-	AROLB
-	AROLL
-	AROLW
-	ARORB
-	ARORL
-	ARORW
-	ASAHF
-	ASALB
-	ASALL
-	ASALW
-	ASARB
-	ASARL
-	ASARW
-	ASBBB
-	ASBBL
-	ASBBW
-	ASCASB
-	ASCASL
-	ASCASW
-	ASETCC
-	ASETCS
-	ASETEQ
-	ASETGE
-	ASETGT
-	ASETHI
-	ASETLE
-	ASETLS
-	ASETLT
-	ASETMI
-	ASETNE
-	ASETOC
-	ASETOS
-	ASETPC
-	ASETPL
-	ASETPS
-	ACDQ
-	ACWD
-	ASHLB
-	ASHLL
-	ASHLW
-	ASHRB
-	ASHRL
-	ASHRW
-	ASTC
-	ASTD
-	ASTI
-	ASTOSB
-	ASTOSL
-	ASTOSW
-	ASUBB
-	ASUBL
-	ASUBW
-	ASYSCALL
-	ATESTB
-	ATESTL
-	ATESTW
-	AVERR
-	AVERW
-	AWAIT
-	AWORD
-	AXCHGB
-	AXCHGL
-	AXCHGW
-	AXLAT
-	AXORB
-	AXORL
-	AXORW
-
-	AFMOVB
-	AFMOVBP
-	AFMOVD
-	AFMOVDP
-	AFMOVF
-	AFMOVFP
-	AFMOVL
-	AFMOVLP
-	AFMOVV
-	AFMOVVP
-	AFMOVW
-	AFMOVWP
-	AFMOVX
-	AFMOVXP
-
-	AFCOMD
-	AFCOMDP
-	AFCOMDPP
-	AFCOMF
-	AFCOMFP
-	AFCOML
-	AFCOMLP
-	AFCOMW
-	AFCOMWP
-	AFUCOM
-	AFUCOMP
-	AFUCOMPP
-
-	AFADDDP
-	AFADDW
-	AFADDL
-	AFADDF
-	AFADDD
-
-	AFMULDP
-	AFMULW
-	AFMULL
-	AFMULF
-	AFMULD
-
-	AFSUBDP
-	AFSUBW
-	AFSUBL
-	AFSUBF
-	AFSUBD
-
-	AFSUBRDP
-	AFSUBRW
-	AFSUBRL
-	AFSUBRF
-	AFSUBRD
-
-	AFDIVDP
-	AFDIVW
-	AFDIVL
-	AFDIVF
-	AFDIVD
-
-	AFDIVRDP
-	AFDIVRW
-	AFDIVRL
-	AFDIVRF
-	AFDIVRD
-
-	AFXCHD
-	AFFREE
-
-	AFLDCW
-	AFLDENV
-	AFRSTOR
-	AFSAVE
-	AFSTCW
-	AFSTENV
-	AFSTSW
-
-	AF2XM1
-	AFABS
-	AFCHS
-	AFCLEX
-	AFCOS
-	AFDECSTP
-	AFINCSTP
-	AFINIT
-	AFLD1
-	AFLDL2E
-	AFLDL2T
-	AFLDLG2
-	AFLDLN2
-	AFLDPI
-	AFLDZ
-	AFNOP
-	AFPATAN
-	AFPREM
-	AFPREM1
-	AFPTAN
-	AFRNDINT
-	AFSCALE
-	AFSIN
-	AFSINCOS
-	AFSQRT
-	AFTST
-	AFXAM
-	AFXTRACT
-	AFYL2X
-	AFYL2XP1
-
-	// extra 32-bit operations
-	ACMPXCHGB
-	ACMPXCHGL
-	ACMPXCHGW
-	ACMPXCHG8B
-	ACPUID
-	AINVD
-	AINVLPG
-	ALFENCE
-	AMFENCE
-	AMOVNTIL
-	ARDMSR
-	ARDPMC
-	ARDTSC
-	ARSM
-	ASFENCE
-	ASYSRET
-	AWBINVD
-	AWRMSR
-	AXADDB
-	AXADDL
-	AXADDW
-
-	// conditional move
-	ACMOVLCC
-	ACMOVLCS
-	ACMOVLEQ
-	ACMOVLGE
-	ACMOVLGT
-	ACMOVLHI
-	ACMOVLLE
-	ACMOVLLS
-	ACMOVLLT
-	ACMOVLMI
-	ACMOVLNE
-	ACMOVLOC
-	ACMOVLOS
-	ACMOVLPC
-	ACMOVLPL
-	ACMOVLPS
-	ACMOVQCC
-	ACMOVQCS
-	ACMOVQEQ
-	ACMOVQGE
-	ACMOVQGT
-	ACMOVQHI
-	ACMOVQLE
-	ACMOVQLS
-	ACMOVQLT
-	ACMOVQMI
-	ACMOVQNE
-	ACMOVQOC
-	ACMOVQOS
-	ACMOVQPC
-	ACMOVQPL
-	ACMOVQPS
-	ACMOVWCC
-	ACMOVWCS
-	ACMOVWEQ
-	ACMOVWGE
-	ACMOVWGT
-	ACMOVWHI
-	ACMOVWLE
-	ACMOVWLS
-	ACMOVWLT
-	ACMOVWMI
-	ACMOVWNE
-	ACMOVWOC
-	ACMOVWOS
-	ACMOVWPC
-	ACMOVWPL
-	ACMOVWPS
-
-	// 64-bit
-	AADCQ
-	AADDQ
-	AANDQ
-	ABSFQ
-	ABSRQ
-	ABTCQ
-	ABTQ
-	ABTRQ
-	ABTSQ
-	ACMPQ
-	ACMPSQ
-	ACMPXCHGQ
-	ACQO
-	ADIVQ
-	AIDIVQ
-	AIMULQ
-	AIRETQ
-	AJCXZQ
-	ALEAQ
-	ALEAVEQ
-	ALODSQ
-	AMOVQ
-	AMOVLQSX
-	AMOVLQZX
-	AMOVNTIQ
-	AMOVSQ
-	AMULQ
-	ANEGQ
-	ANOTQ
-	AORQ
-	APOPFQ
-	APOPQ
-	APUSHFQ
-	APUSHQ
-	ARCLQ
-	ARCRQ
-	AROLQ
-	ARORQ
-	AQUAD
-	ASALQ
-	ASARQ
-	ASBBQ
-	ASCASQ
-	ASHLQ
-	ASHRQ
-	ASTOSQ
-	ASUBQ
-	ATESTQ
-	AXADDQ
-	AXCHGQ
-	AXORQ
-	AXGETBV
-
-	// media
-	AADDPD
-	AADDPS
-	AADDSD
-	AADDSS
-	AANDNL
-	AANDNQ
-	AANDNPD
-	AANDNPS
-	AANDPD
-	AANDPS
-	ABEXTRL
-	ABEXTRQ
-	ABLSIL
-	ABLSIQ
-	ABLSMSKL
-	ABLSMSKQ
-	ABLSRL
-	ABLSRQ
-	ABZHIL
-	ABZHIQ
-	ACMPPD
-	ACMPPS
-	ACMPSD
-	ACMPSS
-	ACOMISD
-	ACOMISS
-	ACVTPD2PL
-	ACVTPD2PS
-	ACVTPL2PD
-	ACVTPL2PS
-	ACVTPS2PD
-	ACVTPS2PL
-	ACVTSD2SL
-	ACVTSD2SQ
-	ACVTSD2SS
-	ACVTSL2SD
-	ACVTSL2SS
-	ACVTSQ2SD
-	ACVTSQ2SS
-	ACVTSS2SD
-	ACVTSS2SL
-	ACVTSS2SQ
-	ACVTTPD2PL
-	ACVTTPS2PL
-	ACVTTSD2SL
-	ACVTTSD2SQ
-	ACVTTSS2SL
-	ACVTTSS2SQ
-	ADIVPD
-	ADIVPS
-	ADIVSD
-	ADIVSS
-	AEMMS
-	AFXRSTOR
-	AFXRSTOR64
-	AFXSAVE
-	AFXSAVE64
-	ALDDQU
-	ALDMXCSR
-	AMASKMOVOU
-	AMASKMOVQ
-	AMAXPD
-	AMAXPS
-	AMAXSD
-	AMAXSS
-	AMINPD
-	AMINPS
-	AMINSD
-	AMINSS
-	AMOVAPD
-	AMOVAPS
-	AMOVOU
-	AMOVHLPS
-	AMOVHPD
-	AMOVHPS
-	AMOVLHPS
-	AMOVLPD
-	AMOVLPS
-	AMOVMSKPD
-	AMOVMSKPS
-	AMOVNTO
-	AMOVNTPD
-	AMOVNTPS
-	AMOVNTQ
-	AMOVO
-	AMOVQOZX
-	AMOVSD
-	AMOVSS
-	AMOVUPD
-	AMOVUPS
-	AMULPD
-	AMULPS
-	AMULSD
-	AMULSS
-	AMULXL
-	AMULXQ
-	AORPD
-	AORPS
-	APACKSSLW
-	APACKSSWB
-	APACKUSWB
-	APADDB
-	APADDL
-	APADDQ
-	APADDSB
-	APADDSW
-	APADDUSB
-	APADDUSW
-	APADDW
-	APAND
-	APANDN
-	APAVGB
-	APAVGW
-	APCMPEQB
-	APCMPEQL
-	APCMPEQW
-	APCMPGTB
-	APCMPGTL
-	APCMPGTW
-	APDEPL
-	APDEPQ
-	APEXTL
-	APEXTQ
-	APEXTRB
-	APEXTRD
-	APEXTRQ
-	APEXTRW
-	APHADDD
-	APHADDSW
-	APHADDW
-	APHMINPOSUW
-	APHSUBD
-	APHSUBSW
-	APHSUBW
-	APINSRB
-	APINSRD
-	APINSRQ
-	APINSRW
-	APMADDWL
-	APMAXSW
-	APMAXUB
-	APMINSW
-	APMINUB
-	APMOVMSKB
-	APMOVSXBD
-	APMOVSXBQ
-	APMOVSXBW
-	APMOVSXDQ
-	APMOVSXWD
-	APMOVSXWQ
-	APMOVZXBD
-	APMOVZXBQ
-	APMOVZXBW
-	APMOVZXDQ
-	APMOVZXWD
-	APMOVZXWQ
-	APMULDQ
-	APMULHUW
-	APMULHW
-	APMULLD
-	APMULLW
-	APMULULQ
-	APOR
-	APSADBW
-	APSHUFB
-	APSHUFHW
-	APSHUFL
-	APSHUFLW
-	APSHUFW
-	APSLLL
-	APSLLO
-	APSLLQ
-	APSLLW
-	APSRAL
-	APSRAW
-	APSRLL
-	APSRLO
-	APSRLQ
-	APSRLW
-	APSUBB
-	APSUBL
-	APSUBQ
-	APSUBSB
-	APSUBSW
-	APSUBUSB
-	APSUBUSW
-	APSUBW
-	APUNPCKHBW
-	APUNPCKHLQ
-	APUNPCKHQDQ
-	APUNPCKHWL
-	APUNPCKLBW
-	APUNPCKLLQ
-	APUNPCKLQDQ
-	APUNPCKLWL
-	APXOR
-	ARCPPS
-	ARCPSS
-	ARSQRTPS
-	ARSQRTSS
-	ASARXL
-	ASARXQ
-	ASHLXL
-	ASHLXQ
-	ASHRXL
-	ASHRXQ
-	ASHUFPD
-	ASHUFPS
-	ASQRTPD
-	ASQRTPS
-	ASQRTSD
-	ASQRTSS
-	ASTMXCSR
-	ASUBPD
-	ASUBPS
-	ASUBSD
-	ASUBSS
-	AUCOMISD
-	AUCOMISS
-	AUNPCKHPD
-	AUNPCKHPS
-	AUNPCKLPD
-	AUNPCKLPS
-	AXORPD
-	AXORPS
-	APCMPESTRI
-
-	ARETFW
-	ARETFL
-	ARETFQ
-	ASWAPGS
-
-	AMODE
-	ACRC32B
-	ACRC32Q
-	AIMUL3Q
-
-	APREFETCHT0
-	APREFETCHT1
-	APREFETCHT2
-	APREFETCHNTA
-
-	AMOVQL
-	ABSWAPL
-	ABSWAPQ
-
-	AAESENC
-	AAESENCLAST
-	AAESDEC
-	AAESDECLAST
-	AAESIMC
-	AAESKEYGENASSIST
-
-	AROUNDPS
-	AROUNDSS
-	AROUNDPD
-	AROUNDSD
-	AMOVDDUP
-	AMOVSHDUP
-	AMOVSLDUP
-
-	APSHUFD
-	APCLMULQDQ
-
-	AVZEROUPPER
-	AVMOVDQU
-	AVMOVNTDQ
-	AVMOVDQA
-	AVPCMPEQB
-	AVPXOR
-	AVPMOVMSKB
-	AVPAND
-	AVPTEST
-	AVPBROADCASTB
-	AVPSHUFB
-	AVPSHUFD
-	AVPERM2F128
-	AVPALIGNR
-	AVPADDQ
-	AVPADDD
-	AVPSRLDQ
-	AVPSLLDQ
-	AVPSRLQ
-	AVPSLLQ
-	AVPSRLD
-	AVPSLLD
-	AVPOR
-	AVPBLENDD
-	AVINSERTI128
-	AVPERM2I128
-	ARORXL
-	ARORXQ
-	AVBROADCASTSS
-	AVBROADCASTSD
-	AVMOVDDUP
-	AVMOVSHDUP
-	AVMOVSLDUP
-
-	// from 386
-	AJCXZW
-	AFCMOVCC
-	AFCMOVCS
-	AFCMOVEQ
-	AFCMOVHI
-	AFCMOVLS
-	AFCMOVNE
-	AFCMOVNU
-	AFCMOVUN
-	AFCOMI
-	AFCOMIP
-	AFUCOMI
-	AFUCOMIP
-
-	// TSX
-	AXACQUIRE
-	AXRELEASE
-	AXBEGIN
-	AXEND
-	AXABORT
-	AXTEST
-
-	ALAST
-)
-
-const (
-	REG_NONE = 0
-)
-
-const (
-	REG_AL = obj.RBaseAMD64 + iota
-	REG_CL
-	REG_DL
-	REG_BL
-	REG_SPB
-	REG_BPB
-	REG_SIB
-	REG_DIB
-	REG_R8B
-	REG_R9B
-	REG_R10B
-	REG_R11B
-	REG_R12B
-	REG_R13B
-	REG_R14B
-	REG_R15B
-
-	REG_AX
-	REG_CX
-	REG_DX
-	REG_BX
-	REG_SP
-	REG_BP
-	REG_SI
-	REG_DI
-	REG_R8
-	REG_R9
-	REG_R10
-	REG_R11
-	REG_R12
-	REG_R13
-	REG_R14
-	REG_R15
-
-	REG_AH
-	REG_CH
-	REG_DH
-	REG_BH
-
-	REG_F0
-	REG_F1
-	REG_F2
-	REG_F3
-	REG_F4
-	REG_F5
-	REG_F6
-	REG_F7
-
-	REG_M0
-	REG_M1
-	REG_M2
-	REG_M3
-	REG_M4
-	REG_M5
-	REG_M6
-	REG_M7
-
-	REG_X0
-	REG_X1
-	REG_X2
-	REG_X3
-	REG_X4
-	REG_X5
-	REG_X6
-	REG_X7
-	REG_X8
-	REG_X9
-	REG_X10
-	REG_X11
-	REG_X12
-	REG_X13
-	REG_X14
-	REG_X15
-
-	REG_Y0
-	REG_Y1
-	REG_Y2
-	REG_Y3
-	REG_Y4
-	REG_Y5
-	REG_Y6
-	REG_Y7
-	REG_Y8
-	REG_Y9
-	REG_Y10
-	REG_Y11
-	REG_Y12
-	REG_Y13
-	REG_Y14
-	REG_Y15
-
-	REG_CS
-	REG_SS
-	REG_DS
-	REG_ES
-	REG_FS
-	REG_GS
-
-	REG_GDTR /* global descriptor table register */
-	REG_IDTR /* interrupt descriptor table register */
-	REG_LDTR /* local descriptor table register */
-	REG_MSW  /* machine status word */
-	REG_TASK /* task register */
-
-	REG_CR0
-	REG_CR1
-	REG_CR2
-	REG_CR3
-	REG_CR4
-	REG_CR5
-	REG_CR6
-	REG_CR7
-	REG_CR8
-	REG_CR9
-	REG_CR10
-	REG_CR11
-	REG_CR12
-	REG_CR13
-	REG_CR14
-	REG_CR15
-
-	REG_DR0
-	REG_DR1
-	REG_DR2
-	REG_DR3
-	REG_DR4
-	REG_DR5
-	REG_DR6
-	REG_DR7
-
-	REG_TR0
-	REG_TR1
-	REG_TR2
-	REG_TR3
-	REG_TR4
-	REG_TR5
-	REG_TR6
-	REG_TR7
-
-	REG_TLS
-
-	MAXREG
-
-	REG_CR = REG_CR0
-	REG_DR = REG_DR0
-	REG_TR = REG_TR0
-
-	REGARG   = -1
-	REGRET   = REG_AX
-	FREGRET  = REG_X0
-	REGSP    = REG_SP
-	REGCTXT  = REG_DX
-	REGEXT   = REG_R15     /* compiler allocates external registers R15 down */
-	FREGMIN  = REG_X0 + 5  /* first register variable */
-	FREGEXT  = REG_X0 + 15 /* first external register */
-	T_TYPE   = 1 << 0
-	T_INDEX  = 1 << 1
-	T_OFFSET = 1 << 2
-	T_FCONST = 1 << 3
-	T_SYM    = 1 << 4
-	T_SCONST = 1 << 5
-	T_64     = 1 << 6
-	T_GOTYPE = 1 << 7
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/anames.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/anames.go
deleted file mode 100644
index dfafed3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/anames.go
+++ /dev/null
@@ -1,772 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/anames.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/anames.go:1
-// Generated by stringer -i a.out.go -o anames.go -p x86
-// Do not edit.
-
-package x86
-
-import "bootstrap/cmd/internal/obj"
-
-var Anames = []string{
-	obj.A_ARCHSPECIFIC: "AAA",
-	"AAD",
-	"AAM",
-	"AAS",
-	"ADCB",
-	"ADCL",
-	"ADCW",
-	"ADDB",
-	"ADDL",
-	"ADDW",
-	"ADJSP",
-	"ANDB",
-	"ANDL",
-	"ANDW",
-	"ARPL",
-	"BOUNDL",
-	"BOUNDW",
-	"BSFL",
-	"BSFW",
-	"BSRL",
-	"BSRW",
-	"BTL",
-	"BTW",
-	"BTCL",
-	"BTCW",
-	"BTRL",
-	"BTRW",
-	"BTSL",
-	"BTSW",
-	"BYTE",
-	"CLC",
-	"CLD",
-	"CLI",
-	"CLTS",
-	"CMC",
-	"CMPB",
-	"CMPL",
-	"CMPW",
-	"CMPSB",
-	"CMPSL",
-	"CMPSW",
-	"DAA",
-	"DAS",
-	"DECB",
-	"DECL",
-	"DECQ",
-	"DECW",
-	"DIVB",
-	"DIVL",
-	"DIVW",
-	"ENTER",
-	"HADDPD",
-	"HADDPS",
-	"HLT",
-	"HSUBPD",
-	"HSUBPS",
-	"IDIVB",
-	"IDIVL",
-	"IDIVW",
-	"IMULB",
-	"IMULL",
-	"IMULW",
-	"INB",
-	"INL",
-	"INW",
-	"INCB",
-	"INCL",
-	"INCQ",
-	"INCW",
-	"INSB",
-	"INSL",
-	"INSW",
-	"INT",
-	"INTO",
-	"IRETL",
-	"IRETW",
-	"JCC",
-	"JCS",
-	"JCXZL",
-	"JEQ",
-	"JGE",
-	"JGT",
-	"JHI",
-	"JLE",
-	"JLS",
-	"JLT",
-	"JMI",
-	"JNE",
-	"JOC",
-	"JOS",
-	"JPC",
-	"JPL",
-	"JPS",
-	"LAHF",
-	"LARL",
-	"LARW",
-	"LEAL",
-	"LEAW",
-	"LEAVEL",
-	"LEAVEW",
-	"LOCK",
-	"LODSB",
-	"LODSL",
-	"LODSW",
-	"LONG",
-	"LOOP",
-	"LOOPEQ",
-	"LOOPNE",
-	"LSLL",
-	"LSLW",
-	"MOVB",
-	"MOVL",
-	"MOVW",
-	"MOVBLSX",
-	"MOVBLZX",
-	"MOVBQSX",
-	"MOVBQZX",
-	"MOVBWSX",
-	"MOVBWZX",
-	"MOVWLSX",
-	"MOVWLZX",
-	"MOVWQSX",
-	"MOVWQZX",
-	"MOVSB",
-	"MOVSL",
-	"MOVSW",
-	"MULB",
-	"MULL",
-	"MULW",
-	"NEGB",
-	"NEGL",
-	"NEGW",
-	"NOTB",
-	"NOTL",
-	"NOTW",
-	"ORB",
-	"ORL",
-	"ORW",
-	"OUTB",
-	"OUTL",
-	"OUTW",
-	"OUTSB",
-	"OUTSL",
-	"OUTSW",
-	"PAUSE",
-	"POPAL",
-	"POPAW",
-	"POPCNTW",
-	"POPCNTL",
-	"POPCNTQ",
-	"POPFL",
-	"POPFW",
-	"POPL",
-	"POPW",
-	"PUSHAL",
-	"PUSHAW",
-	"PUSHFL",
-	"PUSHFW",
-	"PUSHL",
-	"PUSHW",
-	"RCLB",
-	"RCLL",
-	"RCLW",
-	"RCRB",
-	"RCRL",
-	"RCRW",
-	"REP",
-	"REPN",
-	"ROLB",
-	"ROLL",
-	"ROLW",
-	"RORB",
-	"RORL",
-	"RORW",
-	"SAHF",
-	"SALB",
-	"SALL",
-	"SALW",
-	"SARB",
-	"SARL",
-	"SARW",
-	"SBBB",
-	"SBBL",
-	"SBBW",
-	"SCASB",
-	"SCASL",
-	"SCASW",
-	"SETCC",
-	"SETCS",
-	"SETEQ",
-	"SETGE",
-	"SETGT",
-	"SETHI",
-	"SETLE",
-	"SETLS",
-	"SETLT",
-	"SETMI",
-	"SETNE",
-	"SETOC",
-	"SETOS",
-	"SETPC",
-	"SETPL",
-	"SETPS",
-	"CDQ",
-	"CWD",
-	"SHLB",
-	"SHLL",
-	"SHLW",
-	"SHRB",
-	"SHRL",
-	"SHRW",
-	"STC",
-	"STD",
-	"STI",
-	"STOSB",
-	"STOSL",
-	"STOSW",
-	"SUBB",
-	"SUBL",
-	"SUBW",
-	"SYSCALL",
-	"TESTB",
-	"TESTL",
-	"TESTW",
-	"VERR",
-	"VERW",
-	"WAIT",
-	"WORD",
-	"XCHGB",
-	"XCHGL",
-	"XCHGW",
-	"XLAT",
-	"XORB",
-	"XORL",
-	"XORW",
-	"FMOVB",
-	"FMOVBP",
-	"FMOVD",
-	"FMOVDP",
-	"FMOVF",
-	"FMOVFP",
-	"FMOVL",
-	"FMOVLP",
-	"FMOVV",
-	"FMOVVP",
-	"FMOVW",
-	"FMOVWP",
-	"FMOVX",
-	"FMOVXP",
-	"FCOMD",
-	"FCOMDP",
-	"FCOMDPP",
-	"FCOMF",
-	"FCOMFP",
-	"FCOML",
-	"FCOMLP",
-	"FCOMW",
-	"FCOMWP",
-	"FUCOM",
-	"FUCOMP",
-	"FUCOMPP",
-	"FADDDP",
-	"FADDW",
-	"FADDL",
-	"FADDF",
-	"FADDD",
-	"FMULDP",
-	"FMULW",
-	"FMULL",
-	"FMULF",
-	"FMULD",
-	"FSUBDP",
-	"FSUBW",
-	"FSUBL",
-	"FSUBF",
-	"FSUBD",
-	"FSUBRDP",
-	"FSUBRW",
-	"FSUBRL",
-	"FSUBRF",
-	"FSUBRD",
-	"FDIVDP",
-	"FDIVW",
-	"FDIVL",
-	"FDIVF",
-	"FDIVD",
-	"FDIVRDP",
-	"FDIVRW",
-	"FDIVRL",
-	"FDIVRF",
-	"FDIVRD",
-	"FXCHD",
-	"FFREE",
-	"FLDCW",
-	"FLDENV",
-	"FRSTOR",
-	"FSAVE",
-	"FSTCW",
-	"FSTENV",
-	"FSTSW",
-	"F2XM1",
-	"FABS",
-	"FCHS",
-	"FCLEX",
-	"FCOS",
-	"FDECSTP",
-	"FINCSTP",
-	"FINIT",
-	"FLD1",
-	"FLDL2E",
-	"FLDL2T",
-	"FLDLG2",
-	"FLDLN2",
-	"FLDPI",
-	"FLDZ",
-	"FNOP",
-	"FPATAN",
-	"FPREM",
-	"FPREM1",
-	"FPTAN",
-	"FRNDINT",
-	"FSCALE",
-	"FSIN",
-	"FSINCOS",
-	"FSQRT",
-	"FTST",
-	"FXAM",
-	"FXTRACT",
-	"FYL2X",
-	"FYL2XP1",
-	"CMPXCHGB",
-	"CMPXCHGL",
-	"CMPXCHGW",
-	"CMPXCHG8B",
-	"CPUID",
-	"INVD",
-	"INVLPG",
-	"LFENCE",
-	"MFENCE",
-	"MOVNTIL",
-	"RDMSR",
-	"RDPMC",
-	"RDTSC",
-	"RSM",
-	"SFENCE",
-	"SYSRET",
-	"WBINVD",
-	"WRMSR",
-	"XADDB",
-	"XADDL",
-	"XADDW",
-	"CMOVLCC",
-	"CMOVLCS",
-	"CMOVLEQ",
-	"CMOVLGE",
-	"CMOVLGT",
-	"CMOVLHI",
-	"CMOVLLE",
-	"CMOVLLS",
-	"CMOVLLT",
-	"CMOVLMI",
-	"CMOVLNE",
-	"CMOVLOC",
-	"CMOVLOS",
-	"CMOVLPC",
-	"CMOVLPL",
-	"CMOVLPS",
-	"CMOVQCC",
-	"CMOVQCS",
-	"CMOVQEQ",
-	"CMOVQGE",
-	"CMOVQGT",
-	"CMOVQHI",
-	"CMOVQLE",
-	"CMOVQLS",
-	"CMOVQLT",
-	"CMOVQMI",
-	"CMOVQNE",
-	"CMOVQOC",
-	"CMOVQOS",
-	"CMOVQPC",
-	"CMOVQPL",
-	"CMOVQPS",
-	"CMOVWCC",
-	"CMOVWCS",
-	"CMOVWEQ",
-	"CMOVWGE",
-	"CMOVWGT",
-	"CMOVWHI",
-	"CMOVWLE",
-	"CMOVWLS",
-	"CMOVWLT",
-	"CMOVWMI",
-	"CMOVWNE",
-	"CMOVWOC",
-	"CMOVWOS",
-	"CMOVWPC",
-	"CMOVWPL",
-	"CMOVWPS",
-	"ADCQ",
-	"ADDQ",
-	"ANDQ",
-	"BSFQ",
-	"BSRQ",
-	"BTCQ",
-	"BTQ",
-	"BTRQ",
-	"BTSQ",
-	"CMPQ",
-	"CMPSQ",
-	"CMPXCHGQ",
-	"CQO",
-	"DIVQ",
-	"IDIVQ",
-	"IMULQ",
-	"IRETQ",
-	"JCXZQ",
-	"LEAQ",
-	"LEAVEQ",
-	"LODSQ",
-	"MOVQ",
-	"MOVLQSX",
-	"MOVLQZX",
-	"MOVNTIQ",
-	"MOVSQ",
-	"MULQ",
-	"NEGQ",
-	"NOTQ",
-	"ORQ",
-	"POPFQ",
-	"POPQ",
-	"PUSHFQ",
-	"PUSHQ",
-	"RCLQ",
-	"RCRQ",
-	"ROLQ",
-	"RORQ",
-	"QUAD",
-	"SALQ",
-	"SARQ",
-	"SBBQ",
-	"SCASQ",
-	"SHLQ",
-	"SHRQ",
-	"STOSQ",
-	"SUBQ",
-	"TESTQ",
-	"XADDQ",
-	"XCHGQ",
-	"XORQ",
-	"XGETBV",
-	"ADDPD",
-	"ADDPS",
-	"ADDSD",
-	"ADDSS",
-	"ANDNL",
-	"ANDNQ",
-	"ANDNPD",
-	"ANDNPS",
-	"ANDPD",
-	"ANDPS",
-	"BEXTRL",
-	"BEXTRQ",
-	"BLSIL",
-	"BLSIQ",
-	"BLSMSKL",
-	"BLSMSKQ",
-	"BLSRL",
-	"BLSRQ",
-	"BZHIL",
-	"BZHIQ",
-	"CMPPD",
-	"CMPPS",
-	"CMPSD",
-	"CMPSS",
-	"COMISD",
-	"COMISS",
-	"CVTPD2PL",
-	"CVTPD2PS",
-	"CVTPL2PD",
-	"CVTPL2PS",
-	"CVTPS2PD",
-	"CVTPS2PL",
-	"CVTSD2SL",
-	"CVTSD2SQ",
-	"CVTSD2SS",
-	"CVTSL2SD",
-	"CVTSL2SS",
-	"CVTSQ2SD",
-	"CVTSQ2SS",
-	"CVTSS2SD",
-	"CVTSS2SL",
-	"CVTSS2SQ",
-	"CVTTPD2PL",
-	"CVTTPS2PL",
-	"CVTTSD2SL",
-	"CVTTSD2SQ",
-	"CVTTSS2SL",
-	"CVTTSS2SQ",
-	"DIVPD",
-	"DIVPS",
-	"DIVSD",
-	"DIVSS",
-	"EMMS",
-	"FXRSTOR",
-	"FXRSTOR64",
-	"FXSAVE",
-	"FXSAVE64",
-	"LDDQU",
-	"LDMXCSR",
-	"MASKMOVOU",
-	"MASKMOVQ",
-	"MAXPD",
-	"MAXPS",
-	"MAXSD",
-	"MAXSS",
-	"MINPD",
-	"MINPS",
-	"MINSD",
-	"MINSS",
-	"MOVAPD",
-	"MOVAPS",
-	"MOVOU",
-	"MOVHLPS",
-	"MOVHPD",
-	"MOVHPS",
-	"MOVLHPS",
-	"MOVLPD",
-	"MOVLPS",
-	"MOVMSKPD",
-	"MOVMSKPS",
-	"MOVNTO",
-	"MOVNTPD",
-	"MOVNTPS",
-	"MOVNTQ",
-	"MOVO",
-	"MOVQOZX",
-	"MOVSD",
-	"MOVSS",
-	"MOVUPD",
-	"MOVUPS",
-	"MULPD",
-	"MULPS",
-	"MULSD",
-	"MULSS",
-	"MULXL",
-	"MULXQ",
-	"ORPD",
-	"ORPS",
-	"PACKSSLW",
-	"PACKSSWB",
-	"PACKUSWB",
-	"PADDB",
-	"PADDL",
-	"PADDQ",
-	"PADDSB",
-	"PADDSW",
-	"PADDUSB",
-	"PADDUSW",
-	"PADDW",
-	"PAND",
-	"PANDN",
-	"PAVGB",
-	"PAVGW",
-	"PCMPEQB",
-	"PCMPEQL",
-	"PCMPEQW",
-	"PCMPGTB",
-	"PCMPGTL",
-	"PCMPGTW",
-	"PDEPL",
-	"PDEPQ",
-	"PEXTL",
-	"PEXTQ",
-	"PEXTRB",
-	"PEXTRD",
-	"PEXTRQ",
-	"PEXTRW",
-	"PHADDD",
-	"PHADDSW",
-	"PHADDW",
-	"PHMINPOSUW",
-	"PHSUBD",
-	"PHSUBSW",
-	"PHSUBW",
-	"PINSRB",
-	"PINSRD",
-	"PINSRQ",
-	"PINSRW",
-	"PMADDWL",
-	"PMAXSW",
-	"PMAXUB",
-	"PMINSW",
-	"PMINUB",
-	"PMOVMSKB",
-	"PMOVSXBD",
-	"PMOVSXBQ",
-	"PMOVSXBW",
-	"PMOVSXDQ",
-	"PMOVSXWD",
-	"PMOVSXWQ",
-	"PMOVZXBD",
-	"PMOVZXBQ",
-	"PMOVZXBW",
-	"PMOVZXDQ",
-	"PMOVZXWD",
-	"PMOVZXWQ",
-	"PMULDQ",
-	"PMULHUW",
-	"PMULHW",
-	"PMULLD",
-	"PMULLW",
-	"PMULULQ",
-	"POR",
-	"PSADBW",
-	"PSHUFB",
-	"PSHUFHW",
-	"PSHUFL",
-	"PSHUFLW",
-	"PSHUFW",
-	"PSLLL",
-	"PSLLO",
-	"PSLLQ",
-	"PSLLW",
-	"PSRAL",
-	"PSRAW",
-	"PSRLL",
-	"PSRLO",
-	"PSRLQ",
-	"PSRLW",
-	"PSUBB",
-	"PSUBL",
-	"PSUBQ",
-	"PSUBSB",
-	"PSUBSW",
-	"PSUBUSB",
-	"PSUBUSW",
-	"PSUBW",
-	"PUNPCKHBW",
-	"PUNPCKHLQ",
-	"PUNPCKHQDQ",
-	"PUNPCKHWL",
-	"PUNPCKLBW",
-	"PUNPCKLLQ",
-	"PUNPCKLQDQ",
-	"PUNPCKLWL",
-	"PXOR",
-	"RCPPS",
-	"RCPSS",
-	"RSQRTPS",
-	"RSQRTSS",
-	"SARXL",
-	"SARXQ",
-	"SHLXL",
-	"SHLXQ",
-	"SHRXL",
-	"SHRXQ",
-	"SHUFPD",
-	"SHUFPS",
-	"SQRTPD",
-	"SQRTPS",
-	"SQRTSD",
-	"SQRTSS",
-	"STMXCSR",
-	"SUBPD",
-	"SUBPS",
-	"SUBSD",
-	"SUBSS",
-	"UCOMISD",
-	"UCOMISS",
-	"UNPCKHPD",
-	"UNPCKHPS",
-	"UNPCKLPD",
-	"UNPCKLPS",
-	"XORPD",
-	"XORPS",
-	"PCMPESTRI",
-	"RETFW",
-	"RETFL",
-	"RETFQ",
-	"SWAPGS",
-	"MODE",
-	"CRC32B",
-	"CRC32Q",
-	"IMUL3Q",
-	"PREFETCHT0",
-	"PREFETCHT1",
-	"PREFETCHT2",
-	"PREFETCHNTA",
-	"MOVQL",
-	"BSWAPL",
-	"BSWAPQ",
-	"AESENC",
-	"AESENCLAST",
-	"AESDEC",
-	"AESDECLAST",
-	"AESIMC",
-	"AESKEYGENASSIST",
-	"ROUNDPS",
-	"ROUNDSS",
-	"ROUNDPD",
-	"ROUNDSD",
-	"MOVDDUP",
-	"MOVSHDUP",
-	"MOVSLDUP",
-	"PSHUFD",
-	"PCLMULQDQ",
-	"VZEROUPPER",
-	"VMOVDQU",
-	"VMOVNTDQ",
-	"VMOVDQA",
-	"VPCMPEQB",
-	"VPXOR",
-	"VPMOVMSKB",
-	"VPAND",
-	"VPTEST",
-	"VPBROADCASTB",
-	"VPSHUFB",
-	"VPSHUFD",
-	"VPERM2F128",
-	"VPALIGNR",
-	"VPADDQ",
-	"VPADDD",
-	"VPSRLDQ",
-	"VPSLLDQ",
-	"VPSRLQ",
-	"VPSLLQ",
-	"VPSRLD",
-	"VPSLLD",
-	"VPOR",
-	"VPBLENDD",
-	"VINSERTI128",
-	"VPERM2I128",
-	"RORXL",
-	"RORXQ",
-	"VBROADCASTSS",
-	"VBROADCASTSD",
-	"VMOVDDUP",
-	"VMOVSHDUP",
-	"VMOVSLDUP",
-	"JCXZW",
-	"FCMOVCC",
-	"FCMOVCS",
-	"FCMOVEQ",
-	"FCMOVHI",
-	"FCMOVLS",
-	"FCMOVNE",
-	"FCMOVNU",
-	"FCMOVUN",
-	"FCOMI",
-	"FCOMIP",
-	"FUCOMI",
-	"FUCOMIP",
-	"XACQUIRE",
-	"XRELEASE",
-	"XBEGIN",
-	"XEND",
-	"XABORT",
-	"XTEST",
-	"LAST",
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/asm6.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/asm6.go
deleted file mode 100644
index 2688aa2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/asm6.go
+++ /dev/null
@@ -1,4538 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/asm6.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/asm6.go:1
-// Inferno utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"encoding/binary"
-	"fmt"
-	"log"
-	"strings"
-)
-
-// Instruction layout.
-
-const (
-	// Loop alignment constants:
-	// want to align loop entry to LoopAlign-byte boundary,
-	// and willing to insert at most MaxLoopPad bytes of NOP to do so.
-	// We define a loop entry as the target of a backward jump.
-	//
-	// gcc uses MaxLoopPad = 10 for its 'generic x86-64' config,
-	// and it aligns all jump targets, not just backward jump targets.
-	//
-	// As of 6/1/2012, the effect of setting MaxLoopPad = 10 here
-	// is very slight but negative, so the alignment is disabled by
-	// setting MaxLoopPad = 0. The code is here for reference and
-	// for future experiments.
-	//
-	LoopAlign  = 16
-	MaxLoopPad = 0
-	funcAlign  = 16
-)
-
-type Optab struct {
-	as     obj.As
-	ytab   []ytab
-	prefix uint8
-	op     [23]uint8
-}
-
-type ytab struct {
-	from    uint8
-	from3   uint8
-	to      uint8
-	zcase   uint8
-	zoffset uint8
-}
-
-type Movtab struct {
-	as   obj.As
-	ft   uint8
-	f3t  uint8
-	tt   uint8
-	code uint8
-	op   [4]uint8
-}
-
-const (
-	Yxxx = iota
-	Ynone
-	Yi0 // $0
-	Yi1 // $1
-	Yi8 // $x, x fits in int8
-	Yu8 // $x, x fits in uint8
-	Yu7 // $x, x in 0..127 (fits in both int8 and uint8)
-	Ys32
-	Yi32
-	Yi64
-	Yiauto
-	Yal
-	Ycl
-	Yax
-	Ycx
-	Yrb
-	Yrl
-	Yrl32 // Yrl on 32-bit system
-	Yrf
-	Yf0
-	Yrx
-	Ymb
-	Yml
-	Ym
-	Ybr
-	Ycs
-	Yss
-	Yds
-	Yes
-	Yfs
-	Ygs
-	Ygdtr
-	Yidtr
-	Yldtr
-	Ymsw
-	Ytask
-	Ycr0
-	Ycr1
-	Ycr2
-	Ycr3
-	Ycr4
-	Ycr5
-	Ycr6
-	Ycr7
-	Ycr8
-	Ydr0
-	Ydr1
-	Ydr2
-	Ydr3
-	Ydr4
-	Ydr5
-	Ydr6
-	Ydr7
-	Ytr0
-	Ytr1
-	Ytr2
-	Ytr3
-	Ytr4
-	Ytr5
-	Ytr6
-	Ytr7
-	Ymr
-	Ymm
-	Yxr
-	Yxm
-	Yyr
-	Yym
-	Ytls
-	Ytextsize
-	Yindir
-	Ymax
-)
-
-const (
-	Zxxx = iota
-	Zlit
-	Zlitm_r
-	Z_rp
-	Zbr
-	Zcall
-	Zcallcon
-	Zcallduff
-	Zcallind
-	Zcallindreg
-	Zib_
-	Zib_rp
-	Zibo_m
-	Zibo_m_xm
-	Zil_
-	Zil_rp
-	Ziq_rp
-	Zilo_m
-	Zjmp
-	Zjmpcon
-	Zloop
-	Zo_iw
-	Zm_o
-	Zm_r
-	Zm2_r
-	Zm_r_xm
-	Zm_r_i_xm
-	Zm_r_xm_nr
-	Zr_m_xm_nr
-	Zibm_r /* mmx1,mmx2/mem64,imm8 */
-	Zibr_m
-	Zmb_r
-	Zaut_r
-	Zo_m
-	Zo_m64
-	Zpseudo
-	Zr_m
-	Zr_m_xm
-	Zrp_
-	Z_ib
-	Z_il
-	Zm_ibo
-	Zm_ilo
-	Zib_rr
-	Zil_rr
-	Zclr
-	Zbyte
-	Zvex_rm_v_r
-	Zvex_r_v_rm
-	Zvex_v_rm_r
-	Zvex_i_rm_r
-	Zvex_i_r_v
-	Zvex_i_rm_v_r
-	Zmax
-)
-
-const (
-	Px   = 0
-	Px1  = 1    // symbolic; exact value doesn't matter
-	P32  = 0x32 /* 32-bit only */
-	Pe   = 0x66 /* operand escape */
-	Pm   = 0x0f /* 2byte opcode escape */
-	Pq   = 0xff /* both escapes: 66 0f */
-	Pb   = 0xfe /* byte operands */
-	Pf2  = 0xf2 /* xmm escape 1: f2 0f */
-	Pf3  = 0xf3 /* xmm escape 2: f3 0f */
-	Pef3 = 0xf5 /* xmm escape 2 with 16-bit prefix: 66 f3 0f */
-	Pq3  = 0x67 /* xmm escape 3: 66 48 0f */
-	Pq4  = 0x68 /* xmm escape 4: 66 0F 38 */
-	Pfw  = 0xf4 /* Pf3 with Rex.w: f3 48 0f */
-	Pw   = 0x48 /* Rex.w */
-	Pw8  = 0x90 // symbolic; exact value doesn't matter
-	Py   = 0x80 /* defaults to 64-bit mode */
-	Py1  = 0x81 // symbolic; exact value doesn't matter
-	Py3  = 0x83 // symbolic; exact value doesn't matter
-	Pvex = 0x84 // symbolic: exact value doesn't matter
-
-	Rxw = 1 << 3 /* =1, 64-bit operand size */
-	Rxr = 1 << 2 /* extend modrm reg */
-	Rxx = 1 << 1 /* extend sib index */
-	Rxb = 1 << 0 /* extend modrm r/m, sib base, or opcode reg */
-)
-
-const (
-	// Encoding for VEX prefix in tables.
-	// The P, L, and W fields are chosen to match
-	// their eventual locations in the VEX prefix bytes.
-
-	// P field - 2 bits
-	vex66 = 1 << 0
-	vexF3 = 2 << 0
-	vexF2 = 3 << 0
-	// L field - 1 bit
-	vexLZ  = 0 << 2
-	vexLIG = 0 << 2
-	vex128 = 0 << 2
-	vex256 = 1 << 2
-	// W field - 1 bit
-	vexWIG = 0 << 7
-	vexW0  = 0 << 7
-	vexW1  = 1 << 7
-	// M field - 5 bits, but mostly reserved; we can store up to 4
-	vex0F   = 1 << 3
-	vex0F38 = 2 << 3
-	vex0F3A = 3 << 3
-
-	// Combinations used in the manual.
-	VEX_128_0F_WIG      = vex128 | vex0F | vexWIG
-	VEX_128_66_0F_W0    = vex128 | vex66 | vex0F | vexW0
-	VEX_128_66_0F_W1    = vex128 | vex66 | vex0F | vexW1
-	VEX_128_66_0F_WIG   = vex128 | vex66 | vex0F | vexWIG
-	VEX_128_66_0F38_W0  = vex128 | vex66 | vex0F38 | vexW0
-	VEX_128_66_0F38_W1  = vex128 | vex66 | vex0F38 | vexW1
-	VEX_128_66_0F38_WIG = vex128 | vex66 | vex0F38 | vexWIG
-	VEX_128_66_0F3A_W0  = vex128 | vex66 | vex0F3A | vexW0
-	VEX_128_66_0F3A_W1  = vex128 | vex66 | vex0F3A | vexW1
-	VEX_128_66_0F3A_WIG = vex128 | vex66 | vex0F3A | vexWIG
-	VEX_128_F2_0F_WIG   = vex128 | vexF2 | vex0F | vexWIG
-	VEX_128_F3_0F_WIG   = vex128 | vexF3 | vex0F | vexWIG
-	VEX_256_66_0F_WIG   = vex256 | vex66 | vex0F | vexWIG
-	VEX_256_66_0F38_W0  = vex256 | vex66 | vex0F38 | vexW0
-	VEX_256_66_0F38_W1  = vex256 | vex66 | vex0F38 | vexW1
-	VEX_256_66_0F38_WIG = vex256 | vex66 | vex0F38 | vexWIG
-	VEX_256_66_0F3A_W0  = vex256 | vex66 | vex0F3A | vexW0
-	VEX_256_66_0F3A_W1  = vex256 | vex66 | vex0F3A | vexW1
-	VEX_256_66_0F3A_WIG = vex256 | vex66 | vex0F3A | vexWIG
-	VEX_256_F2_0F_WIG   = vex256 | vexF2 | vex0F | vexWIG
-	VEX_256_F3_0F_WIG   = vex256 | vexF3 | vex0F | vexWIG
-	VEX_LIG_0F_WIG      = vexLIG | vex0F | vexWIG
-	VEX_LIG_66_0F_WIG   = vexLIG | vex66 | vex0F | vexWIG
-	VEX_LIG_66_0F38_W0  = vexLIG | vex66 | vex0F38 | vexW0
-	VEX_LIG_66_0F38_W1  = vexLIG | vex66 | vex0F38 | vexW1
-	VEX_LIG_66_0F3A_WIG = vexLIG | vex66 | vex0F3A | vexWIG
-	VEX_LIG_F2_0F_W0    = vexLIG | vexF2 | vex0F | vexW0
-	VEX_LIG_F2_0F_W1    = vexLIG | vexF2 | vex0F | vexW1
-	VEX_LIG_F2_0F_WIG   = vexLIG | vexF2 | vex0F | vexWIG
-	VEX_LIG_F3_0F_W0    = vexLIG | vexF3 | vex0F | vexW0
-	VEX_LIG_F3_0F_W1    = vexLIG | vexF3 | vex0F | vexW1
-	VEX_LIG_F3_0F_WIG   = vexLIG | vexF3 | vex0F | vexWIG
-	VEX_LZ_0F_WIG       = vexLZ | vex0F | vexWIG
-	VEX_LZ_0F38_W0      = vexLZ | vex0F38 | vexW0
-	VEX_LZ_0F38_W1      = vexLZ | vex0F38 | vexW1
-	VEX_LZ_66_0F38_W0   = vexLZ | vex66 | vex0F38 | vexW0
-	VEX_LZ_66_0F38_W1   = vexLZ | vex66 | vex0F38 | vexW1
-	VEX_LZ_F2_0F38_W0   = vexLZ | vexF2 | vex0F38 | vexW0
-	VEX_LZ_F2_0F38_W1   = vexLZ | vexF2 | vex0F38 | vexW1
-	VEX_LZ_F2_0F3A_W0   = vexLZ | vexF2 | vex0F3A | vexW0
-	VEX_LZ_F2_0F3A_W1   = vexLZ | vexF2 | vex0F3A | vexW1
-	VEX_LZ_F3_0F38_W0   = vexLZ | vexF3 | vex0F38 | vexW0
-	VEX_LZ_F3_0F38_W1   = vexLZ | vexF3 | vex0F38 | vexW1
-)
-
-var ycover [Ymax * Ymax]uint8
-
-var reg [MAXREG]int
-
-var regrex [MAXREG + 1]int
-
-var ynone = []ytab{
-	{Ynone, Ynone, Ynone, Zlit, 1},
-}
-
-var ytext = []ytab{
-	{Ymb, Ynone, Ytextsize, Zpseudo, 0},
-	{Ymb, Yi32, Ytextsize, Zpseudo, 1},
-}
-
-var ynop = []ytab{
-	{Ynone, Ynone, Ynone, Zpseudo, 0},
-	{Ynone, Ynone, Yiauto, Zpseudo, 0},
-	{Ynone, Ynone, Yml, Zpseudo, 0},
-	{Ynone, Ynone, Yrf, Zpseudo, 0},
-	{Ynone, Ynone, Yxr, Zpseudo, 0},
-	{Yiauto, Ynone, Ynone, Zpseudo, 0},
-	{Yml, Ynone, Ynone, Zpseudo, 0},
-	{Yrf, Ynone, Ynone, Zpseudo, 0},
-	{Yxr, Ynone, Ynone, Zpseudo, 1},
-}
-
-var yfuncdata = []ytab{
-	{Yi32, Ynone, Ym, Zpseudo, 0},
-}
-
-var ypcdata = []ytab{
-	{Yi32, Ynone, Yi32, Zpseudo, 0},
-}
-
-var yxorb = []ytab{
-	{Yi32, Ynone, Yal, Zib_, 1},
-	{Yi32, Ynone, Ymb, Zibo_m, 2},
-	{Yrb, Ynone, Ymb, Zr_m, 1},
-	{Ymb, Ynone, Yrb, Zm_r, 1},
-}
-
-var yaddl = []ytab{
-	{Yi8, Ynone, Yml, Zibo_m, 2},
-	{Yi32, Ynone, Yax, Zil_, 1},
-	{Yi32, Ynone, Yml, Zilo_m, 2},
-	{Yrl, Ynone, Yml, Zr_m, 1},
-	{Yml, Ynone, Yrl, Zm_r, 1},
-}
-
-var yincl = []ytab{
-	{Ynone, Ynone, Yrl, Z_rp, 1},
-	{Ynone, Ynone, Yml, Zo_m, 2},
-}
-
-var yincq = []ytab{
-	{Ynone, Ynone, Yml, Zo_m, 2},
-}
-
-var ycmpb = []ytab{
-	{Yal, Ynone, Yi32, Z_ib, 1},
-	{Ymb, Ynone, Yi32, Zm_ibo, 2},
-	{Ymb, Ynone, Yrb, Zm_r, 1},
-	{Yrb, Ynone, Ymb, Zr_m, 1},
-}
-
-var ycmpl = []ytab{
-	{Yml, Ynone, Yi8, Zm_ibo, 2},
-	{Yax, Ynone, Yi32, Z_il, 1},
-	{Yml, Ynone, Yi32, Zm_ilo, 2},
-	{Yml, Ynone, Yrl, Zm_r, 1},
-	{Yrl, Ynone, Yml, Zr_m, 1},
-}
-
-var yshb = []ytab{
-	{Yi1, Ynone, Ymb, Zo_m, 2},
-	{Yi32, Ynone, Ymb, Zibo_m, 2},
-	{Ycx, Ynone, Ymb, Zo_m, 2},
-}
-
-var yshl = []ytab{
-	{Yi1, Ynone, Yml, Zo_m, 2},
-	{Yi32, Ynone, Yml, Zibo_m, 2},
-	{Ycl, Ynone, Yml, Zo_m, 2},
-	{Ycx, Ynone, Yml, Zo_m, 2},
-}
-
-var ytestl = []ytab{
-	{Yi32, Ynone, Yax, Zil_, 1},
-	{Yi32, Ynone, Yml, Zilo_m, 2},
-	{Yrl, Ynone, Yml, Zr_m, 1},
-	{Yml, Ynone, Yrl, Zm_r, 1},
-}
-
-var ymovb = []ytab{
-	{Yrb, Ynone, Ymb, Zr_m, 1},
-	{Ymb, Ynone, Yrb, Zm_r, 1},
-	{Yi32, Ynone, Yrb, Zib_rp, 1},
-	{Yi32, Ynone, Ymb, Zibo_m, 2},
-}
-
-var ybtl = []ytab{
-	{Yi8, Ynone, Yml, Zibo_m, 2},
-	{Yrl, Ynone, Yml, Zr_m, 1},
-}
-
-var ymovw = []ytab{
-	{Yrl, Ynone, Yml, Zr_m, 1},
-	{Yml, Ynone, Yrl, Zm_r, 1},
-	{Yi0, Ynone, Yrl, Zclr, 1},
-	{Yi32, Ynone, Yrl, Zil_rp, 1},
-	{Yi32, Ynone, Yml, Zilo_m, 2},
-	{Yiauto, Ynone, Yrl, Zaut_r, 2},
-}
-
-var ymovl = []ytab{
-	{Yrl, Ynone, Yml, Zr_m, 1},
-	{Yml, Ynone, Yrl, Zm_r, 1},
-	{Yi0, Ynone, Yrl, Zclr, 1},
-	{Yi32, Ynone, Yrl, Zil_rp, 1},
-	{Yi32, Ynone, Yml, Zilo_m, 2},
-	{Yml, Ynone, Ymr, Zm_r_xm, 1}, // MMX MOVD
-	{Ymr, Ynone, Yml, Zr_m_xm, 1}, // MMX MOVD
-	{Yml, Ynone, Yxr, Zm_r_xm, 2}, // XMM MOVD (32 bit)
-	{Yxr, Ynone, Yml, Zr_m_xm, 2}, // XMM MOVD (32 bit)
-	{Yiauto, Ynone, Yrl, Zaut_r, 2},
-}
-
-var yret = []ytab{
-	{Ynone, Ynone, Ynone, Zo_iw, 1},
-	{Yi32, Ynone, Ynone, Zo_iw, 1},
-}
-
-var ymovq = []ytab{
-	// valid in 32-bit mode
-	{Ym, Ynone, Ymr, Zm_r_xm_nr, 1},  // 0x6f MMX MOVQ (shorter encoding)
-	{Ymr, Ynone, Ym, Zr_m_xm_nr, 1},  // 0x7f MMX MOVQ
-	{Yxr, Ynone, Ymr, Zm_r_xm_nr, 2}, // Pf2, 0xd6 MOVDQ2Q
-	{Yxm, Ynone, Yxr, Zm_r_xm_nr, 2}, // Pf3, 0x7e MOVQ xmm1/m64 -> xmm2
-	{Yxr, Ynone, Yxm, Zr_m_xm_nr, 2}, // Pe, 0xd6 MOVQ xmm1 -> xmm2/m64
-
-	// valid only in 64-bit mode, usually with 64-bit prefix
-	{Yrl, Ynone, Yml, Zr_m, 1},      // 0x89
-	{Yml, Ynone, Yrl, Zm_r, 1},      // 0x8b
-	{Yi0, Ynone, Yrl, Zclr, 1},      // 0x31
-	{Ys32, Ynone, Yrl, Zilo_m, 2},   // 32 bit signed 0xc7,(0)
-	{Yi64, Ynone, Yrl, Ziq_rp, 1},   // 0xb8 -- 32/64 bit immediate
-	{Yi32, Ynone, Yml, Zilo_m, 2},   // 0xc7,(0)
-	{Ymm, Ynone, Ymr, Zm_r_xm, 1},   // 0x6e MMX MOVD
-	{Ymr, Ynone, Ymm, Zr_m_xm, 1},   // 0x7e MMX MOVD
-	{Yml, Ynone, Yxr, Zm_r_xm, 2},   // Pe, 0x6e MOVD xmm load
-	{Yxr, Ynone, Yml, Zr_m_xm, 2},   // Pe, 0x7e MOVD xmm store
-	{Yiauto, Ynone, Yrl, Zaut_r, 1}, // 0 built-in LEAQ
-}
-
-var ym_rl = []ytab{
-	{Ym, Ynone, Yrl, Zm_r, 1},
-}
-
-var yrl_m = []ytab{
-	{Yrl, Ynone, Ym, Zr_m, 1},
-}
-
-var ymb_rl = []ytab{
-	{Ymb, Ynone, Yrl, Zmb_r, 1},
-}
-
-var yml_rl = []ytab{
-	{Yml, Ynone, Yrl, Zm_r, 1},
-}
-
-var yrl_ml = []ytab{
-	{Yrl, Ynone, Yml, Zr_m, 1},
-}
-
-var yml_mb = []ytab{
-	{Yrb, Ynone, Ymb, Zr_m, 1},
-	{Ymb, Ynone, Yrb, Zm_r, 1},
-}
-
-var yrb_mb = []ytab{
-	{Yrb, Ynone, Ymb, Zr_m, 1},
-}
-
-var yxchg = []ytab{
-	{Yax, Ynone, Yrl, Z_rp, 1},
-	{Yrl, Ynone, Yax, Zrp_, 1},
-	{Yrl, Ynone, Yml, Zr_m, 1},
-	{Yml, Ynone, Yrl, Zm_r, 1},
-}
-
-var ydivl = []ytab{
-	{Yml, Ynone, Ynone, Zm_o, 2},
-}
-
-var ydivb = []ytab{
-	{Ymb, Ynone, Ynone, Zm_o, 2},
-}
-
-var yimul = []ytab{
-	{Yml, Ynone, Ynone, Zm_o, 2},
-	{Yi8, Ynone, Yrl, Zib_rr, 1},
-	{Yi32, Ynone, Yrl, Zil_rr, 1},
-	{Yml, Ynone, Yrl, Zm_r, 2},
-}
-
-var yimul3 = []ytab{
-	{Yi8, Yml, Yrl, Zibm_r, 2},
-}
-
-var ybyte = []ytab{
-	{Yi64, Ynone, Ynone, Zbyte, 1},
-}
-
-var yin = []ytab{
-	{Yi32, Ynone, Ynone, Zib_, 1},
-	{Ynone, Ynone, Ynone, Zlit, 1},
-}
-
-var yint = []ytab{
-	{Yi32, Ynone, Ynone, Zib_, 1},
-}
-
-var ypushl = []ytab{
-	{Yrl, Ynone, Ynone, Zrp_, 1},
-	{Ym, Ynone, Ynone, Zm_o, 2},
-	{Yi8, Ynone, Ynone, Zib_, 1},
-	{Yi32, Ynone, Ynone, Zil_, 1},
-}
-
-var ypopl = []ytab{
-	{Ynone, Ynone, Yrl, Z_rp, 1},
-	{Ynone, Ynone, Ym, Zo_m, 2},
-}
-
-var ybswap = []ytab{
-	{Ynone, Ynone, Yrl, Z_rp, 2},
-}
-
-var yscond = []ytab{
-	{Ynone, Ynone, Ymb, Zo_m, 2},
-}
-
-var yjcond = []ytab{
-	{Ynone, Ynone, Ybr, Zbr, 0},
-	{Yi0, Ynone, Ybr, Zbr, 0},
-	{Yi1, Ynone, Ybr, Zbr, 1},
-}
-
-var yloop = []ytab{
-	{Ynone, Ynone, Ybr, Zloop, 1},
-}
-
-var ycall = []ytab{
-	{Ynone, Ynone, Yml, Zcallindreg, 0},
-	{Yrx, Ynone, Yrx, Zcallindreg, 2},
-	{Ynone, Ynone, Yindir, Zcallind, 2},
-	{Ynone, Ynone, Ybr, Zcall, 0},
-	{Ynone, Ynone, Yi32, Zcallcon, 1},
-}
-
-var yduff = []ytab{
-	{Ynone, Ynone, Yi32, Zcallduff, 1},
-}
-
-var yjmp = []ytab{
-	{Ynone, Ynone, Yml, Zo_m64, 2},
-	{Ynone, Ynone, Ybr, Zjmp, 0},
-	{Ynone, Ynone, Yi32, Zjmpcon, 1},
-}
-
-var yfmvd = []ytab{
-	{Ym, Ynone, Yf0, Zm_o, 2},
-	{Yf0, Ynone, Ym, Zo_m, 2},
-	{Yrf, Ynone, Yf0, Zm_o, 2},
-	{Yf0, Ynone, Yrf, Zo_m, 2},
-}
-
-var yfmvdp = []ytab{
-	{Yf0, Ynone, Ym, Zo_m, 2},
-	{Yf0, Ynone, Yrf, Zo_m, 2},
-}
-
-var yfmvf = []ytab{
-	{Ym, Ynone, Yf0, Zm_o, 2},
-	{Yf0, Ynone, Ym, Zo_m, 2},
-}
-
-var yfmvx = []ytab{
-	{Ym, Ynone, Yf0, Zm_o, 2},
-}
-
-var yfmvp = []ytab{
-	{Yf0, Ynone, Ym, Zo_m, 2},
-}
-
-var yfcmv = []ytab{
-	{Yrf, Ynone, Yf0, Zm_o, 2},
-}
-
-var yfadd = []ytab{
-	{Ym, Ynone, Yf0, Zm_o, 2},
-	{Yrf, Ynone, Yf0, Zm_o, 2},
-	{Yf0, Ynone, Yrf, Zo_m, 2},
-}
-
-var yfxch = []ytab{
-	{Yf0, Ynone, Yrf, Zo_m, 2},
-	{Yrf, Ynone, Yf0, Zm_o, 2},
-}
-
-var ycompp = []ytab{
-	{Yf0, Ynone, Yrf, Zo_m, 2}, /* botch is really f0,f1 */
-}
-
-var ystsw = []ytab{
-	{Ynone, Ynone, Ym, Zo_m, 2},
-	{Ynone, Ynone, Yax, Zlit, 1},
-}
-
-var ysvrs = []ytab{
-	{Ynone, Ynone, Ym, Zo_m, 2},
-	{Ym, Ynone, Ynone, Zm_o, 2},
-}
-
-var ymm = []ytab{
-	{Ymm, Ynone, Ymr, Zm_r_xm, 1},
-	{Yxm, Ynone, Yxr, Zm_r_xm, 2},
-}
-
-var yxm = []ytab{
-	{Yxm, Ynone, Yxr, Zm_r_xm, 1},
-}
-
-var yxm_q4 = []ytab{
-	{Yxm, Ynone, Yxr, Zm_r, 1},
-}
-
-var yxcvm1 = []ytab{
-	{Yxm, Ynone, Yxr, Zm_r_xm, 2},
-	{Yxm, Ynone, Ymr, Zm_r_xm, 2},
-}
-
-var yxcvm2 = []ytab{
-	{Yxm, Ynone, Yxr, Zm_r_xm, 2},
-	{Ymm, Ynone, Yxr, Zm_r_xm, 2},
-}
-
-var yxr = []ytab{
-	{Yxr, Ynone, Yxr, Zm_r_xm, 1},
-}
-
-var yxr_ml = []ytab{
-	{Yxr, Ynone, Yml, Zr_m_xm, 1},
-}
-
-var ymr = []ytab{
-	{Ymr, Ynone, Ymr, Zm_r, 1},
-}
-
-var ymr_ml = []ytab{
-	{Ymr, Ynone, Yml, Zr_m_xm, 1},
-}
-
-var yxcmpi = []ytab{
-	{Yxm, Yxr, Yi8, Zm_r_i_xm, 2},
-}
-
-var yxmov = []ytab{
-	{Yxm, Ynone, Yxr, Zm_r_xm, 1},
-	{Yxr, Ynone, Yxm, Zr_m_xm, 1},
-}
-
-var yxcvfl = []ytab{
-	{Yxm, Ynone, Yrl, Zm_r_xm, 1},
-}
-
-var yxcvlf = []ytab{
-	{Yml, Ynone, Yxr, Zm_r_xm, 1},
-}
-
-var yxcvfq = []ytab{
-	{Yxm, Ynone, Yrl, Zm_r_xm, 2},
-}
-
-var yxcvqf = []ytab{
-	{Yml, Ynone, Yxr, Zm_r_xm, 2},
-}
-
-var yps = []ytab{
-	{Ymm, Ynone, Ymr, Zm_r_xm, 1},
-	{Yi8, Ynone, Ymr, Zibo_m_xm, 2},
-	{Yxm, Ynone, Yxr, Zm_r_xm, 2},
-	{Yi8, Ynone, Yxr, Zibo_m_xm, 3},
-}
-
-var yxrrl = []ytab{
-	{Yxr, Ynone, Yrl, Zm_r, 1},
-}
-
-var ymrxr = []ytab{
-	{Ymr, Ynone, Yxr, Zm_r, 1},
-	{Yxm, Ynone, Yxr, Zm_r_xm, 1},
-}
-
-var ymshuf = []ytab{
-	{Yi8, Ymm, Ymr, Zibm_r, 2},
-}
-
-var ymshufb = []ytab{
-	{Yxm, Ynone, Yxr, Zm2_r, 2},
-}
-
-var yxshuf = []ytab{
-	{Yu8, Yxm, Yxr, Zibm_r, 2},
-}
-
-var yextrw = []ytab{
-	{Yu8, Yxr, Yrl, Zibm_r, 2},
-}
-
-var yextr = []ytab{
-	{Yu8, Yxr, Ymm, Zibr_m, 3},
-}
-
-var yinsrw = []ytab{
-	{Yu8, Yml, Yxr, Zibm_r, 2},
-}
-
-var yinsr = []ytab{
-	{Yu8, Ymm, Yxr, Zibm_r, 3},
-}
-
-var ypsdq = []ytab{
-	{Yi8, Ynone, Yxr, Zibo_m, 2},
-}
-
-var ymskb = []ytab{
-	{Yxr, Ynone, Yrl, Zm_r_xm, 2},
-	{Ymr, Ynone, Yrl, Zm_r_xm, 1},
-}
-
-var ycrc32l = []ytab{
-	{Yml, Ynone, Yrl, Zlitm_r, 0},
-}
-
-var yprefetch = []ytab{
-	{Ym, Ynone, Ynone, Zm_o, 2},
-}
-
-var yaes = []ytab{
-	{Yxm, Ynone, Yxr, Zlitm_r, 2},
-}
-
-var yxbegin = []ytab{
-	{Ynone, Ynone, Ybr, Zjmp, 1},
-}
-
-var yxabort = []ytab{
-	{Yu8, Ynone, Ynone, Zib_, 1},
-}
-
-var ylddqu = []ytab{
-	{Ym, Ynone, Yxr, Zm_r, 1},
-}
-
-// VEX instructions that come in two forms:
-//	VTHING xmm2/m128, xmmV, xmm1
-//	VTHING ymm2/m256, ymmV, ymm1
-// The opcode array in the corresponding Optab entry
-// should contain the (VEX prefixes, opcode byte) pair
-// for each of the two forms.
-// For example, the entries for VPXOR are:
-//
-//	VPXOR xmm2/m128, xmmV, xmm1
-//	VEX.NDS.128.66.0F.WIG EF /r
-//
-//	VPXOR ymm2/m256, ymmV, ymm1
-//	VEX.NDS.256.66.0F.WIG EF /r
-//
-// The NDS/NDD/DDS part can be dropped, producing this
-// Optab entry:
-//
-//	{AVPXOR, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xEF, VEX_256_66_0F_WIG, 0xEF}}
-//
-var yvex_xy3 = []ytab{
-	{Yxm, Yxr, Yxr, Zvex_rm_v_r, 2},
-	{Yym, Yyr, Yyr, Zvex_rm_v_r, 2},
-}
-
-var yvex_ri3 = []ytab{
-	{Yi8, Ymb, Yrl, Zvex_i_rm_r, 2},
-}
-
-var yvex_xyi3 = []ytab{
-	{Yu8, Yxm, Yxr, Zvex_i_rm_r, 2},
-	{Yu8, Yym, Yyr, Zvex_i_rm_r, 2},
-	{Yi8, Yxm, Yxr, Zvex_i_rm_r, 2},
-	{Yi8, Yym, Yyr, Zvex_i_rm_r, 2},
-}
-
-var yvex_yyi4 = []ytab{ // TODO: don't hide the 4-operand form; some versions also have an xmm variant
-	{Yym, Yyr, Yyr, Zvex_i_rm_v_r, 2},
-}
-
-var yvex_xyi4 = []ytab{
-	{Yxm, Yyr, Yyr, Zvex_i_rm_v_r, 2},
-}
-
-var yvex_shift = []ytab{
-	{Yi8, Yxr, Yxr, Zvex_i_r_v, 3},
-	{Yi8, Yyr, Yyr, Zvex_i_r_v, 3},
-	{Yxm, Yxr, Yxr, Zvex_rm_v_r, 2},
-	{Yxm, Yyr, Yyr, Zvex_rm_v_r, 2},
-}
-
-var yvex_shift_dq = []ytab{
-	{Yi8, Yxr, Yxr, Zvex_i_r_v, 3},
-	{Yi8, Yyr, Yyr, Zvex_i_r_v, 3},
-}
-
-var yvex_r3 = []ytab{
-	{Yml, Yrl, Yrl, Zvex_rm_v_r, 2},
-}
-
-var yvex_vmr3 = []ytab{
-	{Yrl, Yml, Yrl, Zvex_v_rm_r, 2},
-}
-
-var yvex_xy2 = []ytab{
-	{Yxm, Ynone, Yxr, Zvex_rm_v_r, 2},
-	{Yym, Ynone, Yyr, Zvex_rm_v_r, 2},
-}
-
-var yvex_xyr2 = []ytab{
-	{Yxr, Ynone, Yrl, Zvex_rm_v_r, 2},
-	{Yyr, Ynone, Yrl, Zvex_rm_v_r, 2},
-}
-
-var yvex_vmovdqa = []ytab{
-	{Yxm, Ynone, Yxr, Zvex_rm_v_r, 2},
-	{Yxr, Ynone, Yxm, Zvex_r_v_rm, 2},
-	{Yym, Ynone, Yyr, Zvex_rm_v_r, 2},
-	{Yyr, Ynone, Yym, Zvex_r_v_rm, 2},
-}
-
-var yvex_vmovntdq = []ytab{
-	{Yxr, Ynone, Ym, Zvex_r_v_rm, 2},
-	{Yyr, Ynone, Ym, Zvex_r_v_rm, 2},
-}
-
-var yvex_vpbroadcast = []ytab{
-	{Yxm, Ynone, Yxr, Zvex_rm_v_r, 2},
-	{Yxm, Ynone, Yyr, Zvex_rm_v_r, 2},
-}
-
-var yvex_vpbroadcast_sd = []ytab{
-	{Yxm, Ynone, Yyr, Zvex_rm_v_r, 2},
-}
-
-var ymmxmm0f38 = []ytab{
-	{Ymm, Ynone, Ymr, Zlitm_r, 3},
-	{Yxm, Ynone, Yxr, Zlitm_r, 5},
-}
-
-/*
- * You are doasm, holding in your hand a Prog* with p->as set to, say, ACRC32,
- * and p->from and p->to as operands (Addr*).  The linker scans optab to find
- * the entry with the given p->as and then looks through the ytable for that
- * instruction (the second field in the optab struct) for a line whose first
- * two values match the Ytypes of the p->from and p->to operands.  The function
- * oclass in span.c computes the specific Ytype of an operand and then the set
- * of more general Ytypes that it satisfies is implied by the ycover table, set
- * up in instinit.  For example, oclass distinguishes the constants 0 and 1
- * from the more general 8-bit constants, but instinit says
- *
- *        ycover[Yi0*Ymax + Ys32] = 1;
- *        ycover[Yi1*Ymax + Ys32] = 1;
- *        ycover[Yi8*Ymax + Ys32] = 1;
- *
- * which means that Yi0, Yi1, and Yi8 all count as Ys32 (signed 32)
- * if that's what an instruction can handle.
- *
- * In parallel with the scan through the ytable for the appropriate line, there
- * is a z pointer that starts out pointing at the strange magic byte list in
- * the Optab struct.  With each step past a non-matching ytable line, z
- * advances by the 4th entry in the line.  When a matching line is found, that
- * z pointer has the extra data to use in laying down the instruction bytes.
- * The actual bytes laid down are a function of the 3rd entry in the line (that
- * is, the Ztype) and the z bytes.
- *
- * For example, let's look at AADDL.  The optab line says:
- *        { AADDL,        yaddl,  Px, 0x83,(00),0x05,0x81,(00),0x01,0x03 },
- *
- * and yaddl says
- *        uchar   yaddl[] =
- *        {
- *                Yi8,    Yml,    Zibo_m, 2,
- *                Yi32,   Yax,    Zil_,   1,
- *                Yi32,   Yml,    Zilo_m, 2,
- *                Yrl,    Yml,    Zr_m,   1,
- *                Yml,    Yrl,    Zm_r,   1,
- *                0
- *        };
- *
- * so there are 5 possible types of ADDL instruction that can be laid down, and the
- * possible states used to lay them down (Ztype and z pointer, assuming z
- * points at {0x83,(00),0x05,0x81,(00),0x01,0x03}) are:
- *
- *        Yi8, Yml -> Zibo_m, z (0x83, 00)
- *        Yi32, Yax -> Zil_, z+2 (0x05)
- *        Yi32, Yml -> Zilo_m, z+2+1 (0x81, 0x00)
- *        Yrl, Yml -> Zr_m, z+2+1+2 (0x01)
- *        Yml, Yrl -> Zm_r, z+2+1+2+1 (0x03)
- *
- * The Pconstant in the optab line controls the prefix bytes to emit.  That's
- * relatively straightforward as this program goes.
- *
- * The switch on t[2] in doasm implements the various Z cases.  Zibo_m, for
- * example, is an opcode byte (z[0]) then an asmando (which is some kind of
- * encoded addressing mode for the Yml arg), and then a single immediate byte.
- * Zilo_m is the same but a long (32-bit) immediate.
- */
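To make the scan above concrete, here is a minimal, self-contained Go sketch of the ytab walk it describes. It is not the code from this file: the names (ytabLine, encode, zbytes) and the trimmed-down operand-class constants are stand-ins for illustration only, and the table data simply transcribes the AADDL walkthrough, so the sketch returns just the first opcode byte the z pointer lands on.

// A minimal sketch of the ytab/z-pointer scan described in the comment above.
// Every name here is a stand-in invented for illustration, not the real types
// or tables from this file; the data transcribes the AADDL walkthrough.
package main

import "fmt"

type operandClass int

const (
	Yi8 operandClass = iota
	Yi32
	Yrl
	Yml
	Yax
)

type ytabLine struct {
	from, to operandClass // operand classes this line accepts
	ztype    string       // stand-in for the Z* byte-layout selector
	zoffset  int          // how far z advances past this line's opcode bytes
}

// The five ADDL forms from the walkthrough.
var yaddl = []ytabLine{
	{Yi8, Yml, "Zibo_m", 2},
	{Yi32, Yax, "Zil_", 1},
	{Yi32, Yml, "Zilo_m", 2},
	{Yrl, Yml, "Zr_m", 1},
	{Yml, Yrl, "Zm_r", 1},
}

// The magic byte list for ADDL, including the (00) modrm digits.
var zbytes = []byte{0x83, 0x00, 0x05, 0x81, 0x00, 0x01, 0x03}

// encode scans yaddl for the line matching the operand classes, advancing z
// past the opcode bytes of every non-matching line, and returns that line's
// Ztype plus the first opcode byte z points at.
func encode(from, to operandClass) (ztype string, opcode byte, ok bool) {
	z := 0
	for _, line := range yaddl {
		if line.from == from && line.to == to {
			return line.ztype, zbytes[z], true
		}
		z += line.zoffset
	}
	return "", 0, false
}

func main() {
	zt, op, _ := encode(Yrl, Yml) // ADDL reg, mem
	fmt.Printf("%s %#x\n", zt, op) // prints: Zr_m 0x1
}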
-var optab =
-/*	as, ytab, andproto, opcode */
-[]Optab{
-	{obj.AXXX, nil, 0, [23]uint8{}},
-	{AAAA, ynone, P32, [23]uint8{0x37}},
-	{AAAD, ynone, P32, [23]uint8{0xd5, 0x0a}},
-	{AAAM, ynone, P32, [23]uint8{0xd4, 0x0a}},
-	{AAAS, ynone, P32, [23]uint8{0x3f}},
-	{AADCB, yxorb, Pb, [23]uint8{0x14, 0x80, 02, 0x10, 0x10}},
-	{AADCL, yaddl, Px, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
-	{AADCQ, yaddl, Pw, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
-	{AADCW, yaddl, Pe, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
-	{AADDB, yxorb, Pb, [23]uint8{0x04, 0x80, 00, 0x00, 0x02}},
-	{AADDL, yaddl, Px, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
-	{AADDPD, yxm, Pq, [23]uint8{0x58}},
-	{AADDPS, yxm, Pm, [23]uint8{0x58}},
-	{AADDQ, yaddl, Pw, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
-	{AADDSD, yxm, Pf2, [23]uint8{0x58}},
-	{AADDSS, yxm, Pf3, [23]uint8{0x58}},
-	{AADDW, yaddl, Pe, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
-	{AADJSP, nil, 0, [23]uint8{}},
-	{AANDB, yxorb, Pb, [23]uint8{0x24, 0x80, 04, 0x20, 0x22}},
-	{AANDL, yaddl, Px, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
-	{AANDNPD, yxm, Pq, [23]uint8{0x55}},
-	{AANDNPS, yxm, Pm, [23]uint8{0x55}},
-	{AANDPD, yxm, Pq, [23]uint8{0x54}},
-	{AANDPS, yxm, Pq, [23]uint8{0x54}},
-	{AANDQ, yaddl, Pw, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
-	{AANDW, yaddl, Pe, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
-	{AARPL, yrl_ml, P32, [23]uint8{0x63}},
-	{ABOUNDL, yrl_m, P32, [23]uint8{0x62}},
-	{ABOUNDW, yrl_m, Pe, [23]uint8{0x62}},
-	{ABSFL, yml_rl, Pm, [23]uint8{0xbc}},
-	{ABSFQ, yml_rl, Pw, [23]uint8{0x0f, 0xbc}},
-	{ABSFW, yml_rl, Pq, [23]uint8{0xbc}},
-	{ABSRL, yml_rl, Pm, [23]uint8{0xbd}},
-	{ABSRQ, yml_rl, Pw, [23]uint8{0x0f, 0xbd}},
-	{ABSRW, yml_rl, Pq, [23]uint8{0xbd}},
-	{ABSWAPL, ybswap, Px, [23]uint8{0x0f, 0xc8}},
-	{ABSWAPQ, ybswap, Pw, [23]uint8{0x0f, 0xc8}},
-	{ABTCL, ybtl, Pm, [23]uint8{0xba, 07, 0xbb}},
-	{ABTCQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 07, 0x0f, 0xbb}},
-	{ABTCW, ybtl, Pq, [23]uint8{0xba, 07, 0xbb}},
-	{ABTL, ybtl, Pm, [23]uint8{0xba, 04, 0xa3}},
-	{ABTQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 04, 0x0f, 0xa3}},
-	{ABTRL, ybtl, Pm, [23]uint8{0xba, 06, 0xb3}},
-	{ABTRQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 06, 0x0f, 0xb3}},
-	{ABTRW, ybtl, Pq, [23]uint8{0xba, 06, 0xb3}},
-	{ABTSL, ybtl, Pm, [23]uint8{0xba, 05, 0xab}},
-	{ABTSQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 05, 0x0f, 0xab}},
-	{ABTSW, ybtl, Pq, [23]uint8{0xba, 05, 0xab}},
-	{ABTW, ybtl, Pq, [23]uint8{0xba, 04, 0xa3}},
-	{ABYTE, ybyte, Px, [23]uint8{1}},
-	{obj.ACALL, ycall, Px, [23]uint8{0xff, 02, 0xff, 0x15, 0xe8}},
-	{ACDQ, ynone, Px, [23]uint8{0x99}},
-	{ACLC, ynone, Px, [23]uint8{0xf8}},
-	{ACLD, ynone, Px, [23]uint8{0xfc}},
-	{ACLI, ynone, Px, [23]uint8{0xfa}},
-	{ACLTS, ynone, Pm, [23]uint8{0x06}},
-	{ACMC, ynone, Px, [23]uint8{0xf5}},
-	{ACMOVLCC, yml_rl, Pm, [23]uint8{0x43}},
-	{ACMOVLCS, yml_rl, Pm, [23]uint8{0x42}},
-	{ACMOVLEQ, yml_rl, Pm, [23]uint8{0x44}},
-	{ACMOVLGE, yml_rl, Pm, [23]uint8{0x4d}},
-	{ACMOVLGT, yml_rl, Pm, [23]uint8{0x4f}},
-	{ACMOVLHI, yml_rl, Pm, [23]uint8{0x47}},
-	{ACMOVLLE, yml_rl, Pm, [23]uint8{0x4e}},
-	{ACMOVLLS, yml_rl, Pm, [23]uint8{0x46}},
-	{ACMOVLLT, yml_rl, Pm, [23]uint8{0x4c}},
-	{ACMOVLMI, yml_rl, Pm, [23]uint8{0x48}},
-	{ACMOVLNE, yml_rl, Pm, [23]uint8{0x45}},
-	{ACMOVLOC, yml_rl, Pm, [23]uint8{0x41}},
-	{ACMOVLOS, yml_rl, Pm, [23]uint8{0x40}},
-	{ACMOVLPC, yml_rl, Pm, [23]uint8{0x4b}},
-	{ACMOVLPL, yml_rl, Pm, [23]uint8{0x49}},
-	{ACMOVLPS, yml_rl, Pm, [23]uint8{0x4a}},
-	{ACMOVQCC, yml_rl, Pw, [23]uint8{0x0f, 0x43}},
-	{ACMOVQCS, yml_rl, Pw, [23]uint8{0x0f, 0x42}},
-	{ACMOVQEQ, yml_rl, Pw, [23]uint8{0x0f, 0x44}},
-	{ACMOVQGE, yml_rl, Pw, [23]uint8{0x0f, 0x4d}},
-	{ACMOVQGT, yml_rl, Pw, [23]uint8{0x0f, 0x4f}},
-	{ACMOVQHI, yml_rl, Pw, [23]uint8{0x0f, 0x47}},
-	{ACMOVQLE, yml_rl, Pw, [23]uint8{0x0f, 0x4e}},
-	{ACMOVQLS, yml_rl, Pw, [23]uint8{0x0f, 0x46}},
-	{ACMOVQLT, yml_rl, Pw, [23]uint8{0x0f, 0x4c}},
-	{ACMOVQMI, yml_rl, Pw, [23]uint8{0x0f, 0x48}},
-	{ACMOVQNE, yml_rl, Pw, [23]uint8{0x0f, 0x45}},
-	{ACMOVQOC, yml_rl, Pw, [23]uint8{0x0f, 0x41}},
-	{ACMOVQOS, yml_rl, Pw, [23]uint8{0x0f, 0x40}},
-	{ACMOVQPC, yml_rl, Pw, [23]uint8{0x0f, 0x4b}},
-	{ACMOVQPL, yml_rl, Pw, [23]uint8{0x0f, 0x49}},
-	{ACMOVQPS, yml_rl, Pw, [23]uint8{0x0f, 0x4a}},
-	{ACMOVWCC, yml_rl, Pq, [23]uint8{0x43}},
-	{ACMOVWCS, yml_rl, Pq, [23]uint8{0x42}},
-	{ACMOVWEQ, yml_rl, Pq, [23]uint8{0x44}},
-	{ACMOVWGE, yml_rl, Pq, [23]uint8{0x4d}},
-	{ACMOVWGT, yml_rl, Pq, [23]uint8{0x4f}},
-	{ACMOVWHI, yml_rl, Pq, [23]uint8{0x47}},
-	{ACMOVWLE, yml_rl, Pq, [23]uint8{0x4e}},
-	{ACMOVWLS, yml_rl, Pq, [23]uint8{0x46}},
-	{ACMOVWLT, yml_rl, Pq, [23]uint8{0x4c}},
-	{ACMOVWMI, yml_rl, Pq, [23]uint8{0x48}},
-	{ACMOVWNE, yml_rl, Pq, [23]uint8{0x45}},
-	{ACMOVWOC, yml_rl, Pq, [23]uint8{0x41}},
-	{ACMOVWOS, yml_rl, Pq, [23]uint8{0x40}},
-	{ACMOVWPC, yml_rl, Pq, [23]uint8{0x4b}},
-	{ACMOVWPL, yml_rl, Pq, [23]uint8{0x49}},
-	{ACMOVWPS, yml_rl, Pq, [23]uint8{0x4a}},
-	{ACMPB, ycmpb, Pb, [23]uint8{0x3c, 0x80, 07, 0x38, 0x3a}},
-	{ACMPL, ycmpl, Px, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
-	{ACMPPD, yxcmpi, Px, [23]uint8{Pe, 0xc2}},
-	{ACMPPS, yxcmpi, Pm, [23]uint8{0xc2, 0}},
-	{ACMPQ, ycmpl, Pw, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
-	{ACMPSB, ynone, Pb, [23]uint8{0xa6}},
-	{ACMPSD, yxcmpi, Px, [23]uint8{Pf2, 0xc2}},
-	{ACMPSL, ynone, Px, [23]uint8{0xa7}},
-	{ACMPSQ, ynone, Pw, [23]uint8{0xa7}},
-	{ACMPSS, yxcmpi, Px, [23]uint8{Pf3, 0xc2}},
-	{ACMPSW, ynone, Pe, [23]uint8{0xa7}},
-	{ACMPW, ycmpl, Pe, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
-	{ACOMISD, yxm, Pe, [23]uint8{0x2f}},
-	{ACOMISS, yxm, Pm, [23]uint8{0x2f}},
-	{ACPUID, ynone, Pm, [23]uint8{0xa2}},
-	{ACVTPL2PD, yxcvm2, Px, [23]uint8{Pf3, 0xe6, Pe, 0x2a}},
-	{ACVTPL2PS, yxcvm2, Pm, [23]uint8{0x5b, 0, 0x2a, 0}},
-	{ACVTPD2PL, yxcvm1, Px, [23]uint8{Pf2, 0xe6, Pe, 0x2d}},
-	{ACVTPD2PS, yxm, Pe, [23]uint8{0x5a}},
-	{ACVTPS2PL, yxcvm1, Px, [23]uint8{Pe, 0x5b, Pm, 0x2d}},
-	{ACVTPS2PD, yxm, Pm, [23]uint8{0x5a}},
-	{ACVTSD2SL, yxcvfl, Pf2, [23]uint8{0x2d}},
-	{ACVTSD2SQ, yxcvfq, Pw, [23]uint8{Pf2, 0x2d}},
-	{ACVTSD2SS, yxm, Pf2, [23]uint8{0x5a}},
-	{ACVTSL2SD, yxcvlf, Pf2, [23]uint8{0x2a}},
-	{ACVTSQ2SD, yxcvqf, Pw, [23]uint8{Pf2, 0x2a}},
-	{ACVTSL2SS, yxcvlf, Pf3, [23]uint8{0x2a}},
-	{ACVTSQ2SS, yxcvqf, Pw, [23]uint8{Pf3, 0x2a}},
-	{ACVTSS2SD, yxm, Pf3, [23]uint8{0x5a}},
-	{ACVTSS2SL, yxcvfl, Pf3, [23]uint8{0x2d}},
-	{ACVTSS2SQ, yxcvfq, Pw, [23]uint8{Pf3, 0x2d}},
-	{ACVTTPD2PL, yxcvm1, Px, [23]uint8{Pe, 0xe6, Pe, 0x2c}},
-	{ACVTTPS2PL, yxcvm1, Px, [23]uint8{Pf3, 0x5b, Pm, 0x2c}},
-	{ACVTTSD2SL, yxcvfl, Pf2, [23]uint8{0x2c}},
-	{ACVTTSD2SQ, yxcvfq, Pw, [23]uint8{Pf2, 0x2c}},
-	{ACVTTSS2SL, yxcvfl, Pf3, [23]uint8{0x2c}},
-	{ACVTTSS2SQ, yxcvfq, Pw, [23]uint8{Pf3, 0x2c}},
-	{ACWD, ynone, Pe, [23]uint8{0x99}},
-	{ACQO, ynone, Pw, [23]uint8{0x99}},
-	{ADAA, ynone, P32, [23]uint8{0x27}},
-	{ADAS, ynone, P32, [23]uint8{0x2f}},
-	{ADECB, yscond, Pb, [23]uint8{0xfe, 01}},
-	{ADECL, yincl, Px1, [23]uint8{0x48, 0xff, 01}},
-	{ADECQ, yincq, Pw, [23]uint8{0xff, 01}},
-	{ADECW, yincq, Pe, [23]uint8{0xff, 01}},
-	{ADIVB, ydivb, Pb, [23]uint8{0xf6, 06}},
-	{ADIVL, ydivl, Px, [23]uint8{0xf7, 06}},
-	{ADIVPD, yxm, Pe, [23]uint8{0x5e}},
-	{ADIVPS, yxm, Pm, [23]uint8{0x5e}},
-	{ADIVQ, ydivl, Pw, [23]uint8{0xf7, 06}},
-	{ADIVSD, yxm, Pf2, [23]uint8{0x5e}},
-	{ADIVSS, yxm, Pf3, [23]uint8{0x5e}},
-	{ADIVW, ydivl, Pe, [23]uint8{0xf7, 06}},
-	{AEMMS, ynone, Pm, [23]uint8{0x77}},
-	{AENTER, nil, 0, [23]uint8{}}, /* botch */
-	{AFXRSTOR, ysvrs, Pm, [23]uint8{0xae, 01, 0xae, 01}},
-	{AFXSAVE, ysvrs, Pm, [23]uint8{0xae, 00, 0xae, 00}},
-	{AFXRSTOR64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 01, 0x0f, 0xae, 01}},
-	{AFXSAVE64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 00, 0x0f, 0xae, 00}},
-	{AHLT, ynone, Px, [23]uint8{0xf4}},
-	{AIDIVB, ydivb, Pb, [23]uint8{0xf6, 07}},
-	{AIDIVL, ydivl, Px, [23]uint8{0xf7, 07}},
-	{AIDIVQ, ydivl, Pw, [23]uint8{0xf7, 07}},
-	{AIDIVW, ydivl, Pe, [23]uint8{0xf7, 07}},
-	{AIMULB, ydivb, Pb, [23]uint8{0xf6, 05}},
-	{AIMULL, yimul, Px, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
-	{AIMULQ, yimul, Pw, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
-	{AIMULW, yimul, Pe, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
-	{AIMUL3Q, yimul3, Pw, [23]uint8{0x6b, 00}},
-	{AINB, yin, Pb, [23]uint8{0xe4, 0xec}},
-	{AINCB, yscond, Pb, [23]uint8{0xfe, 00}},
-	{AINCL, yincl, Px1, [23]uint8{0x40, 0xff, 00}},
-	{AINCQ, yincq, Pw, [23]uint8{0xff, 00}},
-	{AINCW, yincq, Pe, [23]uint8{0xff, 00}},
-	{AINL, yin, Px, [23]uint8{0xe5, 0xed}},
-	{AINSB, ynone, Pb, [23]uint8{0x6c}},
-	{AINSL, ynone, Px, [23]uint8{0x6d}},
-	{AINSW, ynone, Pe, [23]uint8{0x6d}},
-	{AINT, yint, Px, [23]uint8{0xcd}},
-	{AINTO, ynone, P32, [23]uint8{0xce}},
-	{AINW, yin, Pe, [23]uint8{0xe5, 0xed}},
-	{AIRETL, ynone, Px, [23]uint8{0xcf}},
-	{AIRETQ, ynone, Pw, [23]uint8{0xcf}},
-	{AIRETW, ynone, Pe, [23]uint8{0xcf}},
-	{AJCC, yjcond, Px, [23]uint8{0x73, 0x83, 00}},
-	{AJCS, yjcond, Px, [23]uint8{0x72, 0x82}},
-	{AJCXZL, yloop, Px, [23]uint8{0xe3}},
-	{AJCXZW, yloop, Px, [23]uint8{0xe3}},
-	{AJCXZQ, yloop, Px, [23]uint8{0xe3}},
-	{AJEQ, yjcond, Px, [23]uint8{0x74, 0x84}},
-	{AJGE, yjcond, Px, [23]uint8{0x7d, 0x8d}},
-	{AJGT, yjcond, Px, [23]uint8{0x7f, 0x8f}},
-	{AJHI, yjcond, Px, [23]uint8{0x77, 0x87}},
-	{AJLE, yjcond, Px, [23]uint8{0x7e, 0x8e}},
-	{AJLS, yjcond, Px, [23]uint8{0x76, 0x86}},
-	{AJLT, yjcond, Px, [23]uint8{0x7c, 0x8c}},
-	{AJMI, yjcond, Px, [23]uint8{0x78, 0x88}},
-	{obj.AJMP, yjmp, Px, [23]uint8{0xff, 04, 0xeb, 0xe9}},
-	{AJNE, yjcond, Px, [23]uint8{0x75, 0x85}},
-	{AJOC, yjcond, Px, [23]uint8{0x71, 0x81, 00}},
-	{AJOS, yjcond, Px, [23]uint8{0x70, 0x80, 00}},
-	{AJPC, yjcond, Px, [23]uint8{0x7b, 0x8b}},
-	{AJPL, yjcond, Px, [23]uint8{0x79, 0x89}},
-	{AJPS, yjcond, Px, [23]uint8{0x7a, 0x8a}},
-	{AHADDPD, yxm, Pq, [23]uint8{0x7c}},
-	{AHADDPS, yxm, Pf2, [23]uint8{0x7c}},
-	{AHSUBPD, yxm, Pq, [23]uint8{0x7d}},
-	{AHSUBPS, yxm, Pf2, [23]uint8{0x7d}},
-	{ALAHF, ynone, Px, [23]uint8{0x9f}},
-	{ALARL, yml_rl, Pm, [23]uint8{0x02}},
-	{ALARW, yml_rl, Pq, [23]uint8{0x02}},
-	{ALDDQU, ylddqu, Pf2, [23]uint8{0xf0}},
-	{ALDMXCSR, ysvrs, Pm, [23]uint8{0xae, 02, 0xae, 02}},
-	{ALEAL, ym_rl, Px, [23]uint8{0x8d}},
-	{ALEAQ, ym_rl, Pw, [23]uint8{0x8d}},
-	{ALEAVEL, ynone, P32, [23]uint8{0xc9}},
-	{ALEAVEQ, ynone, Py, [23]uint8{0xc9}},
-	{ALEAVEW, ynone, Pe, [23]uint8{0xc9}},
-	{ALEAW, ym_rl, Pe, [23]uint8{0x8d}},
-	{ALOCK, ynone, Px, [23]uint8{0xf0}},
-	{ALODSB, ynone, Pb, [23]uint8{0xac}},
-	{ALODSL, ynone, Px, [23]uint8{0xad}},
-	{ALODSQ, ynone, Pw, [23]uint8{0xad}},
-	{ALODSW, ynone, Pe, [23]uint8{0xad}},
-	{ALONG, ybyte, Px, [23]uint8{4}},
-	{ALOOP, yloop, Px, [23]uint8{0xe2}},
-	{ALOOPEQ, yloop, Px, [23]uint8{0xe1}},
-	{ALOOPNE, yloop, Px, [23]uint8{0xe0}},
-	{ALSLL, yml_rl, Pm, [23]uint8{0x03}},
-	{ALSLW, yml_rl, Pq, [23]uint8{0x03}},
-	{AMASKMOVOU, yxr, Pe, [23]uint8{0xf7}},
-	{AMASKMOVQ, ymr, Pm, [23]uint8{0xf7}},
-	{AMAXPD, yxm, Pe, [23]uint8{0x5f}},
-	{AMAXPS, yxm, Pm, [23]uint8{0x5f}},
-	{AMAXSD, yxm, Pf2, [23]uint8{0x5f}},
-	{AMAXSS, yxm, Pf3, [23]uint8{0x5f}},
-	{AMINPD, yxm, Pe, [23]uint8{0x5d}},
-	{AMINPS, yxm, Pm, [23]uint8{0x5d}},
-	{AMINSD, yxm, Pf2, [23]uint8{0x5d}},
-	{AMINSS, yxm, Pf3, [23]uint8{0x5d}},
-	{AMOVAPD, yxmov, Pe, [23]uint8{0x28, 0x29}},
-	{AMOVAPS, yxmov, Pm, [23]uint8{0x28, 0x29}},
-	{AMOVB, ymovb, Pb, [23]uint8{0x88, 0x8a, 0xb0, 0xc6, 00}},
-	{AMOVBLSX, ymb_rl, Pm, [23]uint8{0xbe}},
-	{AMOVBLZX, ymb_rl, Pm, [23]uint8{0xb6}},
-	{AMOVBQSX, ymb_rl, Pw, [23]uint8{0x0f, 0xbe}},
-	{AMOVBQZX, ymb_rl, Pm, [23]uint8{0xb6}},
-	{AMOVBWSX, ymb_rl, Pq, [23]uint8{0xbe}},
-	{AMOVBWZX, ymb_rl, Pq, [23]uint8{0xb6}},
-	{AMOVO, yxmov, Pe, [23]uint8{0x6f, 0x7f}},
-	{AMOVOU, yxmov, Pf3, [23]uint8{0x6f, 0x7f}},
-	{AMOVHLPS, yxr, Pm, [23]uint8{0x12}},
-	{AMOVHPD, yxmov, Pe, [23]uint8{0x16, 0x17}},
-	{AMOVHPS, yxmov, Pm, [23]uint8{0x16, 0x17}},
-	{AMOVL, ymovl, Px, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}},
-	{AMOVLHPS, yxr, Pm, [23]uint8{0x16}},
-	{AMOVLPD, yxmov, Pe, [23]uint8{0x12, 0x13}},
-	{AMOVLPS, yxmov, Pm, [23]uint8{0x12, 0x13}},
-	{AMOVLQSX, yml_rl, Pw, [23]uint8{0x63}},
-	{AMOVLQZX, yml_rl, Px, [23]uint8{0x8b}},
-	{AMOVMSKPD, yxrrl, Pq, [23]uint8{0x50}},
-	{AMOVMSKPS, yxrrl, Pm, [23]uint8{0x50}},
-	{AMOVNTO, yxr_ml, Pe, [23]uint8{0xe7}},
-	{AMOVNTPD, yxr_ml, Pe, [23]uint8{0x2b}},
-	{AMOVNTPS, yxr_ml, Pm, [23]uint8{0x2b}},
-	{AMOVNTQ, ymr_ml, Pm, [23]uint8{0xe7}},
-	{AMOVQ, ymovq, Pw8, [23]uint8{0x6f, 0x7f, Pf2, 0xd6, Pf3, 0x7e, Pe, 0xd6, 0x89, 0x8b, 0x31, 0xc7, 00, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}},
-	{AMOVQOZX, ymrxr, Pf3, [23]uint8{0xd6, 0x7e}},
-	{AMOVSB, ynone, Pb, [23]uint8{0xa4}},
-	{AMOVSD, yxmov, Pf2, [23]uint8{0x10, 0x11}},
-	{AMOVSL, ynone, Px, [23]uint8{0xa5}},
-	{AMOVSQ, ynone, Pw, [23]uint8{0xa5}},
-	{AMOVSS, yxmov, Pf3, [23]uint8{0x10, 0x11}},
-	{AMOVSW, ynone, Pe, [23]uint8{0xa5}},
-	{AMOVUPD, yxmov, Pe, [23]uint8{0x10, 0x11}},
-	{AMOVUPS, yxmov, Pm, [23]uint8{0x10, 0x11}},
-	{AMOVW, ymovw, Pe, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0}},
-	{AMOVWLSX, yml_rl, Pm, [23]uint8{0xbf}},
-	{AMOVWLZX, yml_rl, Pm, [23]uint8{0xb7}},
-	{AMOVWQSX, yml_rl, Pw, [23]uint8{0x0f, 0xbf}},
-	{AMOVWQZX, yml_rl, Pw, [23]uint8{0x0f, 0xb7}},
-	{AMULB, ydivb, Pb, [23]uint8{0xf6, 04}},
-	{AMULL, ydivl, Px, [23]uint8{0xf7, 04}},
-	{AMULPD, yxm, Pe, [23]uint8{0x59}},
-	{AMULPS, yxm, Ym, [23]uint8{0x59}},
-	{AMULQ, ydivl, Pw, [23]uint8{0xf7, 04}},
-	{AMULSD, yxm, Pf2, [23]uint8{0x59}},
-	{AMULSS, yxm, Pf3, [23]uint8{0x59}},
-	{AMULW, ydivl, Pe, [23]uint8{0xf7, 04}},
-	{ANEGB, yscond, Pb, [23]uint8{0xf6, 03}},
-	{ANEGL, yscond, Px, [23]uint8{0xf7, 03}},
-	{ANEGQ, yscond, Pw, [23]uint8{0xf7, 03}},
-	{ANEGW, yscond, Pe, [23]uint8{0xf7, 03}},
-	{obj.ANOP, ynop, Px, [23]uint8{0, 0}},
-	{ANOTB, yscond, Pb, [23]uint8{0xf6, 02}},
-	{ANOTL, yscond, Px, [23]uint8{0xf7, 02}}, // TODO(rsc): yscond is wrong here.
-	{ANOTQ, yscond, Pw, [23]uint8{0xf7, 02}},
-	{ANOTW, yscond, Pe, [23]uint8{0xf7, 02}},
-	{AORB, yxorb, Pb, [23]uint8{0x0c, 0x80, 01, 0x08, 0x0a}},
-	{AORL, yaddl, Px, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
-	{AORPD, yxm, Pq, [23]uint8{0x56}},
-	{AORPS, yxm, Pm, [23]uint8{0x56}},
-	{AORQ, yaddl, Pw, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
-	{AORW, yaddl, Pe, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
-	{AOUTB, yin, Pb, [23]uint8{0xe6, 0xee}},
-	{AOUTL, yin, Px, [23]uint8{0xe7, 0xef}},
-	{AOUTSB, ynone, Pb, [23]uint8{0x6e}},
-	{AOUTSL, ynone, Px, [23]uint8{0x6f}},
-	{AOUTSW, ynone, Pe, [23]uint8{0x6f}},
-	{AOUTW, yin, Pe, [23]uint8{0xe7, 0xef}},
-	{APACKSSLW, ymm, Py1, [23]uint8{0x6b, Pe, 0x6b}},
-	{APACKSSWB, ymm, Py1, [23]uint8{0x63, Pe, 0x63}},
-	{APACKUSWB, ymm, Py1, [23]uint8{0x67, Pe, 0x67}},
-	{APADDB, ymm, Py1, [23]uint8{0xfc, Pe, 0xfc}},
-	{APADDL, ymm, Py1, [23]uint8{0xfe, Pe, 0xfe}},
-	{APADDQ, yxm, Pe, [23]uint8{0xd4}},
-	{APADDSB, ymm, Py1, [23]uint8{0xec, Pe, 0xec}},
-	{APADDSW, ymm, Py1, [23]uint8{0xed, Pe, 0xed}},
-	{APADDUSB, ymm, Py1, [23]uint8{0xdc, Pe, 0xdc}},
-	{APADDUSW, ymm, Py1, [23]uint8{0xdd, Pe, 0xdd}},
-	{APADDW, ymm, Py1, [23]uint8{0xfd, Pe, 0xfd}},
-	{APAND, ymm, Py1, [23]uint8{0xdb, Pe, 0xdb}},
-	{APANDN, ymm, Py1, [23]uint8{0xdf, Pe, 0xdf}},
-	{APAUSE, ynone, Px, [23]uint8{0xf3, 0x90}},
-	{APAVGB, ymm, Py1, [23]uint8{0xe0, Pe, 0xe0}},
-	{APAVGW, ymm, Py1, [23]uint8{0xe3, Pe, 0xe3}},
-	{APCMPEQB, ymm, Py1, [23]uint8{0x74, Pe, 0x74}},
-	{APCMPEQL, ymm, Py1, [23]uint8{0x76, Pe, 0x76}},
-	{APCMPEQW, ymm, Py1, [23]uint8{0x75, Pe, 0x75}},
-	{APCMPGTB, ymm, Py1, [23]uint8{0x64, Pe, 0x64}},
-	{APCMPGTL, ymm, Py1, [23]uint8{0x66, Pe, 0x66}},
-	{APCMPGTW, ymm, Py1, [23]uint8{0x65, Pe, 0x65}},
-	{APEXTRW, yextrw, Pq, [23]uint8{0xc5, 00}},
-	{APEXTRB, yextr, Pq, [23]uint8{0x3a, 0x14, 00}},
-	{APEXTRD, yextr, Pq, [23]uint8{0x3a, 0x16, 00}},
-	{APEXTRQ, yextr, Pq3, [23]uint8{0x3a, 0x16, 00}},
-	{APHADDD, ymmxmm0f38, Px, [23]uint8{0x0F, 0x38, 0x02, 0, 0x66, 0x0F, 0x38, 0x02, 0}},
-	{APHADDSW, yxm_q4, Pq4, [23]uint8{0x03}},
-	{APHADDW, yxm_q4, Pq4, [23]uint8{0x01}},
-	{APHMINPOSUW, yxm_q4, Pq4, [23]uint8{0x41}},
-	{APHSUBD, yxm_q4, Pq4, [23]uint8{0x06}},
-	{APHSUBSW, yxm_q4, Pq4, [23]uint8{0x07}},
-	{APHSUBW, yxm_q4, Pq4, [23]uint8{0x05}},
-	{APINSRW, yinsrw, Pq, [23]uint8{0xc4, 00}},
-	{APINSRB, yinsr, Pq, [23]uint8{0x3a, 0x20, 00}},
-	{APINSRD, yinsr, Pq, [23]uint8{0x3a, 0x22, 00}},
-	{APINSRQ, yinsr, Pq3, [23]uint8{0x3a, 0x22, 00}},
-	{APMADDWL, ymm, Py1, [23]uint8{0xf5, Pe, 0xf5}},
-	{APMAXSW, yxm, Pe, [23]uint8{0xee}},
-	{APMAXUB, yxm, Pe, [23]uint8{0xde}},
-	{APMINSW, yxm, Pe, [23]uint8{0xea}},
-	{APMINUB, yxm, Pe, [23]uint8{0xda}},
-	{APMOVMSKB, ymskb, Px, [23]uint8{Pe, 0xd7, 0xd7}},
-	{APMOVSXBD, yxm_q4, Pq4, [23]uint8{0x21}},
-	{APMOVSXBQ, yxm_q4, Pq4, [23]uint8{0x22}},
-	{APMOVSXBW, yxm_q4, Pq4, [23]uint8{0x20}},
-	{APMOVSXDQ, yxm_q4, Pq4, [23]uint8{0x25}},
-	{APMOVSXWD, yxm_q4, Pq4, [23]uint8{0x23}},
-	{APMOVSXWQ, yxm_q4, Pq4, [23]uint8{0x24}},
-	{APMOVZXBD, yxm_q4, Pq4, [23]uint8{0x31}},
-	{APMOVZXBQ, yxm_q4, Pq4, [23]uint8{0x32}},
-	{APMOVZXBW, yxm_q4, Pq4, [23]uint8{0x30}},
-	{APMOVZXDQ, yxm_q4, Pq4, [23]uint8{0x35}},
-	{APMOVZXWD, yxm_q4, Pq4, [23]uint8{0x33}},
-	{APMOVZXWQ, yxm_q4, Pq4, [23]uint8{0x34}},
-	{APMULDQ, yxm_q4, Pq4, [23]uint8{0x28}},
-	{APMULHUW, ymm, Py1, [23]uint8{0xe4, Pe, 0xe4}},
-	{APMULHW, ymm, Py1, [23]uint8{0xe5, Pe, 0xe5}},
-	{APMULLD, yxm_q4, Pq4, [23]uint8{0x40}},
-	{APMULLW, ymm, Py1, [23]uint8{0xd5, Pe, 0xd5}},
-	{APMULULQ, ymm, Py1, [23]uint8{0xf4, Pe, 0xf4}},
-	{APOPAL, ynone, P32, [23]uint8{0x61}},
-	{APOPAW, ynone, Pe, [23]uint8{0x61}},
-	{APOPCNTW, yml_rl, Pef3, [23]uint8{0xb8}},
-	{APOPCNTL, yml_rl, Pf3, [23]uint8{0xb8}},
-	{APOPCNTQ, yml_rl, Pfw, [23]uint8{0xb8}},
-	{APOPFL, ynone, P32, [23]uint8{0x9d}},
-	{APOPFQ, ynone, Py, [23]uint8{0x9d}},
-	{APOPFW, ynone, Pe, [23]uint8{0x9d}},
-	{APOPL, ypopl, P32, [23]uint8{0x58, 0x8f, 00}},
-	{APOPQ, ypopl, Py, [23]uint8{0x58, 0x8f, 00}},
-	{APOPW, ypopl, Pe, [23]uint8{0x58, 0x8f, 00}},
-	{APOR, ymm, Py1, [23]uint8{0xeb, Pe, 0xeb}},
-	{APSADBW, yxm, Pq, [23]uint8{0xf6}},
-	{APSHUFHW, yxshuf, Pf3, [23]uint8{0x70, 00}},
-	{APSHUFL, yxshuf, Pq, [23]uint8{0x70, 00}},
-	{APSHUFLW, yxshuf, Pf2, [23]uint8{0x70, 00}},
-	{APSHUFW, ymshuf, Pm, [23]uint8{0x70, 00}},
-	{APSHUFB, ymshufb, Pq, [23]uint8{0x38, 0x00}},
-	{APSLLO, ypsdq, Pq, [23]uint8{0x73, 07}},
-	{APSLLL, yps, Py3, [23]uint8{0xf2, 0x72, 06, Pe, 0xf2, Pe, 0x72, 06}},
-	{APSLLQ, yps, Py3, [23]uint8{0xf3, 0x73, 06, Pe, 0xf3, Pe, 0x73, 06}},
-	{APSLLW, yps, Py3, [23]uint8{0xf1, 0x71, 06, Pe, 0xf1, Pe, 0x71, 06}},
-	{APSRAL, yps, Py3, [23]uint8{0xe2, 0x72, 04, Pe, 0xe2, Pe, 0x72, 04}},
-	{APSRAW, yps, Py3, [23]uint8{0xe1, 0x71, 04, Pe, 0xe1, Pe, 0x71, 04}},
-	{APSRLO, ypsdq, Pq, [23]uint8{0x73, 03}},
-	{APSRLL, yps, Py3, [23]uint8{0xd2, 0x72, 02, Pe, 0xd2, Pe, 0x72, 02}},
-	{APSRLQ, yps, Py3, [23]uint8{0xd3, 0x73, 02, Pe, 0xd3, Pe, 0x73, 02}},
-	{APSRLW, yps, Py3, [23]uint8{0xd1, 0x71, 02, Pe, 0xd1, Pe, 0x71, 02}},
-	{APSUBB, yxm, Pe, [23]uint8{0xf8}},
-	{APSUBL, yxm, Pe, [23]uint8{0xfa}},
-	{APSUBQ, yxm, Pe, [23]uint8{0xfb}},
-	{APSUBSB, yxm, Pe, [23]uint8{0xe8}},
-	{APSUBSW, yxm, Pe, [23]uint8{0xe9}},
-	{APSUBUSB, yxm, Pe, [23]uint8{0xd8}},
-	{APSUBUSW, yxm, Pe, [23]uint8{0xd9}},
-	{APSUBW, yxm, Pe, [23]uint8{0xf9}},
-	{APUNPCKHBW, ymm, Py1, [23]uint8{0x68, Pe, 0x68}},
-	{APUNPCKHLQ, ymm, Py1, [23]uint8{0x6a, Pe, 0x6a}},
-	{APUNPCKHQDQ, yxm, Pe, [23]uint8{0x6d}},
-	{APUNPCKHWL, ymm, Py1, [23]uint8{0x69, Pe, 0x69}},
-	{APUNPCKLBW, ymm, Py1, [23]uint8{0x60, Pe, 0x60}},
-	{APUNPCKLLQ, ymm, Py1, [23]uint8{0x62, Pe, 0x62}},
-	{APUNPCKLQDQ, yxm, Pe, [23]uint8{0x6c}},
-	{APUNPCKLWL, ymm, Py1, [23]uint8{0x61, Pe, 0x61}},
-	{APUSHAL, ynone, P32, [23]uint8{0x60}},
-	{APUSHAW, ynone, Pe, [23]uint8{0x60}},
-	{APUSHFL, ynone, P32, [23]uint8{0x9c}},
-	{APUSHFQ, ynone, Py, [23]uint8{0x9c}},
-	{APUSHFW, ynone, Pe, [23]uint8{0x9c}},
-	{APUSHL, ypushl, P32, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
-	{APUSHQ, ypushl, Py, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
-	{APUSHW, ypushl, Pe, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
-	{APXOR, ymm, Py1, [23]uint8{0xef, Pe, 0xef}},
-	{AQUAD, ybyte, Px, [23]uint8{8}},
-	{ARCLB, yshb, Pb, [23]uint8{0xd0, 02, 0xc0, 02, 0xd2, 02}},
-	{ARCLL, yshl, Px, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
-	{ARCLQ, yshl, Pw, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
-	{ARCLW, yshl, Pe, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
-	{ARCPPS, yxm, Pm, [23]uint8{0x53}},
-	{ARCPSS, yxm, Pf3, [23]uint8{0x53}},
-	{ARCRB, yshb, Pb, [23]uint8{0xd0, 03, 0xc0, 03, 0xd2, 03}},
-	{ARCRL, yshl, Px, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
-	{ARCRQ, yshl, Pw, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
-	{ARCRW, yshl, Pe, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
-	{AREP, ynone, Px, [23]uint8{0xf3}},
-	{AREPN, ynone, Px, [23]uint8{0xf2}},
-	{obj.ARET, ynone, Px, [23]uint8{0xc3}},
-	{ARETFW, yret, Pe, [23]uint8{0xcb, 0xca}},
-	{ARETFL, yret, Px, [23]uint8{0xcb, 0xca}},
-	{ARETFQ, yret, Pw, [23]uint8{0xcb, 0xca}},
-	{AROLB, yshb, Pb, [23]uint8{0xd0, 00, 0xc0, 00, 0xd2, 00}},
-	{AROLL, yshl, Px, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
-	{AROLQ, yshl, Pw, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
-	{AROLW, yshl, Pe, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
-	{ARORB, yshb, Pb, [23]uint8{0xd0, 01, 0xc0, 01, 0xd2, 01}},
-	{ARORL, yshl, Px, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
-	{ARORQ, yshl, Pw, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
-	{ARORW, yshl, Pe, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
-	{ARSQRTPS, yxm, Pm, [23]uint8{0x52}},
-	{ARSQRTSS, yxm, Pf3, [23]uint8{0x52}},
-	{ASAHF, ynone, Px1, [23]uint8{0x9e, 00, 0x86, 0xe0, 0x50, 0x9d}}, /* XCHGB AH,AL; PUSH AX; POPFL */
-	{ASALB, yshb, Pb, [23]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
-	{ASALL, yshl, Px, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
-	{ASALQ, yshl, Pw, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
-	{ASALW, yshl, Pe, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
-	{ASARB, yshb, Pb, [23]uint8{0xd0, 07, 0xc0, 07, 0xd2, 07}},
-	{ASARL, yshl, Px, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
-	{ASARQ, yshl, Pw, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
-	{ASARW, yshl, Pe, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
-	{ASBBB, yxorb, Pb, [23]uint8{0x1c, 0x80, 03, 0x18, 0x1a}},
-	{ASBBL, yaddl, Px, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
-	{ASBBQ, yaddl, Pw, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
-	{ASBBW, yaddl, Pe, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
-	{ASCASB, ynone, Pb, [23]uint8{0xae}},
-	{ASCASL, ynone, Px, [23]uint8{0xaf}},
-	{ASCASQ, ynone, Pw, [23]uint8{0xaf}},
-	{ASCASW, ynone, Pe, [23]uint8{0xaf}},
-	{ASETCC, yscond, Pb, [23]uint8{0x0f, 0x93, 00}},
-	{ASETCS, yscond, Pb, [23]uint8{0x0f, 0x92, 00}},
-	{ASETEQ, yscond, Pb, [23]uint8{0x0f, 0x94, 00}},
-	{ASETGE, yscond, Pb, [23]uint8{0x0f, 0x9d, 00}},
-	{ASETGT, yscond, Pb, [23]uint8{0x0f, 0x9f, 00}},
-	{ASETHI, yscond, Pb, [23]uint8{0x0f, 0x97, 00}},
-	{ASETLE, yscond, Pb, [23]uint8{0x0f, 0x9e, 00}},
-	{ASETLS, yscond, Pb, [23]uint8{0x0f, 0x96, 00}},
-	{ASETLT, yscond, Pb, [23]uint8{0x0f, 0x9c, 00}},
-	{ASETMI, yscond, Pb, [23]uint8{0x0f, 0x98, 00}},
-	{ASETNE, yscond, Pb, [23]uint8{0x0f, 0x95, 00}},
-	{ASETOC, yscond, Pb, [23]uint8{0x0f, 0x91, 00}},
-	{ASETOS, yscond, Pb, [23]uint8{0x0f, 0x90, 00}},
-	{ASETPC, yscond, Pb, [23]uint8{0x0f, 0x9b, 00}},
-	{ASETPL, yscond, Pb, [23]uint8{0x0f, 0x99, 00}},
-	{ASETPS, yscond, Pb, [23]uint8{0x0f, 0x9a, 00}},
-	{ASHLB, yshb, Pb, [23]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
-	{ASHLL, yshl, Px, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
-	{ASHLQ, yshl, Pw, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
-	{ASHLW, yshl, Pe, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
-	{ASHRB, yshb, Pb, [23]uint8{0xd0, 05, 0xc0, 05, 0xd2, 05}},
-	{ASHRL, yshl, Px, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
-	{ASHRQ, yshl, Pw, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
-	{ASHRW, yshl, Pe, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
-	{ASHUFPD, yxshuf, Pq, [23]uint8{0xc6, 00}},
-	{ASHUFPS, yxshuf, Pm, [23]uint8{0xc6, 00}},
-	{ASQRTPD, yxm, Pe, [23]uint8{0x51}},
-	{ASQRTPS, yxm, Pm, [23]uint8{0x51}},
-	{ASQRTSD, yxm, Pf2, [23]uint8{0x51}},
-	{ASQRTSS, yxm, Pf3, [23]uint8{0x51}},
-	{ASTC, ynone, Px, [23]uint8{0xf9}},
-	{ASTD, ynone, Px, [23]uint8{0xfd}},
-	{ASTI, ynone, Px, [23]uint8{0xfb}},
-	{ASTMXCSR, ysvrs, Pm, [23]uint8{0xae, 03, 0xae, 03}},
-	{ASTOSB, ynone, Pb, [23]uint8{0xaa}},
-	{ASTOSL, ynone, Px, [23]uint8{0xab}},
-	{ASTOSQ, ynone, Pw, [23]uint8{0xab}},
-	{ASTOSW, ynone, Pe, [23]uint8{0xab}},
-	{ASUBB, yxorb, Pb, [23]uint8{0x2c, 0x80, 05, 0x28, 0x2a}},
-	{ASUBL, yaddl, Px, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
-	{ASUBPD, yxm, Pe, [23]uint8{0x5c}},
-	{ASUBPS, yxm, Pm, [23]uint8{0x5c}},
-	{ASUBQ, yaddl, Pw, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
-	{ASUBSD, yxm, Pf2, [23]uint8{0x5c}},
-	{ASUBSS, yxm, Pf3, [23]uint8{0x5c}},
-	{ASUBW, yaddl, Pe, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
-	{ASWAPGS, ynone, Pm, [23]uint8{0x01, 0xf8}},
-	{ASYSCALL, ynone, Px, [23]uint8{0x0f, 0x05}}, /* fast syscall */
-	{ATESTB, yxorb, Pb, [23]uint8{0xa8, 0xf6, 00, 0x84, 0x84}},
-	{ATESTL, ytestl, Px, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
-	{ATESTQ, ytestl, Pw, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
-	{ATESTW, ytestl, Pe, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
-	{obj.ATEXT, ytext, Px, [23]uint8{}},
-	{AUCOMISD, yxm, Pe, [23]uint8{0x2e}},
-	{AUCOMISS, yxm, Pm, [23]uint8{0x2e}},
-	{AUNPCKHPD, yxm, Pe, [23]uint8{0x15}},
-	{AUNPCKHPS, yxm, Pm, [23]uint8{0x15}},
-	{AUNPCKLPD, yxm, Pe, [23]uint8{0x14}},
-	{AUNPCKLPS, yxm, Pm, [23]uint8{0x14}},
-	{AVERR, ydivl, Pm, [23]uint8{0x00, 04}},
-	{AVERW, ydivl, Pm, [23]uint8{0x00, 05}},
-	{AWAIT, ynone, Px, [23]uint8{0x9b}},
-	{AWORD, ybyte, Px, [23]uint8{2}},
-	{AXCHGB, yml_mb, Pb, [23]uint8{0x86, 0x86}},
-	{AXCHGL, yxchg, Px, [23]uint8{0x90, 0x90, 0x87, 0x87}},
-	{AXCHGQ, yxchg, Pw, [23]uint8{0x90, 0x90, 0x87, 0x87}},
-	{AXCHGW, yxchg, Pe, [23]uint8{0x90, 0x90, 0x87, 0x87}},
-	{AXLAT, ynone, Px, [23]uint8{0xd7}},
-	{AXORB, yxorb, Pb, [23]uint8{0x34, 0x80, 06, 0x30, 0x32}},
-	{AXORL, yaddl, Px, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
-	{AXORPD, yxm, Pe, [23]uint8{0x57}},
-	{AXORPS, yxm, Pm, [23]uint8{0x57}},
-	{AXORQ, yaddl, Pw, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
-	{AXORW, yaddl, Pe, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
-	{AFMOVB, yfmvx, Px, [23]uint8{0xdf, 04}},
-	{AFMOVBP, yfmvp, Px, [23]uint8{0xdf, 06}},
-	{AFMOVD, yfmvd, Px, [23]uint8{0xdd, 00, 0xdd, 02, 0xd9, 00, 0xdd, 02}},
-	{AFMOVDP, yfmvdp, Px, [23]uint8{0xdd, 03, 0xdd, 03}},
-	{AFMOVF, yfmvf, Px, [23]uint8{0xd9, 00, 0xd9, 02}},
-	{AFMOVFP, yfmvp, Px, [23]uint8{0xd9, 03}},
-	{AFMOVL, yfmvf, Px, [23]uint8{0xdb, 00, 0xdb, 02}},
-	{AFMOVLP, yfmvp, Px, [23]uint8{0xdb, 03}},
-	{AFMOVV, yfmvx, Px, [23]uint8{0xdf, 05}},
-	{AFMOVVP, yfmvp, Px, [23]uint8{0xdf, 07}},
-	{AFMOVW, yfmvf, Px, [23]uint8{0xdf, 00, 0xdf, 02}},
-	{AFMOVWP, yfmvp, Px, [23]uint8{0xdf, 03}},
-	{AFMOVX, yfmvx, Px, [23]uint8{0xdb, 05}},
-	{AFMOVXP, yfmvp, Px, [23]uint8{0xdb, 07}},
-	{AFCMOVCC, yfcmv, Px, [23]uint8{0xdb, 00}},
-	{AFCMOVCS, yfcmv, Px, [23]uint8{0xda, 00}},
-	{AFCMOVEQ, yfcmv, Px, [23]uint8{0xda, 01}},
-	{AFCMOVHI, yfcmv, Px, [23]uint8{0xdb, 02}},
-	{AFCMOVLS, yfcmv, Px, [23]uint8{0xda, 02}},
-	{AFCMOVNE, yfcmv, Px, [23]uint8{0xdb, 01}},
-	{AFCMOVNU, yfcmv, Px, [23]uint8{0xdb, 03}},
-	{AFCMOVUN, yfcmv, Px, [23]uint8{0xda, 03}},
-	{AFCOMD, yfadd, Px, [23]uint8{0xdc, 02, 0xd8, 02, 0xdc, 02}},  /* botch */
-	{AFCOMDP, yfadd, Px, [23]uint8{0xdc, 03, 0xd8, 03, 0xdc, 03}}, /* botch */
-	{AFCOMDPP, ycompp, Px, [23]uint8{0xde, 03}},
-	{AFCOMF, yfmvx, Px, [23]uint8{0xd8, 02}},
-	{AFCOMFP, yfmvx, Px, [23]uint8{0xd8, 03}},
-	{AFCOMI, yfmvx, Px, [23]uint8{0xdb, 06}},
-	{AFCOMIP, yfmvx, Px, [23]uint8{0xdf, 06}},
-	{AFCOML, yfmvx, Px, [23]uint8{0xda, 02}},
-	{AFCOMLP, yfmvx, Px, [23]uint8{0xda, 03}},
-	{AFCOMW, yfmvx, Px, [23]uint8{0xde, 02}},
-	{AFCOMWP, yfmvx, Px, [23]uint8{0xde, 03}},
-	{AFUCOM, ycompp, Px, [23]uint8{0xdd, 04}},
-	{AFUCOMI, ycompp, Px, [23]uint8{0xdb, 05}},
-	{AFUCOMIP, ycompp, Px, [23]uint8{0xdf, 05}},
-	{AFUCOMP, ycompp, Px, [23]uint8{0xdd, 05}},
-	{AFUCOMPP, ycompp, Px, [23]uint8{0xda, 13}},
-	{AFADDDP, ycompp, Px, [23]uint8{0xde, 00}},
-	{AFADDW, yfmvx, Px, [23]uint8{0xde, 00}},
-	{AFADDL, yfmvx, Px, [23]uint8{0xda, 00}},
-	{AFADDF, yfmvx, Px, [23]uint8{0xd8, 00}},
-	{AFADDD, yfadd, Px, [23]uint8{0xdc, 00, 0xd8, 00, 0xdc, 00}},
-	{AFMULDP, ycompp, Px, [23]uint8{0xde, 01}},
-	{AFMULW, yfmvx, Px, [23]uint8{0xde, 01}},
-	{AFMULL, yfmvx, Px, [23]uint8{0xda, 01}},
-	{AFMULF, yfmvx, Px, [23]uint8{0xd8, 01}},
-	{AFMULD, yfadd, Px, [23]uint8{0xdc, 01, 0xd8, 01, 0xdc, 01}},
-	{AFSUBDP, ycompp, Px, [23]uint8{0xde, 05}},
-	{AFSUBW, yfmvx, Px, [23]uint8{0xde, 04}},
-	{AFSUBL, yfmvx, Px, [23]uint8{0xda, 04}},
-	{AFSUBF, yfmvx, Px, [23]uint8{0xd8, 04}},
-	{AFSUBD, yfadd, Px, [23]uint8{0xdc, 04, 0xd8, 04, 0xdc, 05}},
-	{AFSUBRDP, ycompp, Px, [23]uint8{0xde, 04}},
-	{AFSUBRW, yfmvx, Px, [23]uint8{0xde, 05}},
-	{AFSUBRL, yfmvx, Px, [23]uint8{0xda, 05}},
-	{AFSUBRF, yfmvx, Px, [23]uint8{0xd8, 05}},
-	{AFSUBRD, yfadd, Px, [23]uint8{0xdc, 05, 0xd8, 05, 0xdc, 04}},
-	{AFDIVDP, ycompp, Px, [23]uint8{0xde, 07}},
-	{AFDIVW, yfmvx, Px, [23]uint8{0xde, 06}},
-	{AFDIVL, yfmvx, Px, [23]uint8{0xda, 06}},
-	{AFDIVF, yfmvx, Px, [23]uint8{0xd8, 06}},
-	{AFDIVD, yfadd, Px, [23]uint8{0xdc, 06, 0xd8, 06, 0xdc, 07}},
-	{AFDIVRDP, ycompp, Px, [23]uint8{0xde, 06}},
-	{AFDIVRW, yfmvx, Px, [23]uint8{0xde, 07}},
-	{AFDIVRL, yfmvx, Px, [23]uint8{0xda, 07}},
-	{AFDIVRF, yfmvx, Px, [23]uint8{0xd8, 07}},
-	{AFDIVRD, yfadd, Px, [23]uint8{0xdc, 07, 0xd8, 07, 0xdc, 06}},
-	{AFXCHD, yfxch, Px, [23]uint8{0xd9, 01, 0xd9, 01}},
-	{AFFREE, nil, 0, [23]uint8{}},
-	{AFLDCW, ysvrs, Px, [23]uint8{0xd9, 05, 0xd9, 05}},
-	{AFLDENV, ysvrs, Px, [23]uint8{0xd9, 04, 0xd9, 04}},
-	{AFRSTOR, ysvrs, Px, [23]uint8{0xdd, 04, 0xdd, 04}},
-	{AFSAVE, ysvrs, Px, [23]uint8{0xdd, 06, 0xdd, 06}},
-	{AFSTCW, ysvrs, Px, [23]uint8{0xd9, 07, 0xd9, 07}},
-	{AFSTENV, ysvrs, Px, [23]uint8{0xd9, 06, 0xd9, 06}},
-	{AFSTSW, ystsw, Px, [23]uint8{0xdd, 07, 0xdf, 0xe0}},
-	{AF2XM1, ynone, Px, [23]uint8{0xd9, 0xf0}},
-	{AFABS, ynone, Px, [23]uint8{0xd9, 0xe1}},
-	{AFCHS, ynone, Px, [23]uint8{0xd9, 0xe0}},
-	{AFCLEX, ynone, Px, [23]uint8{0xdb, 0xe2}},
-	{AFCOS, ynone, Px, [23]uint8{0xd9, 0xff}},
-	{AFDECSTP, ynone, Px, [23]uint8{0xd9, 0xf6}},
-	{AFINCSTP, ynone, Px, [23]uint8{0xd9, 0xf7}},
-	{AFINIT, ynone, Px, [23]uint8{0xdb, 0xe3}},
-	{AFLD1, ynone, Px, [23]uint8{0xd9, 0xe8}},
-	{AFLDL2E, ynone, Px, [23]uint8{0xd9, 0xea}},
-	{AFLDL2T, ynone, Px, [23]uint8{0xd9, 0xe9}},
-	{AFLDLG2, ynone, Px, [23]uint8{0xd9, 0xec}},
-	{AFLDLN2, ynone, Px, [23]uint8{0xd9, 0xed}},
-	{AFLDPI, ynone, Px, [23]uint8{0xd9, 0xeb}},
-	{AFLDZ, ynone, Px, [23]uint8{0xd9, 0xee}},
-	{AFNOP, ynone, Px, [23]uint8{0xd9, 0xd0}},
-	{AFPATAN, ynone, Px, [23]uint8{0xd9, 0xf3}},
-	{AFPREM, ynone, Px, [23]uint8{0xd9, 0xf8}},
-	{AFPREM1, ynone, Px, [23]uint8{0xd9, 0xf5}},
-	{AFPTAN, ynone, Px, [23]uint8{0xd9, 0xf2}},
-	{AFRNDINT, ynone, Px, [23]uint8{0xd9, 0xfc}},
-	{AFSCALE, ynone, Px, [23]uint8{0xd9, 0xfd}},
-	{AFSIN, ynone, Px, [23]uint8{0xd9, 0xfe}},
-	{AFSINCOS, ynone, Px, [23]uint8{0xd9, 0xfb}},
-	{AFSQRT, ynone, Px, [23]uint8{0xd9, 0xfa}},
-	{AFTST, ynone, Px, [23]uint8{0xd9, 0xe4}},
-	{AFXAM, ynone, Px, [23]uint8{0xd9, 0xe5}},
-	{AFXTRACT, ynone, Px, [23]uint8{0xd9, 0xf4}},
-	{AFYL2X, ynone, Px, [23]uint8{0xd9, 0xf1}},
-	{AFYL2XP1, ynone, Px, [23]uint8{0xd9, 0xf9}},
-	{ACMPXCHGB, yrb_mb, Pb, [23]uint8{0x0f, 0xb0}},
-	{ACMPXCHGL, yrl_ml, Px, [23]uint8{0x0f, 0xb1}},
-	{ACMPXCHGW, yrl_ml, Pe, [23]uint8{0x0f, 0xb1}},
-	{ACMPXCHGQ, yrl_ml, Pw, [23]uint8{0x0f, 0xb1}},
-	{ACMPXCHG8B, yscond, Pm, [23]uint8{0xc7, 01}},
-	{AINVD, ynone, Pm, [23]uint8{0x08}},
-	{AINVLPG, ydivb, Pm, [23]uint8{0x01, 07}},
-	{ALFENCE, ynone, Pm, [23]uint8{0xae, 0xe8}},
-	{AMFENCE, ynone, Pm, [23]uint8{0xae, 0xf0}},
-	{AMOVNTIL, yrl_ml, Pm, [23]uint8{0xc3}},
-	{AMOVNTIQ, yrl_ml, Pw, [23]uint8{0x0f, 0xc3}},
-	{ARDMSR, ynone, Pm, [23]uint8{0x32}},
-	{ARDPMC, ynone, Pm, [23]uint8{0x33}},
-	{ARDTSC, ynone, Pm, [23]uint8{0x31}},
-	{ARSM, ynone, Pm, [23]uint8{0xaa}},
-	{ASFENCE, ynone, Pm, [23]uint8{0xae, 0xf8}},
-	{ASYSRET, ynone, Pm, [23]uint8{0x07}},
-	{AWBINVD, ynone, Pm, [23]uint8{0x09}},
-	{AWRMSR, ynone, Pm, [23]uint8{0x30}},
-	{AXADDB, yrb_mb, Pb, [23]uint8{0x0f, 0xc0}},
-	{AXADDL, yrl_ml, Px, [23]uint8{0x0f, 0xc1}},
-	{AXADDQ, yrl_ml, Pw, [23]uint8{0x0f, 0xc1}},
-	{AXADDW, yrl_ml, Pe, [23]uint8{0x0f, 0xc1}},
-	{ACRC32B, ycrc32l, Px, [23]uint8{0xf2, 0x0f, 0x38, 0xf0, 0}},
-	{ACRC32Q, ycrc32l, Pw, [23]uint8{0xf2, 0x0f, 0x38, 0xf1, 0}},
-	{APREFETCHT0, yprefetch, Pm, [23]uint8{0x18, 01}},
-	{APREFETCHT1, yprefetch, Pm, [23]uint8{0x18, 02}},
-	{APREFETCHT2, yprefetch, Pm, [23]uint8{0x18, 03}},
-	{APREFETCHNTA, yprefetch, Pm, [23]uint8{0x18, 00}},
-	{AMOVQL, yrl_ml, Px, [23]uint8{0x89}},
-	{obj.AUNDEF, ynone, Px, [23]uint8{0x0f, 0x0b}},
-	{AAESENC, yaes, Pq, [23]uint8{0x38, 0xdc, 0}},
-	{AAESENCLAST, yaes, Pq, [23]uint8{0x38, 0xdd, 0}},
-	{AAESDEC, yaes, Pq, [23]uint8{0x38, 0xde, 0}},
-	{AAESDECLAST, yaes, Pq, [23]uint8{0x38, 0xdf, 0}},
-	{AAESIMC, yaes, Pq, [23]uint8{0x38, 0xdb, 0}},
-	{AAESKEYGENASSIST, yxshuf, Pq, [23]uint8{0x3a, 0xdf, 0}},
-	{AROUNDPD, yxshuf, Pq, [23]uint8{0x3a, 0x09, 0}},
-	{AROUNDPS, yxshuf, Pq, [23]uint8{0x3a, 0x08, 0}},
-	{AROUNDSD, yxshuf, Pq, [23]uint8{0x3a, 0x0b, 0}},
-	{AROUNDSS, yxshuf, Pq, [23]uint8{0x3a, 0x0a, 0}},
-	{APSHUFD, yxshuf, Pq, [23]uint8{0x70, 0}},
-	{APCLMULQDQ, yxshuf, Pq, [23]uint8{0x3a, 0x44, 0}},
-	{APCMPESTRI, yxshuf, Pq, [23]uint8{0x3a, 0x61, 0}},
-	{AMOVDDUP, yxm, Pf2, [23]uint8{0x12}},
-	{AMOVSHDUP, yxm, Pf3, [23]uint8{0x16}},
-	{AMOVSLDUP, yxm, Pf3, [23]uint8{0x12}},
-
-	{AANDNL, yvex_r3, Pvex, [23]uint8{VEX_LZ_0F38_W0, 0xF2}},
-	{AANDNQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_0F38_W1, 0xF2}},
-	{ABEXTRL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W0, 0xF7}},
-	{ABEXTRQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W1, 0xF7}},
-	{ABZHIL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W0, 0xF5}},
-	{ABZHIQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W1, 0xF5}},
-	{AMULXL, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W0, 0xF6}},
-	{AMULXQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W1, 0xF6}},
-	{APDEPL, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W0, 0xF5}},
-	{APDEPQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W1, 0xF5}},
-	{APEXTL, yvex_r3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W0, 0xF5}},
-	{APEXTQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W1, 0xF5}},
-	{ASARXL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W0, 0xF7}},
-	{ASARXQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W1, 0xF7}},
-	{ASHLXL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_66_0F38_W0, 0xF7}},
-	{ASHLXQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_66_0F38_W1, 0xF7}},
-	{ASHRXL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W0, 0xF7}},
-	{ASHRXQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W1, 0xF7}},
-
-	{AVZEROUPPER, ynone, Px, [23]uint8{0xc5, 0xf8, 0x77}},
-	{AVMOVDQU, yvex_vmovdqa, Pvex, [23]uint8{VEX_128_F3_0F_WIG, 0x6F, VEX_128_F3_0F_WIG, 0x7F, VEX_256_F3_0F_WIG, 0x6F, VEX_256_F3_0F_WIG, 0x7F}},
-	{AVMOVDQA, yvex_vmovdqa, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x6F, VEX_128_66_0F_WIG, 0x7F, VEX_256_66_0F_WIG, 0x6F, VEX_256_66_0F_WIG, 0x7F}},
-	{AVMOVNTDQ, yvex_vmovntdq, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xE7, VEX_256_66_0F_WIG, 0xE7}},
-	{AVPCMPEQB, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x74, VEX_256_66_0F_WIG, 0x74}},
-	{AVPXOR, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xEF, VEX_256_66_0F_WIG, 0xEF}},
-	{AVPMOVMSKB, yvex_xyr2, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xD7, VEX_256_66_0F_WIG, 0xD7}},
-	{AVPAND, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xDB, VEX_256_66_0F_WIG, 0xDB}},
-	{AVPBROADCASTB, yvex_vpbroadcast, Pvex, [23]uint8{VEX_128_66_0F38_W0, 0x78, VEX_256_66_0F38_W0, 0x78}},
-	{AVPTEST, yvex_xy2, Pvex, [23]uint8{VEX_128_66_0F38_WIG, 0x17, VEX_256_66_0F38_WIG, 0x17}},
-	{AVPSHUFB, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F38_WIG, 0x00, VEX_256_66_0F38_WIG, 0x00}},
-	{AVPSHUFD, yvex_xyi3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x70, VEX_256_66_0F_WIG, 0x70, VEX_128_66_0F_WIG, 0x70, VEX_256_66_0F_WIG, 0x70}},
-	{AVPOR, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xeb, VEX_256_66_0F_WIG, 0xeb}},
-	{AVPADDQ, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xd4, VEX_256_66_0F_WIG, 0xd4}},
-	{AVPADDD, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xfe, VEX_256_66_0F_WIG, 0xfe}},
-	{AVPSLLD, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x72, 0xf0, VEX_256_66_0F_WIG, 0x72, 0xf0, VEX_128_66_0F_WIG, 0xf2, VEX_256_66_0F_WIG, 0xf2}},
-	{AVPSLLQ, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xf0, VEX_256_66_0F_WIG, 0x73, 0xf0, VEX_128_66_0F_WIG, 0xf3, VEX_256_66_0F_WIG, 0xf3}},
-	{AVPSRLD, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x72, 0xd0, VEX_256_66_0F_WIG, 0x72, 0xd0, VEX_128_66_0F_WIG, 0xd2, VEX_256_66_0F_WIG, 0xd2}},
-	{AVPSRLQ, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xd0, VEX_256_66_0F_WIG, 0x73, 0xd0, VEX_128_66_0F_WIG, 0xd3, VEX_256_66_0F_WIG, 0xd3}},
-	{AVPSRLDQ, yvex_shift_dq, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xd8, VEX_256_66_0F_WIG, 0x73, 0xd8}},
-	{AVPSLLDQ, yvex_shift_dq, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xf8, VEX_256_66_0F_WIG, 0x73, 0xf8}},
-	{AVPERM2F128, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_W0, 0x06}},
-	{AVPALIGNR, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x0f}},
-	{AVPBLENDD, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x02}},
-	{AVINSERTI128, yvex_xyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x38}},
-	{AVPERM2I128, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x46}},
-	{ARORXL, yvex_ri3, Pvex, [23]uint8{VEX_LZ_F2_0F3A_W0, 0xf0}},
-	{ARORXQ, yvex_ri3, Pvex, [23]uint8{VEX_LZ_F2_0F3A_W1, 0xf0}},
-	{AVBROADCASTSD, yvex_vpbroadcast_sd, Pvex, [23]uint8{VEX_256_66_0F38_W0, 0x19}},
-	{AVBROADCASTSS, yvex_vpbroadcast, Pvex, [23]uint8{VEX_128_66_0F38_W0, 0x18, VEX_256_66_0F38_W0, 0x18}},
-	{AVMOVDDUP, yvex_xy2, Pvex, [23]uint8{VEX_128_F2_0F_WIG, 0x12, VEX_256_F2_0F_WIG, 0x12}},
-	{AVMOVSHDUP, yvex_xy2, Pvex, [23]uint8{VEX_128_F3_0F_WIG, 0x16, VEX_256_F3_0F_WIG, 0x16}},
-	{AVMOVSLDUP, yvex_xy2, Pvex, [23]uint8{VEX_128_F3_0F_WIG, 0x12, VEX_256_F3_0F_WIG, 0x12}},
-
-	{AXACQUIRE, ynone, Px, [23]uint8{0xf2}},
-	{AXRELEASE, ynone, Px, [23]uint8{0xf3}},
-	{AXBEGIN, yxbegin, Px, [23]uint8{0xc7, 0xf8}},
-	{AXABORT, yxabort, Px, [23]uint8{0xc6, 0xf8}},
-	{AXEND, ynone, Px, [23]uint8{0x0f, 01, 0xd5}},
-	{AXTEST, ynone, Px, [23]uint8{0x0f, 01, 0xd6}},
-	{AXGETBV, ynone, Pm, [23]uint8{01, 0xd0}},
-	{obj.AUSEFIELD, ynop, Px, [23]uint8{0, 0}},
-	{obj.ATYPE, nil, 0, [23]uint8{}},
-	{obj.AFUNCDATA, yfuncdata, Px, [23]uint8{0, 0}},
-	{obj.APCDATA, ypcdata, Px, [23]uint8{0, 0}},
-	{obj.AVARDEF, nil, 0, [23]uint8{}},
-	{obj.AVARKILL, nil, 0, [23]uint8{}},
-	{obj.ADUFFCOPY, yduff, Px, [23]uint8{0xe8}},
-	{obj.ADUFFZERO, yduff, Px, [23]uint8{0xe8}},
-	{obj.AEND, nil, 0, [23]uint8{}},
-	{0, nil, 0, [23]uint8{}},
-}
-
-var opindex [(ALAST + 1) & obj.AMask]*Optab
-
-// isextern reports whether s describes an external symbol that must avoid pc-relative addressing.
-// This happens on systems like Solaris that call .so functions instead of system calls.
-// It does not seem to be necessary for any other systems. This is probably working
-// around a Solaris-specific bug that should be fixed differently, but we don't know
-// what that bug is. And this does fix it.
-func isextern(s *obj.LSym) bool {
-	// All the Solaris dynamic imports from libc.so begin with "libc_".
-	return strings.HasPrefix(s.Name, "libc_")
-}
-
-// single-instruction no-ops of various lengths.
-// constructed by hand and disassembled with gdb to verify.
-// see http://www.agner.org/optimize/optimizing_assembly.pdf for discussion.
-var nop = [][16]uint8{
-	{0x90},
-	{0x66, 0x90},
-	{0x0F, 0x1F, 0x00},
-	{0x0F, 0x1F, 0x40, 0x00},
-	{0x0F, 0x1F, 0x44, 0x00, 0x00},
-	{0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
-	{0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
-	{0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
-	{0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
-}
-
-// Native Client rejects the repeated 0x66 prefix.
-// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
-func fillnop(p []byte, n int) {
-	var m int
-
-	for n > 0 {
-		m = n
-		if m > len(nop) {
-			m = len(nop)
-		}
-		copy(p[:m], nop[m-1][:m])
-		p = p[m:]
-		n -= m
-	}
-}
-
-func naclpad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {
-	s.Grow(int64(c) + int64(pad))
-	fillnop(s.P[c:], int(pad))
-	return c + pad
-}
-
-func spadjop(ctxt *obj.Link, p *obj.Prog, l, q obj.As) obj.As {
-	if p.Mode != 64 || ctxt.Arch.PtrSize == 4 {
-		return l
-	}
-	return q
-}
-
-func span6(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	if s.P != nil {
-		return
-	}
-
-	if ycover[0] == 0 {
-		instinit()
-	}
-
-	for p := ctxt.Cursym.Text; p != nil; p = p.Link {
-		if p.To.Type == obj.TYPE_BRANCH {
-			if p.Pcond == nil {
-				p.Pcond = p
-			}
-		}
-		if p.As == AADJSP {
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_SP
-			v := int32(-p.From.Offset)
-			p.From.Offset = int64(v)
-			p.As = spadjop(ctxt, p, AADDL, AADDQ)
-			if v < 0 {
-				p.As = spadjop(ctxt, p, ASUBL, ASUBQ)
-				v = -v
-				p.From.Offset = int64(v)
-			}
-
-			if v == 0 {
-				p.As = obj.ANOP
-			}
-		}
-	}
-
-	var q *obj.Prog
-	var count int64 // rough count of number of instructions
-	for p := s.Text; p != nil; p = p.Link {
-		count++
-		p.Back = 2 // use short branches first time through
-		q = p.Pcond
-		if q != nil && (q.Back&2 != 0) {
-			p.Back |= 1 // backward jump
-			q.Back |= 4 // loop head
-		}
-
-		if p.As == AADJSP {
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_SP
-			v := int32(-p.From.Offset)
-			p.From.Offset = int64(v)
-			p.As = spadjop(ctxt, p, AADDL, AADDQ)
-			if v < 0 {
-				p.As = spadjop(ctxt, p, ASUBL, ASUBQ)
-				v = -v
-				p.From.Offset = int64(v)
-			}
-
-			if v == 0 {
-				p.As = obj.ANOP
-			}
-		}
-	}
-	s.GrowCap(count * 5) // preallocate roughly 5 bytes per instruction
-
-	n := 0
-	var c int32
-	errors := ctxt.Errors
-	var deferreturn *obj.LSym
-	if ctxt.Headtype == obj.Hnacl {
-		deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
-	}
-	for {
-		loop := int32(0)
-		for i := range s.R {
-			s.R[i] = obj.Reloc{}
-		}
-		s.R = s.R[:0]
-		s.P = s.P[:0]
-		c = 0
-		for p := s.Text; p != nil; p = p.Link {
-			if ctxt.Headtype == obj.Hnacl && p.Isize > 0 {
-
-				// pad everything to avoid crossing 32-byte boundary
-				if c>>5 != (c+int32(p.Isize)-1)>>5 {
-					c = naclpad(ctxt, s, c, -c&31)
-				}
-
-				// pad call deferreturn to start at 32-byte boundary
-				// so that subtracting 5 in jmpdefer will jump back
-				// to that boundary and rerun the call.
-				if p.As == obj.ACALL && p.To.Sym == deferreturn {
-					c = naclpad(ctxt, s, c, -c&31)
-				}
-
-				// pad call to end at 32-byte boundary
-				if p.As == obj.ACALL {
-					c = naclpad(ctxt, s, c, -(c+int32(p.Isize))&31)
-				}
-
-				// the linker treats REP and STOSQ as different instructions
-				// but in fact the REP is a prefix on the STOSQ.
-				// make sure REP has room for 2 more bytes, so that
-				// padding will not be inserted before the next instruction.
-				if (p.As == AREP || p.As == AREPN) && c>>5 != (c+3-1)>>5 {
-					c = naclpad(ctxt, s, c, -c&31)
-				}
-
-				// same for LOCK.
-				// various instructions follow; the longest is 4 bytes.
-				// give ourselves 8 bytes so as to avoid surprises.
-				if p.As == ALOCK && c>>5 != (c+8-1)>>5 {
-					c = naclpad(ctxt, s, c, -c&31)
-				}
-			}
-
-			if (p.Back&4 != 0) && c&(LoopAlign-1) != 0 {
-				// pad with NOPs
-				v := -c & (LoopAlign - 1)
-
-				if v <= MaxLoopPad {
-					s.Grow(int64(c) + int64(v))
-					fillnop(s.P[c:], int(v))
-					c += v
-				}
-			}
-
-			p.Pc = int64(c)
-
-			// process forward jumps to p
-			for q = p.Rel; q != nil; q = q.Forwd {
-				v := int32(p.Pc - (q.Pc + int64(q.Isize)))
-				if q.Back&2 != 0 { // short
-					if v > 127 {
-						loop++
-						q.Back ^= 2
-					}
-
-					if q.As == AJCXZL || q.As == AXBEGIN {
-						s.P[q.Pc+2] = byte(v)
-					} else {
-						s.P[q.Pc+1] = byte(v)
-					}
-				} else {
-					binary.LittleEndian.PutUint32(s.P[q.Pc+int64(q.Isize)-4:], uint32(v))
-				}
-			}
-
-			p.Rel = nil
-
-			p.Pc = int64(c)
-			asmins(ctxt, p)
-			m := ctxt.AsmBuf.Len()
-			if int(p.Isize) != m {
-				p.Isize = uint8(m)
-				loop++
-			}
-
-			s.Grow(p.Pc + int64(m))
-			copy(s.P[p.Pc:], ctxt.AsmBuf.Bytes())
-			c += int32(m)
-		}
-
-		n++
-		if n > 20 {
-			ctxt.Diag("span must be looping")
-			log.Fatalf("loop")
-		}
-		if loop == 0 {
-			break
-		}
-		if ctxt.Errors > errors {
-			return
-		}
-	}
-
-	if ctxt.Headtype == obj.Hnacl {
-		c = naclpad(ctxt, s, c, -c&31)
-	}
-
-	s.Size = int64(c)
-
-	if false { /* debug['a'] > 1 */
-		fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
-		var i int
-		for i = 0; i < len(s.P); i++ {
-			fmt.Printf(" %.2x", s.P[i])
-			if i%16 == 15 {
-				fmt.Printf("\n  %.6x", uint(i+1))
-			}
-		}
-
-		if i%16 != 0 {
-			fmt.Printf("\n")
-		}
-
-		for i := 0; i < len(s.R); i++ {
-			r := &s.R[i]
-			fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
-		}
-	}
-}
-
-func instinit() {
-	for i := 1; optab[i].as != 0; i++ {
-		c := optab[i].as
-		if opindex[c&obj.AMask] != nil {
-			log.Fatalf("phase error in optab: %d (%v)", i, c)
-		}
-		opindex[c&obj.AMask] = &optab[i]
-	}
-
-	for i := 0; i < Ymax; i++ {
-		ycover[i*Ymax+i] = 1
-	}
-
-	ycover[Yi0*Ymax+Yi8] = 1
-	ycover[Yi1*Ymax+Yi8] = 1
-	ycover[Yu7*Ymax+Yi8] = 1
-
-	ycover[Yi0*Ymax+Yu7] = 1
-	ycover[Yi1*Ymax+Yu7] = 1
-
-	ycover[Yi0*Ymax+Yu8] = 1
-	ycover[Yi1*Ymax+Yu8] = 1
-	ycover[Yu7*Ymax+Yu8] = 1
-
-	ycover[Yi0*Ymax+Ys32] = 1
-	ycover[Yi1*Ymax+Ys32] = 1
-	ycover[Yu7*Ymax+Ys32] = 1
-	ycover[Yu8*Ymax+Ys32] = 1
-	ycover[Yi8*Ymax+Ys32] = 1
-
-	ycover[Yi0*Ymax+Yi32] = 1
-	ycover[Yi1*Ymax+Yi32] = 1
-	ycover[Yu7*Ymax+Yi32] = 1
-	ycover[Yu8*Ymax+Yi32] = 1
-	ycover[Yi8*Ymax+Yi32] = 1
-	ycover[Ys32*Ymax+Yi32] = 1
-
-	ycover[Yi0*Ymax+Yi64] = 1
-	ycover[Yi1*Ymax+Yi64] = 1
-	ycover[Yu7*Ymax+Yi64] = 1
-	ycover[Yu8*Ymax+Yi64] = 1
-	ycover[Yi8*Ymax+Yi64] = 1
-	ycover[Ys32*Ymax+Yi64] = 1
-	ycover[Yi32*Ymax+Yi64] = 1
-
-	ycover[Yal*Ymax+Yrb] = 1
-	ycover[Ycl*Ymax+Yrb] = 1
-	ycover[Yax*Ymax+Yrb] = 1
-	ycover[Ycx*Ymax+Yrb] = 1
-	ycover[Yrx*Ymax+Yrb] = 1
-	ycover[Yrl*Ymax+Yrb] = 1 // but not Yrl32
-
-	ycover[Ycl*Ymax+Ycx] = 1
-
-	ycover[Yax*Ymax+Yrx] = 1
-	ycover[Ycx*Ymax+Yrx] = 1
-
-	ycover[Yax*Ymax+Yrl] = 1
-	ycover[Ycx*Ymax+Yrl] = 1
-	ycover[Yrx*Ymax+Yrl] = 1
-	ycover[Yrl32*Ymax+Yrl] = 1
-
-	ycover[Yf0*Ymax+Yrf] = 1
-
-	ycover[Yal*Ymax+Ymb] = 1
-	ycover[Ycl*Ymax+Ymb] = 1
-	ycover[Yax*Ymax+Ymb] = 1
-	ycover[Ycx*Ymax+Ymb] = 1
-	ycover[Yrx*Ymax+Ymb] = 1
-	ycover[Yrb*Ymax+Ymb] = 1
-	ycover[Yrl*Ymax+Ymb] = 1 // but not Yrl32
-	ycover[Ym*Ymax+Ymb] = 1
-
-	ycover[Yax*Ymax+Yml] = 1
-	ycover[Ycx*Ymax+Yml] = 1
-	ycover[Yrx*Ymax+Yml] = 1
-	ycover[Yrl*Ymax+Yml] = 1
-	ycover[Yrl32*Ymax+Yml] = 1
-	ycover[Ym*Ymax+Yml] = 1
-
-	ycover[Yax*Ymax+Ymm] = 1
-	ycover[Ycx*Ymax+Ymm] = 1
-	ycover[Yrx*Ymax+Ymm] = 1
-	ycover[Yrl*Ymax+Ymm] = 1
-	ycover[Yrl32*Ymax+Ymm] = 1
-	ycover[Ym*Ymax+Ymm] = 1
-	ycover[Ymr*Ymax+Ymm] = 1
-
-	ycover[Ym*Ymax+Yxm] = 1
-	ycover[Yxr*Ymax+Yxm] = 1
-
-	ycover[Ym*Ymax+Yym] = 1
-	ycover[Yyr*Ymax+Yym] = 1
-
-	for i := 0; i < MAXREG; i++ {
-		reg[i] = -1
-		if i >= REG_AL && i <= REG_R15B {
-			reg[i] = (i - REG_AL) & 7
-			if i >= REG_SPB && i <= REG_DIB {
-				regrex[i] = 0x40
-			}
-			if i >= REG_R8B && i <= REG_R15B {
-				regrex[i] = Rxr | Rxx | Rxb
-			}
-		}
-
-		if i >= REG_AH && i <= REG_BH {
-			reg[i] = 4 + ((i - REG_AH) & 7)
-		}
-		if i >= REG_AX && i <= REG_R15 {
-			reg[i] = (i - REG_AX) & 7
-			if i >= REG_R8 {
-				regrex[i] = Rxr | Rxx | Rxb
-			}
-		}
-
-		if i >= REG_F0 && i <= REG_F0+7 {
-			reg[i] = (i - REG_F0) & 7
-		}
-		if i >= REG_M0 && i <= REG_M0+7 {
-			reg[i] = (i - REG_M0) & 7
-		}
-		if i >= REG_X0 && i <= REG_X0+15 {
-			reg[i] = (i - REG_X0) & 7
-			if i >= REG_X0+8 {
-				regrex[i] = Rxr | Rxx | Rxb
-			}
-		}
-		if i >= REG_Y0 && i <= REG_Y0+15 {
-			reg[i] = (i - REG_Y0) & 7
-			if i >= REG_Y0+8 {
-				regrex[i] = Rxr | Rxx | Rxb
-			}
-		}
-
-		if i >= REG_CR+8 && i <= REG_CR+15 {
-			regrex[i] = Rxr
-		}
-	}
-}
-
-var isAndroid = (obj.GOOS == "android")
-
-func prefixof(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
-	if a.Reg < REG_CS && a.Index < REG_CS { // fast path
-		return 0
-	}
-	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
-		switch a.Reg {
-		case REG_CS:
-			return 0x2e
-
-		case REG_DS:
-			return 0x3e
-
-		case REG_ES:
-			return 0x26
-
-		case REG_FS:
-			return 0x64
-
-		case REG_GS:
-			return 0x65
-
-		case REG_TLS:
-			// NOTE: Systems listed here should be only systems that
-			// support direct TLS references like 8(TLS) implemented as
-			// direct references from FS or GS. Systems that require
-			// the initial-exec model, where you load the TLS base into
-			// a register and then index from that register, do not reach
-			// this code and should not be listed.
-			if p.Mode == 32 {
-				switch ctxt.Headtype {
-				default:
-					if isAndroid {
-						return 0x65 // GS
-					}
-					log.Fatalf("unknown TLS base register for %v", ctxt.Headtype)
-
-				case obj.Hdarwin,
-					obj.Hdragonfly,
-					obj.Hfreebsd,
-					obj.Hnetbsd,
-					obj.Hopenbsd:
-					return 0x65 // GS
-				}
-			}
-
-			switch ctxt.Headtype {
-			default:
-				log.Fatalf("unknown TLS base register for %v", ctxt.Headtype)
-
-			case obj.Hlinux:
-				if isAndroid {
-					return 0x64 // FS
-				}
-
-				if ctxt.Flag_shared {
-					log.Fatalf("unknown TLS base register for linux with -shared")
-				} else {
-					return 0x64 // FS
-				}
-
-			case obj.Hdragonfly,
-				obj.Hfreebsd,
-				obj.Hnetbsd,
-				obj.Hopenbsd,
-				obj.Hsolaris:
-				return 0x64 // FS
-
-			case obj.Hdarwin:
-				return 0x65 // GS
-			}
-		}
-	}
-
-	if p.Mode == 32 {
-		if a.Index == REG_TLS && ctxt.Flag_shared {
-			// When building for inclusion into a shared library, an instruction of the form
-			//     MOVL 0(CX)(TLS*1), AX
-			// becomes
-			//     mov %gs:(%ecx), %eax
-			// which assumes that the correct TLS offset has been loaded into %ecx (today
-			// there is only one TLS variable -- g -- so this is OK). When not building for
-			// a shared library the instruction becomes
-			//     mov 0x0(%ecx), %eax
-			// and a R_TLS_LE relocation, and so does not require a prefix.
-			if a.Offset != 0 {
-				ctxt.Diag("cannot handle non-0 offsets to TLS")
-			}
-			return 0x65 // GS
-		}
-		return 0
-	}
-
-	switch a.Index {
-	case REG_CS:
-		return 0x2e
-
-	case REG_DS:
-		return 0x3e
-
-	case REG_ES:
-		return 0x26
-
-	case REG_TLS:
-		if ctxt.Flag_shared {
-			// When building for inclusion into a shared library, an instruction of the form
-			//     MOV 0(CX)(TLS*1), AX
-			// becomes
-			//     mov %fs:(%rcx), %rax
-			// which assumes that the correct TLS offset has been loaded into %rcx (today
-			// there is only one TLS variable -- g -- so this is OK). When not building for
-			// a shared library the instruction does not require a prefix.
-			if a.Offset != 0 {
-				log.Fatalf("cannot handle non-0 offsets to TLS")
-			}
-			return 0x64
-		}
-
-	case REG_FS:
-		return 0x64
-
-	case REG_GS:
-		return 0x65
-	}
-
-	return 0
-}
-
-func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
-	switch a.Type {
-	case obj.TYPE_NONE:
-		return Ynone
-
-	case obj.TYPE_BRANCH:
-		return Ybr
-
-	case obj.TYPE_INDIR:
-		if a.Name != obj.NAME_NONE && a.Reg == REG_NONE && a.Index == REG_NONE && a.Scale == 0 {
-			return Yindir
-		}
-		return Yxxx
-
-	case obj.TYPE_MEM:
-		if a.Index == REG_SP {
-			// Can't use SP as the index register
-			return Yxxx
-		}
-		if ctxt.Asmode == 64 {
-			switch a.Name {
-			case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF:
-				// Global variables can't use index registers and their
-				// base register is %rip (%rip is encoded as REG_NONE).
-				if a.Reg != REG_NONE || a.Index != REG_NONE || a.Scale != 0 {
-					return Yxxx
-				}
-			case obj.NAME_AUTO, obj.NAME_PARAM:
-				// These names must have a base of SP.  The old compiler
-				// uses 0 for the base register. SSA uses REG_SP.
-				if a.Reg != REG_SP && a.Reg != 0 {
-					return Yxxx
-				}
-			case obj.NAME_NONE:
-				// everything is ok
-			default:
-				// unknown name
-				return Yxxx
-			}
-		}
-		return Ym
-
-	case obj.TYPE_ADDR:
-		switch a.Name {
-		case obj.NAME_GOTREF:
-			ctxt.Diag("unexpected TYPE_ADDR with NAME_GOTREF")
-			return Yxxx
-
-		case obj.NAME_EXTERN,
-			obj.NAME_STATIC:
-			if a.Sym != nil && isextern(a.Sym) || (p.Mode == 32 && !ctxt.Flag_shared) {
-				return Yi32
-			}
-			return Yiauto // use pc-relative addressing
-
-		case obj.NAME_AUTO,
-			obj.NAME_PARAM:
-			return Yiauto
-		}
-
-		// TODO(rsc): DUFFZERO/DUFFCOPY encoding forgot to set a->index
-		// and got Yi32 in an earlier version of this code.
-		// Keep doing that until we fix yduff etc.
-		if a.Sym != nil && strings.HasPrefix(a.Sym.Name, "runtime.duff") {
-			return Yi32
-		}
-
-		if a.Sym != nil || a.Name != obj.NAME_NONE {
-			ctxt.Diag("unexpected addr: %v", obj.Dconv(p, a))
-		}
-		fallthrough
-
-		// fall through
-
-	case obj.TYPE_CONST:
-		if a.Sym != nil {
-			ctxt.Diag("TYPE_CONST with symbol: %v", obj.Dconv(p, a))
-		}
-
-		v := a.Offset
-		if p.Mode == 32 {
-			v = int64(int32(v))
-		}
-		if v == 0 {
-			if p.Mark&PRESERVEFLAGS != 0 {
-				// If PRESERVEFLAGS is set, avoid MOV $0, AX turning into XOR AX, AX.
-				return Yu7
-			}
-			return Yi0
-		}
-		if v == 1 {
-			return Yi1
-		}
-		if v >= 0 && v <= 127 {
-			return Yu7
-		}
-		if v >= 0 && v <= 255 {
-			return Yu8
-		}
-		if v >= -128 && v <= 127 {
-			return Yi8
-		}
-		if p.Mode == 32 {
-			return Yi32
-		}
-		l := int32(v)
-		if int64(l) == v {
-			return Ys32 /* can sign extend */
-		}
-		if v>>32 == 0 {
-			return Yi32 /* unsigned */
-		}
-		return Yi64
-
-	case obj.TYPE_TEXTSIZE:
-		return Ytextsize
-	}
-
-	if a.Type != obj.TYPE_REG {
-		ctxt.Diag("unexpected addr1: type=%d %v", a.Type, obj.Dconv(p, a))
-		return Yxxx
-	}
-
-	switch a.Reg {
-	case REG_AL:
-		return Yal
-
-	case REG_AX:
-		return Yax
-
-		/*
-			case REG_SPB:
-		*/
-	case REG_BPB,
-		REG_SIB,
-		REG_DIB,
-		REG_R8B,
-		REG_R9B,
-		REG_R10B,
-		REG_R11B,
-		REG_R12B,
-		REG_R13B,
-		REG_R14B,
-		REG_R15B:
-		if ctxt.Asmode != 64 {
-			return Yxxx
-		}
-		fallthrough
-
-	case REG_DL,
-		REG_BL,
-		REG_AH,
-		REG_CH,
-		REG_DH,
-		REG_BH:
-		return Yrb
-
-	case REG_CL:
-		return Ycl
-
-	case REG_CX:
-		return Ycx
-
-	case REG_DX, REG_BX:
-		return Yrx
-
-	case REG_R8, /* not really Yrl */
-		REG_R9,
-		REG_R10,
-		REG_R11,
-		REG_R12,
-		REG_R13,
-		REG_R14,
-		REG_R15:
-		if ctxt.Asmode != 64 {
-			return Yxxx
-		}
-		fallthrough
-
-	case REG_SP, REG_BP, REG_SI, REG_DI:
-		if p.Mode == 32 {
-			return Yrl32
-		}
-		return Yrl
-
-	case REG_F0 + 0:
-		return Yf0
-
-	case REG_F0 + 1,
-		REG_F0 + 2,
-		REG_F0 + 3,
-		REG_F0 + 4,
-		REG_F0 + 5,
-		REG_F0 + 6,
-		REG_F0 + 7:
-		return Yrf
-
-	case REG_M0 + 0,
-		REG_M0 + 1,
-		REG_M0 + 2,
-		REG_M0 + 3,
-		REG_M0 + 4,
-		REG_M0 + 5,
-		REG_M0 + 6,
-		REG_M0 + 7:
-		return Ymr
-
-	case REG_X0 + 0,
-		REG_X0 + 1,
-		REG_X0 + 2,
-		REG_X0 + 3,
-		REG_X0 + 4,
-		REG_X0 + 5,
-		REG_X0 + 6,
-		REG_X0 + 7,
-		REG_X0 + 8,
-		REG_X0 + 9,
-		REG_X0 + 10,
-		REG_X0 + 11,
-		REG_X0 + 12,
-		REG_X0 + 13,
-		REG_X0 + 14,
-		REG_X0 + 15:
-		return Yxr
-
-	case REG_Y0 + 0,
-		REG_Y0 + 1,
-		REG_Y0 + 2,
-		REG_Y0 + 3,
-		REG_Y0 + 4,
-		REG_Y0 + 5,
-		REG_Y0 + 6,
-		REG_Y0 + 7,
-		REG_Y0 + 8,
-		REG_Y0 + 9,
-		REG_Y0 + 10,
-		REG_Y0 + 11,
-		REG_Y0 + 12,
-		REG_Y0 + 13,
-		REG_Y0 + 14,
-		REG_Y0 + 15:
-		return Yyr
-
-	case REG_CS:
-		return Ycs
-	case REG_SS:
-		return Yss
-	case REG_DS:
-		return Yds
-	case REG_ES:
-		return Yes
-	case REG_FS:
-		return Yfs
-	case REG_GS:
-		return Ygs
-	case REG_TLS:
-		return Ytls
-
-	case REG_GDTR:
-		return Ygdtr
-	case REG_IDTR:
-		return Yidtr
-	case REG_LDTR:
-		return Yldtr
-	case REG_MSW:
-		return Ymsw
-	case REG_TASK:
-		return Ytask
-
-	case REG_CR + 0:
-		return Ycr0
-	case REG_CR + 1:
-		return Ycr1
-	case REG_CR + 2:
-		return Ycr2
-	case REG_CR + 3:
-		return Ycr3
-	case REG_CR + 4:
-		return Ycr4
-	case REG_CR + 5:
-		return Ycr5
-	case REG_CR + 6:
-		return Ycr6
-	case REG_CR + 7:
-		return Ycr7
-	case REG_CR + 8:
-		return Ycr8
-
-	case REG_DR + 0:
-		return Ydr0
-	case REG_DR + 1:
-		return Ydr1
-	case REG_DR + 2:
-		return Ydr2
-	case REG_DR + 3:
-		return Ydr3
-	case REG_DR + 4:
-		return Ydr4
-	case REG_DR + 5:
-		return Ydr5
-	case REG_DR + 6:
-		return Ydr6
-	case REG_DR + 7:
-		return Ydr7
-
-	case REG_TR + 0:
-		return Ytr0
-	case REG_TR + 1:
-		return Ytr1
-	case REG_TR + 2:
-		return Ytr2
-	case REG_TR + 3:
-		return Ytr3
-	case REG_TR + 4:
-		return Ytr4
-	case REG_TR + 5:
-		return Ytr5
-	case REG_TR + 6:
-		return Ytr6
-	case REG_TR + 7:
-		return Ytr7
-	}
-
-	return Yxxx
-}
-
-func asmidx(ctxt *obj.Link, scale int, index int, base int) {
-	var i int
-
-	switch index {
-	default:
-		goto bad
-
-	case REG_NONE:
-		i = 4 << 3
-		goto bas
-
-	case REG_R8,
-		REG_R9,
-		REG_R10,
-		REG_R11,
-		REG_R12,
-		REG_R13,
-		REG_R14,
-		REG_R15:
-		if ctxt.Asmode != 64 {
-			goto bad
-		}
-		fallthrough
-
-	case REG_AX,
-		REG_CX,
-		REG_DX,
-		REG_BX,
-		REG_BP,
-		REG_SI,
-		REG_DI:
-		i = reg[index] << 3
-	}
-
-	switch scale {
-	default:
-		goto bad
-
-	case 1:
-		break
-
-	case 2:
-		i |= 1 << 6
-
-	case 4:
-		i |= 2 << 6
-
-	case 8:
-		i |= 3 << 6
-	}
-
-bas:
-	switch base {
-	default:
-		goto bad
-
-	case REG_NONE: /* must be mod=00 */
-		i |= 5
-
-	case REG_R8,
-		REG_R9,
-		REG_R10,
-		REG_R11,
-		REG_R12,
-		REG_R13,
-		REG_R14,
-		REG_R15:
-		if ctxt.Asmode != 64 {
-			goto bad
-		}
-		fallthrough
-
-	case REG_AX,
-		REG_CX,
-		REG_DX,
-		REG_BX,
-		REG_SP,
-		REG_BP,
-		REG_SI,
-		REG_DI:
-		i |= reg[base]
-	}
-
-	ctxt.AsmBuf.Put1(byte(i))
-	return
-
-bad:
-	ctxt.Diag("asmidx: bad address %d/%d/%d", scale, index, base)
-	ctxt.AsmBuf.Put1(0)
-	return
-}
-
-func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
-	var rel obj.Reloc
-
-	v := vaddr(ctxt, p, a, &rel)
-	if rel.Siz != 0 {
-		if rel.Siz != 4 {
-			ctxt.Diag("bad reloc")
-		}
-		r := obj.Addrel(ctxt.Cursym)
-		*r = rel
-		r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-	}
-
-	ctxt.AsmBuf.PutInt32(int32(v))
-}
-
-/*
-static void
-relput8(Prog *p, Addr *a)
-{
-	vlong v;
-	Reloc rel, *r;
-
-	v = vaddr(ctxt, p, a, &rel);
-	if(rel.siz != 0) {
-		r = addrel(ctxt->cursym);
-		*r = rel;
-		r->siz = 8;
-		r->off = p->pc + ctxt->andptr - ctxt->and;
-	}
-	put8(ctxt, v);
-}
-*/
-func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {
-	if r != nil {
-		*r = obj.Reloc{}
-	}
-
-	switch a.Name {
-	case obj.NAME_STATIC,
-		obj.NAME_GOTREF,
-		obj.NAME_EXTERN:
-		s := a.Sym
-		if r == nil {
-			ctxt.Diag("need reloc for %v", obj.Dconv(p, a))
-			log.Fatalf("reloc")
-		}
-
-		if a.Name == obj.NAME_GOTREF {
-			r.Siz = 4
-			r.Type = obj.R_GOTPCREL
-		} else if isextern(s) || (p.Mode != 64 && !ctxt.Flag_shared) {
-			r.Siz = 4
-			r.Type = obj.R_ADDR
-		} else {
-			r.Siz = 4
-			r.Type = obj.R_PCREL
-		}
-
-		r.Off = -1 // caller must fill in
-		r.Sym = s
-		r.Add = a.Offset
-
-		return 0
-	}
-
-	if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == REG_TLS {
-		if r == nil {
-			ctxt.Diag("need reloc for %v", obj.Dconv(p, a))
-			log.Fatalf("reloc")
-		}
-
-		if !ctxt.Flag_shared || isAndroid || ctxt.Headtype == obj.Hdarwin {
-			r.Type = obj.R_TLS_LE
-			r.Siz = 4
-			r.Off = -1 // caller must fill in
-			r.Add = a.Offset
-		}
-		return 0
-	}
-
-	return a.Offset
-}
-
-func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) {
-	var base int
-	var rel obj.Reloc
-
-	rex &= 0x40 | Rxr
-	switch {
-	case int64(int32(a.Offset)) == a.Offset:
-		// Offset fits in sign-extended 32 bits.
-	case int64(uint32(a.Offset)) == a.Offset && ctxt.Rexflag&Rxw == 0:
-		// Offset fits in zero-extended 32 bits in a 32-bit instruction.
-		// This is allowed for assembly that wants to use 32-bit hex
-		// constants, e.g. LEAL 0x99999999(AX), AX.
-	default:
-		ctxt.Diag("offset too large in %s", p)
-	}
-	v := int32(a.Offset)
-	rel.Siz = 0
-
-	switch a.Type {
-	case obj.TYPE_ADDR:
-		if a.Name == obj.NAME_NONE {
-			ctxt.Diag("unexpected TYPE_ADDR with NAME_NONE")
-		}
-		if a.Index == REG_TLS {
-			ctxt.Diag("unexpected TYPE_ADDR with index==REG_TLS")
-		}
-		goto bad
-
-	case obj.TYPE_REG:
-		if a.Reg < REG_AL || REG_Y0+15 < a.Reg {
-			goto bad
-		}
-		if v != 0 {
-			goto bad
-		}
-		ctxt.AsmBuf.Put1(byte(3<<6 | reg[a.Reg]<<0 | r<<3))
-		ctxt.Rexflag |= regrex[a.Reg]&(0x40|Rxb) | rex
-		return
-	}
-
-	if a.Type != obj.TYPE_MEM {
-		goto bad
-	}
-
-	if a.Index != REG_NONE && a.Index != REG_TLS {
-		base := int(a.Reg)
-		switch a.Name {
-		case obj.NAME_EXTERN,
-			obj.NAME_GOTREF,
-			obj.NAME_STATIC:
-			if !isextern(a.Sym) && p.Mode == 64 {
-				goto bad
-			}
-			if p.Mode == 32 && ctxt.Flag_shared {
-				// The base register has already been set. It holds the PC
-				// of this instruction returned by a PC-reading thunk.
-				// See obj6.go:rewriteToPcrel.
-			} else {
-				base = REG_NONE
-			}
-			v = int32(vaddr(ctxt, p, a, &rel))
-
-		case obj.NAME_AUTO,
-			obj.NAME_PARAM:
-			base = REG_SP
-		}
-
-		ctxt.Rexflag |= regrex[int(a.Index)]&Rxx | regrex[base]&Rxb | rex
-		if base == REG_NONE {
-			ctxt.AsmBuf.Put1(byte(0<<6 | 4<<0 | r<<3))
-			asmidx(ctxt, int(a.Scale), int(a.Index), base)
-			goto putrelv
-		}
-
-		if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 {
-			ctxt.AsmBuf.Put1(byte(0<<6 | 4<<0 | r<<3))
-			asmidx(ctxt, int(a.Scale), int(a.Index), base)
-			return
-		}
-
-		if v >= -128 && v < 128 && rel.Siz == 0 {
-			ctxt.AsmBuf.Put1(byte(1<<6 | 4<<0 | r<<3))
-			asmidx(ctxt, int(a.Scale), int(a.Index), base)
-			ctxt.AsmBuf.Put1(byte(v))
-			return
-		}
-
-		ctxt.AsmBuf.Put1(byte(2<<6 | 4<<0 | r<<3))
-		asmidx(ctxt, int(a.Scale), int(a.Index), base)
-		goto putrelv
-	}
-
-	base = int(a.Reg)
-	switch a.Name {
-	case obj.NAME_STATIC,
-		obj.NAME_GOTREF,
-		obj.NAME_EXTERN:
-		if a.Sym == nil {
-			ctxt.Diag("bad addr: %v", p)
-		}
-		if p.Mode == 32 && ctxt.Flag_shared {
-			// The base register has already been set. It holds the PC
-			// of this instruction returned by a PC-reading thunk.
-			// See obj6.go:rewriteToPcrel.
-		} else {
-			base = REG_NONE
-		}
-		v = int32(vaddr(ctxt, p, a, &rel))
-
-	case obj.NAME_AUTO,
-		obj.NAME_PARAM:
-		base = REG_SP
-	}
-
-	if base == REG_TLS {
-		v = int32(vaddr(ctxt, p, a, &rel))
-	}
-
-	ctxt.Rexflag |= regrex[base]&Rxb | rex
-	if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS {
-		if (a.Sym == nil || !isextern(a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_GOTREF) || p.Mode != 64 {
-			if a.Name == obj.NAME_GOTREF && (a.Offset != 0 || a.Index != 0 || a.Scale != 0) {
-				ctxt.Diag("%v has offset against gotref", p)
-			}
-			ctxt.AsmBuf.Put1(byte(0<<6 | 5<<0 | r<<3))
-			goto putrelv
-		}
-
-		// temporary
-		ctxt.AsmBuf.Put2(
-			byte(0<<6|4<<0|r<<3), // sib present
-			0<<6|4<<3|5<<0,       // DS:d32
-		)
-		goto putrelv
-	}
-
-	if base == REG_SP || base == REG_R12 {
-		if v == 0 {
-			ctxt.AsmBuf.Put1(byte(0<<6 | reg[base]<<0 | r<<3))
-			asmidx(ctxt, int(a.Scale), REG_NONE, base)
-			return
-		}
-
-		if v >= -128 && v < 128 {
-			ctxt.AsmBuf.Put1(byte(1<<6 | reg[base]<<0 | r<<3))
-			asmidx(ctxt, int(a.Scale), REG_NONE, base)
-			ctxt.AsmBuf.Put1(byte(v))
-			return
-		}
-
-		ctxt.AsmBuf.Put1(byte(2<<6 | reg[base]<<0 | r<<3))
-		asmidx(ctxt, int(a.Scale), REG_NONE, base)
-		goto putrelv
-	}
-
-	if REG_AX <= base && base <= REG_R15 {
-		if a.Index == REG_TLS && !ctxt.Flag_shared {
-			rel = obj.Reloc{}
-			rel.Type = obj.R_TLS_LE
-			rel.Siz = 4
-			rel.Sym = nil
-			rel.Add = int64(v)
-			v = 0
-		}
-
-		if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 {
-			ctxt.AsmBuf.Put1(byte(0<<6 | reg[base]<<0 | r<<3))
-			return
-		}
-
-		if v >= -128 && v < 128 && rel.Siz == 0 {
-			ctxt.AsmBuf.Put2(byte(1<<6|reg[base]<<0|r<<3), byte(v))
-			return
-		}
-
-		ctxt.AsmBuf.Put1(byte(2<<6 | reg[base]<<0 | r<<3))
-		goto putrelv
-	}
-
-	goto bad
-
-putrelv:
-	if rel.Siz != 0 {
-		if rel.Siz != 4 {
-			ctxt.Diag("bad rel")
-			goto bad
-		}
-
-		r := obj.Addrel(ctxt.Cursym)
-		*r = rel
-		r.Off = int32(ctxt.Curp.Pc + int64(ctxt.AsmBuf.Len()))
-	}
-
-	ctxt.AsmBuf.PutInt32(v)
-	return
-
-bad:
-	ctxt.Diag("asmand: bad address %v", obj.Dconv(p, a))
-	return
-}
-
-func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, ra *obj.Addr) {
-	asmandsz(ctxt, p, a, reg[ra.Reg], regrex[ra.Reg], 0)
-}
-
-func asmando(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, o int) {
-	asmandsz(ctxt, p, a, o, 0, 0)
-}
-
-func bytereg(a *obj.Addr, t *uint8) {
-	if a.Type == obj.TYPE_REG && a.Index == REG_NONE && (REG_AX <= a.Reg && a.Reg <= REG_R15) {
-		a.Reg += REG_AL - REG_AX
-		*t = 0
-	}
-}
-
-func unbytereg(a *obj.Addr, t *uint8) {
-	if a.Type == obj.TYPE_REG && a.Index == REG_NONE && (REG_AL <= a.Reg && a.Reg <= REG_R15B) {
-		a.Reg += REG_AX - REG_AL
-		*t = 0
-	}
-}
-
-const (
-	E = 0xff
-)
-
-var ymovtab = []Movtab{
-	/* push */
-	{APUSHL, Ycs, Ynone, Ynone, 0, [4]uint8{0x0e, E, 0, 0}},
-	{APUSHL, Yss, Ynone, Ynone, 0, [4]uint8{0x16, E, 0, 0}},
-	{APUSHL, Yds, Ynone, Ynone, 0, [4]uint8{0x1e, E, 0, 0}},
-	{APUSHL, Yes, Ynone, Ynone, 0, [4]uint8{0x06, E, 0, 0}},
-	{APUSHL, Yfs, Ynone, Ynone, 0, [4]uint8{0x0f, 0xa0, E, 0}},
-	{APUSHL, Ygs, Ynone, Ynone, 0, [4]uint8{0x0f, 0xa8, E, 0}},
-	{APUSHQ, Yfs, Ynone, Ynone, 0, [4]uint8{0x0f, 0xa0, E, 0}},
-	{APUSHQ, Ygs, Ynone, Ynone, 0, [4]uint8{0x0f, 0xa8, E, 0}},
-	{APUSHW, Ycs, Ynone, Ynone, 0, [4]uint8{Pe, 0x0e, E, 0}},
-	{APUSHW, Yss, Ynone, Ynone, 0, [4]uint8{Pe, 0x16, E, 0}},
-	{APUSHW, Yds, Ynone, Ynone, 0, [4]uint8{Pe, 0x1e, E, 0}},
-	{APUSHW, Yes, Ynone, Ynone, 0, [4]uint8{Pe, 0x06, E, 0}},
-	{APUSHW, Yfs, Ynone, Ynone, 0, [4]uint8{Pe, 0x0f, 0xa0, E}},
-	{APUSHW, Ygs, Ynone, Ynone, 0, [4]uint8{Pe, 0x0f, 0xa8, E}},
-
-	/* pop */
-	{APOPL, Ynone, Ynone, Yds, 0, [4]uint8{0x1f, E, 0, 0}},
-	{APOPL, Ynone, Ynone, Yes, 0, [4]uint8{0x07, E, 0, 0}},
-	{APOPL, Ynone, Ynone, Yss, 0, [4]uint8{0x17, E, 0, 0}},
-	{APOPL, Ynone, Ynone, Yfs, 0, [4]uint8{0x0f, 0xa1, E, 0}},
-	{APOPL, Ynone, Ynone, Ygs, 0, [4]uint8{0x0f, 0xa9, E, 0}},
-	{APOPQ, Ynone, Ynone, Yfs, 0, [4]uint8{0x0f, 0xa1, E, 0}},
-	{APOPQ, Ynone, Ynone, Ygs, 0, [4]uint8{0x0f, 0xa9, E, 0}},
-	{APOPW, Ynone, Ynone, Yds, 0, [4]uint8{Pe, 0x1f, E, 0}},
-	{APOPW, Ynone, Ynone, Yes, 0, [4]uint8{Pe, 0x07, E, 0}},
-	{APOPW, Ynone, Ynone, Yss, 0, [4]uint8{Pe, 0x17, E, 0}},
-	{APOPW, Ynone, Ynone, Yfs, 0, [4]uint8{Pe, 0x0f, 0xa1, E}},
-	{APOPW, Ynone, Ynone, Ygs, 0, [4]uint8{Pe, 0x0f, 0xa9, E}},
-
-	/* mov seg */
-	{AMOVW, Yes, Ynone, Yml, 1, [4]uint8{0x8c, 0, 0, 0}},
-	{AMOVW, Ycs, Ynone, Yml, 1, [4]uint8{0x8c, 1, 0, 0}},
-	{AMOVW, Yss, Ynone, Yml, 1, [4]uint8{0x8c, 2, 0, 0}},
-	{AMOVW, Yds, Ynone, Yml, 1, [4]uint8{0x8c, 3, 0, 0}},
-	{AMOVW, Yfs, Ynone, Yml, 1, [4]uint8{0x8c, 4, 0, 0}},
-	{AMOVW, Ygs, Ynone, Yml, 1, [4]uint8{0x8c, 5, 0, 0}},
-	{AMOVW, Yml, Ynone, Yes, 2, [4]uint8{0x8e, 0, 0, 0}},
-	{AMOVW, Yml, Ynone, Ycs, 2, [4]uint8{0x8e, 1, 0, 0}},
-	{AMOVW, Yml, Ynone, Yss, 2, [4]uint8{0x8e, 2, 0, 0}},
-	{AMOVW, Yml, Ynone, Yds, 2, [4]uint8{0x8e, 3, 0, 0}},
-	{AMOVW, Yml, Ynone, Yfs, 2, [4]uint8{0x8e, 4, 0, 0}},
-	{AMOVW, Yml, Ynone, Ygs, 2, [4]uint8{0x8e, 5, 0, 0}},
-
-	/* mov cr */
-	{AMOVL, Ycr0, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 0, 0}},
-	{AMOVL, Ycr2, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 2, 0}},
-	{AMOVL, Ycr3, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 3, 0}},
-	{AMOVL, Ycr4, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 4, 0}},
-	{AMOVL, Ycr8, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 8, 0}},
-	{AMOVQ, Ycr0, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 0, 0}},
-	{AMOVQ, Ycr2, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 2, 0}},
-	{AMOVQ, Ycr3, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 3, 0}},
-	{AMOVQ, Ycr4, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 4, 0}},
-	{AMOVQ, Ycr8, Ynone, Yml, 3, [4]uint8{0x0f, 0x20, 8, 0}},
-	{AMOVL, Yml, Ynone, Ycr0, 4, [4]uint8{0x0f, 0x22, 0, 0}},
-	{AMOVL, Yml, Ynone, Ycr2, 4, [4]uint8{0x0f, 0x22, 2, 0}},
-	{AMOVL, Yml, Ynone, Ycr3, 4, [4]uint8{0x0f, 0x22, 3, 0}},
-	{AMOVL, Yml, Ynone, Ycr4, 4, [4]uint8{0x0f, 0x22, 4, 0}},
-	{AMOVL, Yml, Ynone, Ycr8, 4, [4]uint8{0x0f, 0x22, 8, 0}},
-	{AMOVQ, Yml, Ynone, Ycr0, 4, [4]uint8{0x0f, 0x22, 0, 0}},
-	{AMOVQ, Yml, Ynone, Ycr2, 4, [4]uint8{0x0f, 0x22, 2, 0}},
-	{AMOVQ, Yml, Ynone, Ycr3, 4, [4]uint8{0x0f, 0x22, 3, 0}},
-	{AMOVQ, Yml, Ynone, Ycr4, 4, [4]uint8{0x0f, 0x22, 4, 0}},
-	{AMOVQ, Yml, Ynone, Ycr8, 4, [4]uint8{0x0f, 0x22, 8, 0}},
-
-	/* mov dr */
-	{AMOVL, Ydr0, Ynone, Yml, 3, [4]uint8{0x0f, 0x21, 0, 0}},
-	{AMOVL, Ydr6, Ynone, Yml, 3, [4]uint8{0x0f, 0x21, 6, 0}},
-	{AMOVL, Ydr7, Ynone, Yml, 3, [4]uint8{0x0f, 0x21, 7, 0}},
-	{AMOVQ, Ydr0, Ynone, Yml, 3, [4]uint8{0x0f, 0x21, 0, 0}},
-	{AMOVQ, Ydr6, Ynone, Yml, 3, [4]uint8{0x0f, 0x21, 6, 0}},
-	{AMOVQ, Ydr7, Ynone, Yml, 3, [4]uint8{0x0f, 0x21, 7, 0}},
-	{AMOVL, Yml, Ynone, Ydr0, 4, [4]uint8{0x0f, 0x23, 0, 0}},
-	{AMOVL, Yml, Ynone, Ydr6, 4, [4]uint8{0x0f, 0x23, 6, 0}},
-	{AMOVL, Yml, Ynone, Ydr7, 4, [4]uint8{0x0f, 0x23, 7, 0}},
-	{AMOVQ, Yml, Ynone, Ydr0, 4, [4]uint8{0x0f, 0x23, 0, 0}},
-	{AMOVQ, Yml, Ynone, Ydr6, 4, [4]uint8{0x0f, 0x23, 6, 0}},
-	{AMOVQ, Yml, Ynone, Ydr7, 4, [4]uint8{0x0f, 0x23, 7, 0}},
-
-	/* mov tr */
-	{AMOVL, Ytr6, Ynone, Yml, 3, [4]uint8{0x0f, 0x24, 6, 0}},
-	{AMOVL, Ytr7, Ynone, Yml, 3, [4]uint8{0x0f, 0x24, 7, 0}},
-	{AMOVL, Yml, Ynone, Ytr6, 4, [4]uint8{0x0f, 0x26, 6, E}},
-	{AMOVL, Yml, Ynone, Ytr7, 4, [4]uint8{0x0f, 0x26, 7, E}},
-
-	/* lgdt, sgdt, lidt, sidt */
-	{AMOVL, Ym, Ynone, Ygdtr, 4, [4]uint8{0x0f, 0x01, 2, 0}},
-	{AMOVL, Ygdtr, Ynone, Ym, 3, [4]uint8{0x0f, 0x01, 0, 0}},
-	{AMOVL, Ym, Ynone, Yidtr, 4, [4]uint8{0x0f, 0x01, 3, 0}},
-	{AMOVL, Yidtr, Ynone, Ym, 3, [4]uint8{0x0f, 0x01, 1, 0}},
-	{AMOVQ, Ym, Ynone, Ygdtr, 4, [4]uint8{0x0f, 0x01, 2, 0}},
-	{AMOVQ, Ygdtr, Ynone, Ym, 3, [4]uint8{0x0f, 0x01, 0, 0}},
-	{AMOVQ, Ym, Ynone, Yidtr, 4, [4]uint8{0x0f, 0x01, 3, 0}},
-	{AMOVQ, Yidtr, Ynone, Ym, 3, [4]uint8{0x0f, 0x01, 1, 0}},
-
-	/* lldt, sldt */
-	{AMOVW, Yml, Ynone, Yldtr, 4, [4]uint8{0x0f, 0x00, 2, 0}},
-	{AMOVW, Yldtr, Ynone, Yml, 3, [4]uint8{0x0f, 0x00, 0, 0}},
-
-	/* lmsw, smsw */
-	{AMOVW, Yml, Ynone, Ymsw, 4, [4]uint8{0x0f, 0x01, 6, 0}},
-	{AMOVW, Ymsw, Ynone, Yml, 3, [4]uint8{0x0f, 0x01, 4, 0}},
-
-	/* ltr, str */
-	{AMOVW, Yml, Ynone, Ytask, 4, [4]uint8{0x0f, 0x00, 3, 0}},
-	{AMOVW, Ytask, Ynone, Yml, 3, [4]uint8{0x0f, 0x00, 1, 0}},
-
-	/* load full pointer - unsupported
-	Movtab{AMOVL, Yml, Ycol, 5, [4]uint8{0, 0, 0, 0}},
-	Movtab{AMOVW, Yml, Ycol, 5, [4]uint8{Pe, 0, 0, 0}},
-	*/
-
-	/* double shift */
-	{ASHLL, Yi8, Yrl, Yml, 6, [4]uint8{0xa4, 0xa5, 0, 0}},
-	{ASHLL, Ycl, Yrl, Yml, 6, [4]uint8{0xa4, 0xa5, 0, 0}},
-	{ASHLL, Ycx, Yrl, Yml, 6, [4]uint8{0xa4, 0xa5, 0, 0}},
-	{ASHRL, Yi8, Yrl, Yml, 6, [4]uint8{0xac, 0xad, 0, 0}},
-	{ASHRL, Ycl, Yrl, Yml, 6, [4]uint8{0xac, 0xad, 0, 0}},
-	{ASHRL, Ycx, Yrl, Yml, 6, [4]uint8{0xac, 0xad, 0, 0}},
-	{ASHLQ, Yi8, Yrl, Yml, 6, [4]uint8{Pw, 0xa4, 0xa5, 0}},
-	{ASHLQ, Ycl, Yrl, Yml, 6, [4]uint8{Pw, 0xa4, 0xa5, 0}},
-	{ASHLQ, Ycx, Yrl, Yml, 6, [4]uint8{Pw, 0xa4, 0xa5, 0}},
-	{ASHRQ, Yi8, Yrl, Yml, 6, [4]uint8{Pw, 0xac, 0xad, 0}},
-	{ASHRQ, Ycl, Yrl, Yml, 6, [4]uint8{Pw, 0xac, 0xad, 0}},
-	{ASHRQ, Ycx, Yrl, Yml, 6, [4]uint8{Pw, 0xac, 0xad, 0}},
-	{ASHLW, Yi8, Yrl, Yml, 6, [4]uint8{Pe, 0xa4, 0xa5, 0}},
-	{ASHLW, Ycl, Yrl, Yml, 6, [4]uint8{Pe, 0xa4, 0xa5, 0}},
-	{ASHLW, Ycx, Yrl, Yml, 6, [4]uint8{Pe, 0xa4, 0xa5, 0}},
-	{ASHRW, Yi8, Yrl, Yml, 6, [4]uint8{Pe, 0xac, 0xad, 0}},
-	{ASHRW, Ycl, Yrl, Yml, 6, [4]uint8{Pe, 0xac, 0xad, 0}},
-	{ASHRW, Ycx, Yrl, Yml, 6, [4]uint8{Pe, 0xac, 0xad, 0}},
-
-	/* load TLS base */
-	{AMOVL, Ytls, Ynone, Yrl, 7, [4]uint8{0, 0, 0, 0}},
-	{AMOVQ, Ytls, Ynone, Yrl, 7, [4]uint8{0, 0, 0, 0}},
-	{0, 0, 0, 0, 0, [4]uint8{}},
-}
-
-func isax(a *obj.Addr) bool {
-	switch a.Reg {
-	case REG_AX, REG_AL, REG_AH:
-		return true
-	}
-
-	if a.Index == REG_AX {
-		return true
-	}
-	return false
-}
-
-func subreg(p *obj.Prog, from int, to int) {
-	if false { /* debug['Q'] */
-		fmt.Printf("\n%v\ts/%v/%v/\n", p, Rconv(from), Rconv(to))
-	}
-
-	if int(p.From.Reg) == from {
-		p.From.Reg = int16(to)
-		p.Ft = 0
-	}
-
-	if int(p.To.Reg) == from {
-		p.To.Reg = int16(to)
-		p.Tt = 0
-	}
-
-	if int(p.From.Index) == from {
-		p.From.Index = int16(to)
-		p.Ft = 0
-	}
-
-	if int(p.To.Index) == from {
-		p.To.Index = int16(to)
-		p.Tt = 0
-	}
-
-	if false { /* debug['Q'] */
-		fmt.Printf("%v\n", p)
-	}
-}
-
-func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
-	switch op {
-	case Pm, Pe, Pf2, Pf3:
-		if osize != 1 {
-			if op != Pm {
-				ctxt.AsmBuf.Put1(byte(op))
-			}
-			ctxt.AsmBuf.Put1(Pm)
-			z++
-			op = int(o.op[z])
-			break
-		}
-		fallthrough
-
-	default:
-		if ctxt.AsmBuf.Len() == 0 || ctxt.AsmBuf.Last() != Pm {
-			ctxt.AsmBuf.Put1(Pm)
-		}
-	}
-
-	ctxt.AsmBuf.Put1(byte(op))
-	return z
-}
-
-var bpduff1 = []byte{
-	0x48, 0x89, 0x6c, 0x24, 0xf0, // MOVQ BP, -16(SP)
-	0x48, 0x8d, 0x6c, 0x24, 0xf0, // LEAQ -16(SP), BP
-}
-
-var bpduff2 = []byte{
-	0x48, 0x8b, 0x6d, 0x00, // MOVQ 0(BP), BP
-}
-
-// Emit VEX prefix and opcode byte.
-// The three addresses are the r/m, vvvv, and reg fields.
-// The reg and rm arguments appear in the same order as the
-// arguments to asmand, which typically follows the call to asmvex.
-// The final two arguments are the VEX prefix (see encoding above)
-// and the opcode byte.
-// For details about vex prefix see:
-// https://en.wikipedia.org/wiki/VEX_prefix#Technical_description
-func asmvex(ctxt *obj.Link, rm, v, r *obj.Addr, vex, opcode uint8) {
-	ctxt.Vexflag = 1
-	rexR := 0
-	if r != nil {
-		rexR = regrex[r.Reg] & Rxr
-	}
-	rexB := 0
-	rexX := 0
-	if rm != nil {
-		rexB = regrex[rm.Reg] & Rxb
-		rexX = regrex[rm.Index] & Rxx
-	}
-	vexM := (vex >> 3) & 0xF
-	vexWLP := vex & 0x87
-	vexV := byte(0)
-	if v != nil {
-		vexV = byte(reg[v.Reg]|(regrex[v.Reg]&Rxr)<<1) & 0xF
-	}
-	vexV ^= 0xF
-	if vexM == 1 && (rexX|rexB) == 0 && vex&vexW1 == 0 {
-		// Can use 2-byte encoding.
-		ctxt.AsmBuf.Put2(0xc5, byte(rexR<<5)^0x80|vexV<<3|vexWLP)
-	} else {
-		// Must use 3-byte encoding.
-		ctxt.AsmBuf.Put3(0xc4,
-			(byte(rexR|rexX|rexB)<<5)^0xE0|vexM,
-			vexV<<3|vexWLP,
-		)
-	}
-	ctxt.AsmBuf.Put1(opcode)
-}
-
-func doasm(ctxt *obj.Link, p *obj.Prog) {
-	ctxt.Curp = p // TODO
-
-	o := opindex[p.As&obj.AMask]
-
-	if o == nil {
-		ctxt.Diag("asmins: missing op %v", p)
-		return
-	}
-
-	pre := prefixof(ctxt, p, &p.From)
-	if pre != 0 {
-		ctxt.AsmBuf.Put1(byte(pre))
-	}
-	pre = prefixof(ctxt, p, &p.To)
-	if pre != 0 {
-		ctxt.AsmBuf.Put1(byte(pre))
-	}
-
-	// TODO(rsc): This special case is for SHRQ $3, AX:DX,
-	// which encodes as SHRQ $32(DX*0), AX.
-	// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
-	// Change encoding generated by assemblers and compilers and remove.
-	if (p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_REG) && p.From.Index != REG_NONE && p.From.Scale == 0 {
-		p.From3 = new(obj.Addr)
-		p.From3.Type = obj.TYPE_REG
-		p.From3.Reg = p.From.Index
-		p.From.Index = 0
-	}
-
-	// TODO(rsc): This special case is for PINSRQ etc, CMPSD etc.
-	// Change encoding generated by assemblers and compilers (if any) and remove.
-	switch p.As {
-	case AIMUL3Q, APEXTRW, APINSRW, APINSRD, APINSRQ, APSHUFHW, APSHUFL, APSHUFW, ASHUFPD, ASHUFPS, AAESKEYGENASSIST, APSHUFD, APCLMULQDQ:
-		if p.From3Type() == obj.TYPE_NONE {
-			p.From3 = new(obj.Addr)
-			*p.From3 = p.From
-			p.From = obj.Addr{}
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = p.To.Offset
-			p.To.Offset = 0
-		}
-	case ACMPSD, ACMPSS, ACMPPS, ACMPPD:
-		if p.From3Type() == obj.TYPE_NONE {
-			p.From3 = new(obj.Addr)
-			*p.From3 = p.To
-			p.To = obj.Addr{}
-			p.To.Type = obj.TYPE_CONST
-			p.To.Offset = p.From3.Offset
-			p.From3.Offset = 0
-		}
-	}
-
-	if p.Ft == 0 {
-		p.Ft = uint8(oclass(ctxt, p, &p.From))
-	}
-	if p.Tt == 0 {
-		p.Tt = uint8(oclass(ctxt, p, &p.To))
-	}
-
-	ft := int(p.Ft) * Ymax
-	f3t := Ynone * Ymax
-	if p.From3 != nil {
-		f3t = oclass(ctxt, p, p.From3) * Ymax
-	}
-	tt := int(p.Tt) * Ymax
-
-	xo := obj.Bool2int(o.op[0] == 0x0f)
-	z := 0
-	var a *obj.Addr
-	var l int
-	var op int
-	var q *obj.Prog
-	var r *obj.Reloc
-	var rel obj.Reloc
-	var v int64
-	for i := range o.ytab {
-		yt := &o.ytab[i]
-		if ycover[ft+int(yt.from)] != 0 && ycover[f3t+int(yt.from3)] != 0 && ycover[tt+int(yt.to)] != 0 {
-			switch o.prefix {
-			case Px1: /* first option valid only in 32-bit mode */
-				if ctxt.Mode == 64 && z == 0 {
-					z += int(yt.zoffset) + xo
-					continue
-				}
-			case Pq: /* 16 bit escape and opcode escape */
-				ctxt.AsmBuf.Put2(Pe, Pm)
-
-			case Pq3: /* 16 bit escape and opcode escape + REX.W */
-				ctxt.Rexflag |= Pw
-				ctxt.AsmBuf.Put2(Pe, Pm)
-
-			case Pq4: /*  66 0F 38 */
-				ctxt.AsmBuf.Put3(0x66, 0x0F, 0x38)
-
-			case Pf2, /* xmm opcode escape */
-				Pf3:
-				ctxt.AsmBuf.Put2(o.prefix, Pm)
-
-			case Pef3:
-				ctxt.AsmBuf.Put3(Pe, Pf3, Pm)
-
-			case Pfw: /* xmm opcode escape + REX.W */
-				ctxt.Rexflag |= Pw
-				ctxt.AsmBuf.Put2(Pf3, Pm)
-
-			case Pm: /* opcode escape */
-				ctxt.AsmBuf.Put1(Pm)
-
-			case Pe: /* 16 bit escape */
-				ctxt.AsmBuf.Put1(Pe)
-
-			case Pw: /* 64-bit escape */
-				if p.Mode != 64 {
-					ctxt.Diag("asmins: illegal 64: %v", p)
-				}
-				ctxt.Rexflag |= Pw
-
-			case Pw8: /* 64-bit escape if z >= 8 */
-				if z >= 8 {
-					if p.Mode != 64 {
-						ctxt.Diag("asmins: illegal 64: %v", p)
-					}
-					ctxt.Rexflag |= Pw
-				}
-
-			case Pb: /* botch */
-				if p.Mode != 64 && (isbadbyte(&p.From) || isbadbyte(&p.To)) {
-					goto bad
-				}
-				// NOTE(rsc): This is probably safe to do always,
-				// but when enabled it chooses different encodings
-				// than the old cmd/internal/obj/i386 code did,
-				// which breaks our "same bits out" checks.
-				// In particular, CMPB AX, $0 encodes as 80 f8 00
-				// in the original obj/i386, and it would encode
-				// (using a valid, shorter form) as 3c 00 if we enabled
-				// the call to bytereg here.
-				if p.Mode == 64 {
-					bytereg(&p.From, &p.Ft)
-					bytereg(&p.To, &p.Tt)
-				}
-
-			case P32: /* 32 bit but illegal if 64-bit mode */
-				if p.Mode == 64 {
-					ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
-				}
-
-			case Py: /* 64-bit only, no prefix */
-				if p.Mode != 64 {
-					ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
-				}
-
-			case Py1: /* 64-bit only if z < 1, no prefix */
-				if z < 1 && p.Mode != 64 {
-					ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
-				}
-
-			case Py3: /* 64-bit only if z < 3, no prefix */
-				if z < 3 && p.Mode != 64 {
-					ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
-				}
-			}
-
-			if z >= len(o.op) {
-				log.Fatalf("asmins bad table %v", p)
-			}
-			op = int(o.op[z])
-			// In vex case 0x0f is actually VEX_256_F2_0F_WIG
-			if op == 0x0f && o.prefix != Pvex {
-				ctxt.AsmBuf.Put1(byte(op))
-				z++
-				op = int(o.op[z])
-			}
-
-			switch yt.zcase {
-			default:
-				ctxt.Diag("asmins: unknown z %d %v", yt.zcase, p)
-				return
-
-			case Zpseudo:
-				break
-
-			case Zlit:
-				for ; ; z++ {
-					op = int(o.op[z])
-					if op == 0 {
-						break
-					}
-					ctxt.AsmBuf.Put1(byte(op))
-				}
-
-			case Zlitm_r:
-				for ; ; z++ {
-					op = int(o.op[z])
-					if op == 0 {
-						break
-					}
-					ctxt.AsmBuf.Put1(byte(op))
-				}
-				asmand(ctxt, p, &p.From, &p.To)
-
-			case Zmb_r:
-				bytereg(&p.From, &p.Ft)
-				fallthrough
-
-			case Zm_r:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmand(ctxt, p, &p.From, &p.To)
-
-			case Zm2_r:
-				ctxt.AsmBuf.Put2(byte(op), o.op[z+1])
-				asmand(ctxt, p, &p.From, &p.To)
-
-			case Zm_r_xm:
-				mediaop(ctxt, o, op, int(yt.zoffset), z)
-				asmand(ctxt, p, &p.From, &p.To)
-
-			case Zm_r_xm_nr:
-				ctxt.Rexflag = 0
-				mediaop(ctxt, o, op, int(yt.zoffset), z)
-				asmand(ctxt, p, &p.From, &p.To)
-
-			case Zm_r_i_xm:
-				mediaop(ctxt, o, op, int(yt.zoffset), z)
-				asmand(ctxt, p, &p.From, p.From3)
-				ctxt.AsmBuf.Put1(byte(p.To.Offset))
-
-			case Zibm_r, Zibr_m:
-				for {
-					tmp1 := z
-					z++
-					op = int(o.op[tmp1])
-					if op == 0 {
-						break
-					}
-					ctxt.AsmBuf.Put1(byte(op))
-				}
-				if yt.zcase == Zibr_m {
-					asmand(ctxt, p, &p.To, p.From3)
-				} else {
-					asmand(ctxt, p, p.From3, &p.To)
-				}
-				ctxt.AsmBuf.Put1(byte(p.From.Offset))
-
-			case Zaut_r:
-				ctxt.AsmBuf.Put1(0x8d) // leal
-				if p.From.Type != obj.TYPE_ADDR {
-					ctxt.Diag("asmins: Zaut sb type ADDR")
-				}
-				p.From.Type = obj.TYPE_MEM
-				asmand(ctxt, p, &p.From, &p.To)
-				p.From.Type = obj.TYPE_ADDR
-
-			case Zm_o:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmando(ctxt, p, &p.From, int(o.op[z+1]))
-
-			case Zr_m:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmand(ctxt, p, &p.To, &p.From)
-
-			case Zvex_rm_v_r:
-				asmvex(ctxt, &p.From, p.From3, &p.To, o.op[z], o.op[z+1])
-				asmand(ctxt, p, &p.From, &p.To)
-
-			case Zvex_i_r_v:
-				asmvex(ctxt, p.From3, &p.To, nil, o.op[z], o.op[z+1])
-				regnum := byte(0x7)
-				if p.From3.Reg >= REG_X0 && p.From3.Reg <= REG_X15 {
-					regnum &= byte(p.From3.Reg - REG_X0)
-				} else {
-					regnum &= byte(p.From3.Reg - REG_Y0)
-				}
-				ctxt.AsmBuf.Put1(byte(o.op[z+2]) | regnum)
-				ctxt.AsmBuf.Put1(byte(p.From.Offset))
-
-			case Zvex_i_rm_v_r:
-				asmvex(ctxt, &p.From, p.From3, &p.To, o.op[z], o.op[z+1])
-				asmand(ctxt, p, &p.From, &p.To)
-				ctxt.AsmBuf.Put1(byte(p.From3.Offset))
-
-			case Zvex_i_rm_r:
-				asmvex(ctxt, p.From3, nil, &p.To, o.op[z], o.op[z+1])
-				asmand(ctxt, p, p.From3, &p.To)
-				ctxt.AsmBuf.Put1(byte(p.From.Offset))
-
-			case Zvex_v_rm_r:
-				asmvex(ctxt, p.From3, &p.From, &p.To, o.op[z], o.op[z+1])
-				asmand(ctxt, p, p.From3, &p.To)
-
-			case Zvex_r_v_rm:
-				asmvex(ctxt, &p.To, p.From3, &p.From, o.op[z], o.op[z+1])
-				asmand(ctxt, p, &p.To, &p.From)
-
-			case Zr_m_xm:
-				mediaop(ctxt, o, op, int(yt.zoffset), z)
-				asmand(ctxt, p, &p.To, &p.From)
-
-			case Zr_m_xm_nr:
-				ctxt.Rexflag = 0
-				mediaop(ctxt, o, op, int(yt.zoffset), z)
-				asmand(ctxt, p, &p.To, &p.From)
-
-			case Zo_m:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmando(ctxt, p, &p.To, int(o.op[z+1]))
-
-			case Zcallindreg:
-				r = obj.Addrel(ctxt.Cursym)
-				r.Off = int32(p.Pc)
-				r.Type = obj.R_CALLIND
-				r.Siz = 0
-				fallthrough
-
-			case Zo_m64:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
-
-			case Zm_ibo:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmando(ctxt, p, &p.From, int(o.op[z+1]))
-				ctxt.AsmBuf.Put1(byte(vaddr(ctxt, p, &p.To, nil)))
-
-			case Zibo_m:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmando(ctxt, p, &p.To, int(o.op[z+1]))
-				ctxt.AsmBuf.Put1(byte(vaddr(ctxt, p, &p.From, nil)))
-
-			case Zibo_m_xm:
-				z = mediaop(ctxt, o, op, int(yt.zoffset), z)
-				asmando(ctxt, p, &p.To, int(o.op[z+1]))
-				ctxt.AsmBuf.Put1(byte(vaddr(ctxt, p, &p.From, nil)))
-
-			case Z_ib, Zib_:
-				if yt.zcase == Zib_ {
-					a = &p.From
-				} else {
-					a = &p.To
-				}
-				ctxt.AsmBuf.Put1(byte(op))
-				if p.As == AXABORT {
-					ctxt.AsmBuf.Put1(o.op[z+1])
-				}
-				ctxt.AsmBuf.Put1(byte(vaddr(ctxt, p, a, nil)))
-
-			case Zib_rp:
-				ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
-				ctxt.AsmBuf.Put2(byte(op+reg[p.To.Reg]), byte(vaddr(ctxt, p, &p.From, nil)))
-
-			case Zil_rp:
-				ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
-				ctxt.AsmBuf.Put1(byte(op + reg[p.To.Reg]))
-				if o.prefix == Pe {
-					v = vaddr(ctxt, p, &p.From, nil)
-					ctxt.AsmBuf.PutInt16(int16(v))
-				} else {
-					relput4(ctxt, p, &p.From)
-				}
-
-			case Zo_iw:
-				ctxt.AsmBuf.Put1(byte(op))
-				if p.From.Type != obj.TYPE_NONE {
-					v = vaddr(ctxt, p, &p.From, nil)
-					ctxt.AsmBuf.PutInt16(int16(v))
-				}
-
-			case Ziq_rp:
-				v = vaddr(ctxt, p, &p.From, &rel)
-				l = int(v >> 32)
-				if l == 0 && rel.Siz != 8 {
-					//p->mark |= 0100;
-					//print("zero: %llux %v\n", v, p);
-					ctxt.Rexflag &^= (0x40 | Rxw)
-
-					ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
-					ctxt.AsmBuf.Put1(byte(0xb8 + reg[p.To.Reg]))
-					if rel.Type != 0 {
-						r = obj.Addrel(ctxt.Cursym)
-						*r = rel
-						r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-					}
-
-					ctxt.AsmBuf.PutInt32(int32(v))
-				} else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
-
-					//p->mark |= 0100;
-					//print("sign: %llux %v\n", v, p);
-					ctxt.AsmBuf.Put1(0xc7)
-					asmando(ctxt, p, &p.To, 0)
-
-					ctxt.AsmBuf.PutInt32(int32(v)) // need all 8
-				} else {
-					//print("all: %llux %v\n", v, p);
-					ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
-					ctxt.AsmBuf.Put1(byte(op + reg[p.To.Reg]))
-					if rel.Type != 0 {
-						r = obj.Addrel(ctxt.Cursym)
-						*r = rel
-						r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-					}
-
-					ctxt.AsmBuf.PutInt64(v)
-				}
-
-			case Zib_rr:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmand(ctxt, p, &p.To, &p.To)
-				ctxt.AsmBuf.Put1(byte(vaddr(ctxt, p, &p.From, nil)))
-
-			case Z_il, Zil_:
-				if yt.zcase == Zil_ {
-					a = &p.From
-				} else {
-					a = &p.To
-				}
-				ctxt.AsmBuf.Put1(byte(op))
-				if o.prefix == Pe {
-					v = vaddr(ctxt, p, a, nil)
-					ctxt.AsmBuf.PutInt16(int16(v))
-				} else {
-					relput4(ctxt, p, a)
-				}
-
-			case Zm_ilo, Zilo_m:
-				ctxt.AsmBuf.Put1(byte(op))
-				if yt.zcase == Zilo_m {
-					a = &p.From
-					asmando(ctxt, p, &p.To, int(o.op[z+1]))
-				} else {
-					a = &p.To
-					asmando(ctxt, p, &p.From, int(o.op[z+1]))
-				}
-
-				if o.prefix == Pe {
-					v = vaddr(ctxt, p, a, nil)
-					ctxt.AsmBuf.PutInt16(int16(v))
-				} else {
-					relput4(ctxt, p, a)
-				}
-
-			case Zil_rr:
-				ctxt.AsmBuf.Put1(byte(op))
-				asmand(ctxt, p, &p.To, &p.To)
-				if o.prefix == Pe {
-					v = vaddr(ctxt, p, &p.From, nil)
-					ctxt.AsmBuf.PutInt16(int16(v))
-				} else {
-					relput4(ctxt, p, &p.From)
-				}
-
-			case Z_rp:
-				ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
-				ctxt.AsmBuf.Put1(byte(op + reg[p.To.Reg]))
-
-			case Zrp_:
-				ctxt.Rexflag |= regrex[p.From.Reg] & (Rxb | 0x40)
-				ctxt.AsmBuf.Put1(byte(op + reg[p.From.Reg]))
-
-			case Zclr:
-				ctxt.Rexflag &^= Pw
-				ctxt.AsmBuf.Put1(byte(op))
-				asmand(ctxt, p, &p.To, &p.To)
-
-			case Zcallcon, Zjmpcon:
-				if yt.zcase == Zcallcon {
-					ctxt.AsmBuf.Put1(byte(op))
-				} else {
-					ctxt.AsmBuf.Put1(o.op[z+1])
-				}
-				r = obj.Addrel(ctxt.Cursym)
-				r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-				r.Type = obj.R_PCREL
-				r.Siz = 4
-				r.Add = p.To.Offset
-				ctxt.AsmBuf.PutInt32(0)
-
-			case Zcallind:
-				ctxt.AsmBuf.Put2(byte(op), o.op[z+1])
-				r = obj.Addrel(ctxt.Cursym)
-				r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-				if p.Mode == 64 {
-					r.Type = obj.R_PCREL
-				} else {
-					r.Type = obj.R_ADDR
-				}
-				r.Siz = 4
-				r.Add = p.To.Offset
-				r.Sym = p.To.Sym
-				ctxt.AsmBuf.PutInt32(0)
-
-			case Zcall, Zcallduff:
-				if p.To.Sym == nil {
-					ctxt.Diag("call without target")
-					log.Fatalf("bad code")
-				}
-
-				if yt.zcase == Zcallduff && ctxt.Flag_dynlink {
-					ctxt.Diag("directly calling duff when dynamically linking Go")
-				}
-
-				if ctxt.Framepointer_enabled && yt.zcase == Zcallduff && p.Mode == 64 {
-					// Maintain BP around call, since duffcopy/duffzero can't do it
-					// (the call jumps into the middle of the function).
-					// This makes it possible to see call sites for duffcopy/duffzero in
-					// BP-based profiling tools like Linux perf (which is the
-					// whole point of obj.Framepointer_enabled).
-					// MOVQ BP, -16(SP)
-					// LEAQ -16(SP), BP
-					ctxt.AsmBuf.Put(bpduff1)
-				}
-				ctxt.AsmBuf.Put1(byte(op))
-				r = obj.Addrel(ctxt.Cursym)
-				r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-				r.Sym = p.To.Sym
-				r.Add = p.To.Offset
-				r.Type = obj.R_CALL
-				r.Siz = 4
-				ctxt.AsmBuf.PutInt32(0)
-
-				if ctxt.Framepointer_enabled && yt.zcase == Zcallduff && p.Mode == 64 {
-					// Pop BP pushed above.
-					// MOVQ 0(BP), BP
-					ctxt.AsmBuf.Put(bpduff2)
-				}
-
-			// TODO: jump across functions needs reloc
-			case Zbr, Zjmp, Zloop:
-				if p.As == AXBEGIN {
-					ctxt.AsmBuf.Put1(byte(op))
-				}
-				if p.To.Sym != nil {
-					if yt.zcase != Zjmp {
-						ctxt.Diag("branch to ATEXT")
-						log.Fatalf("bad code")
-					}
-
-					ctxt.AsmBuf.Put1(o.op[z+1])
-					r = obj.Addrel(ctxt.Cursym)
-					r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-					r.Sym = p.To.Sym
-					r.Type = obj.R_PCREL
-					r.Siz = 4
-					ctxt.AsmBuf.PutInt32(0)
-					break
-				}
-
-				// Assumes q is in this function.
-				// TODO: Check in input, preserve in brchain.
-
-				// Fill in backward jump now.
-				q = p.Pcond
-
-				if q == nil {
-					ctxt.Diag("jmp/branch/loop without target")
-					log.Fatalf("bad code")
-				}
-
-				if p.Back&1 != 0 {
-					v = q.Pc - (p.Pc + 2)
-					if v >= -128 && p.As != AXBEGIN {
-						if p.As == AJCXZL {
-							ctxt.AsmBuf.Put1(0x67)
-						}
-						ctxt.AsmBuf.Put2(byte(op), byte(v))
-					} else if yt.zcase == Zloop {
-						ctxt.Diag("loop too far: %v", p)
-					} else {
-						v -= 5 - 2
-						if p.As == AXBEGIN {
-							v--
-						}
-						if yt.zcase == Zbr {
-							ctxt.AsmBuf.Put1(0x0f)
-							v--
-						}
-
-						ctxt.AsmBuf.Put1(o.op[z+1])
-						ctxt.AsmBuf.PutInt32(int32(v))
-					}
-
-					break
-				}
-
-				// Annotate target; will fill in later.
-				p.Forwd = q.Rel
-
-				q.Rel = p
-				if p.Back&2 != 0 && p.As != AXBEGIN { // short
-					if p.As == AJCXZL {
-						ctxt.AsmBuf.Put1(0x67)
-					}
-					ctxt.AsmBuf.Put2(byte(op), 0)
-				} else if yt.zcase == Zloop {
-					ctxt.Diag("loop too far: %v", p)
-				} else {
-					if yt.zcase == Zbr {
-						ctxt.AsmBuf.Put1(0x0f)
-					}
-					ctxt.AsmBuf.Put1(o.op[z+1])
-					ctxt.AsmBuf.PutInt32(0)
-				}
-
-				break
-
-			/*
-				v = q->pc - p->pc - 2;
-				if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
-					*ctxt->andptr++ = op;
-					*ctxt->andptr++ = v;
-				} else {
-					v -= 5-2;
-					if(yt.zcase == Zbr) {
-						*ctxt->andptr++ = 0x0f;
-						v--;
-					}
-					*ctxt->andptr++ = o->op[z+1];
-					*ctxt->andptr++ = v;
-					*ctxt->andptr++ = v>>8;
-					*ctxt->andptr++ = v>>16;
-					*ctxt->andptr++ = v>>24;
-				}
-			*/
-
-			case Zbyte:
-				v = vaddr(ctxt, p, &p.From, &rel)
-				if rel.Siz != 0 {
-					rel.Siz = uint8(op)
-					r = obj.Addrel(ctxt.Cursym)
-					*r = rel
-					r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-				}
-
-				ctxt.AsmBuf.Put1(byte(v))
-				if op > 1 {
-					ctxt.AsmBuf.Put1(byte(v >> 8))
-					if op > 2 {
-						ctxt.AsmBuf.PutInt16(int16(v >> 16))
-						if op > 4 {
-							ctxt.AsmBuf.PutInt32(int32(v >> 32))
-						}
-					}
-				}
-			}
-
-			return
-		}
-		z += int(yt.zoffset) + xo
-	}
-	for mo := ymovtab; mo[0].as != 0; mo = mo[1:] {
-		var pp obj.Prog
-		var t []byte
-		if p.As == mo[0].as {
-			if ycover[ft+int(mo[0].ft)] != 0 && ycover[f3t+int(mo[0].f3t)] != 0 && ycover[tt+int(mo[0].tt)] != 0 {
-				t = mo[0].op[:]
-				switch mo[0].code {
-				default:
-					ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
-
-				case 0: /* lit */
-					for z = 0; t[z] != E; z++ {
-						ctxt.AsmBuf.Put1(t[z])
-					}
-
-				case 1: /* r,m */
-					ctxt.AsmBuf.Put1(t[0])
-					asmando(ctxt, p, &p.To, int(t[1]))
-
-				case 2: /* m,r */
-					ctxt.AsmBuf.Put1(t[0])
-					asmando(ctxt, p, &p.From, int(t[1]))
-
-				case 3: /* r,m - 2op */
-					ctxt.AsmBuf.Put2(t[0], t[1])
-					asmando(ctxt, p, &p.To, int(t[2]))
-					ctxt.Rexflag |= regrex[p.From.Reg] & (Rxr | 0x40)
-
-				case 4: /* m,r - 2op */
-					ctxt.AsmBuf.Put2(t[0], t[1])
-					asmando(ctxt, p, &p.From, int(t[2]))
-					ctxt.Rexflag |= regrex[p.To.Reg] & (Rxr | 0x40)
-
-				case 5: /* load full pointer, trash heap */
-					if t[0] != 0 {
-						ctxt.AsmBuf.Put1(t[0])
-					}
-					switch p.To.Index {
-					default:
-						goto bad
-
-					case REG_DS:
-						ctxt.AsmBuf.Put1(0xc5)
-
-					case REG_SS:
-						ctxt.AsmBuf.Put2(0x0f, 0xb2)
-
-					case REG_ES:
-						ctxt.AsmBuf.Put1(0xc4)
-
-					case REG_FS:
-						ctxt.AsmBuf.Put2(0x0f, 0xb4)
-
-					case REG_GS:
-						ctxt.AsmBuf.Put2(0x0f, 0xb5)
-					}
-
-					asmand(ctxt, p, &p.From, &p.To)
-
-				case 6: /* double shift */
-					if t[0] == Pw {
-						if p.Mode != 64 {
-							ctxt.Diag("asmins: illegal 64: %v", p)
-						}
-						ctxt.Rexflag |= Pw
-						t = t[1:]
-					} else if t[0] == Pe {
-						ctxt.AsmBuf.Put1(Pe)
-						t = t[1:]
-					}
-
-					switch p.From.Type {
-					default:
-						goto bad
-
-					case obj.TYPE_CONST:
-						ctxt.AsmBuf.Put2(0x0f, t[0])
-						asmandsz(ctxt, p, &p.To, reg[p.From3.Reg], regrex[p.From3.Reg], 0)
-						ctxt.AsmBuf.Put1(byte(p.From.Offset))
-
-					case obj.TYPE_REG:
-						switch p.From.Reg {
-						default:
-							goto bad
-
-						case REG_CL, REG_CX:
-							ctxt.AsmBuf.Put2(0x0f, t[1])
-							asmandsz(ctxt, p, &p.To, reg[p.From3.Reg], regrex[p.From3.Reg], 0)
-						}
-					}
-
-				// NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
-				// where you load the TLS base register into a register and then index off that
-				// register to access the actual TLS variables. Systems that allow direct TLS access
-				// are handled in prefixof above and should not be listed here.
-				case 7: /* mov tls, r */
-					if p.Mode == 64 && p.As != AMOVQ || p.Mode == 32 && p.As != AMOVL {
-						ctxt.Diag("invalid load of TLS: %v", p)
-					}
-
-					if p.Mode == 32 {
-						// NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
-						// where you load the TLS base register into a register and then index off that
-						// register to access the actual TLS variables. Systems that allow direct TLS access
-						// are handled in prefixof above and should not be listed here.
-						switch ctxt.Headtype {
-						default:
-							log.Fatalf("unknown TLS base location for %v", ctxt.Headtype)
-
-						case obj.Hlinux,
-							obj.Hnacl:
-							if ctxt.Flag_shared {
-								// Note that this is not generating the same insns as the other cases.
-								//     MOV TLS, dst
-								// becomes
-								//     call __x86.get_pc_thunk.dst
-								//     movl (gotpc + g@gotntpoff)(dst), dst
-								// which is encoded as
-								//     call __x86.get_pc_thunk.dst
-								//     movq 0(dst), dst
-								// and R_CALL & R_TLS_IE relocs. This all assumes the only tls variable we access
-								// is g, which we can't check here, but will when we assemble the second
-								// instruction.
-								dst := p.To.Reg
-								ctxt.AsmBuf.Put1(0xe8)
-								r = obj.Addrel(ctxt.Cursym)
-								r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-								r.Type = obj.R_CALL
-								r.Siz = 4
-								r.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk."+strings.ToLower(Rconv(int(dst))), 0)
-								ctxt.AsmBuf.PutInt32(0)
-
-								ctxt.AsmBuf.Put2(0x8B, byte(2<<6|reg[dst]|(reg[dst]<<3)))
-								r = obj.Addrel(ctxt.Cursym)
-								r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-								r.Type = obj.R_TLS_IE
-								r.Siz = 4
-								r.Add = 2
-								ctxt.AsmBuf.PutInt32(0)
-							} else {
-								// ELF TLS base is 0(GS).
-								pp.From = p.From
-
-								pp.From.Type = obj.TYPE_MEM
-								pp.From.Reg = REG_GS
-								pp.From.Offset = 0
-								pp.From.Index = REG_NONE
-								pp.From.Scale = 0
-								ctxt.AsmBuf.Put2(0x65, // GS
-									0x8B)
-								asmand(ctxt, p, &pp.From, &p.To)
-							}
-						case obj.Hplan9:
-							if ctxt.Plan9privates == nil {
-								ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
-							}
-							pp.From = obj.Addr{}
-							pp.From.Type = obj.TYPE_MEM
-							pp.From.Name = obj.NAME_EXTERN
-							pp.From.Sym = ctxt.Plan9privates
-							pp.From.Offset = 0
-							pp.From.Index = REG_NONE
-							ctxt.AsmBuf.Put1(0x8B)
-							asmand(ctxt, p, &pp.From, &p.To)
-
-						case obj.Hwindows, obj.Hwindowsgui:
-							// Windows TLS base is always 0x14(FS).
-							pp.From = p.From
-
-							pp.From.Type = obj.TYPE_MEM
-							pp.From.Reg = REG_FS
-							pp.From.Offset = 0x14
-							pp.From.Index = REG_NONE
-							pp.From.Scale = 0
-							ctxt.AsmBuf.Put2(0x64, // FS
-								0x8B)
-							asmand(ctxt, p, &pp.From, &p.To)
-						}
-						break
-					}
-
-					switch ctxt.Headtype {
-					default:
-						log.Fatalf("unknown TLS base location for %v", ctxt.Headtype)
-
-					case obj.Hlinux:
-						if !ctxt.Flag_shared {
-							log.Fatalf("unknown TLS base location for linux without -shared")
-						}
-						// Note that this is not generating the same insn as the other cases.
-						//     MOV TLS, R_to
-						// becomes
-						//     movq g@gottpoff(%rip), R_to
-						// which is encoded as
-						//     movq 0(%rip), R_to
-						// and a R_TLS_IE reloc. This all assumes the only tls variable we access
-						// is g, which we can't check here, but will when we assemble the second
-						// instruction.
-						ctxt.Rexflag = Pw | (regrex[p.To.Reg] & Rxr)
-
-						ctxt.AsmBuf.Put2(0x8B, byte(0x05|(reg[p.To.Reg]<<3)))
-						r = obj.Addrel(ctxt.Cursym)
-						r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len()))
-						r.Type = obj.R_TLS_IE
-						r.Siz = 4
-						r.Add = -4
-						ctxt.AsmBuf.PutInt32(0)
-
-					case obj.Hplan9:
-						if ctxt.Plan9privates == nil {
-							ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
-						}
-						pp.From = obj.Addr{}
-						pp.From.Type = obj.TYPE_MEM
-						pp.From.Name = obj.NAME_EXTERN
-						pp.From.Sym = ctxt.Plan9privates
-						pp.From.Offset = 0
-						pp.From.Index = REG_NONE
-						ctxt.Rexflag |= Pw
-						ctxt.AsmBuf.Put1(0x8B)
-						asmand(ctxt, p, &pp.From, &p.To)
-
-					case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
-						// TLS base is 0(FS).
-						pp.From = p.From
-
-						pp.From.Type = obj.TYPE_MEM
-						pp.From.Name = obj.NAME_NONE
-						pp.From.Reg = REG_NONE
-						pp.From.Offset = 0
-						pp.From.Index = REG_NONE
-						pp.From.Scale = 0
-						ctxt.Rexflag |= Pw
-						ctxt.AsmBuf.Put2(0x64, // FS
-							0x8B)
-						asmand(ctxt, p, &pp.From, &p.To)
-
-					case obj.Hwindows, obj.Hwindowsgui:
-						// Windows TLS base is always 0x28(GS).
-						pp.From = p.From
-
-						pp.From.Type = obj.TYPE_MEM
-						pp.From.Name = obj.NAME_NONE
-						pp.From.Reg = REG_GS
-						pp.From.Offset = 0x28
-						pp.From.Index = REG_NONE
-						pp.From.Scale = 0
-						ctxt.Rexflag |= Pw
-						ctxt.AsmBuf.Put2(0x65, // GS
-							0x8B)
-						asmand(ctxt, p, &pp.From, &p.To)
-					}
-				}
-				return
-			}
-		}
-	}
-	goto bad
-
-bad:
-	if p.Mode != 64 {
-		/*
-		 * here, the assembly has failed.
-		 * if it's a byte instruction that has
-		 * unaddressable registers, try to
-		 * exchange registers and reissue the
-		 * instruction with the operands renamed.
-		 */
-		pp := *p
-
-		unbytereg(&pp.From, &pp.Ft)
-		unbytereg(&pp.To, &pp.Tt)
-
-		z := int(p.From.Reg)
-		if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
-			// TODO(rsc): Use this code for x86-64 too. It has bug fixes not present in the amd64 code base.
-			// For now, different to keep bit-for-bit compatibility.
-			if p.Mode == 32 {
-				breg := byteswapreg(ctxt, &p.To)
-				if breg != REG_AX {
-					ctxt.AsmBuf.Put1(0x87) // xchg lhs,bx
-					asmando(ctxt, p, &p.From, reg[breg])
-					subreg(&pp, z, breg)
-					doasm(ctxt, &pp)
-					ctxt.AsmBuf.Put1(0x87) // xchg lhs,bx
-					asmando(ctxt, p, &p.From, reg[breg])
-				} else {
-					ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg lhs,ax
-					subreg(&pp, z, REG_AX)
-					doasm(ctxt, &pp)
-					ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg lhs,ax
-				}
-				return
-			}
-
-			if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
-				// We certainly don't want to exchange
-				// with AX if the op is MUL or DIV.
-				ctxt.AsmBuf.Put1(0x87) // xchg lhs,bx
-				asmando(ctxt, p, &p.From, reg[REG_BX])
-				subreg(&pp, z, REG_BX)
-				doasm(ctxt, &pp)
-				ctxt.AsmBuf.Put1(0x87) // xchg lhs,bx
-				asmando(ctxt, p, &p.From, reg[REG_BX])
-			} else {
-				ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg lhs,ax
-				subreg(&pp, z, REG_AX)
-				doasm(ctxt, &pp)
-				ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg lhs,ax
-			}
-			return
-		}
-
-		z = int(p.To.Reg)
-		if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
-			// TODO(rsc): Use this code for x86-64 too. It has bug fixes not present in the amd64 code base.
-			// For now, different to keep bit-for-bit compatibility.
-			if p.Mode == 32 {
-				breg := byteswapreg(ctxt, &p.From)
-				if breg != REG_AX {
-					ctxt.AsmBuf.Put1(0x87) // xchg rhs,bx
-					asmando(ctxt, p, &p.To, reg[breg])
-					subreg(&pp, z, breg)
-					doasm(ctxt, &pp)
-					ctxt.AsmBuf.Put1(0x87) // xchg rhs,bx
-					asmando(ctxt, p, &p.To, reg[breg])
-				} else {
-					ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg rhs,ax
-					subreg(&pp, z, REG_AX)
-					doasm(ctxt, &pp)
-					ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg rhs,ax
-				}
-				return
-			}
-
-			if isax(&p.From) {
-				ctxt.AsmBuf.Put1(0x87) // xchg rhs,bx
-				asmando(ctxt, p, &p.To, reg[REG_BX])
-				subreg(&pp, z, REG_BX)
-				doasm(ctxt, &pp)
-				ctxt.AsmBuf.Put1(0x87) // xchg rhs,bx
-				asmando(ctxt, p, &p.To, reg[REG_BX])
-			} else {
-				ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg rhs,ax
-				subreg(&pp, z, REG_AX)
-				doasm(ctxt, &pp)
-				ctxt.AsmBuf.Put1(byte(0x90 + reg[z])) // xchg rhs,ax
-			}
-			return
-		}
-	}
-
-	ctxt.Diag("invalid instruction: %v", p)
-	//	ctxt.Diag("doasm: notfound ft=%d tt=%d %v %d %d", p.Ft, p.Tt, p, oclass(ctxt, p, &p.From), oclass(ctxt, p, &p.To))
-	return
-}
-
-// byteswapreg returns a byte-addressable register (AX, BX, CX, DX)
-// which is not referenced in a.
-// If a is empty, it returns BX to account for MULB-like instructions
-// that might use DX and AX.
-func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
-	cand := 1
-	canc := cand
-	canb := canc
-	cana := canb
-
-	if a.Type == obj.TYPE_NONE {
-		cand = 0
-		cana = cand
-	}
-
-	if a.Type == obj.TYPE_REG || ((a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Name == obj.NAME_NONE) {
-		switch a.Reg {
-		case REG_NONE:
-			cand = 0
-			cana = cand
-
-		case REG_AX, REG_AL, REG_AH:
-			cana = 0
-
-		case REG_BX, REG_BL, REG_BH:
-			canb = 0
-
-		case REG_CX, REG_CL, REG_CH:
-			canc = 0
-
-		case REG_DX, REG_DL, REG_DH:
-			cand = 0
-		}
-	}
-
-	if a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR {
-		switch a.Index {
-		case REG_AX:
-			cana = 0
-
-		case REG_BX:
-			canb = 0
-
-		case REG_CX:
-			canc = 0
-
-		case REG_DX:
-			cand = 0
-		}
-	}
-
-	if cana != 0 {
-		return REG_AX
-	}
-	if canb != 0 {
-		return REG_BX
-	}
-	if canc != 0 {
-		return REG_CX
-	}
-	if cand != 0 {
-		return REG_DX
-	}
-
-	ctxt.Diag("impossible byte register")
-	log.Fatalf("bad code")
-	return 0
-}
-
-func isbadbyte(a *obj.Addr) bool {
-	return a.Type == obj.TYPE_REG && (REG_BP <= a.Reg && a.Reg <= REG_DI || REG_BPB <= a.Reg && a.Reg <= REG_DIB)
-}
-
-var naclret = []uint8{
-	0x5e, // POPL SI
-	// 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
-	0x83,
-	0xe6,
-	0xe0, // ANDL $~31, SI
-	0x4c,
-	0x01,
-	0xfe, // ADDQ R15, SI
-	0xff,
-	0xe6, // JMP SI
-}
-
-var naclret8 = []uint8{
-	0x5d, // POPL BP
-	// 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
-	0x83,
-	0xe5,
-	0xe0, // ANDL $~31, BP
-	0xff,
-	0xe5, // JMP BP
-}
-
-var naclspfix = []uint8{0x4c, 0x01, 0xfc} // ADDQ R15, SP
-
-var naclbpfix = []uint8{0x4c, 0x01, 0xfd} // ADDQ R15, BP
-
-var naclmovs = []uint8{
-	0x89,
-	0xf6, // MOVL SI, SI
-	0x49,
-	0x8d,
-	0x34,
-	0x37, // LEAQ (R15)(SI*1), SI
-	0x89,
-	0xff, // MOVL DI, DI
-	0x49,
-	0x8d,
-	0x3c,
-	0x3f, // LEAQ (R15)(DI*1), DI
-}
-
-var naclstos = []uint8{
-	0x89,
-	0xff, // MOVL DI, DI
-	0x49,
-	0x8d,
-	0x3c,
-	0x3f, // LEAQ (R15)(DI*1), DI
-}
-
-func nacltrunc(ctxt *obj.Link, reg int) {
-	if reg >= REG_R8 {
-		ctxt.AsmBuf.Put1(0x45)
-	}
-	reg = (reg - REG_AX) & 7
-	ctxt.AsmBuf.Put2(0x89, byte(3<<6|reg<<3|reg))
-}
-
-func asmins(ctxt *obj.Link, p *obj.Prog) {
-	ctxt.AsmBuf.Reset()
-	ctxt.Asmode = int(p.Mode)
-
-	if ctxt.Headtype == obj.Hnacl && p.Mode == 32 {
-		switch p.As {
-		case obj.ARET:
-			ctxt.AsmBuf.Put(naclret8)
-			return
-
-		case obj.ACALL,
-			obj.AJMP:
-			if p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
-				ctxt.AsmBuf.Put3(0x83, byte(0xe0|(p.To.Reg-REG_AX)), 0xe0)
-			}
-
-		case AINT:
-			ctxt.AsmBuf.Put1(0xf4)
-			return
-		}
-	}
-
-	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
-		if p.As == AREP {
-			ctxt.Rep++
-			return
-		}
-
-		if p.As == AREPN {
-			ctxt.Repn++
-			return
-		}
-
-		if p.As == ALOCK {
-			ctxt.Lock++
-			return
-		}
-
-		if p.As != ALEAQ && p.As != ALEAL {
-			if p.From.Index != REG_NONE && p.From.Scale > 0 {
-				nacltrunc(ctxt, int(p.From.Index))
-			}
-			if p.To.Index != REG_NONE && p.To.Scale > 0 {
-				nacltrunc(ctxt, int(p.To.Index))
-			}
-		}
-
-		switch p.As {
-		case obj.ARET:
-			ctxt.AsmBuf.Put(naclret)
-			return
-
-		case obj.ACALL,
-			obj.AJMP:
-			if p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
-				// ANDL $~31, reg
-				ctxt.AsmBuf.Put3(0x83, byte(0xe0|(p.To.Reg-REG_AX)), 0xe0)
-				// ADDQ R15, reg
-				ctxt.AsmBuf.Put3(0x4c, 0x01, byte(0xf8|(p.To.Reg-REG_AX)))
-			}
-
-			if p.To.Type == obj.TYPE_REG && REG_R8 <= p.To.Reg && p.To.Reg <= REG_R15 {
-				// ANDL $~31, reg
-				ctxt.AsmBuf.Put4(0x41, 0x83, byte(0xe0|(p.To.Reg-REG_R8)), 0xe0)
-				// ADDQ R15, reg
-				ctxt.AsmBuf.Put3(0x4d, 0x01, byte(0xf8|(p.To.Reg-REG_R8)))
-			}
-
-		case AINT:
-			ctxt.AsmBuf.Put1(0xf4)
-			return
-
-		case ASCASB,
-			ASCASW,
-			ASCASL,
-			ASCASQ,
-			ASTOSB,
-			ASTOSW,
-			ASTOSL,
-			ASTOSQ:
-			ctxt.AsmBuf.Put(naclstos)
-
-		case AMOVSB, AMOVSW, AMOVSL, AMOVSQ:
-			ctxt.AsmBuf.Put(naclmovs)
-		}
-
-		if ctxt.Rep != 0 {
-			ctxt.AsmBuf.Put1(0xf3)
-			ctxt.Rep = 0
-		}
-
-		if ctxt.Repn != 0 {
-			ctxt.AsmBuf.Put1(0xf2)
-			ctxt.Repn = 0
-		}
-
-		if ctxt.Lock != 0 {
-			ctxt.AsmBuf.Put1(0xf0)
-			ctxt.Lock = 0
-		}
-	}
-
-	ctxt.Rexflag = 0
-	ctxt.Vexflag = 0
-	mark := ctxt.AsmBuf.Len()
-	ctxt.Asmode = int(p.Mode)
-	doasm(ctxt, p)
-	if ctxt.Rexflag != 0 && ctxt.Vexflag == 0 {
-		/*
-		 * as befits the whole approach of the architecture,
-		 * the rex prefix must appear before the first opcode byte
-		 * (and thus after any 66/67/f2/f3/26/2e/3e prefix bytes, but
-		 * before the 0f opcode escape!), or it might be ignored.
-		 * note that the handbook often misleadingly shows 66/f2/f3 in `opcode'.
-		 */
-		if p.Mode != 64 {
-			ctxt.Diag("asmins: illegal in mode %d: %v (%d %d)", p.Mode, p, p.Ft, p.Tt)
-		}
-		n := ctxt.AsmBuf.Len()
-		var np int
-		for np = mark; np < n; np++ {
-			c := ctxt.AsmBuf.Peek(np)
-			if c != 0xf2 && c != 0xf3 && (c < 0x64 || c > 0x67) && c != 0x2e && c != 0x3e && c != 0x26 {
-				break
-			}
-		}
-		ctxt.AsmBuf.Insert(np, byte(0x40|ctxt.Rexflag))
-	}
-
-	n := ctxt.AsmBuf.Len()
-	for i := len(ctxt.Cursym.R) - 1; i >= 0; i-- {
-		r := &ctxt.Cursym.R[i]
-		if int64(r.Off) < p.Pc {
-			break
-		}
-		if ctxt.Rexflag != 0 {
-			r.Off++
-		}
-		if r.Type == obj.R_PCREL {
-			if p.Mode == 64 || p.As == obj.AJMP || p.As == obj.ACALL {
-				// PC-relative addressing is relative to the end of the instruction,
-				// but the relocations applied by the linker are relative to the end
-				// of the relocation. Because immediate instruction
-				// arguments can follow the PC-relative memory reference in the
-				// instruction encoding, the two may not coincide. In this case,
-				// adjust addend so that linker can keep relocating relative to the
-				// end of the relocation.
-				r.Add -= p.Pc + int64(n) - (int64(r.Off) + int64(r.Siz))
-			} else if p.Mode == 32 {
-				// On 386 PC-relative addressing (for non-call/jmp instructions)
-				// assumes that the previous instruction loaded the PC of the end
-				// of that instruction into CX, so the adjustment is relative to
-				// that.
-				r.Add += int64(r.Off) - p.Pc + int64(r.Siz)
-			}
-		}
-		if r.Type == obj.R_GOTPCREL && p.Mode == 32 {
-			// On 386, R_GOTPCREL makes the same assumptions as R_PCREL.
-			r.Add += int64(r.Off) - p.Pc + int64(r.Siz)
-		}
-
-	}
-
-	if p.Mode == 64 && ctxt.Headtype == obj.Hnacl && p.As != ACMPL && p.As != ACMPQ && p.To.Type == obj.TYPE_REG {
-		switch p.To.Reg {
-		case REG_SP:
-			ctxt.AsmBuf.Put(naclspfix)
-		case REG_BP:
-			ctxt.AsmBuf.Put(naclbpfix)
-		}
-	}
-}
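
The R_PCREL addend correction above can be read as the following standalone sketch; the layout values are hypothetical and serve only to illustrate the arithmetic.

package main

import "fmt"

// pcrelAddendCorrection returns the amount subtracted from r.Add above: the
// number of bytes between the end of the relocation and the end of the
// instruction (for example, immediate bytes that trail the displacement).
func pcrelAddendCorrection(pc, instrLen, relOff, relSiz int64) int64 {
	return pc + instrLen - (relOff + relSiz)
}

func main() {
	// Hypothetical layout: a 7-byte instruction at pc 0x10 whose 4-byte
	// displacement sits at offset 0x12 and is followed by one immediate byte.
	fmt.Println(pcrelAddendCorrection(0x10, 7, 0x12, 4)) // prints 1
}
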
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/list6.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/list6.go
deleted file mode 100644
index eaab227..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/list6.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/list6.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/list6.go:1
-// Inferno utils/6c/list.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6c/list.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-)
-
-var Register = []string{
-	"AL", /* [D_AL] */
-	"CL",
-	"DL",
-	"BL",
-	"SPB",
-	"BPB",
-	"SIB",
-	"DIB",
-	"R8B",
-	"R9B",
-	"R10B",
-	"R11B",
-	"R12B",
-	"R13B",
-	"R14B",
-	"R15B",
-	"AX", /* [D_AX] */
-	"CX",
-	"DX",
-	"BX",
-	"SP",
-	"BP",
-	"SI",
-	"DI",
-	"R8",
-	"R9",
-	"R10",
-	"R11",
-	"R12",
-	"R13",
-	"R14",
-	"R15",
-	"AH",
-	"CH",
-	"DH",
-	"BH",
-	"F0", /* [D_F0] */
-	"F1",
-	"F2",
-	"F3",
-	"F4",
-	"F5",
-	"F6",
-	"F7",
-	"M0",
-	"M1",
-	"M2",
-	"M3",
-	"M4",
-	"M5",
-	"M6",
-	"M7",
-	"X0",
-	"X1",
-	"X2",
-	"X3",
-	"X4",
-	"X5",
-	"X6",
-	"X7",
-	"X8",
-	"X9",
-	"X10",
-	"X11",
-	"X12",
-	"X13",
-	"X14",
-	"X15",
-	"Y0",
-	"Y1",
-	"Y2",
-	"Y3",
-	"Y4",
-	"Y5",
-	"Y6",
-	"Y7",
-	"Y8",
-	"Y9",
-	"Y10",
-	"Y11",
-	"Y12",
-	"Y13",
-	"Y14",
-	"Y15",
-	"CS", /* [D_CS] */
-	"SS",
-	"DS",
-	"ES",
-	"FS",
-	"GS",
-	"GDTR", /* [D_GDTR] */
-	"IDTR", /* [D_IDTR] */
-	"LDTR", /* [D_LDTR] */
-	"MSW",  /* [D_MSW] */
-	"TASK", /* [D_TASK] */
-	"CR0",  /* [D_CR] */
-	"CR1",
-	"CR2",
-	"CR3",
-	"CR4",
-	"CR5",
-	"CR6",
-	"CR7",
-	"CR8",
-	"CR9",
-	"CR10",
-	"CR11",
-	"CR12",
-	"CR13",
-	"CR14",
-	"CR15",
-	"DR0", /* [D_DR] */
-	"DR1",
-	"DR2",
-	"DR3",
-	"DR4",
-	"DR5",
-	"DR6",
-	"DR7",
-	"TR0", /* [D_TR] */
-	"TR1",
-	"TR2",
-	"TR3",
-	"TR4",
-	"TR5",
-	"TR6",
-	"TR7",
-	"TLS",    /* [D_TLS] */
-	"MAXREG", /* [MAXREG] */
-}
-
-func init() {
-	obj.RegisterRegister(REG_AL, REG_AL+len(Register), Rconv)
-	obj.RegisterOpcode(obj.ABaseAMD64, Anames)
-}
-
-func Rconv(r int) string {
-	if REG_AL <= r && r-REG_AL < len(Register) {
-		return Register[r-REG_AL]
-	}
-	return fmt.Sprintf("Rgok(%d)", r-obj.RBaseAMD64)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/obj6.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/obj6.go
deleted file mode 100644
index 5ee4742..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/obj6.go
+++ /dev/null
@@ -1,1499 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/obj6.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/obj6.go:1
-// Inferno utils/6l/pass.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/pass.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"log"
-	"math"
-	"strings"
-)
-
-func CanUse1InsnTLS(ctxt *obj.Link) bool {
-	if isAndroid {
-		// For android, we use a disgusting hack that assumes
-		// the thread-local storage slot for g is allocated
-		// using pthread_key_create with a fixed offset
-		// (see src/runtime/cgo/gcc_android_amd64.c).
-		// This makes access to the TLS storage (for g) doable
-		// with 1 instruction.
-		return true
-	}
-
-	if ctxt.Arch.RegSize == 4 {
-		switch ctxt.Headtype {
-		case obj.Hlinux,
-			obj.Hnacl,
-			obj.Hplan9,
-			obj.Hwindows,
-			obj.Hwindowsgui:
-			return false
-		}
-
-		return true
-	}
-
-	switch ctxt.Headtype {
-	case obj.Hplan9, obj.Hwindows, obj.Hwindowsgui:
-		return false
-	case obj.Hlinux:
-		return !ctxt.Flag_shared
-	}
-
-	return true
-}
-
-func progedit(ctxt *obj.Link, p *obj.Prog) {
-	// Maintain information about code generation mode.
-	if ctxt.Mode == 0 {
-		ctxt.Mode = ctxt.Arch.RegSize * 8
-	}
-	p.Mode = int8(ctxt.Mode)
-
-	switch p.As {
-	case AMODE:
-		if p.From.Type == obj.TYPE_CONST || (p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_NONE) {
-			switch int(p.From.Offset) {
-			case 16, 32, 64:
-				ctxt.Mode = int(p.From.Offset)
-			}
-		}
-		obj.Nopout(p)
-	}
-
-	// Thread-local storage references use the TLS pseudo-register.
-	// As a register, TLS refers to the thread-local storage base, and it
-	// can only be loaded into another register:
-	//
-	//         MOVQ TLS, AX
-	//
-	// An offset from the thread-local storage base is written off(reg)(TLS*1).
-	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
-	// indexing from the loaded TLS base. This emits a relocation so that
-	// if the linker needs to adjust the offset, it can. For example:
-	//
-	//         MOVQ TLS, AX
-	//         MOVQ 0(AX)(TLS*1), CX // load g into CX
-	//
-	// On systems that support direct access to the TLS memory, this
-	// pair of instructions can be reduced to a direct TLS memory reference:
-	//
-	//         MOVQ 0(TLS), CX // load g into CX
-	//
-	// The 2-instruction and 1-instruction forms correspond to the two code
-	// sequences for loading a TLS variable in the local exec model given in "ELF
-	// Handling For Thread-Local Storage".
-	//
-	// We apply this rewrite on systems that support the 1-instruction form.
-	// The decision is made using only the operating system and the -shared flag,
-	// not the link mode. If some link modes on a particular operating system
-	// require the 2-instruction form, then all builds for that operating system
-	// will use the 2-instruction form, so that the link mode decision can be
-	// delayed to link time.
-	//
-	// In this way, all supported systems use identical instructions to
-	// access TLS, and they are rewritten appropriately first here in
-	// liblink and then finally using relocations in the linker.
-	//
-	// When -shared is passed, we leave the code in the 2-instruction form but
-	// assemble (and relocate) them in different ways to generate the initial
-	// exec code sequence. It's a bit of a fluke that this is possible without
-	// rewriting the instructions more comprehensively, and it only works because
-	// we only support a single TLS variable (g).
-
-	if CanUse1InsnTLS(ctxt) {
-		// Reduce 2-instruction sequence to 1-instruction sequence.
-		// Sequences like
-		//	MOVQ TLS, BX
-		//	... off(BX)(TLS*1) ...
-		// become
-		//	NOP
-		//	... off(TLS) ...
-		//
-		// TODO(rsc): Remove the Hsolaris special case. It exists only to
-		// guarantee we are producing byte-identical binaries as before this code.
-		// But it should be unnecessary.
-		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
-			obj.Nopout(p)
-		}
-		if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
-			p.From.Reg = REG_TLS
-			p.From.Scale = 0
-			p.From.Index = REG_NONE
-		}
-
-		if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
-			p.To.Reg = REG_TLS
-			p.To.Scale = 0
-			p.To.Index = REG_NONE
-		}
-	} else {
-		// load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it
-		// as the 2-instruction sequence if necessary.
-		//	MOVQ 0(TLS), BX
-		// becomes
-		//	MOVQ TLS, BX
-		//	MOVQ 0(BX)(TLS*1), BX
-		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
-			q := obj.Appendp(ctxt, p)
-			q.As = p.As
-			q.From = p.From
-			q.From.Type = obj.TYPE_MEM
-			q.From.Reg = p.To.Reg
-			q.From.Index = REG_TLS
-			q.From.Scale = 2 // TODO: use 1
-			q.To = p.To
-			p.From.Type = obj.TYPE_REG
-			p.From.Reg = REG_TLS
-			p.From.Index = REG_NONE
-			p.From.Offset = 0
-		}
-	}
-
-	// TODO: Remove.
-	if (ctxt.Headtype == obj.Hwindows || ctxt.Headtype == obj.Hwindowsgui) && p.Mode == 64 || ctxt.Headtype == obj.Hplan9 {
-		if p.From.Scale == 1 && p.From.Index == REG_TLS {
-			p.From.Scale = 2
-		}
-		if p.To.Scale == 1 && p.To.Index == REG_TLS {
-			p.To.Scale = 2
-		}
-	}
-
-	// Rewrite 0 to $0 in 3rd argument to CMPPS etc.
-	// That's what the tables expect.
-	switch p.As {
-	case ACMPPD, ACMPPS, ACMPSD, ACMPSS:
-		if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil {
-			p.To.Type = obj.TYPE_CONST
-		}
-	}
-
-	// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
-	switch p.As {
-	case obj.ACALL, obj.AJMP, obj.ARET:
-		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
-			p.To.Type = obj.TYPE_BRANCH
-		}
-	}
-
-	// Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
-	if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Family == sys.AMD64 || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
-		switch p.As {
-		case AMOVL:
-			p.As = ALEAL
-			p.From.Type = obj.TYPE_MEM
-		case AMOVQ:
-			p.As = ALEAQ
-			p.From.Type = obj.TYPE_MEM
-		}
-	}
-
-	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
-		if p.From3 != nil {
-			nacladdr(ctxt, p, p.From3)
-		}
-		nacladdr(ctxt, p, &p.From)
-		nacladdr(ctxt, p, &p.To)
-	}
-
-	// Rewrite float constants to values stored in memory.
-	switch p.As {
-	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
-	case AMOVSS:
-		if p.From.Type == obj.TYPE_FCONST {
-			//  f == 0 can't be used here due to -0, so use Float64bits
-			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
-				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
-					p.As = AXORPS
-					p.From = p.To
-					break
-				}
-			}
-		}
-		fallthrough
-
-	case AFMOVF,
-		AFADDF,
-		AFSUBF,
-		AFSUBRF,
-		AFMULF,
-		AFDIVF,
-		AFDIVRF,
-		AFCOMF,
-		AFCOMFP,
-		AADDSS,
-		ASUBSS,
-		AMULSS,
-		ADIVSS,
-		ACOMISS,
-		AUCOMISS:
-		if p.From.Type == obj.TYPE_FCONST {
-			f32 := float32(p.From.Val.(float64))
-			i32 := math.Float32bits(f32)
-			literal := fmt.Sprintf("$f32.%08x", i32)
-			s := obj.Linklookup(ctxt, literal, 0)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Offset = 0
-		}
-
-	case AMOVSD:
-		// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
-		if p.From.Type == obj.TYPE_FCONST {
-			//  f == 0 can't be used here due to -0, so use Float64bits
-			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
-				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
-					p.As = AXORPS
-					p.From = p.To
-					break
-				}
-			}
-		}
-		fallthrough
-
-	case AFMOVD,
-		AFADDD,
-		AFSUBD,
-		AFSUBRD,
-		AFMULD,
-		AFDIVD,
-		AFDIVRD,
-		AFCOMD,
-		AFCOMDP,
-		AADDSD,
-		ASUBSD,
-		AMULSD,
-		ADIVSD,
-		ACOMISD,
-		AUCOMISD:
-		if p.From.Type == obj.TYPE_FCONST {
-			i64 := math.Float64bits(p.From.Val.(float64))
-			literal := fmt.Sprintf("$f64.%016x", i64)
-			s := obj.Linklookup(ctxt, literal, 0)
-			p.From.Type = obj.TYPE_MEM
-			p.From.Name = obj.NAME_EXTERN
-			p.From.Sym = s
-			p.From.Sym.Set(obj.AttrLocal, true)
-			p.From.Offset = 0
-		}
-	}
-
-	if ctxt.Flag_dynlink {
-		rewriteToUseGot(ctxt, p)
-	}
-
-	if ctxt.Flag_shared && p.Mode == 32 {
-		rewriteToPcrel(ctxt, p)
-	}
-}
-
-// Rewrite p, if necessary, to access global data via the global offset table.
-func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
-	var add, lea, mov obj.As
-	var reg int16
-	if p.Mode == 64 {
-		add = AADDQ
-		lea = ALEAQ
-		mov = AMOVQ
-		reg = REG_R15
-	} else {
-		add = AADDL
-		lea = ALEAL
-		mov = AMOVL
-		reg = REG_CX
-		if p.As == ALEAL && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
-			// Special case: clobber the destination register with
-			// the PC so we don't have to clobber CX.
-			// The SSA backend depends on CX not being clobbered across LEAL.
-			// See cmd/compile/internal/ssa/gen/386.rules (search for Flag_shared).
-			reg = p.To.Reg
-		}
-	}
-
-	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
-		//     ADUFFxxx $offset
-		// becomes
-		//     $MOV runtime.duffxxx@GOT, $reg
-		//     $ADD $offset, $reg
-		//     CALL $reg
-		var sym *obj.LSym
-		if p.As == obj.ADUFFZERO {
-			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
-		} else {
-			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
-		}
-		offset := p.To.Offset
-		p.As = mov
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		p.From.Sym = sym
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = reg
-		p.To.Offset = 0
-		p.To.Sym = nil
-		p1 := obj.Appendp(ctxt, p)
-		p1.As = add
-		p1.From.Type = obj.TYPE_CONST
-		p1.From.Offset = offset
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = reg
-		p2 := obj.Appendp(ctxt, p1)
-		p2.As = obj.ACALL
-		p2.To.Type = obj.TYPE_REG
-		p2.To.Reg = reg
-	}
-
-	// We only care about global data: NAME_EXTERN means a global
-	// symbol in the Go sense, and p.Sym.Local is true for a few
-	// internally defined symbols.
-	if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		// $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below
-		p.As = mov
-		p.From.Type = obj.TYPE_ADDR
-	}
-	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		// $MOV $sym, Rx becomes $MOV sym@GOT, Rx
-		// $MOV $sym+<off>, Rx becomes $MOV sym@GOT, Rx; $LEA <off>(Rx), Rx
-		// On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX
-		cmplxdest := false
-		pAs := p.As
-		var dest obj.Addr
-		if p.To.Type != obj.TYPE_REG || pAs != mov {
-			if p.Mode == 64 {
-				ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
-			}
-			cmplxdest = true
-			dest = p.To
-			p.As = mov
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = reg
-			p.To.Sym = nil
-			p.To.Name = obj.NAME_NONE
-		}
-		p.From.Type = obj.TYPE_MEM
-		p.From.Name = obj.NAME_GOTREF
-		q := p
-		if p.From.Offset != 0 {
-			q = obj.Appendp(ctxt, p)
-			q.As = lea
-			q.From.Type = obj.TYPE_MEM
-			q.From.Reg = p.To.Reg
-			q.From.Offset = p.From.Offset
-			q.To = p.To
-			p.From.Offset = 0
-		}
-		if cmplxdest {
-			q = obj.Appendp(ctxt, q)
-			q.As = pAs
-			q.To = dest
-			q.From.Type = obj.TYPE_REG
-			q.From.Reg = reg
-		}
-	}
-	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	var source *obj.Addr
-	// MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry
-	// MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15)
-	// An addition may be inserted between the two MOVs if there is an offset.
-	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
-		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
-		}
-		source = &p.From
-	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
-		source = &p.To
-	} else {
-		return
-	}
-	if p.As == obj.ACALL {
-		// When dynlinking on 386, almost any call might end up being a call
-		// to a PLT, so make sure the GOT pointer is loaded into BX.
-		// RegTo2 is set on the replacement call insn to stop it being
-		// processed when it is in turn passed to progedit.
-		if p.Mode == 64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
-			return
-		}
-		p1 := obj.Appendp(ctxt, p)
-		p2 := obj.Appendp(ctxt, p1)
-
-		p1.As = ALEAL
-		p1.From.Type = obj.TYPE_MEM
-		p1.From.Name = obj.NAME_STATIC
-		p1.From.Sym = obj.Linklookup(ctxt, "_GLOBAL_OFFSET_TABLE_", 0)
-		p1.To.Type = obj.TYPE_REG
-		p1.To.Reg = REG_BX
-
-		p2.As = p.As
-		p2.Scond = p.Scond
-		p2.From = p.From
-		p2.From3 = p.From3
-		p2.Reg = p.Reg
-		p2.To = p.To
-		// p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr
-		// in ../pass.go complain, so set it back to TYPE_MEM here, until p2
-		// itself gets passed to progedit.
-		p2.To.Type = obj.TYPE_MEM
-		p2.RegTo2 = 1
-
-		obj.Nopout(p)
-		return
-
-	}
-	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP {
-		return
-	}
-	if source.Type != obj.TYPE_MEM {
-		ctxt.Diag("don't know how to handle %v with -dynlink", p)
-	}
-	p1 := obj.Appendp(ctxt, p)
-	p2 := obj.Appendp(ctxt, p1)
-
-	p1.As = mov
-	p1.From.Type = obj.TYPE_MEM
-	p1.From.Sym = source.Sym
-	p1.From.Name = obj.NAME_GOTREF
-	p1.To.Type = obj.TYPE_REG
-	p1.To.Reg = reg
-
-	p2.As = p.As
-	p2.From = p.From
-	p2.To = p.To
-	if p.From.Name == obj.NAME_EXTERN {
-		p2.From.Reg = reg
-		p2.From.Name = obj.NAME_NONE
-		p2.From.Sym = nil
-	} else if p.To.Name == obj.NAME_EXTERN {
-		p2.To.Reg = reg
-		p2.To.Name = obj.NAME_NONE
-		p2.To.Sym = nil
-	} else {
-		return
-	}
-	obj.Nopout(p)
-}
-
-func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
-	// RegTo2 is set on the instructions we insert here so they don't get
-	// processed twice.
-	if p.RegTo2 != 0 {
-		return
-	}
-	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
-		return
-	}
-	// Any Prog (aside from the above special cases) with an Addr with Name ==
-	// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.XX
-	// inserted before it.
-	isName := func(a *obj.Addr) bool {
-		if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
-			return false
-		}
-		if a.Sym.Type == obj.STLSBSS {
-			return false
-		}
-		return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
-	}
-
-	if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
-		// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
-		// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
-		// respectively.
-		if p.To.Type != obj.TYPE_REG {
-			q := obj.Appendp(ctxt, p)
-			q.As = p.As
-			q.From.Type = obj.TYPE_REG
-			q.From.Reg = REG_CX
-			q.To = p.To
-			p.As = AMOVL
-			p.To.Type = obj.TYPE_REG
-			p.To.Reg = REG_CX
-			p.To.Sym = nil
-			p.To.Name = obj.NAME_NONE
-		}
-	}
-
-	if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
-		return
-	}
-	var dst int16 = REG_CX
-	if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
-		dst = p.To.Reg
-		// Why?  See the comment near the top of rewriteToUseGot above.
-		// AMOVLs might be introduced by the GOT rewrites.
-	}
-	q := obj.Appendp(ctxt, p)
-	q.RegTo2 = 1
-	r := obj.Appendp(ctxt, q)
-	r.RegTo2 = 1
-	q.As = obj.ACALL
-	q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk."+strings.ToLower(Rconv(int(dst))), 0)
-	q.To.Type = obj.TYPE_MEM
-	q.To.Name = obj.NAME_EXTERN
-	q.To.Sym.Set(obj.AttrLocal, true)
-	r.As = p.As
-	r.Scond = p.Scond
-	r.From = p.From
-	r.From3 = p.From3
-	r.Reg = p.Reg
-	r.To = p.To
-	if isName(&p.From) {
-		r.From.Reg = dst
-	}
-	if isName(&p.To) {
-		r.To.Reg = dst
-	}
-	if p.From3 != nil && isName(p.From3) {
-		r.From3.Reg = dst
-	}
-	obj.Nopout(p)
-}
-
-func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
-	if p.As == ALEAL || p.As == ALEAQ {
-		return
-	}
-
-	if a.Reg == REG_BP {
-		ctxt.Diag("invalid address: %v", p)
-		return
-	}
-
-	if a.Reg == REG_TLS {
-		a.Reg = REG_BP
-	}
-	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
-		switch a.Reg {
-		// all ok
-		case REG_BP, REG_SP, REG_R15:
-			break
-
-		default:
-			if a.Index != REG_NONE {
-				ctxt.Diag("invalid address %v", p)
-			}
-			a.Index = a.Reg
-			if a.Index != REG_NONE {
-				a.Scale = 1
-			}
-			a.Reg = REG_R15
-		}
-	}
-}
-
-func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
-	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
-		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
-	}
-
-	ctxt.Cursym = cursym
-
-	if cursym.Text == nil || cursym.Text.Link == nil {
-		return
-	}
-
-	p := cursym.Text
-	autoffset := int32(p.To.Offset)
-	if autoffset < 0 {
-		autoffset = 0
-	}
-
-	hasCall := false
-	for q := p; q != nil; q = q.Link {
-		if q.As == obj.ACALL || q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO {
-			hasCall = true
-			break
-		}
-	}
-
-	var bpsize int
-	if p.Mode == 64 && ctxt.Framepointer_enabled &&
-		p.From3.Offset&obj.NOFRAME == 0 && // (1) below
-		!(autoffset == 0 && p.From3.Offset&obj.NOSPLIT != 0) && // (2) below
-		!(autoffset == 0 && !hasCall) { // (3) below
-		// Make room to save a base pointer.
-		// There are 2 cases we must avoid:
-		// 1) If noframe is set (which we do for functions which tail call).
-		// 2) Scary runtime internals which would be all messed up by frame pointers.
-		//    We detect these using a heuristic: frameless nosplit functions.
-		//    TODO: Maybe someday we label them all with NOFRAME and get rid of this heuristic.
-		// For performance, we also want to avoid:
-		// 3) Frameless leaf functions
-		bpsize = ctxt.Arch.PtrSize
-		autoffset += int32(bpsize)
-		p.To.Offset += int64(bpsize)
-	} else {
-		bpsize = 0
-	}
-
-	textarg := int64(p.To.Val.(int32))
-	cursym.Args = int32(textarg)
-	cursym.Locals = int32(p.To.Offset)
-
-	// TODO(rsc): Remove.
-	if p.Mode == 32 && cursym.Locals < 0 {
-		cursym.Locals = 0
-	}
-
-	// TODO(rsc): Remove 'p.Mode == 64 &&'.
-	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
-		leaf := true
-	LeafSearch:
-		for q := p; q != nil; q = q.Link {
-			switch q.As {
-			case obj.ACALL:
-				// Treat common runtime calls that take no arguments
-				// the same as duffcopy and duffzero.
-				if !isZeroArgRuntimeCall(q.To.Sym) {
-					leaf = false
-					break LeafSearch
-				}
-				fallthrough
-			case obj.ADUFFCOPY, obj.ADUFFZERO:
-				if autoffset >= obj.StackSmall-8 {
-					leaf = false
-					break LeafSearch
-				}
-			}
-		}
-
-		if leaf {
-			p.From3.Offset |= obj.NOSPLIT
-		}
-	}
-
-	if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
-		p = obj.Appendp(ctxt, p)
-		p = load_g_cx(ctxt, p) // load g into CX
-	}
-
-	if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
-		p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
-	}
-
-	if autoffset != 0 {
-		if autoffset%int32(ctxt.Arch.RegSize) != 0 {
-			ctxt.Diag("unaligned stack size %d", autoffset)
-		}
-		p = obj.Appendp(ctxt, p)
-		p.As = AADJSP
-		p.From.Type = obj.TYPE_CONST
-		p.From.Offset = int64(autoffset)
-		p.Spadj = autoffset
-	}
-
-	deltasp := autoffset
-
-	if bpsize > 0 {
-		// Save caller's BP
-		p = obj.Appendp(ctxt, p)
-
-		p.As = AMOVQ
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_BP
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = REG_SP
-		p.To.Scale = 1
-		p.To.Offset = int64(autoffset) - int64(bpsize)
-
-		// Move current frame to BP
-		p = obj.Appendp(ctxt, p)
-
-		p.As = ALEAQ
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_SP
-		p.From.Scale = 1
-		p.From.Offset = int64(autoffset) - int64(bpsize)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_BP
-	}
-
-	if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
-		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
-		//
-		//	MOVQ g_panic(CX), BX
-		//	TESTQ BX, BX
-		//	JEQ end
-		//	LEAQ (autoffset+8)(SP), DI
-		//	CMPQ panic_argp(BX), DI
-		//	JNE end
-		//	MOVQ SP, panic_argp(BX)
-		// end:
-		//	NOP
-		//
-		// The NOP is needed to give the jumps somewhere to land.
-		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
-
-		p = obj.Appendp(ctxt, p)
-
-		p.As = AMOVQ
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_CX
-		p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_BX
-		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
-			p.As = AMOVL
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = REG_R15
-			p.From.Scale = 1
-			p.From.Index = REG_CX
-		}
-		if p.Mode == 32 {
-			p.As = AMOVL
-		}
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ATESTQ
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_BX
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_BX
-		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
-			p.As = ATESTL
-		}
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AJEQ
-		p.To.Type = obj.TYPE_BRANCH
-		p1 := p
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ALEAQ
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_SP
-		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_DI
-		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
-			p.As = ALEAL
-		}
-
-		p = obj.Appendp(ctxt, p)
-		p.As = ACMPQ
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_BX
-		p.From.Offset = 0 // Panic.argp
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_DI
-		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
-			p.As = ACMPL
-			p.From.Type = obj.TYPE_MEM
-			p.From.Reg = REG_R15
-			p.From.Scale = 1
-			p.From.Index = REG_BX
-		}
-		if p.Mode == 32 {
-			p.As = ACMPL
-		}
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AJNE
-		p.To.Type = obj.TYPE_BRANCH
-		p2 := p
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AMOVQ
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_SP
-		p.To.Type = obj.TYPE_MEM
-		p.To.Reg = REG_BX
-		p.To.Offset = 0 // Panic.argp
-		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
-			p.As = AMOVL
-			p.To.Type = obj.TYPE_MEM
-			p.To.Reg = REG_R15
-			p.To.Scale = 1
-			p.To.Index = REG_BX
-		}
-		if p.Mode == 32 {
-			p.As = AMOVL
-		}
-
-		p = obj.Appendp(ctxt, p)
-		p.As = obj.ANOP
-		p1.Pcond = p
-		p2.Pcond = p
-	}
-
-	for ; p != nil; p = p.Link {
-		pcsize := int(p.Mode) / 8
-		switch p.From.Name {
-		case obj.NAME_AUTO:
-			p.From.Offset += int64(deltasp) - int64(bpsize)
-		case obj.NAME_PARAM:
-			p.From.Offset += int64(deltasp) + int64(pcsize)
-		}
-		if p.From3 != nil {
-			switch p.From3.Name {
-			case obj.NAME_AUTO:
-				p.From3.Offset += int64(deltasp) - int64(bpsize)
-			case obj.NAME_PARAM:
-				p.From3.Offset += int64(deltasp) + int64(pcsize)
-			}
-		}
-		switch p.To.Name {
-		case obj.NAME_AUTO:
-			p.To.Offset += int64(deltasp) - int64(bpsize)
-		case obj.NAME_PARAM:
-			p.To.Offset += int64(deltasp) + int64(pcsize)
-		}
-
-		switch p.As {
-		default:
-			continue
-
-		case APUSHL, APUSHFL:
-			deltasp += 4
-			p.Spadj = 4
-			continue
-
-		case APUSHQ, APUSHFQ:
-			deltasp += 8
-			p.Spadj = 8
-			continue
-
-		case APUSHW, APUSHFW:
-			deltasp += 2
-			p.Spadj = 2
-			continue
-
-		case APOPL, APOPFL:
-			deltasp -= 4
-			p.Spadj = -4
-			continue
-
-		case APOPQ, APOPFQ:
-			deltasp -= 8
-			p.Spadj = -8
-			continue
-
-		case APOPW, APOPFW:
-			deltasp -= 2
-			p.Spadj = -2
-			continue
-
-		case obj.ARET:
-			// do nothing
-		}
-
-		if autoffset != deltasp {
-			ctxt.Diag("unbalanced PUSH/POP")
-		}
-
-		if autoffset != 0 {
-			if bpsize > 0 {
-				// Restore caller's BP
-				p.As = AMOVQ
-
-				p.From.Type = obj.TYPE_MEM
-				p.From.Reg = REG_SP
-				p.From.Scale = 1
-				p.From.Offset = int64(autoffset) - int64(bpsize)
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = REG_BP
-				p = obj.Appendp(ctxt, p)
-			}
-
-			p.As = AADJSP
-			p.From.Type = obj.TYPE_CONST
-			p.From.Offset = int64(-autoffset)
-			p.Spadj = -autoffset
-			p = obj.Appendp(ctxt, p)
-			p.As = obj.ARET
-
-			// If there are instructions following
-			// this ARET, they come from a branch
-			// with the same stackframe, so undo
-			// the cleanup.
-			p.Spadj = +autoffset
-		}
-
-		if p.To.Sym != nil { // retjmp
-			p.As = obj.AJMP
-		}
-	}
-}
-
-func isZeroArgRuntimeCall(s *obj.LSym) bool {
-	if s == nil {
-		return false
-	}
-	switch s.Name {
-	case "runtime.panicindex", "runtime.panicslice", "runtime.panicdivide":
-		return true
-	}
-	return false
-}
-
-func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
-	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
-		a.Type = obj.TYPE_MEM
-		a.Reg = REG_R15
-		a.Index = REG_CX
-		a.Scale = 1
-		return
-	}
-
-	a.Type = obj.TYPE_MEM
-	a.Reg = REG_CX
-}
-
-// Append code to p to load g into cx.
-// Overwrites p with the first instruction (no first appendp).
-// Overwriting p is unusual but it lets us use this in both the
-// prologue (caller must call appendp first) and in the epilogue.
-// Returns last new instruction.
-func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
-	p.As = AMOVQ
-	if ctxt.Arch.PtrSize == 4 {
-		p.As = AMOVL
-	}
-	p.From.Type = obj.TYPE_MEM
-	p.From.Reg = REG_TLS
-	p.From.Offset = 0
-	p.To.Type = obj.TYPE_REG
-	p.To.Reg = REG_CX
-
-	next := p.Link
-	progedit(ctxt, p)
-	for p.Link != next {
-		p = p.Link
-	}
-
-	if p.From.Index == REG_TLS {
-		p.From.Scale = 2
-	}
-
-	return p
-}
-
-// Append code to p to check for stack split.
-// Appends to (does not overwrite) p.
-// Assumes g is in CX.
-// Returns last new instruction.
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
-	cmp := ACMPQ
-	lea := ALEAQ
-	mov := AMOVQ
-	sub := ASUBQ
-
-	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
-		cmp = ACMPL
-		lea = ALEAL
-		mov = AMOVL
-		sub = ASUBL
-	}
-
-	var q1 *obj.Prog
-	if framesize <= obj.StackSmall {
-		// small stack: SP <= stackguard
-		//	CMPQ SP, stackguard
-		p = obj.Appendp(ctxt, p)
-
-		p.As = cmp
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_SP
-		indir_cx(ctxt, p, &p.To)
-		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-		if ctxt.Cursym.CFunc() {
-			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-		}
-	} else if framesize <= obj.StackBig {
-		// large stack: SP-framesize <= stackguard-StackSmall
-		//	LEAQ -xxx(SP), AX
-		//	CMPQ AX, stackguard
-		p = obj.Appendp(ctxt, p)
-
-		p.As = lea
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_SP
-		p.From.Offset = -(int64(framesize) - obj.StackSmall)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_AX
-
-		p = obj.Appendp(ctxt, p)
-		p.As = cmp
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_AX
-		indir_cx(ctxt, p, &p.To)
-		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-		if ctxt.Cursym.CFunc() {
-			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-		}
-	} else {
-		// Such a large stack we need to protect against wraparound.
-		// If SP is close to zero:
-		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
-		// The +StackGuard on both sides is required to keep the left side positive:
-		// SP is allowed to be slightly below stackguard. See stack.h.
-		//
-		// Preemption sets stackguard to StackPreempt, a very large value.
-		// That breaks the math above, so we have to check for that explicitly.
-		//	MOVQ	stackguard, CX
-		//	CMPQ	CX, $StackPreempt
-		//	JEQ	label-of-call-to-morestack
-		//	LEAQ	StackGuard(SP), AX
-		//	SUBQ	CX, AX
-		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))
-
-		p = obj.Appendp(ctxt, p)
-
-		p.As = mov
-		indir_cx(ctxt, p, &p.From)
-		p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
-		if ctxt.Cursym.CFunc() {
-			p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
-		}
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_SI
-
-		p = obj.Appendp(ctxt, p)
-		p.As = cmp
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_SI
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = obj.StackPreempt
-		if p.Mode == 32 {
-			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
-		}
-
-		p = obj.Appendp(ctxt, p)
-		p.As = AJEQ
-		p.To.Type = obj.TYPE_BRANCH
-		q1 = p
-
-		p = obj.Appendp(ctxt, p)
-		p.As = lea
-		p.From.Type = obj.TYPE_MEM
-		p.From.Reg = REG_SP
-		p.From.Offset = obj.StackGuard
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_AX
-
-		p = obj.Appendp(ctxt, p)
-		p.As = sub
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_SI
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = REG_AX
-
-		p = obj.Appendp(ctxt, p)
-		p.As = cmp
-		p.From.Type = obj.TYPE_REG
-		p.From.Reg = REG_AX
-		p.To.Type = obj.TYPE_CONST
-		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
-	}
-
-	// common
-	jls := obj.Appendp(ctxt, p)
-	jls.As = AJLS
-	jls.To.Type = obj.TYPE_BRANCH
-
-	var last *obj.Prog
-	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
-	}
-
-	// Now we are at the end of the function, but logically
-	// we are still in function prologue. We need to fix the
-	// SP data and PCDATA.
-	spfix := obj.Appendp(ctxt, last)
-	spfix.As = obj.ANOP
-	spfix.Spadj = -framesize
-
-	pcdata := obj.Appendp(ctxt, spfix)
-	pcdata.Lineno = ctxt.Cursym.Text.Lineno
-	pcdata.Mode = ctxt.Cursym.Text.Mode
-	pcdata.As = obj.APCDATA
-	pcdata.From.Type = obj.TYPE_CONST
-	pcdata.From.Offset = obj.PCDATA_StackMapIndex
-	pcdata.To.Type = obj.TYPE_CONST
-	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry
-
-	call := obj.Appendp(ctxt, pcdata)
-	call.Lineno = ctxt.Cursym.Text.Lineno
-	call.Mode = ctxt.Cursym.Text.Mode
-	call.As = obj.ACALL
-	call.To.Type = obj.TYPE_BRANCH
-	call.To.Name = obj.NAME_EXTERN
-	morestack := "runtime.morestack"
-	switch {
-	case ctxt.Cursym.CFunc():
-		morestack = "runtime.morestackc"
-	case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
-		morestack = "runtime.morestack_noctxt"
-	}
-	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
-	// When compiling 386 code for dynamic linking, the call needs to be adjusted
-	// to follow PIC rules. This in turn can insert more instructions, so we need
-	// to keep track of the start of the call (where the jump will be to) and the
-	// end (which following instructions are appended to).
-	callend := call
-	progedit(ctxt, callend)
-	for ; callend.Link != nil; callend = callend.Link {
-		progedit(ctxt, callend.Link)
-	}
-
-	jmp := obj.Appendp(ctxt, callend)
-	jmp.As = obj.AJMP
-	jmp.To.Type = obj.TYPE_BRANCH
-	jmp.Pcond = ctxt.Cursym.Text.Link
-	jmp.Spadj = +framesize
-
-	jls.Pcond = call
-	if q1 != nil {
-		q1.Pcond = call
-	}
-
-	return jls
-}
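
The large-frame branch of the stack check above reduces to the following standalone sketch; the constants are placeholders rather than the real runtime values.

package main

import "fmt"

// Placeholder constants for illustration only; the real values come from the
// runtime stack configuration.
const (
	stackSmall   uint64 = 128
	stackGuard   uint64 = 880
	stackPreempt uint64 = ^uint64(0) // stand-in for the "very large" preempt sentinel
)

// morestackNeeded mirrors the wraparound-safe comparison emitted above:
// adding StackGuard to both sides keeps the left side non-negative even when
// SP is allowed to sit slightly below stackguard.
func morestackNeeded(sp, stackguard, framesize uint64) bool {
	if stackguard == stackPreempt {
		return true // preemption request: always take the morestack path
	}
	return sp-stackguard+stackGuard <= framesize+(stackGuard-stackSmall)
}

func main() {
	fmt.Println(morestackNeeded(0x11e00, 0x12000, 4096)) // SP 512 bytes below the guard: true
	fmt.Println(morestackNeeded(0x80000, 0x12000, 4096)) // plenty of headroom: false
}
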
-
-func follow(ctxt *obj.Link, s *obj.LSym) {
-	ctxt.Cursym = s
-
-	firstp := ctxt.NewProg()
-	lastp := firstp
-	xfol(ctxt, s.Text, &lastp)
-	lastp.Link = nil
-	s.Text = firstp.Link
-}
-
-func nofollow(a obj.As) bool {
-	switch a {
-	case obj.AJMP,
-		obj.ARET,
-		AIRETL,
-		AIRETQ,
-		AIRETW,
-		ARETFL,
-		ARETFQ,
-		ARETFW,
-		obj.AUNDEF:
-		return true
-	}
-
-	return false
-}
-
-func pushpop(a obj.As) bool {
-	switch a {
-	case APUSHL,
-		APUSHFL,
-		APUSHQ,
-		APUSHFQ,
-		APUSHW,
-		APUSHFW,
-		APOPL,
-		APOPFL,
-		APOPQ,
-		APOPFQ,
-		APOPW,
-		APOPFW:
-		return true
-	}
-
-	return false
-}
-
-func relinv(a obj.As) obj.As {
-	switch a {
-	case AJEQ:
-		return AJNE
-	case AJNE:
-		return AJEQ
-	case AJLE:
-		return AJGT
-	case AJLS:
-		return AJHI
-	case AJLT:
-		return AJGE
-	case AJMI:
-		return AJPL
-	case AJGE:
-		return AJLT
-	case AJPL:
-		return AJMI
-	case AJGT:
-		return AJLE
-	case AJHI:
-		return AJLS
-	case AJCS:
-		return AJCC
-	case AJCC:
-		return AJCS
-	case AJPS:
-		return AJPC
-	case AJPC:
-		return AJPS
-	case AJOS:
-		return AJOC
-	case AJOC:
-		return AJOS
-	}
-
-	log.Fatalf("unknown relation: %s", a)
-	return 0
-}
-
-func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
-	var q *obj.Prog
-	var i int
-	var a obj.As
-
-loop:
-	if p == nil {
-		return
-	}
-	if p.As == obj.AJMP {
-		q = p.Pcond
-		if q != nil && q.As != obj.ATEXT {
-			/* mark instruction as done and continue layout at target of jump */
-			p.Mark |= DONE
-
-			p = q
-			if p.Mark&DONE == 0 {
-				goto loop
-			}
-		}
-	}
-
-	if p.Mark&DONE != 0 {
-		/*
-		 * p goes here, but already used it elsewhere.
-		 * copy up to 4 instructions or else branch to other copy.
-		 */
-		i = 0
-		q = p
-		for ; i < 4; i, q = i+1, q.Link {
-			if q == nil {
-				break
-			}
-			if q == *last {
-				break
-			}
-			a = q.As
-			if a == obj.ANOP {
-				i--
-				continue
-			}
-
-			if nofollow(a) || pushpop(a) {
-				break // NOTE(rsc): arm does goto copy
-			}
-			if q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
-				continue
-			}
-			if a == obj.ACALL || a == ALOOP {
-				continue
-			}
-			for {
-				if p.As == obj.ANOP {
-					p = p.Link
-					continue
-				}
-
-				q = obj.Copyp(ctxt, p)
-				p = p.Link
-				q.Mark |= DONE
-				(*last).Link = q
-				*last = q
-				if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
-					continue
-				}
-
-				q.As = relinv(q.As)
-				p = q.Pcond
-				q.Pcond = q.Link
-				q.Link = p
-				xfol(ctxt, q.Link, last)
-				p = q.Link
-				if p.Mark&DONE != 0 {
-					return
-				}
-				goto loop
-				/* */
-			}
-		}
-		q = ctxt.NewProg()
-		q.As = obj.AJMP
-		q.Lineno = p.Lineno
-		q.To.Type = obj.TYPE_BRANCH
-		q.To.Offset = p.Pc
-		q.Pcond = p
-		p = q
-	}
-
-	/* emit p */
-	p.Mark |= DONE
-
-	(*last).Link = p
-	*last = p
-	a = p.As
-
-	/* continue loop with what comes after p */
-	if nofollow(a) {
-		return
-	}
-	if p.Pcond != nil && a != obj.ACALL {
-		/*
-		 * some kind of conditional branch.
-		 * recurse to follow one path.
-		 * continue loop on the other.
-		 */
-		q = obj.Brchain(ctxt, p.Pcond)
-		if q != nil {
-			p.Pcond = q
-		}
-		q = obj.Brchain(ctxt, p.Link)
-		if q != nil {
-			p.Link = q
-		}
-		if p.From.Type == obj.TYPE_CONST {
-			if p.From.Offset == 1 {
-				/*
-				 * expect conditional jump to be taken.
-				 * rewrite so that's the fall-through case.
-				 */
-				p.As = relinv(a)
-
-				q = p.Link
-				p.Link = p.Pcond
-				p.Pcond = q
-			}
-		} else {
-			q = p.Link
-			if q.Mark&DONE != 0 {
-				if a != ALOOP {
-					p.As = relinv(a)
-					p.Link = p.Pcond
-					p.Pcond = q
-				}
-			}
-		}
-
-		xfol(ctxt, p.Link, last)
-		if p.Pcond.Mark&DONE != 0 {
-			return
-		}
-		p = p.Pcond
-		goto loop
-	}
-
-	p = p.Link
-	goto loop
-}
-
-var unaryDst = map[obj.As]bool{
-	ABSWAPL:    true,
-	ABSWAPQ:    true,
-	ACMPXCHG8B: true,
-	ADECB:      true,
-	ADECL:      true,
-	ADECQ:      true,
-	ADECW:      true,
-	AINCB:      true,
-	AINCL:      true,
-	AINCQ:      true,
-	AINCW:      true,
-	ANEGB:      true,
-	ANEGL:      true,
-	ANEGQ:      true,
-	ANEGW:      true,
-	ANOTB:      true,
-	ANOTL:      true,
-	ANOTQ:      true,
-	ANOTW:      true,
-	APOPL:      true,
-	APOPQ:      true,
-	APOPW:      true,
-	ASETCC:     true,
-	ASETCS:     true,
-	ASETEQ:     true,
-	ASETGE:     true,
-	ASETGT:     true,
-	ASETHI:     true,
-	ASETLE:     true,
-	ASETLS:     true,
-	ASETLT:     true,
-	ASETMI:     true,
-	ASETNE:     true,
-	ASETOC:     true,
-	ASETOS:     true,
-	ASETPC:     true,
-	ASETPL:     true,
-	ASETPS:     true,
-	AFFREE:     true,
-	AFLDENV:    true,
-	AFSAVE:     true,
-	AFSTCW:     true,
-	AFSTENV:    true,
-	AFSTSW:     true,
-	AFXSAVE:    true,
-	AFXSAVE64:  true,
-	ASTMXCSR:   true,
-}
-
-var Linkamd64 = obj.LinkArch{
-	Arch:       sys.ArchAMD64,
-	Preprocess: preprocess,
-	Assemble:   span6,
-	Follow:     follow,
-	Progedit:   progedit,
-	UnaryDst:   unaryDst,
-}
-
-var Linkamd64p32 = obj.LinkArch{
-	Arch:       sys.ArchAMD64P32,
-	Preprocess: preprocess,
-	Assemble:   span6,
-	Follow:     follow,
-	Progedit:   progedit,
-	UnaryDst:   unaryDst,
-}
-
-var Link386 = obj.LinkArch{
-	Arch:       sys.Arch386,
-	Preprocess: preprocess,
-	Assemble:   span6,
-	Follow:     follow,
-	Progedit:   progedit,
-	UnaryDst:   unaryDst,
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/obj6_test.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/obj6_test.go
deleted file mode 100644
index d1df0bc..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/x86/obj6_test.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/obj6_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/x86/obj6_test.go:1
-package x86_test
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"internal/testenv"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"regexp"
-	"strconv"
-	"strings"
-	"testing"
-)
-
-const testdata = `
-MOVQ AX, AX -> MOVQ AX, AX
-
-LEAQ name(SB), AX -> MOVQ name@GOT(SB), AX
-LEAQ name+10(SB), AX -> MOVQ name@GOT(SB), AX; LEAQ 10(AX), AX
-MOVQ $name(SB), AX -> MOVQ name@GOT(SB), AX
-MOVQ $name+10(SB), AX -> MOVQ name@GOT(SB), AX; LEAQ 10(AX), AX
-
-MOVQ name(SB), AX -> NOP; MOVQ name@GOT(SB), R15; MOVQ (R15), AX
-MOVQ name+10(SB), AX -> NOP; MOVQ name@GOT(SB), R15; MOVQ 10(R15), AX
-
-CMPQ name(SB), $0 -> NOP; MOVQ name@GOT(SB), R15; CMPQ (R15), $0
-
-MOVQ $1, name(SB) -> NOP; MOVQ name@GOT(SB), R15; MOVQ $1, (R15)
-MOVQ $1, name+10(SB) -> NOP; MOVQ name@GOT(SB), R15; MOVQ $1, 10(R15)
-`
-
-type ParsedTestData struct {
-	input              string
-	marks              []int
-	marker_to_input    map[int][]string
-	marker_to_expected map[int][]string
-	marker_to_output   map[int][]string
-}
-
-const marker_start = 1234
-
-func parseTestData(t *testing.T) *ParsedTestData {
-	r := &ParsedTestData{}
-	scanner := bufio.NewScanner(strings.NewReader(testdata))
-	r.marker_to_input = make(map[int][]string)
-	r.marker_to_expected = make(map[int][]string)
-	marker := marker_start
-	input_insns := []string{}
-	for scanner.Scan() {
-		line := scanner.Text()
-		if len(strings.TrimSpace(line)) == 0 {
-			continue
-		}
-		parts := strings.Split(line, "->")
-		if len(parts) != 2 {
-			t.Fatalf("malformed line %v", line)
-		}
-		r.marks = append(r.marks, marker)
-		marker_insn := fmt.Sprintf("MOVQ $%d, AX", marker)
-		input_insns = append(input_insns, marker_insn)
-		for _, input_insn := range strings.Split(parts[0], ";") {
-			input_insns = append(input_insns, input_insn)
-			r.marker_to_input[marker] = append(r.marker_to_input[marker], normalize(input_insn))
-		}
-		for _, expected_insn := range strings.Split(parts[1], ";") {
-			r.marker_to_expected[marker] = append(r.marker_to_expected[marker], normalize(expected_insn))
-		}
-		marker++
-	}
-	r.input = "TEXT ·foo(SB),$0\n" + strings.Join(input_insns, "\n") + "\n"
-	return r
-}
-
-var spaces_re *regexp.Regexp = regexp.MustCompile("\\s+")
-
-func normalize(s string) string {
-	return spaces_re.ReplaceAllLiteralString(strings.TrimSpace(s), " ")
-}
-
-func asmOutput(t *testing.T, s string) []byte {
-	tmpdir, err := ioutil.TempDir("", "progedittest")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpdir)
-	tmpfile, err := os.Create(filepath.Join(tmpdir, "input.s"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer tmpfile.Close()
-	_, err = tmpfile.WriteString(s)
-	if err != nil {
-		t.Fatal(err)
-	}
-	cmd := exec.Command(
-		testenv.GoToolPath(t), "tool", "asm", "-S", "-dynlink",
-		"-o", filepath.Join(tmpdir, "output.6"), tmpfile.Name())
-
-	var env []string
-	for _, v := range os.Environ() {
-		if !strings.HasPrefix(v, "GOARCH=") {
-			env = append(env, v)
-		}
-	}
-	cmd.Env = append(env, "GOARCH=amd64")
-	asmout, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("error %s output %s", err, asmout)
-	}
-	return asmout
-}
-
-func parseOutput(t *testing.T, td *ParsedTestData, asmout []byte) {
-	scanner := bufio.NewScanner(bytes.NewReader(asmout))
-	marker := regexp.MustCompile("MOVQ \\$([0-9]+), AX")
-	mark := -1
-	td.marker_to_output = make(map[int][]string)
-	for scanner.Scan() {
-		line := scanner.Text()
-		if line[0] != '\t' {
-			continue
-		}
-		parts := strings.SplitN(line, "\t", 3)
-		if len(parts) != 3 {
-			continue
-		}
-		n := normalize(parts[2])
-		mark_matches := marker.FindStringSubmatch(n)
-		if mark_matches != nil {
-			mark, _ = strconv.Atoi(mark_matches[1])
-			if _, ok := td.marker_to_input[mark]; !ok {
-				t.Fatalf("unexpected marker %d", mark)
-			}
-		} else if mark != -1 {
-			td.marker_to_output[mark] = append(td.marker_to_output[mark], n)
-		}
-	}
-}
-
-func TestDynlink(t *testing.T) {
-	testenv.MustHaveGoBuild(t)
-
-	if os.Getenv("GOHOSTARCH") != "" {
-		// TODO: make this work? It was failing due to the
-		// GOARCH= filtering above and skipping is easiest for
-		// now.
-		t.Skip("skipping when GOHOSTARCH is set")
-	}
-
-	testdata := parseTestData(t)
-	asmout := asmOutput(t, testdata.input)
-	parseOutput(t, testdata, asmout)
-	for _, m := range testdata.marks {
-		i := strings.Join(testdata.marker_to_input[m], "; ")
-		o := strings.Join(testdata.marker_to_output[m], "; ")
-		e := strings.Join(testdata.marker_to_expected[m], "; ")
-		if o != e {
-			if o == i {
-				t.Errorf("%s was unchanged; should have become %s", i, e)
-			} else {
-				t.Errorf("%s became %s; should have become %s", i, o, e)
-			}
-		} else if i != e {
-			t.Logf("%s correctly became %s", i, o)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/zbootstrap.go b/pkg/bootstrap/src/bootstrap/cmd/internal/obj/zbootstrap.go
deleted file mode 100644
index ba03f87..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/obj/zbootstrap.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/zbootstrap.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/obj/zbootstrap.go:1
-// auto generated by go tool dist
-
-package obj
-
-import "runtime"
-
-const defaultGOROOT = `./prebuilts/go/linux-x86`
-const defaultGO386 = `sse2`
-const defaultGOARM = `5`
-const defaultGOOS = runtime.GOOS
-const defaultGOARCH = runtime.GOARCH
-const defaultGO_EXTLINK_ENABLED = ``
-const version = `go1.8rc2`
-const stackGuardMultiplier = 1
-const goexperiment = ``
diff --git a/pkg/bootstrap/src/bootstrap/cmd/internal/sys/arch.go b/pkg/bootstrap/src/bootstrap/cmd/internal/sys/arch.go
deleted file mode 100644
index ffc0f97..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/internal/sys/arch.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/sys/arch.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/internal/sys/arch.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sys
-
-import "encoding/binary"
-
-// ArchFamily represents a family of one or more related architectures.
-// For example, amd64 and amd64p32 are both members of the AMD64 family,
-// and ppc64 and ppc64le are both members of the PPC64 family.
-type ArchFamily byte
-
-const (
-	AMD64 ArchFamily = iota
-	ARM
-	ARM64
-	I386
-	MIPS
-	MIPS64
-	PPC64
-	S390X
-)
-
-// Arch represents an individual architecture.
-type Arch struct {
-	Name   string
-	Family ArchFamily
-
-	ByteOrder binary.ByteOrder
-
-	IntSize int
-	PtrSize int
-	RegSize int
-
-	// MinLC is the minimum length of an instruction code.
-	MinLC int
-}
-
-// InFamily reports whether a is a member of any of the specified
-// architecture families.
-func (a *Arch) InFamily(xs ...ArchFamily) bool {
-	for _, x := range xs {
-		if a.Family == x {
-			return true
-		}
-	}
-	return false
-}
-
-var Arch386 = &Arch{
-	Name:      "386",
-	Family:    I386,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   4,
-	PtrSize:   4,
-	RegSize:   4,
-	MinLC:     1,
-}
-
-var ArchAMD64 = &Arch{
-	Name:      "amd64",
-	Family:    AMD64,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     1,
-}
-
-var ArchAMD64P32 = &Arch{
-	Name:      "amd64p32",
-	Family:    AMD64,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   4,
-	PtrSize:   4,
-	RegSize:   8,
-	MinLC:     1,
-}
-
-var ArchARM = &Arch{
-	Name:      "arm",
-	Family:    ARM,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   4,
-	PtrSize:   4,
-	RegSize:   4,
-	MinLC:     4,
-}
-
-var ArchARM64 = &Arch{
-	Name:      "arm64",
-	Family:    ARM64,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     4,
-}
-
-var ArchMIPS = &Arch{
-	Name:      "mips",
-	Family:    MIPS,
-	ByteOrder: binary.BigEndian,
-	IntSize:   4,
-	PtrSize:   4,
-	RegSize:   4,
-	MinLC:     4,
-}
-
-var ArchMIPSLE = &Arch{
-	Name:      "mipsle",
-	Family:    MIPS,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   4,
-	PtrSize:   4,
-	RegSize:   4,
-	MinLC:     4,
-}
-
-var ArchMIPS64 = &Arch{
-	Name:      "mips64",
-	Family:    MIPS64,
-	ByteOrder: binary.BigEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     4,
-}
-
-var ArchMIPS64LE = &Arch{
-	Name:      "mips64le",
-	Family:    MIPS64,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     4,
-}
-
-var ArchPPC64 = &Arch{
-	Name:      "ppc64",
-	Family:    PPC64,
-	ByteOrder: binary.BigEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     4,
-}
-
-var ArchPPC64LE = &Arch{
-	Name:      "ppc64le",
-	Family:    PPC64,
-	ByteOrder: binary.LittleEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     4,
-}
-
-var ArchS390X = &Arch{
-	Name:      "s390x",
-	Family:    S390X,
-	ByteOrder: binary.BigEndian,
-	IntSize:   8,
-	PtrSize:   8,
-	RegSize:   8,
-	MinLC:     2,
-}
-
-var Archs = [...]*Arch{
-	Arch386,
-	ArchAMD64,
-	ArchAMD64P32,
-	ArchARM,
-	ArchARM64,
-	ArchMIPS,
-	ArchMIPSLE,
-	ArchMIPS64,
-	ArchMIPS64LE,
-	ArchPPC64,
-	ArchPPC64LE,
-	ArchS390X,
-}
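
A hypothetical example test for InFamily above; it would need to live inside this internal package to compile.

package sys

import "fmt"

func ExampleArch_InFamily() {
	// amd64 and amd64p32 are both members of the AMD64 family, so shared
	// code paths can be gated on one family check instead of per-arch names.
	fmt.Println(ArchAMD64P32.InFamily(AMD64, PPC64))
	fmt.Println(ArchARM.InFamily(AMD64, PPC64))
	// Output:
	// true
	// false
}
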
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/doc.go b/pkg/bootstrap/src/bootstrap/cmd/link/doc.go
deleted file mode 100644
index d7a3837..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/doc.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/doc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/doc.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Link, typically invoked as ``go tool link,'' reads the Go archive or object
-for a package main, along with its dependencies, and combines them
-into an executable binary.
-
-Command Line
-
-Usage:
-
-	go tool link [flags] main.a
-
-Flags:
-
-	-B note
-		Add an ELF_NT_GNU_BUILD_ID note when using ELF.
-		The value should start with 0x and be an even number of hex digits.
-	-D address
-		Set data segment address.
-	-E entry
-		Set entry symbol name.
-	-H type
-		Set executable format type.
-		The default format is inferred from GOOS and GOARCH.
-		On Windows, -H windowsgui writes a "GUI binary" instead of a "console binary."
-	-I interpreter
-		Set the ELF dynamic linker to use.
-	-L dir1 -L dir2
-		Search for imported packages in dir1, dir2, etc,
-		after consulting $GOROOT/pkg/$GOOS_$GOARCH.
-	-R quantum
-		Set address rounding quantum.
-	-T address
-		Set text segment address.
-	-V
-		Print the linker version and exit.
-	-X importpath.name=value
-		Set the value of the string variable in importpath named name to value.
-		Note that before Go 1.5 this option took two separate arguments.
-		Now it takes one argument split on the first = sign.
-	-buildmode mode
-		Set build mode (default exe).
-	-cpuprofile file
-		Write CPU profile to file.
-	-d
-		Disable generation of dynamic executables.
-		The emitted code is the same in either case; the option
-		controls only whether a dynamic header is included.
-		The dynamic header is on by default, even without any
-		references to dynamic libraries, because many common
-		system tools now assume the presence of the header.
-	-extar ar
-		Set the external archive program (default "ar").
-		Used only for -buildmode=c-archive.
-	-extld linker
-		Set the external linker (default "clang" or "gcc").
-	-extldflags flags
-		Set space-separated flags to pass to the external linker.
-	-f
-		Ignore version mismatch in the linked archives.
-	-g
-		Disable Go package data checks.
-	-installsuffix suffix
-		Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix
-		instead of $GOROOT/pkg/$GOOS_$GOARCH.
-	-libgcc file
-		Set name of compiler support library.
-		This is only used in internal link mode.
-		If not set, default value comes from running the compiler,
-		which may be set by the -extld option.
-		Set to "none" to use no support library.
-	-linkmode mode
-		Set link mode (internal, external, auto).
-		This sets the linking mode as described in cmd/cgo/doc.go.
-	-linkshared
-		Link against installed Go shared libraries (experimental).
-	-memprofile file
-		Write memory profile to file.
-	-memprofilerate rate
-		Set runtime.MemProfileRate to rate.
-	-msan
-		Link with C/C++ memory sanitizer support.
-	-o file
-		Write output to file (default a.out, or a.out.exe on Windows).
-	-pluginpath path
-		The path name used to prefix exported plugin symbols.
-	-r dir1:dir2:...
-		Set the ELF dynamic linker search path.
-	-race
-		Link with race detection libraries.
-	-s
-		Omit the symbol table and debug information.
-	-shared
-		Generated shared object (implies -linkmode external; experimental).
-	-tmpdir dir
-		Write temporary files to dir.
-		Temporary files are only used in external linking mode.
-	-v
-		Print trace of linker operations.
-	-w
-		Omit the DWARF symbol table.
-*/
-package main
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/asm.go
deleted file mode 100644
index a0431f4..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/asm.go
+++ /dev/null
@@ -1,878 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/amd64/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/amd64/asm.go:1
-// Inferno utils/6l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package amd64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"debug/elf"
-	"log"
-)
-
-func PADDR(x uint32) uint32 {
-	return x &^ 0x80000000
-}
-
-func Addcall(ctxt *ld.Link, s *ld.Symbol, t *ld.Symbol) int64 {
-	s.Attr |= ld.AttrReachable
-	i := s.Size
-	s.Size += 4
-	ld.Symgrow(s, s.Size)
-	r := ld.Addrel(s)
-	r.Sym = t
-	r.Off = int32(i)
-	r.Type = obj.R_CALL
-	r.Siz = 4
-	return i + int64(r.Siz)
-}
-
-func gentext(ctxt *ld.Link) {
-	if !ctxt.DynlinkingGo() {
-		return
-	}
-	addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	if addmoduledata.Type == obj.STEXT && ld.Buildmode != ld.BuildmodePlugin {
-		// we're linking a module containing the runtime -> no need for
-		// an init function
-		return
-	}
-	addmoduledata.Attr |= ld.AttrReachable
-	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
-	initfunc.Type = obj.STEXT
-	initfunc.Attr |= ld.AttrLocal
-	initfunc.Attr |= ld.AttrReachable
-	o := func(op ...uint8) {
-		for _, op1 := range op {
-			ld.Adduint8(ctxt, initfunc, op1)
-		}
-	}
-	// 0000000000000000 <local.dso_init>:
-	//    0:	48 8d 3d 00 00 00 00 	lea    0x0(%rip),%rdi        # 7 <local.dso_init+0x7>
-	// 			3: R_X86_64_PC32	runtime.firstmoduledata-0x4
-	o(0x48, 0x8d, 0x3d)
-	ld.Addpcrelplus(ctxt, initfunc, ctxt.Moduledata, 0)
-	//    7:	e8 00 00 00 00       	callq  c <local.dso_init+0xc>
-	// 			8: R_X86_64_PLT32	runtime.addmoduledata-0x4
-	o(0xe8)
-	Addcall(ctxt, initfunc, addmoduledata)
-	//    c:	c3                   	retq
-	o(0xc3)
-	if ld.Buildmode == ld.BuildmodePlugin {
-		ctxt.Textp = append(ctxt.Textp, addmoduledata)
-	}
-	ctxt.Textp = append(ctxt.Textp, initfunc)
-	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
-	initarray_entry.Attr |= ld.AttrReachable
-	initarray_entry.Attr |= ld.AttrLocal
-	initarray_entry.Type = obj.SINITARR
-	ld.Addaddr(ctxt, initarray_entry, initfunc)
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	targ := r.Sym
-
-	switch r.Type {
-	default:
-		if r.Type >= 256 {
-			ld.Errorf(s, "unexpected relocation type %d", r.Type)
-			return false
-		}
-
-		// Handle relocations found in ELF object files.
-	case 256 + ld.R_X86_64_PC32:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_X86_64_PC32 relocation for dynamic symbol %s", targ.Name)
-		}
-		if targ.Type == 0 || targ.Type == obj.SXREF {
-			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
-		}
-		r.Type = obj.R_PCREL
-		r.Add += 4
-		return true
-
-	case 256 + ld.R_X86_64_PLT32:
-		r.Type = obj.R_PCREL
-		r.Add += 4
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add += int64(targ.Plt)
-		}
-
-		return true
-
-	case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX:
-		if targ.Type != obj.SDYNIMPORT {
-			// have symbol
-			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
-				// turn MOVQ of GOT entry into LEAQ of symbol itself
-				s.P[r.Off-2] = 0x8d
-
-				r.Type = obj.R_PCREL
-				r.Add += 4
-				return true
-			}
-		}
-
-		// fall back to using GOT and hope for the best (CMOV*)
-		// TODO: just needs relocation, no need to put in .dynsym
-		addgotsym(ctxt, targ)
-
-		r.Type = obj.R_PCREL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += 4
-		r.Add += int64(targ.Got)
-		return true
-
-	case 256 + ld.R_X86_64_64:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_X86_64_64 relocation for dynamic symbol %s", targ.Name)
-		}
-		r.Type = obj.R_ADDR
-		return true
-
-	// Handle relocations found in Mach-O object files.
-	case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 0,
-		512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 0,
-		512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 0:
-		// TODO: What is the difference between all these?
-		r.Type = obj.R_ADDR
-
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected reloc for dynamic symbol %s", targ.Name)
-		}
-		return true
-
-	case 512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 1:
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add = int64(targ.Plt)
-			r.Type = obj.R_PCREL
-			return true
-		}
-		fallthrough
-
-		// fall through
-	case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 1,
-		512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 1,
-		512 + ld.MACHO_X86_64_RELOC_SIGNED_1*2 + 1,
-		512 + ld.MACHO_X86_64_RELOC_SIGNED_2*2 + 1,
-		512 + ld.MACHO_X86_64_RELOC_SIGNED_4*2 + 1:
-		r.Type = obj.R_PCREL
-
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected pc-relative reloc for dynamic symbol %s", targ.Name)
-		}
-		return true
-
-	case 512 + ld.MACHO_X86_64_RELOC_GOT_LOAD*2 + 1:
-		if targ.Type != obj.SDYNIMPORT {
-			// have symbol
-			// turn MOVQ of GOT entry into LEAQ of symbol itself
-			if r.Off < 2 || s.P[r.Off-2] != 0x8b {
-				ld.Errorf(s, "unexpected GOT_LOAD reloc for non-dynamic symbol %s", targ.Name)
-				return false
-			}
-
-			s.P[r.Off-2] = 0x8d
-			r.Type = obj.R_PCREL
-			return true
-		}
-		fallthrough
-
-		// fall through
-	case 512 + ld.MACHO_X86_64_RELOC_GOT*2 + 1:
-		if targ.Type != obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
-		}
-		addgotsym(ctxt, targ)
-		r.Type = obj.R_PCREL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += int64(targ.Got)
-		return true
-	}
-
-	switch r.Type {
-	case obj.R_CALL,
-		obj.R_PCREL:
-		if targ.Type != obj.SDYNIMPORT {
-			// nothing to do, the relocation will be laid out in reloc
-			return true
-		}
-		if ld.Headtype == obj.Hwindows || ld.Headtype == obj.Hwindowsgui {
-			// nothing to do, the relocation will be laid out in pereloc1
-			return true
-		} else {
-			// for both ELF and Mach-O
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add = int64(targ.Plt)
-			return true
-		}
-
-	case obj.R_ADDR:
-		if s.Type == obj.STEXT && ld.Iself {
-			if ld.Headtype == obj.Hsolaris {
-				addpltsym(ctxt, targ)
-				r.Sym = ctxt.Syms.Lookup(".plt", 0)
-				r.Add += int64(targ.Plt)
-				return true
-			}
-			// The code is asking for the address of an external
-			// function. We provide it with the address of the
-			// correspondent GOT symbol.
-			addgotsym(ctxt, targ)
-
-			r.Sym = ctxt.Syms.Lookup(".got", 0)
-			r.Add += int64(targ.Got)
-			return true
-		}
-
-		// Process dynamic relocations for the data sections.
-		if ld.Buildmode == ld.BuildmodePIE && ld.Linkmode == ld.LinkInternal {
-			// When internally linking, generate dynamic relocations
-			// for all typical R_ADDR relocations. The exception
-			// are those R_ADDR that are created as part of generating
-			// the dynamic relocations and must be resolved statically.
-			//
-			// There are three phases relevant to understanding this:
-			//
-			//	dodata()  // we are here
-			//	address() // symbol address assignment
-			//	reloc()   // resolution of static R_ADDR relocs
-			//
-			// At this point symbol addresses have not been
-			// assigned yet (as the final size of the .rela section
-			// will affect the addresses), and so we cannot write
-			// the Elf64_Rela.r_offset now. Instead we delay it
-			// until after the 'address' phase of the linker is
-			// complete. We do this via Addaddrplus, which creates
-			// a new R_ADDR relocation which will be resolved in
-			// the 'reloc' phase.
-			//
-			// These synthetic static R_ADDR relocs must be skipped
-			// now, or else we will be caught in an infinite loop
-			// of generating synthetic relocs for our synthetic
-			// relocs.
-			switch s.Name {
-			case ".dynsym", ".rela", ".got.plt", ".dynamic":
-				return false
-			}
-		} else {
-			// Either internally linking a static executable,
-			// in which case we can resolve these relocations
-			// statically in the 'reloc' phase, or externally
-			// linking, in which case the relocation will be
-			// prepared in the 'reloc' phase and passed to the
-			// external linker in the 'asmb' phase.
-			if s.Type != obj.SDATA && s.Type != obj.SRODATA {
-				break
-			}
-		}
-
-		if ld.Iself {
-			// TODO: We generate a R_X86_64_64 relocation for every R_ADDR, even
-			// though it would be more efficient (for the dynamic linker) if we
-			// generated R_X86_RELATIVE instead.
-			ld.Adddynsym(ctxt, targ)
-			rela := ctxt.Syms.Lookup(".rela", 0)
-			ld.Addaddrplus(ctxt, rela, s, int64(r.Off))
-			if r.Siz == 8 {
-				ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64))
-			} else {
-				// TODO: never happens, remove.
-				ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_32))
-			}
-			ld.Adduint64(ctxt, rela, uint64(r.Add))
-			r.Type = 256 // ignore during relocsym
-			return true
-		}
-
-		if ld.Headtype == obj.Hdarwin && s.Size == int64(ld.SysArch.PtrSize) && r.Off == 0 {
-			// Mach-O relocations are a royal pain to lay out.
-			// They use a compact stateful bytecode representation
-			// that is too much bother to deal with.
-			// Instead, interpret the C declaration
-			//	void *_Cvar_stderr = &stderr;
-			// as making _Cvar_stderr the name of a GOT entry
-			// for stderr. This is separate from the usual GOT entry,
-			// just in case the C code assigns to the variable,
-			// and of course it only works for single pointers,
-			// but we only need to support cgo and that's all it needs.
-			ld.Adddynsym(ctxt, targ)
-
-			got := ctxt.Syms.Lookup(".got", 0)
-			s.Type = got.Type | obj.SSUB
-			s.Outer = got
-			s.Sub = got.Sub
-			got.Sub = s
-			s.Value = got.Size
-			ld.Adduint64(ctxt, got, 0)
-			ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(targ.Dynid))
-			r.Type = 256 // ignore during relocsym
-			return true
-		}
-
-		if ld.Headtype == obj.Hwindows || ld.Headtype == obj.Hwindowsgui {
-			// nothing to do, the relocation will be laid out in pereloc1
-			return true
-		}
-	}
-
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Vput(uint64(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		if r.Siz == 4 {
-			ld.Thearch.Vput(ld.R_X86_64_32 | uint64(elfsym)<<32)
-		} else if r.Siz == 8 {
-			ld.Thearch.Vput(ld.R_X86_64_64 | uint64(elfsym)<<32)
-		} else {
-			return -1
-		}
-
-	case obj.R_TLS_LE:
-		if r.Siz == 4 {
-			ld.Thearch.Vput(ld.R_X86_64_TPOFF32 | uint64(elfsym)<<32)
-		} else {
-			return -1
-		}
-
-	case obj.R_TLS_IE:
-		if r.Siz == 4 {
-			ld.Thearch.Vput(ld.R_X86_64_GOTTPOFF | uint64(elfsym)<<32)
-		} else {
-			return -1
-		}
-
-	case obj.R_CALL:
-		if r.Siz == 4 {
-			if r.Xsym.Type == obj.SDYNIMPORT {
-				if ctxt.DynlinkingGo() {
-					ld.Thearch.Vput(ld.R_X86_64_PLT32 | uint64(elfsym)<<32)
-				} else {
-					ld.Thearch.Vput(ld.R_X86_64_GOTPCREL | uint64(elfsym)<<32)
-				}
-			} else {
-				ld.Thearch.Vput(ld.R_X86_64_PC32 | uint64(elfsym)<<32)
-			}
-		} else {
-			return -1
-		}
-
-	case obj.R_PCREL:
-		if r.Siz == 4 {
-			if r.Xsym.Type == obj.SDYNIMPORT && r.Xsym.ElfType == elf.STT_FUNC {
-				ld.Thearch.Vput(ld.R_X86_64_PLT32 | uint64(elfsym)<<32)
-			} else {
-				ld.Thearch.Vput(ld.R_X86_64_PC32 | uint64(elfsym)<<32)
-			}
-		} else {
-			return -1
-		}
-
-	case obj.R_GOTPCREL:
-		if r.Siz == 4 {
-			ld.Thearch.Vput(ld.R_X86_64_GOTPCREL | uint64(elfsym)<<32)
-		} else {
-			return -1
-		}
-	}
-
-	ld.Thearch.Vput(uint64(r.Xadd))
-	return 0
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	var v uint32
-
-	rs := r.Xsym
-
-	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_PCREL || r.Type == obj.R_GOTPCREL {
-		if rs.Dynid < 0 {
-			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
-			return -1
-		}
-
-		v = uint32(rs.Dynid)
-		v |= 1 << 27 // external relocation
-	} else {
-		v = uint32(rs.Sect.Extnum)
-		if v == 0 {
-			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
-			return -1
-		}
-	}
-
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		v |= ld.MACHO_X86_64_RELOC_UNSIGNED << 28
-
-	case obj.R_CALL:
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_X86_64_RELOC_BRANCH << 28
-
-		// NOTE: Only works with 'external' relocation. Forced above.
-	case obj.R_PCREL:
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_X86_64_RELOC_SIGNED << 28
-	case obj.R_GOTPCREL:
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_X86_64_RELOC_GOT_LOAD << 28
-	}
-
-	switch r.Siz {
-	default:
-		return -1
-
-	case 1:
-		v |= 0 << 25
-
-	case 2:
-		v |= 1 << 25
-
-	case 4:
-		v |= 2 << 25
-
-	case 8:
-		v |= 3 << 25
-	}
-
-	ld.Thearch.Lput(uint32(sectoff))
-	ld.Thearch.Lput(v)
-	return 0
-}
-
-func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool {
-	var v uint32
-
-	rs := r.Xsym
-
-	if rs.Dynid < 0 {
-		ld.Errorf(s, "reloc %d to non-coff symbol %s type=%d", r.Type, rs.Name, rs.Type)
-		return false
-	}
-
-	ld.Thearch.Lput(uint32(sectoff))
-	ld.Thearch.Lput(uint32(rs.Dynid))
-
-	switch r.Type {
-	default:
-		return false
-
-	case obj.R_ADDR:
-		if r.Siz == 8 {
-			v = ld.IMAGE_REL_AMD64_ADDR64
-		} else {
-			v = ld.IMAGE_REL_AMD64_ADDR32
-		}
-
-	case obj.R_CALL,
-		obj.R_PCREL:
-		v = ld.IMAGE_REL_AMD64_REL32
-	}
-
-	ld.Thearch.Wput(uint16(v))
-
-	return true
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	log.Fatalf("unexpected relocation variant")
-	return t
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	plt := ctxt.Syms.Lookup(".plt", 0)
-	got := ctxt.Syms.Lookup(".got.plt", 0)
-	if plt.Size == 0 {
-		// pushq got+8(IP)
-		ld.Adduint8(ctxt, plt, 0xff)
-
-		ld.Adduint8(ctxt, plt, 0x35)
-		ld.Addpcrelplus(ctxt, plt, got, 8)
-
-		// jmpq got+16(IP)
-		ld.Adduint8(ctxt, plt, 0xff)
-
-		ld.Adduint8(ctxt, plt, 0x25)
-		ld.Addpcrelplus(ctxt, plt, got, 16)
-
-		// nopl 0(AX)
-		ld.Adduint32(ctxt, plt, 0x00401f0f)
-
-		// assume got->size == 0 too
-		ld.Addaddrplus(ctxt, got, ctxt.Syms.Lookup(".dynamic", 0), 0)
-
-		ld.Adduint64(ctxt, got, 0)
-		ld.Adduint64(ctxt, got, 0)
-	}
-}
-
-func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Plt >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-
-	if ld.Iself {
-		plt := ctxt.Syms.Lookup(".plt", 0)
-		got := ctxt.Syms.Lookup(".got.plt", 0)
-		rela := ctxt.Syms.Lookup(".rela.plt", 0)
-		if plt.Size == 0 {
-			elfsetupplt(ctxt)
-		}
-
-		// jmpq *got+size(IP)
-		ld.Adduint8(ctxt, plt, 0xff)
-
-		ld.Adduint8(ctxt, plt, 0x25)
-		ld.Addpcrelplus(ctxt, plt, got, got.Size)
-
-		// add to got: pointer to current pos in plt
-		ld.Addaddrplus(ctxt, got, plt, plt.Size)
-
-		// pushq $x
-		ld.Adduint8(ctxt, plt, 0x68)
-
-		ld.Adduint32(ctxt, plt, uint32((got.Size-24-8)/8))
-
-		// jmpq .plt
-		ld.Adduint8(ctxt, plt, 0xe9)
-
-		ld.Adduint32(ctxt, plt, uint32(-(plt.Size + 4)))
-
-		// rela
-		ld.Addaddrplus(ctxt, rela, got, got.Size-8)
-
-		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_JMP_SLOT))
-		ld.Adduint64(ctxt, rela, 0)
-
-		s.Plt = int32(plt.Size - 16)
-	} else if ld.Headtype == obj.Hdarwin {
-		// To do lazy symbol lookup right, we're supposed
-		// to tell the dynamic loader which library each
-		// symbol comes from and format the link info
-		// section just so. I'm too lazy (ha!) to do that
-		// so for now we'll just use non-lazy pointers,
-		// which don't need to be told which library to use.
-		//
-		// http://networkpx.blogspot.com/2009/09/about-lcdyldinfoonly-command.html
-		// has details about what we're avoiding.
-
-		addgotsym(ctxt, s)
-		plt := ctxt.Syms.Lookup(".plt", 0)
-
-		ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.plt", 0), uint32(s.Dynid))
-
-		// jmpq *got+size(IP)
-		s.Plt = int32(plt.Size)
-
-		ld.Adduint8(ctxt, plt, 0xff)
-		ld.Adduint8(ctxt, plt, 0x25)
-		ld.Addpcrelplus(ctxt, plt, ctxt.Syms.Lookup(".got", 0), int64(s.Got))
-	} else {
-		ld.Errorf(s, "addpltsym: unsupported binary format")
-	}
-}
-
-func addgotsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Got >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-	got := ctxt.Syms.Lookup(".got", 0)
-	s.Got = int32(got.Size)
-	ld.Adduint64(ctxt, got, 0)
-
-	if ld.Iself {
-		rela := ctxt.Syms.Lookup(".rela", 0)
-		ld.Addaddrplus(ctxt, rela, got, int64(s.Got))
-		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_GLOB_DAT))
-		ld.Adduint64(ctxt, rela, 0)
-	} else if ld.Headtype == obj.Hdarwin {
-		ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(s.Dynid))
-	} else {
-		ld.Errorf(s, "addgotsym: unsupported binary format")
-	}
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f codeblk\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	// 0xCC is INT $3 - breakpoint instruction
-	ld.CodeblkPad(ctxt, int64(sect.Vaddr), int64(sect.Length), []byte{0xCC})
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	machlink := int64(0)
-	if ld.Headtype == obj.Hdarwin {
-		machlink = ld.Domacholink(ctxt)
-	}
-
-	switch ld.Headtype {
-	default:
-		ld.Errorf(nil, "unknown header type %v", ld.Headtype)
-		fallthrough
-
-	case obj.Hplan9:
-		break
-
-	case obj.Hdarwin:
-		ld.Flag8 = true /* 64-bit addresses */
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hdragonfly,
-		obj.Hsolaris:
-		ld.Flag8 = true /* 64-bit addresses */
-
-	case obj.Hnacl,
-		obj.Hwindows,
-		obj.Hwindowsgui:
-		break
-	}
-
-	ld.Symsize = 0
-	ld.Spsize = 0
-	ld.Lcsize = 0
-	symo := int64(0)
-	if !*ld.FlagS {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		switch ld.Headtype {
-		default:
-		case obj.Hplan9:
-			*ld.FlagS = true
-			symo = int64(ld.Segdata.Fileoff + ld.Segdata.Filelen)
-
-		case obj.Hdarwin:
-			symo = int64(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
-
-		case obj.Hlinux,
-			obj.Hfreebsd,
-			obj.Hnetbsd,
-			obj.Hopenbsd,
-			obj.Hdragonfly,
-			obj.Hsolaris,
-			obj.Hnacl:
-			symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-			symo = ld.Rnd(symo, int64(*ld.FlagRound))
-
-		case obj.Hwindows,
-			obj.Hwindowsgui:
-			symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-			symo = ld.Rnd(symo, ld.PEFILEALIGN)
-		}
-
-		ld.Cseek(symo)
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				ld.Cseek(symo)
-				ld.Asmelfsym(ctxt)
-				ld.Cflush()
-				ld.Cwrite(ld.Elfstrdat)
-
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
-				}
-
-				if ld.Linkmode == ld.LinkExternal {
-					ld.Elfemitreloc(ctxt)
-				}
-			}
-
-		case obj.Hplan9:
-			ld.Asmplan9sym(ctxt)
-			ld.Cflush()
-
-			sym := ctxt.Syms.Lookup("pclntab", 0)
-			if sym != nil {
-				ld.Lcsize = int32(len(sym.P))
-				for i := 0; int32(i) < ld.Lcsize; i++ {
-					ld.Cput(sym.P[i])
-				}
-
-				ld.Cflush()
-			}
-
-		case obj.Hwindows, obj.Hwindowsgui:
-			if ctxt.Debugvlog != 0 {
-				ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
-			}
-
-		case obj.Hdarwin:
-			if ld.Linkmode == ld.LinkExternal {
-				ld.Machoemitreloc(ctxt)
-			}
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f headr\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-	case obj.Hplan9: /* plan9 */
-		magic := int32(4*26*26 + 7)
-
-		magic |= 0x00008000                  /* fat header */
-		ld.Lputb(uint32(magic))              /* magic */
-		ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */
-		ld.Lputb(uint32(ld.Segdata.Filelen))
-		ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
-		ld.Lputb(uint32(ld.Symsize)) /* nsyms */
-		vl := ld.Entryvalue(ctxt)
-		ld.Lputb(PADDR(uint32(vl))) /* va of entry */
-		ld.Lputb(uint32(ld.Spsize)) /* sp offsets */
-		ld.Lputb(uint32(ld.Lcsize)) /* line offsets */
-		ld.Vputb(uint64(vl))        /* va of entry */
-
-	case obj.Hdarwin:
-		ld.Asmbmacho(ctxt)
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hdragonfly,
-		obj.Hsolaris,
-		obj.Hnacl:
-		ld.Asmbelf(ctxt, symo)
-
-	case obj.Hwindows,
-		obj.Hwindowsgui:
-		ld.Asmbpe(ctxt)
-	}
-
-	ld.Cflush()
-}
-
-func tlsIEtoLE(s *ld.Symbol, off, size int) {
-	// Transform the PC-relative instruction into a constant load.
-	// That is,
-	//
-	//	MOVQ X(IP), REG  ->  MOVQ $Y, REG
-	//
-	// To determine the instruction and register, we study the op codes.
-	// Consult an AMD64 instruction encoding guide to decipher this.
-	if off < 3 {
-		log.Fatal("R_X86_64_GOTTPOFF reloc not preceded by MOVQ or ADDQ instruction")
-	}
-	op := s.P[off-3 : off]
-	reg := op[2] >> 3
-
-	if op[1] == 0x8b || reg == 4 {
-		// MOVQ
-		if op[0] == 0x4c {
-			op[0] = 0x49
-		} else if size == 4 && op[0] == 0x44 {
-			op[0] = 0x41
-		}
-		if op[1] == 0x8b {
-			op[1] = 0xc7
-		} else {
-			op[1] = 0x81 // special case for SP
-		}
-		op[2] = 0xc0 | reg
-	} else {
-		// An alternate op is ADDQ. This is handled by GNU gold,
-		// but right now is not generated by the Go compiler:
-		//	ADDQ X(IP), REG  ->  ADDQ $Y, REG
-		// Consider adding support for it here.
-		log.Fatalf("expected TLS IE op to be MOVQ, got %v", op)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/l.go
deleted file mode 100644
index 1b0ee14..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/l.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/amd64/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/amd64/l.go:1
-// Inferno utils/6l/l.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package amd64
-
-const (
-	maxAlign  = 32 // max data alignment
-	minAlign  = 1  // min data alignment
-	funcAlign = 16
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 7
-	dwarfRegLR = 16
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/obj.go
deleted file mode 100644
index c4cb392..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/amd64/obj.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/amd64/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/amd64/obj.go:1
-// Inferno utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package amd64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	ld.SysArch = sys.ArchAMD64
-	if obj.GOARCH == "amd64p32" {
-		ld.SysArch = sys.ArchAMD64P32
-	}
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	ld.Thearch.PEreloc1 = pereloc1
-	ld.Thearch.Lput = ld.Lputl
-	ld.Thearch.Wput = ld.Wputl
-	ld.Thearch.Vput = ld.Vputl
-	ld.Thearch.Append16 = ld.Append16l
-	ld.Thearch.Append32 = ld.Append32l
-	ld.Thearch.Append64 = ld.Append64l
-	ld.Thearch.TLSIEtoLE = tlsIEtoLE
-
-	ld.Thearch.Linuxdynld = "/lib64/ld-linux-x86-64.so.2"
-	ld.Thearch.Freebsddynld = "/libexec/ld-elf.so.1"
-	ld.Thearch.Openbsddynld = "/usr/libexec/ld.so"
-	ld.Thearch.Netbsddynld = "/libexec/ld.elf_so"
-	ld.Thearch.Dragonflydynld = "/usr/libexec/ld-elf.so.2"
-	ld.Thearch.Solarisdynld = "/lib/amd64/ld.so.1"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hplan9: /* plan 9 */
-		ld.HEADR = 32 + 8
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x200000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x200000
-		}
-
-	case obj.Hdarwin: /* apple MACH */
-		ld.Machoinit()
-
-		ld.HEADR = ld.INITIAL_MACHO_HEADR
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x1000000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-
-	case obj.Hlinux, /* elf64 executable */
-		obj.Hfreebsd,   /* freebsd */
-		obj.Hnetbsd,    /* netbsd */
-		obj.Hopenbsd,   /* openbsd */
-		obj.Hdragonfly, /* dragonfly */
-		obj.Hsolaris:   /* solaris */
-		ld.Elfinit(ctxt)
-
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = (1 << 22) + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hnacl:
-		ld.Elfinit(ctxt)
-		*ld.FlagW = true // disable dwarf, which gets confused and is useless anyway
-		ld.HEADR = 0x10000
-		ld.Funcalign = 32
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x20000
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hwindows, obj.Hwindowsgui: /* PE executable */
-		ld.Peinit(ctxt)
-
-		ld.HEADR = ld.PEFILEHEADR
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = ld.PEBASE + int64(ld.PESECTHEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = ld.PESECTALIGN
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/asm.go
deleted file mode 100644
index 4631fd3..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/asm.go
+++ /dev/null
@@ -1,896 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm/asm.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-	"log"
-)
-
-// This assembler:
-//
-//         .align 2
-// local.dso_init:
-//         ldr r0, .Lmoduledata
-// .Lloadfrom:
-//         ldr r0, [r0]
-//         b runtime.addmoduledata@plt
-// .align 2
-// .Lmoduledata:
-//         .word local.moduledata(GOT_PREL) + (. - (.Lloadfrom + 4))
-// assembles to:
-//
-// 00000000 <local.dso_init>:
-//    0:        e59f0004        ldr     r0, [pc, #4]    ; c <local.dso_init+0xc>
-//    4:        e5900000        ldr     r0, [r0]
-//    8:        eafffffe        b       0 <runtime.addmoduledata>
-//                      8: R_ARM_JUMP24 runtime.addmoduledata
-//    c:        00000004        .word   0x00000004
-//                      c: R_ARM_GOT_PREL       local.moduledata
-
-func gentext(ctxt *ld.Link) {
-	if !ctxt.DynlinkingGo() {
-		return
-	}
-	addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	if addmoduledata.Type == obj.STEXT && ld.Buildmode != ld.BuildmodePlugin {
-		// we're linking a module containing the runtime -> no need for
-		// an init function
-		return
-	}
-	addmoduledata.Attr |= ld.AttrReachable
-	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
-	initfunc.Type = obj.STEXT
-	initfunc.Attr |= ld.AttrLocal
-	initfunc.Attr |= ld.AttrReachable
-	o := func(op uint32) {
-		ld.Adduint32(ctxt, initfunc, op)
-	}
-	o(0xe59f0004)
-	o(0xe08f0000)
-
-	o(0xeafffffe)
-	rel := ld.Addrel(initfunc)
-	rel.Off = 8
-	rel.Siz = 4
-	rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	rel.Type = obj.R_CALLARM
-	rel.Add = 0xeafffffe // vomit
-
-	o(0x00000000)
-	rel = ld.Addrel(initfunc)
-	rel.Off = 12
-	rel.Siz = 4
-	rel.Sym = ctxt.Moduledata
-	rel.Type = obj.R_PCREL
-	rel.Add = 4
-
-	if ld.Buildmode == ld.BuildmodePlugin {
-		ctxt.Textp = append(ctxt.Textp, addmoduledata)
-	}
-	ctxt.Textp = append(ctxt.Textp, initfunc)
-	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
-	initarray_entry.Attr |= ld.AttrReachable
-	initarray_entry.Attr |= ld.AttrLocal
-	initarray_entry.Type = obj.SINITARR
-	ld.Addaddr(ctxt, initarray_entry, initfunc)
-}
-
-// Preserve highest 8 bits of a, and do addition to lower 24-bit
-// of a and b; used to adjust ARM branch instruction's target
-func braddoff(a int32, b int32) int32 {
-	return int32((uint32(a))&0xff000000 | 0x00ffffff&uint32(a+b))
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	targ := r.Sym
-
-	switch r.Type {
-	default:
-		if r.Type >= 256 {
-			ld.Errorf(s, "unexpected relocation type %d", r.Type)
-			return false
-		}
-
-		// Handle relocations found in ELF object files.
-	case 256 + ld.R_ARM_PLT32:
-		r.Type = obj.R_CALLARM
-
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
-		}
-
-		return true
-
-	case 256 + ld.R_ARM_THM_PC22: // R_ARM_THM_CALL
-		ld.Exitf("R_ARM_THM_CALL, are you using -marm?")
-		return false
-
-	case 256 + ld.R_ARM_GOT32: // R_ARM_GOT_BREL
-		if targ.Type != obj.SDYNIMPORT {
-			addgotsyminternal(ctxt, targ)
-		} else {
-			addgotsym(ctxt, targ)
-		}
-
-		r.Type = obj.R_CONST // write r->add during relocsym
-		r.Sym = nil
-		r.Add += int64(targ.Got)
-		return true
-
-	case 256 + ld.R_ARM_GOT_PREL: // GOT(nil) + A - nil
-		if targ.Type != obj.SDYNIMPORT {
-			addgotsyminternal(ctxt, targ)
-		} else {
-			addgotsym(ctxt, targ)
-		}
-
-		r.Type = obj.R_PCREL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += int64(targ.Got) + 4
-		return true
-
-	case 256 + ld.R_ARM_GOTOFF: // R_ARM_GOTOFF32
-		r.Type = obj.R_GOTOFF
-
-		return true
-
-	case 256 + ld.R_ARM_GOTPC: // R_ARM_BASE_PREL
-		r.Type = obj.R_PCREL
-
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += 4
-		return true
-
-	case 256 + ld.R_ARM_CALL:
-		r.Type = obj.R_CALLARM
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
-		}
-
-		return true
-
-	case 256 + ld.R_ARM_REL32: // R_ARM_REL32
-		r.Type = obj.R_PCREL
-
-		r.Add += 4
-		return true
-
-	case 256 + ld.R_ARM_ABS32:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_ARM_ABS32 relocation for dynamic symbol %s", targ.Name)
-		}
-		r.Type = obj.R_ADDR
-		return true
-
-		// we can just ignore this, because we are targeting ARM V5+ anyway
-	case 256 + ld.R_ARM_V4BX:
-		if r.Sym != nil {
-			// R_ARM_V4BX is ABS relocation, so this symbol is a dummy symbol, ignore it
-			r.Sym.Type = 0
-		}
-
-		r.Sym = nil
-		return true
-
-	case 256 + ld.R_ARM_PC24,
-		256 + ld.R_ARM_JUMP24:
-		r.Type = obj.R_CALLARM
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
-		}
-
-		return true
-	}
-
-	// Handle references to ELF symbols from our own object files.
-	if targ.Type != obj.SDYNIMPORT {
-		return true
-	}
-
-	switch r.Type {
-	case obj.R_CALLARM:
-		addpltsym(ctxt, targ)
-		r.Sym = ctxt.Syms.Lookup(".plt", 0)
-		r.Add = int64(targ.Plt)
-		return true
-
-	case obj.R_ADDR:
-		if s.Type != obj.SDATA {
-			break
-		}
-		if ld.Iself {
-			ld.Adddynsym(ctxt, targ)
-			rel := ctxt.Syms.Lookup(".rel", 0)
-			ld.Addaddrplus(ctxt, rel, s, int64(r.Off))
-			ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynamic reloc
-			r.Type = obj.R_CONST                                                            // write r->add during relocsym
-			r.Sym = nil
-			return true
-		}
-	}
-
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Lput(uint32(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_ARM_ABS32 | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-
-	case obj.R_PCREL:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_ARM_REL32 | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-
-	case obj.R_CALLARM:
-		if r.Siz == 4 {
-			if r.Add&0xff000000 == 0xeb000000 { // BL
-				ld.Thearch.Lput(ld.R_ARM_CALL | uint32(elfsym)<<8)
-			} else {
-				ld.Thearch.Lput(ld.R_ARM_JUMP24 | uint32(elfsym)<<8)
-			}
-		} else {
-			return -1
-		}
-
-	case obj.R_TLS_LE:
-		ld.Thearch.Lput(ld.R_ARM_TLS_LE32 | uint32(elfsym)<<8)
-
-	case obj.R_TLS_IE:
-		ld.Thearch.Lput(ld.R_ARM_TLS_IE32 | uint32(elfsym)<<8)
-
-	case obj.R_GOTPCREL:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_ARM_GOT_PREL | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-	}
-
-	return 0
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	plt := ctxt.Syms.Lookup(".plt", 0)
-	got := ctxt.Syms.Lookup(".got.plt", 0)
-	if plt.Size == 0 {
-		// str lr, [sp, #-4]!
-		ld.Adduint32(ctxt, plt, 0xe52de004)
-
-		// ldr lr, [pc, #4]
-		ld.Adduint32(ctxt, plt, 0xe59fe004)
-
-		// add lr, pc, lr
-		ld.Adduint32(ctxt, plt, 0xe08fe00e)
-
-		// ldr pc, [lr, #8]!
-		ld.Adduint32(ctxt, plt, 0xe5bef008)
-
-		// .word &GLOBAL_OFFSET_TABLE[0] - .
-		ld.Addpcrelplus(ctxt, plt, got, 4)
-
-		// the first .plt entry requires 3 .plt.got entries
-		ld.Adduint32(ctxt, got, 0)
-
-		ld.Adduint32(ctxt, got, 0)
-		ld.Adduint32(ctxt, got, 0)
-	}
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	var v uint32
-
-	rs := r.Xsym
-
-	if r.Type == obj.R_PCREL {
-		if rs.Type == obj.SHOSTOBJ {
-			ld.Errorf(s, "pc-relative relocation of external symbol is not supported")
-			return -1
-		}
-		if r.Siz != 4 {
-			return -1
-		}
-
-		// emit a pair of "scattered" relocations that
-		// resolve to the difference of section addresses of
-		// the symbol and the instruction
-		// this value is added to the field being relocated
-		o1 := uint32(sectoff)
-		o1 |= 1 << 31 // scattered bit
-		o1 |= ld.MACHO_ARM_RELOC_SECTDIFF << 24
-		o1 |= 2 << 28 // size = 4
-
-		o2 := uint32(0)
-		o2 |= 1 << 31 // scattered bit
-		o2 |= ld.MACHO_ARM_RELOC_PAIR << 24
-		o2 |= 2 << 28 // size = 4
-
-		ld.Thearch.Lput(o1)
-		ld.Thearch.Lput(uint32(ld.Symaddr(rs)))
-		ld.Thearch.Lput(o2)
-		ld.Thearch.Lput(uint32(s.Value + int64(r.Off)))
-		return 0
-	}
-
-	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_CALLARM {
-		if rs.Dynid < 0 {
-			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
-			return -1
-		}
-
-		v = uint32(rs.Dynid)
-		v |= 1 << 27 // external relocation
-	} else {
-		v = uint32(rs.Sect.Extnum)
-		if v == 0 {
-			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
-			return -1
-		}
-	}
-
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28
-
-	case obj.R_CALLARM:
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_ARM_RELOC_BR24 << 28
-	}
-
-	switch r.Siz {
-	default:
-		return -1
-
-	case 1:
-		v |= 0 << 25
-
-	case 2:
-		v |= 1 << 25
-
-	case 4:
-		v |= 2 << 25
-
-	case 8:
-		v |= 3 << 25
-	}
-
-	ld.Thearch.Lput(uint32(sectoff))
-	ld.Thearch.Lput(v)
-	return 0
-}
-
-// sign extend a 24-bit integer
-func signext24(x int64) int32 {
-	return (int32(x) << 8) >> 8
-}
-
-// encode an immediate in ARM's imm12 format. copied from ../../../internal/obj/arm/asm5.go
-func immrot(v uint32) uint32 {
-	for i := 0; i < 16; i++ {
-		if v&^0xff == 0 {
-			return uint32(i<<8) | v | 1<<25
-		}
-		v = v<<2 | v>>30
-	}
-	return 0
-}
-
-// Convert the direct jump relocation r to refer to a trampoline if the target is too far
-func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) {
-	switch r.Type {
-	case obj.R_CALLARM:
-		// r.Add is the instruction
-		// low 24-bit encodes the target address
-		t := (ld.Symaddr(r.Sym) + int64(signext24(r.Add&0xffffff)*4) - (s.Value + int64(r.Off))) / 4
-		if t > 0x7fffff || t < -0x800000 || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) {
-			// direct call too far, need to insert trampoline.
-			// look up existing trampolines first. if we found one within the range
-			// of direct call, we can reuse it. otherwise create a new one.
-			offset := (signext24(r.Add&0xffffff) + 2) * 4
-			var tramp *ld.Symbol
-			for i := 0; ; i++ {
-				name := r.Sym.Name + fmt.Sprintf("%+d-tramp%d", offset, i)
-				tramp = ctxt.Syms.Lookup(name, int(r.Sym.Version))
-				if tramp.Type == obj.SDYNIMPORT {
-					// don't reuse trampoline defined in other module
-					continue
-				}
-				if tramp.Value == 0 {
-					// either the trampoline does not exist -- we need to create one,
-					// or found one the address which is not assigned -- this will be
-					// laid down immediately after the current function. use this one.
-					break
-				}
-
-				t = (ld.Symaddr(tramp) - 8 - (s.Value + int64(r.Off))) / 4
-				if t >= -0x800000 && t < 0x7fffff {
-					// found an existing trampoline that is not too far
-					// we can just use it
-					break
-				}
-			}
-			if tramp.Type == 0 {
-				// trampoline does not exist, create one
-				ctxt.AddTramp(tramp)
-				if ctxt.DynlinkingGo() {
-					if immrot(uint32(offset)) == 0 {
-						ld.Errorf(s, "odd offset in dynlink direct call: %v+%d", r.Sym, offset)
-					}
-					gentrampdyn(tramp, r.Sym, int64(offset))
-				} else if ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.Buildmode == ld.BuildmodePIE {
-					gentramppic(tramp, r.Sym, int64(offset))
-				} else {
-					gentramp(tramp, r.Sym, int64(offset))
-				}
-			}
-			// modify reloc to point to tramp, which will be resolved later
-			r.Sym = tramp
-			r.Add = r.Add&0xff000000 | 0xfffffe // clear the offset embedded in the instruction
-			r.Done = 0
-		}
-	default:
-		ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type)
-	}
-}
-
-// generate a trampoline to target+offset
-func gentramp(tramp, target *ld.Symbol, offset int64) {
-	tramp.Size = 12 // 3 instructions
-	tramp.P = make([]byte, tramp.Size)
-	t := ld.Symaddr(target) + int64(offset)
-	o1 := uint32(0xe5900000 | 11<<12 | 15<<16) // MOVW (R15), R11 // R15 is actual pc + 8
-	o2 := uint32(0xe12fff10 | 11)              // JMP  (R11)
-	o3 := uint32(t)                            // WORD $target
-	ld.SysArch.ByteOrder.PutUint32(tramp.P, o1)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3)
-
-	if ld.Linkmode == ld.LinkExternal {
-		r := ld.Addrel(tramp)
-		r.Off = 8
-		r.Type = obj.R_ADDR
-		r.Siz = 4
-		r.Sym = target
-		r.Add = offset
-	}
-}
-
-// generate a trampoline to target+offset in position independent code
-func gentramppic(tramp, target *ld.Symbol, offset int64) {
-	tramp.Size = 16 // 4 instructions
-	tramp.P = make([]byte, tramp.Size)
-	o1 := uint32(0xe5900000 | 11<<12 | 15<<16 | 4)  // MOVW 4(R15), R11 // R15 is actual pc + 8
-	o2 := uint32(0xe0800000 | 11<<12 | 15<<16 | 11) // ADD R15, R11, R11
-	o3 := uint32(0xe12fff10 | 11)                   // JMP  (R11)
-	o4 := uint32(0)                                 // WORD $(target-pc) // filled in with relocation
-	ld.SysArch.ByteOrder.PutUint32(tramp.P, o1)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4)
-
-	r := ld.Addrel(tramp)
-	r.Off = 12
-	r.Type = obj.R_PCREL
-	r.Siz = 4
-	r.Sym = target
-	r.Add = offset + 4
-}
-
-// generate a trampoline to target+offset in dynlink mode (using GOT)
-func gentrampdyn(tramp, target *ld.Symbol, offset int64) {
-	tramp.Size = 20                                 // 5 instructions
-	o1 := uint32(0xe5900000 | 11<<12 | 15<<16 | 8)  // MOVW 8(R15), R11 // R15 is actual pc + 8
-	o2 := uint32(0xe0800000 | 11<<12 | 15<<16 | 11) // ADD R15, R11, R11
-	o3 := uint32(0xe5900000 | 11<<12 | 11<<16)      // MOVW (R11), R11
-	o4 := uint32(0xe12fff10 | 11)                   // JMP  (R11)
-	o5 := uint32(0)                                 // WORD $target@GOT // filled in with relocation
-	o6 := uint32(0)
-	if offset != 0 {
-		// insert an instruction to add offset
-		tramp.Size = 24 // 6 instructions
-		o6 = o5
-		o5 = o4
-		o4 = uint32(0xe2800000 | 11<<12 | 11<<16 | immrot(uint32(offset))) // ADD $offset, R11, R11
-		o1 = uint32(0xe5900000 | 11<<12 | 15<<16 | 12)                     // MOVW 12(R15), R11
-	}
-	tramp.P = make([]byte, tramp.Size)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P, o1)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4)
-	ld.SysArch.ByteOrder.PutUint32(tramp.P[16:], o5)
-	if offset != 0 {
-		ld.SysArch.ByteOrder.PutUint32(tramp.P[20:], o6)
-	}
-
-	r := ld.Addrel(tramp)
-	r.Off = 16
-	r.Type = obj.R_GOTPCREL
-	r.Siz = 4
-	r.Sym = target
-	r.Add = 8
-	if offset != 0 {
-		// increase reloc offset by 4 as we inserted an ADD instruction
-		r.Off = 20
-		r.Add = 12
-	}
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		switch r.Type {
-		case obj.R_CALLARM:
-			r.Done = 0
-
-			// set up addend for eventual relocation via outer symbol.
-			rs := r.Sym
-
-			r.Xadd = int64(signext24(r.Add & 0xffffff))
-			r.Xadd *= 4
-			for rs.Outer != nil {
-				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
-				rs = rs.Outer
-			}
-
-			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-				ld.Errorf(s, "missing section for %s", rs.Name)
-			}
-			r.Xsym = rs
-
-			// ld64 for arm seems to want the symbol table to contain offset
-			// into the section rather than pseudo virtual address that contains
-			// the section load address.
-			// we need to compensate that by removing the instruction's address
-			// from addend.
-			if ld.Headtype == obj.Hdarwin {
-				r.Xadd -= ld.Symaddr(s) + int64(r.Off)
-			}
-
-			if r.Xadd/4 > 0x7fffff || r.Xadd/4 < -0x800000 {
-				ld.Errorf(s, "direct call too far %d", r.Xadd/4)
-			}
-
-			*val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4))))
-			return 0
-		}
-
-		return -1
-	}
-
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-
-	// The following three arch specific relocations are only for generation of
-	// Linux/ARM ELF's PLT entry (3 assembler instruction)
-	case obj.R_PLT0: // add ip, pc, #0xXX00000
-		if ld.Symaddr(ctxt.Syms.Lookup(".got.plt", 0)) < ld.Symaddr(ctxt.Syms.Lookup(".plt", 0)) {
-			ld.Errorf(s, ".got.plt should be placed after .plt section.")
-		}
-		*val = 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add)) >> 20))
-		return 0
-
-	case obj.R_PLT1: // add ip, ip, #0xYY000
-		*val = 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+4)) >> 12))
-
-		return 0
-
-	case obj.R_PLT2: // ldr pc, [ip, #0xZZZ]!
-		*val = 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+8)))
-
-		return 0
-
-	case obj.R_CALLARM: // bl XXXXXX or b YYYYYY
-		// r.Add is the instruction
-		// low 24-bit encodes the target address
-		t := (ld.Symaddr(r.Sym) + int64(signext24(r.Add&0xffffff)*4) - (s.Value + int64(r.Off))) / 4
-		if t > 0x7fffff || t < -0x800000 {
-			ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t)
-		}
-		*val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&t)))
-
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	log.Fatalf("unexpected relocation variant")
-	return t
-}
-
-func addpltreloc(ctxt *ld.Link, plt *ld.Symbol, got *ld.Symbol, sym *ld.Symbol, typ obj.RelocType) *ld.Reloc {
-	r := ld.Addrel(plt)
-	r.Sym = got
-	r.Off = int32(plt.Size)
-	r.Siz = 4
-	r.Type = typ
-	r.Add = int64(sym.Got) - 8
-
-	plt.Attr |= ld.AttrReachable
-	plt.Size += 4
-	ld.Symgrow(plt, plt.Size)
-
-	return r
-}
-
-func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Plt >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-
-	if ld.Iself {
-		plt := ctxt.Syms.Lookup(".plt", 0)
-		got := ctxt.Syms.Lookup(".got.plt", 0)
-		rel := ctxt.Syms.Lookup(".rel.plt", 0)
-		if plt.Size == 0 {
-			elfsetupplt(ctxt)
-		}
-
-		// .got entry
-		s.Got = int32(got.Size)
-
-		// In theory, all GOT should point to the first PLT entry,
-		// Linux/ARM's dynamic linker will do that for us, but FreeBSD/ARM's
-		// dynamic linker won't, so we'd better do it ourselves.
-		ld.Addaddrplus(ctxt, got, plt, 0)
-
-		// .plt entry, this depends on the .got entry
-		s.Plt = int32(plt.Size)
-
-		addpltreloc(ctxt, plt, got, s, obj.R_PLT0) // add lr, pc, #0xXX00000
-		addpltreloc(ctxt, plt, got, s, obj.R_PLT1) // add lr, lr, #0xYY000
-		addpltreloc(ctxt, plt, got, s, obj.R_PLT2) // ldr pc, [lr, #0xZZZ]!
-
-		// rel
-		ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
-
-		ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_JUMP_SLOT))
-	} else {
-		ld.Errorf(s, "addpltsym: unsupported binary format")
-	}
-}
-
-func addgotsyminternal(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Got >= 0 {
-		return
-	}
-
-	got := ctxt.Syms.Lookup(".got", 0)
-	s.Got = int32(got.Size)
-
-	ld.Addaddrplus(ctxt, got, s, 0)
-
-	if ld.Iself {
-	} else {
-		ld.Errorf(s, "addgotsyminternal: unsupported binary format")
-	}
-}
-
-func addgotsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Got >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-	got := ctxt.Syms.Lookup(".got", 0)
-	s.Got = int32(got.Size)
-	ld.Adduint32(ctxt, got, 0)
-
-	if ld.Iself {
-		rel := ctxt.Syms.Lookup(".rel", 0)
-		ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
-		ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT))
-	} else {
-		ld.Errorf(s, "addgotsym: unsupported binary format")
-	}
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	machlink := uint32(0)
-	if ld.Headtype == obj.Hdarwin {
-		machlink = uint32(ld.Domacholink(ctxt))
-	}
-
-	/* output symbol table */
-	ld.Symsize = 0
-
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		// TODO: rationalize
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-			}
-
-		case obj.Hplan9:
-			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
-
-		case obj.Hdarwin:
-			symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
-		}
-
-		ld.Cseek(int64(symo))
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-				}
-				ld.Asmelfsym(ctxt)
-				ld.Cflush()
-				ld.Cwrite(ld.Elfstrdat)
-
-				if ld.Linkmode == ld.LinkExternal {
-					ld.Elfemitreloc(ctxt)
-				}
-			}
-
-		case obj.Hplan9:
-			ld.Asmplan9sym(ctxt)
-			ld.Cflush()
-
-			sym := ctxt.Syms.Lookup("pclntab", 0)
-			if sym != nil {
-				ld.Lcsize = int32(len(sym.P))
-				for i := 0; int32(i) < ld.Lcsize; i++ {
-					ld.Cput(sym.P[i])
-				}
-
-				ld.Cflush()
-			}
-
-		case obj.Hdarwin:
-			if ld.Linkmode == ld.LinkExternal {
-				ld.Machoemitreloc(ctxt)
-			}
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f header\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-	case obj.Hplan9: /* plan 9 */
-		ld.Lputb(0x647)                      /* magic */
-		ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */
-		ld.Lputb(uint32(ld.Segdata.Filelen))
-		ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
-		ld.Lputb(uint32(ld.Symsize))          /* nsyms */
-		ld.Lputb(uint32(ld.Entryvalue(ctxt))) /* va of entry */
-		ld.Lputb(0)
-		ld.Lputb(uint32(ld.Lcsize))
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hnacl:
-		ld.Asmbelf(ctxt, int64(symo))
-
-	case obj.Hdarwin:
-		ld.Asmbmacho(ctxt)
-	}
-
-	ld.Cflush()
-	if *ld.FlagC {
-		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
-		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
-		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
-		fmt.Printf("symsize=%d\n", ld.Symsize)
-		fmt.Printf("lcsize=%d\n", ld.Lcsize)
-		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/l.go
deleted file mode 100644
index d148d61..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/l.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm/l.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-// Writing object files.
-
-// Inferno utils/5l/l.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-const (
-	maxAlign  = 8 // max data alignment
-	minAlign  = 1 // min data alignment
-	funcAlign = 4 // single-instruction alignment
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 13
-	dwarfRegLR = 14
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/obj.go
deleted file mode 100644
index ed6f484..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm/obj.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm/obj.go:1
-// Inferno utils/5l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	ld.SysArch = sys.ArchARM
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Trampoline = trampoline
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	ld.Thearch.Lput = ld.Lputl
-	ld.Thearch.Wput = ld.Wputl
-	ld.Thearch.Vput = ld.Vputl
-	ld.Thearch.Append16 = ld.Append16l
-	ld.Thearch.Append32 = ld.Append32l
-	ld.Thearch.Append64 = ld.Append64l
-
-	ld.Thearch.Linuxdynld = "/lib/ld-linux.so.3" // 2 for OABI, 3 for EABI
-	ld.Thearch.Freebsddynld = "/usr/libexec/ld-elf.so.1"
-	ld.Thearch.Openbsddynld = "/usr/libexec/ld.so"
-	ld.Thearch.Netbsddynld = "/libexec/ld.elf_so"
-	ld.Thearch.Dragonflydynld = "XXX"
-	ld.Thearch.Solarisdynld = "XXX"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hplan9: /* plan 9 */
-		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4128
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hlinux, /* arm elf */
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd:
-		*ld.FlagD = false
-		// with dynamic linking
-		ld.Elfinit(ctxt)
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hnacl:
-		ld.Elfinit(ctxt)
-		ld.HEADR = 0x10000
-		ld.Funcalign = 16
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x20000
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hdarwin: /* apple MACH */
-		*ld.FlagW = true // disable DWARF generation
-		ld.Machoinit()
-		ld.HEADR = ld.INITIAL_MACHO_HEADR
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/asm.go
deleted file mode 100644
index 7c6b5b4..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/asm.go
+++ /dev/null
@@ -1,541 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm64/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm64/asm.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"encoding/binary"
-	"fmt"
-	"log"
-)
-
-func gentext(ctxt *ld.Link) {
-	if !ctxt.DynlinkingGo() {
-		return
-	}
-	addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	if addmoduledata.Type == obj.STEXT {
-		// we're linking a module containing the runtime -> no need for
-		// an init function
-		return
-	}
-	addmoduledata.Attr |= ld.AttrReachable
-	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
-	initfunc.Type = obj.STEXT
-	initfunc.Attr |= ld.AttrLocal
-	initfunc.Attr |= ld.AttrReachable
-	o := func(op uint32) {
-		ld.Adduint32(ctxt, initfunc, op)
-	}
-	// 0000000000000000 <local.dso_init>:
-	// 0:	90000000 	adrp	x0, 0 <runtime.firstmoduledata>
-	// 	0: R_AARCH64_ADR_PREL_PG_HI21	local.moduledata
-	// 4:	91000000 	add	x0, x0, #0x0
-	// 	4: R_AARCH64_ADD_ABS_LO12_NC	local.moduledata
-	o(0x90000000)
-	o(0x91000000)
-	rel := ld.Addrel(initfunc)
-	rel.Off = 0
-	rel.Siz = 8
-	rel.Sym = ctxt.Moduledata
-	rel.Type = obj.R_ADDRARM64
-
-	// 8:	14000000 	bl	0 <runtime.addmoduledata>
-	// 	8: R_AARCH64_CALL26	runtime.addmoduledata
-	o(0x14000000)
-	rel = ld.Addrel(initfunc)
-	rel.Off = 8
-	rel.Siz = 4
-	rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	rel.Type = obj.R_CALLARM64 // Really should be R_AARCH64_JUMP26 but doesn't seem to make any difference
-
-	ctxt.Textp = append(ctxt.Textp, initfunc)
-	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
-	initarray_entry.Attr |= ld.AttrReachable
-	initarray_entry.Attr |= ld.AttrLocal
-	initarray_entry.Type = obj.SINITARR
-	ld.Addaddr(ctxt, initarray_entry, initfunc)
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	log.Fatalf("adddynrel not implemented")
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Vput(uint64(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		switch r.Siz {
-		case 4:
-			ld.Thearch.Vput(ld.R_AARCH64_ABS32 | uint64(elfsym)<<32)
-		case 8:
-			ld.Thearch.Vput(ld.R_AARCH64_ABS64 | uint64(elfsym)<<32)
-		default:
-			return -1
-		}
-
-	case obj.R_ADDRARM64:
-		// two relocations: R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_ADD_ABS_LO12_NC
-		ld.Thearch.Vput(ld.R_AARCH64_ADR_PREL_PG_HI21 | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_AARCH64_ADD_ABS_LO12_NC | uint64(elfsym)<<32)
-
-	case obj.R_ARM64_TLS_LE:
-		ld.Thearch.Vput(ld.R_AARCH64_TLSLE_MOVW_TPREL_G0 | uint64(elfsym)<<32)
-
-	case obj.R_ARM64_TLS_IE:
-		ld.Thearch.Vput(ld.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC | uint64(elfsym)<<32)
-
-	case obj.R_ARM64_GOTPCREL:
-		ld.Thearch.Vput(ld.R_AARCH64_ADR_GOT_PAGE | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_AARCH64_LD64_GOT_LO12_NC | uint64(elfsym)<<32)
-
-	case obj.R_CALLARM64:
-		if r.Siz != 4 {
-			return -1
-		}
-		ld.Thearch.Vput(ld.R_AARCH64_CALL26 | uint64(elfsym)<<32)
-
-	}
-	ld.Thearch.Vput(uint64(r.Xadd))
-
-	return 0
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	// TODO(aram)
-	return
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	var v uint32
-
-	rs := r.Xsym
-
-	// ld64 has a bug handling MACHO_ARM64_RELOC_UNSIGNED with !extern relocation.
-	// see cmd/internal/ld/data.go for details. The workaround is that don't use !extern
-	// UNSIGNED relocation at all.
-	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_CALLARM64 || r.Type == obj.R_ADDRARM64 || r.Type == obj.R_ADDR {
-		if rs.Dynid < 0 {
-			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
-			return -1
-		}
-
-		v = uint32(rs.Dynid)
-		v |= 1 << 27 // external relocation
-	} else {
-		v = uint32(rs.Sect.Extnum)
-		if v == 0 {
-			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
-			return -1
-		}
-	}
-
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28
-
-	case obj.R_CALLARM64:
-		if r.Xadd != 0 {
-			ld.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", rs.Name, r.Xadd)
-		}
-
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28
-
-	case obj.R_ADDRARM64:
-		r.Siz = 4
-		// Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21
-		// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
-		if r.Xadd != 0 {
-			ld.Thearch.Lput(uint32(sectoff + 4))
-			ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
-		}
-		ld.Thearch.Lput(uint32(sectoff + 4))
-		ld.Thearch.Lput(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25))
-		if r.Xadd != 0 {
-			ld.Thearch.Lput(uint32(sectoff))
-			ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
-		}
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28
-	}
-
-	switch r.Siz {
-	default:
-		return -1
-
-	case 1:
-		v |= 0 << 25
-
-	case 2:
-		v |= 1 << 25
-
-	case 4:
-		v |= 2 << 25
-
-	case 8:
-		v |= 3 << 25
-	}
-
-	ld.Thearch.Lput(uint32(sectoff))
-	ld.Thearch.Lput(v)
-	return 0
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		switch r.Type {
-		default:
-			return -1
-
-		case obj.R_ARM64_GOTPCREL:
-			var o1, o2 uint32
-			if ctxt.Arch.ByteOrder == binary.BigEndian {
-				o1 = uint32(*val >> 32)
-				o2 = uint32(*val)
-			} else {
-				o1 = uint32(*val)
-				o2 = uint32(*val >> 32)
-			}
-			// Any relocation against a function symbol is redirected to
-			// be against a local symbol instead (see putelfsym in
-			// symtab.go) but unfortunately the system linker was buggy
-			// when confronted with a R_AARCH64_ADR_GOT_PAGE relocation
-			// against a local symbol until May 2015
-			// (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So
-			// we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp;
-			// add + R_ADDRARM64.
-			if !(r.Sym.Version != 0 || (r.Sym.Type&obj.SHIDDEN != 0) || r.Sym.Attr.Local()) && r.Sym.Type == obj.STEXT && ctxt.DynlinkingGo() {
-				if o2&0xffc00000 != 0xf9400000 {
-					ld.Errorf(s, "R_ARM64_GOTPCREL against unexpected instruction %x", o2)
-				}
-				o2 = 0x91000000 | (o2 & 0x000003ff)
-				r.Type = obj.R_ADDRARM64
-			}
-			if ctxt.Arch.ByteOrder == binary.BigEndian {
-				*val = int64(o1)<<32 | int64(o2)
-			} else {
-				*val = int64(o2)<<32 | int64(o1)
-			}
-			fallthrough
-
-		case obj.R_ADDRARM64:
-			r.Done = 0
-
-			// set up addend for eventual relocation via outer symbol.
-			rs := r.Sym
-			r.Xadd = r.Add
-			for rs.Outer != nil {
-				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
-				rs = rs.Outer
-			}
-
-			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-				ld.Errorf(s, "missing section for %s", rs.Name)
-			}
-			r.Xsym = rs
-
-			// Note: ld64 currently has a bug that any non-zero addend for BR26 relocation
-			// will make the linking fail because it thinks the code is not PIC even though
-			// the BR26 relocation should be fully resolved at link time.
-			// That is the reason why the next if block is disabled. When the bug in ld64
-			// is fixed, we can enable this block and also enable duff's device in cmd/7g.
-			if false && ld.Headtype == obj.Hdarwin {
-				var o0, o1 uint32
-
-				if ctxt.Arch.ByteOrder == binary.BigEndian {
-					o0 = uint32(*val >> 32)
-					o1 = uint32(*val)
-				} else {
-					o0 = uint32(*val)
-					o1 = uint32(*val >> 32)
-				}
-				// Mach-O wants the addend to be encoded in the instruction
-				// Note that although Mach-O supports ARM64_RELOC_ADDEND, it
-				// can only encode 24-bit of signed addend, but the instructions
-				// supports 33-bit of signed addend, so we always encode the
-				// addend in place.
-				o0 |= (uint32((r.Xadd>>12)&3) << 29) | (uint32((r.Xadd>>12>>2)&0x7ffff) << 5)
-				o1 |= uint32(r.Xadd&0xfff) << 10
-				r.Xadd = 0
-
-				// when laid out, the instruction order must always be o1, o2.
-				if ctxt.Arch.ByteOrder == binary.BigEndian {
-					*val = int64(o0)<<32 | int64(o1)
-				} else {
-					*val = int64(o1)<<32 | int64(o0)
-				}
-			}
-
-			return 0
-
-		case obj.R_CALLARM64,
-			obj.R_ARM64_TLS_LE,
-			obj.R_ARM64_TLS_IE:
-			r.Done = 0
-			r.Xsym = r.Sym
-			r.Xadd = r.Add
-			return 0
-		}
-	}
-
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-
-	case obj.R_ADDRARM64:
-		t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
-		if t >= 1<<32 || t < -1<<32 {
-			ld.Errorf(s, "program too large, address relocation distance = %d", t)
-		}
-
-		var o0, o1 uint32
-
-		if ctxt.Arch.ByteOrder == binary.BigEndian {
-			o0 = uint32(*val >> 32)
-			o1 = uint32(*val)
-		} else {
-			o0 = uint32(*val)
-			o1 = uint32(*val >> 32)
-		}
-
-		o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
-		o1 |= uint32(t&0xfff) << 10
-
-		// when laid out, the instruction order must always be o1, o2.
-		if ctxt.Arch.ByteOrder == binary.BigEndian {
-			*val = int64(o0)<<32 | int64(o1)
-		} else {
-			*val = int64(o1)<<32 | int64(o0)
-		}
-		return 0
-
-	case obj.R_ARM64_TLS_LE:
-		r.Done = 0
-		if ld.Headtype != obj.Hlinux {
-			ld.Errorf(s, "TLS reloc on unsupported OS %v", ld.Headtype)
-		}
-		// The TCB is two pointers. This is not documented anywhere, but is
-		// de facto part of the ABI.
-		v := r.Sym.Value + int64(2*ld.SysArch.PtrSize)
-		if v < 0 || v >= 32678 {
-			ld.Errorf(s, "TLS offset out of range %d", v)
-		}
-		*val |= v << 5
-		return 0
-
-	case obj.R_CALLARM64:
-		t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off))
-		if t >= 1<<27 || t < -1<<27 {
-			ld.Errorf(s, "program too large, call relocation distance = %d", t)
-		}
-		*val |= (t >> 2) & 0x03ffffff
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	log.Fatalf("unexpected relocation variant")
-	return -1
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	machlink := uint32(0)
-	if ld.Headtype == obj.Hdarwin {
-		machlink = uint32(ld.Domacholink(ctxt))
-	}
-
-	/* output symbol table */
-	ld.Symsize = 0
-
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		// TODO: rationalize
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-			}
-
-		case obj.Hplan9:
-			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
-
-		case obj.Hdarwin:
-			symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
-		}
-
-		ld.Cseek(int64(symo))
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-				}
-				ld.Asmelfsym(ctxt)
-				ld.Cflush()
-				ld.Cwrite(ld.Elfstrdat)
-
-				if ld.Linkmode == ld.LinkExternal {
-					ld.Elfemitreloc(ctxt)
-				}
-			}
-
-		case obj.Hplan9:
-			ld.Asmplan9sym(ctxt)
-			ld.Cflush()
-
-			sym := ctxt.Syms.Lookup("pclntab", 0)
-			if sym != nil {
-				ld.Lcsize = int32(len(sym.P))
-				for i := 0; int32(i) < ld.Lcsize; i++ {
-					ld.Cput(sym.P[i])
-				}
-
-				ld.Cflush()
-			}
-
-		case obj.Hdarwin:
-			if ld.Linkmode == ld.LinkExternal {
-				ld.Machoemitreloc(ctxt)
-			}
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f header\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-	case obj.Hplan9: /* plan 9 */
-		ld.Thearch.Lput(0x647)                      /* magic */
-		ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */
-		ld.Thearch.Lput(uint32(ld.Segdata.Filelen))
-		ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
-		ld.Thearch.Lput(uint32(ld.Symsize))          /* nsyms */
-		ld.Thearch.Lput(uint32(ld.Entryvalue(ctxt))) /* va of entry */
-		ld.Thearch.Lput(0)
-		ld.Thearch.Lput(uint32(ld.Lcsize))
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hnacl:
-		ld.Asmbelf(ctxt, int64(symo))
-
-	case obj.Hdarwin:
-		ld.Asmbmacho(ctxt)
-	}
-
-	ld.Cflush()
-	if *ld.FlagC {
-		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
-		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
-		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
-		fmt.Printf("symsize=%d\n", ld.Symsize)
-		fmt.Printf("lcsize=%d\n", ld.Lcsize)
-		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/l.go
deleted file mode 100644
index 5510826..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/l.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm64/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm64/l.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-// Writing object files.
-
-// cmd/9l/l.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-const (
-	maxAlign  = 32 // max data alignment
-	minAlign  = 1  // min data alignment
-	funcAlign = 8
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 31
-	dwarfRegLR = 30
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/obj.go
deleted file mode 100644
index 96120b7..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/arm64/obj.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm64/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/arm64/obj.go:1
-// Inferno utils/5l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package arm64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	ld.SysArch = sys.ArchARM64
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	ld.Thearch.Lput = ld.Lputl
-	ld.Thearch.Wput = ld.Wputl
-	ld.Thearch.Vput = ld.Vputl
-	ld.Thearch.Append16 = ld.Append16l
-	ld.Thearch.Append32 = ld.Append32l
-	ld.Thearch.Append64 = ld.Append64l
-
-	ld.Thearch.Linuxdynld = "/lib/ld-linux-aarch64.so.1"
-
-	ld.Thearch.Freebsddynld = "XXX"
-	ld.Thearch.Openbsddynld = "XXX"
-	ld.Thearch.Netbsddynld = "XXX"
-	ld.Thearch.Dragonflydynld = "XXX"
-	ld.Thearch.Solarisdynld = "XXX"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hplan9: /* plan 9 */
-		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hlinux: /* arm64 elf */
-		ld.Elfinit(ctxt)
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hdarwin: /* apple MACH */
-		*ld.FlagW = true // disable DWARF generation
-		ld.Machoinit()
-		ld.HEADR = ld.INITIAL_MACHO_HEADR
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hnacl:
-		ld.Elfinit(ctxt)
-		ld.HEADR = 0x10000
-		ld.Funcalign = 16
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x20000
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ar.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ar.go
deleted file mode 100644
index 04ee930..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ar.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ar.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ar.go:1
-// Inferno utils/include/ar.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/include/ar.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"os"
-)
-
-const (
-	SARMAG  = 8
-	SAR_HDR = 16 + 44
-)
-
-const (
-	ARMAG = "!<arch>\n"
-)
-
-type ArHdr struct {
-	name string
-	date string
-	uid  string
-	gid  string
-	mode string
-	size string
-	fmag string
-}
-
-// hostArchive reads an archive file holding host objects and links in
-// required objects. The general format is the same as a Go archive
-// file, but it has an armap listing symbols and the objects that
-// define them. This is used for the compiler support library
-// libgcc.a.
-func hostArchive(ctxt *Link, name string) {
-	f, err := bio.Open(name)
-	if err != nil {
-		if os.IsNotExist(err) {
-			// It's OK if we don't have a libgcc file at all.
-			if ctxt.Debugvlog != 0 {
-				ctxt.Logf("skipping libgcc file: %v\n", err)
-			}
-			return
-		}
-		Exitf("cannot open file %s: %v", name, err)
-	}
-	defer f.Close()
-
-	var magbuf [len(ARMAG)]byte
-	if _, err := io.ReadFull(f, magbuf[:]); err != nil {
-		Exitf("file %s too short", name)
-	}
-
-	var arhdr ArHdr
-	l := nextar(f, f.Offset(), &arhdr)
-	if l <= 0 {
-		Exitf("%s missing armap", name)
-	}
-
-	var armap archiveMap
-	if arhdr.name == "/" || arhdr.name == "/SYM64/" {
-		armap = readArmap(name, f, arhdr)
-	} else {
-		Exitf("%s missing armap", name)
-	}
-
-	loaded := make(map[uint64]bool)
-	any := true
-	for any {
-		var load []uint64
-		for _, s := range ctxt.Syms.Allsym {
-			for _, r := range s.R {
-				if r.Sym != nil && r.Sym.Type&obj.SMASK == obj.SXREF {
-					if off := armap[r.Sym.Name]; off != 0 && !loaded[off] {
-						load = append(load, off)
-						loaded[off] = true
-					}
-				}
-			}
-		}
-
-		for _, off := range load {
-			l := nextar(f, int64(off), &arhdr)
-			if l <= 0 {
-				Exitf("%s missing archive entry at offset %d", name, off)
-			}
-			pname := fmt.Sprintf("%s(%s)", name, arhdr.name)
-			l = atolwhex(arhdr.size)
-
-			libgcc := Library{Pkg: "libgcc"}
-			h := ldobj(ctxt, f, &libgcc, l, pname, name, ArchiveObj)
-			f.Seek(h.off, 0)
-			h.ld(ctxt, f, h.pkg, h.length, h.pn)
-		}
-
-		any = len(load) > 0
-	}
-}
-
-// archiveMap is an archive symbol map: a mapping from symbol name to
-// offset within the archive file.
-type archiveMap map[string]uint64
-
-// readArmap reads the archive symbol map.
-func readArmap(filename string, f *bio.Reader, arhdr ArHdr) archiveMap {
-	is64 := arhdr.name == "/SYM64/"
-	wordSize := 4
-	if is64 {
-		wordSize = 8
-	}
-
-	contents := make([]byte, atolwhex(arhdr.size))
-	if _, err := io.ReadFull(f, contents); err != nil {
-		Exitf("short read from %s", filename)
-	}
-
-	var c uint64
-	if is64 {
-		c = binary.BigEndian.Uint64(contents)
-	} else {
-		c = uint64(binary.BigEndian.Uint32(contents))
-	}
-	contents = contents[wordSize:]
-
-	ret := make(archiveMap)
-
-	names := contents[c*uint64(wordSize):]
-	for i := uint64(0); i < c; i++ {
-		n := 0
-		for names[n] != 0 {
-			n++
-		}
-		name := string(names[:n])
-		names = names[n+1:]
-
-		// For Mach-O and PE/386 files we strip a leading
-		// underscore from the symbol name.
-		if obj.GOOS == "darwin" || (obj.GOOS == "windows" && obj.GOARCH == "386") {
-			if name[0] == '_' && len(name) > 1 {
-				name = name[1:]
-			}
-		}
-
-		var off uint64
-		if is64 {
-			off = binary.BigEndian.Uint64(contents)
-		} else {
-			off = uint64(binary.BigEndian.Uint32(contents))
-		}
-		contents = contents[wordSize:]
-
-		ret[name] = off
-	}
-
-	return ret
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/config.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/config.go
deleted file mode 100644
index c1390fb..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/config.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/config.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/config.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"log"
-)
-
-var (
-	Linkmode  LinkMode
-	Buildmode BuildMode
-)
-
-// A BuildMode indicates the sort of object we are building.
-//
-// Possible build modes are the same as those for the -buildmode flag
-// in cmd/go, and are documented in 'go help buildmode'.
-type BuildMode uint8
-
-const (
-	BuildmodeUnset BuildMode = iota
-	BuildmodeExe
-	BuildmodePIE
-	BuildmodeCArchive
-	BuildmodeCShared
-	BuildmodeShared
-	BuildmodePlugin
-)
-
-func (mode *BuildMode) Set(s string) error {
-	badmode := func() error {
-		return fmt.Errorf("buildmode %s not supported on %s/%s", s, obj.GOOS, obj.GOARCH)
-	}
-	switch s {
-	default:
-		return fmt.Errorf("invalid buildmode: %q", s)
-	case "exe":
-		*mode = BuildmodeExe
-	case "pie":
-		switch obj.GOOS {
-		case "android", "linux":
-		default:
-			return badmode()
-		}
-		*mode = BuildmodePIE
-	case "c-archive":
-		switch obj.GOOS {
-		case "darwin", "linux":
-		case "windows":
-			switch obj.GOARCH {
-			case "amd64", "386":
-			default:
-				return badmode()
-			}
-		default:
-			return badmode()
-		}
-		*mode = BuildmodeCArchive
-	case "c-shared":
-		switch obj.GOARCH {
-		case "386", "amd64", "arm", "arm64":
-		default:
-			return badmode()
-		}
-		*mode = BuildmodeCShared
-	case "shared":
-		switch obj.GOOS {
-		case "linux":
-			switch obj.GOARCH {
-			case "386", "amd64", "arm", "arm64", "ppc64le", "s390x":
-			default:
-				return badmode()
-			}
-		default:
-			return badmode()
-		}
-		*mode = BuildmodeShared
-	case "plugin":
-		switch obj.GOOS {
-		case "linux":
-			switch obj.GOARCH {
-			case "386", "amd64", "arm", "arm64":
-			default:
-				return badmode()
-			}
-		case "darwin":
-			switch obj.GOARCH {
-			case "amd64":
-			default:
-				return badmode()
-			}
-		default:
-			return badmode()
-		}
-		*mode = BuildmodePlugin
-	}
-	return nil
-}
-
-func (mode *BuildMode) String() string {
-	switch *mode {
-	case BuildmodeUnset:
-		return "" // avoid showing a default in usage message
-	case BuildmodeExe:
-		return "exe"
-	case BuildmodePIE:
-		return "pie"
-	case BuildmodeCArchive:
-		return "c-archive"
-	case BuildmodeCShared:
-		return "c-shared"
-	case BuildmodeShared:
-		return "shared"
-	case BuildmodePlugin:
-		return "plugin"
-	}
-	return fmt.Sprintf("BuildMode(%d)", uint8(*mode))
-}
-
-// LinkMode indicates whether an external linker is used for the final link.
-type LinkMode uint8
-
-const (
-	LinkAuto LinkMode = iota
-	LinkInternal
-	LinkExternal
-)
-
-func (mode *LinkMode) Set(s string) error {
-	switch s {
-	default:
-		return fmt.Errorf("invalid linkmode: %q", s)
-	case "auto":
-		*mode = LinkAuto
-	case "internal":
-		*mode = LinkInternal
-	case "external":
-		*mode = LinkExternal
-	}
-	return nil
-}
-
-func (mode *LinkMode) String() string {
-	switch *mode {
-	case LinkAuto:
-		return "auto"
-	case LinkInternal:
-		return "internal"
-	case LinkExternal:
-		return "external"
-	}
-	return fmt.Sprintf("LinkMode(%d)", uint8(*mode))
-}
-
-// mustLinkExternal reports whether the program being linked requires
-// the external linker be used to complete the link.
-func mustLinkExternal(ctxt *Link) (res bool, reason string) {
-	if ctxt.Debugvlog > 1 {
-		defer func() {
-			if res {
-				log.Printf("external linking is forced by: %s\n", reason)
-			}
-		}()
-	}
-
-	switch obj.GOOS {
-	case "android":
-		return true, "android"
-	case "darwin":
-		if SysArch.InFamily(sys.ARM, sys.ARM64) {
-			return true, "iOS"
-		}
-	}
-
-	if *flagMsan {
-		return true, "msan"
-	}
-
-	// Internally linking cgo is incomplete on some architectures.
-	// https://golang.org/issue/10373
-	// https://golang.org/issue/14449
-	if iscgo && SysArch.InFamily(sys.ARM64, sys.MIPS64, sys.MIPS) {
-		return true, obj.GOARCH + " does not support internal cgo"
-	}
-
-	// Some build modes require work the internal linker cannot do (yet).
-	switch Buildmode {
-	case BuildmodeCArchive:
-		return true, "buildmode=c-archive"
-	case BuildmodeCShared:
-		return true, "buildmode=c-shared"
-	case BuildmodePIE:
-		switch obj.GOOS + "/" + obj.GOARCH {
-		case "linux/amd64":
-		default:
-			// Internal linking does not support TLS_IE.
-			return true, "buildmode=pie"
-		}
-	case BuildmodePlugin:
-		return true, "buildmode=plugin"
-	case BuildmodeShared:
-		return true, "buildmode=shared"
-	}
-	if *FlagLinkshared {
-		return true, "dynamically linking with a shared library"
-	}
-
-	return false, ""
-}
-
-// determineLinkMode sets Linkmode.
-//
-// It is called after flags are processed and inputs are processed,
-// so the Linkmode variable has an initial value from the -linkmode
-// flag and the iscgo externalobj variables are set.
-func determineLinkMode(ctxt *Link) {
-	switch Linkmode {
-	case LinkAuto:
-		// The environment variable GO_EXTLINK_ENABLED controls the
-		// default value of -linkmode. If it is not set when the
-		// linker is called we take the value it was set to when
-		// cmd/link was compiled. (See make.bash.)
-		switch obj.Getgoextlinkenabled() {
-		case "0":
-			if needed, reason := mustLinkExternal(ctxt); needed {
-				Exitf("internal linking requested via GO_EXTLINK_ENABLED, but external linking required: %s", reason)
-			}
-			Linkmode = LinkInternal
-		case "1":
-			Linkmode = LinkExternal
-		default:
-			if needed, _ := mustLinkExternal(ctxt); needed {
-				Linkmode = LinkExternal
-			} else if iscgo && externalobj {
-				Linkmode = LinkExternal
-			} else {
-				Linkmode = LinkInternal
-			}
-		}
-	case LinkInternal:
-		if needed, reason := mustLinkExternal(ctxt); needed {
-			Exitf("internal linking requested but external linking required: %s", reason)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/data.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/data.go
deleted file mode 100644
index ed57709..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/data.go
+++ /dev/null
@@ -1,2358 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/data.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/data.go:1
-// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/gcprog"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"log"
-	"os"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-)
-
-func Symgrow(s *Symbol, siz int64) {
-	if int64(int(siz)) != siz {
-		log.Fatalf("symgrow size %d too long", siz)
-	}
-	if int64(len(s.P)) >= siz {
-		return
-	}
-	if cap(s.P) < int(siz) {
-		p := make([]byte, 2*(siz+1))
-		s.P = append(p[:0], s.P...)
-	}
-	s.P = s.P[:siz]
-}
-
-func Addrel(s *Symbol) *Reloc {
-	s.R = append(s.R, Reloc{})
-	return &s.R[len(s.R)-1]
-}
-
-func setuintxx(ctxt *Link, s *Symbol, off int64, v uint64, wid int64) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	if s.Size < off+wid {
-		s.Size = off + wid
-		Symgrow(s, s.Size)
-	}
-
-	switch wid {
-	case 1:
-		s.P[off] = uint8(v)
-	case 2:
-		ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(v))
-	case 4:
-		ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v))
-	case 8:
-		ctxt.Arch.ByteOrder.PutUint64(s.P[off:], v)
-	}
-
-	return off + wid
-}
-
-func Addbytes(s *Symbol, bytes []byte) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	s.P = append(s.P, bytes...)
-	s.Size = int64(len(s.P))
-
-	return s.Size
-}
-
-func adduintxx(ctxt *Link, s *Symbol, v uint64, wid int) int64 {
-	off := s.Size
-	setuintxx(ctxt, s, off, v, int64(wid))
-	return off
-}
-
-func Adduint8(ctxt *Link, s *Symbol, v uint8) int64 {
-	off := s.Size
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	s.Size++
-	s.P = append(s.P, v)
-
-	return off
-}
-
-func Adduint16(ctxt *Link, s *Symbol, v uint16) int64 {
-	return adduintxx(ctxt, s, uint64(v), 2)
-}
-
-func Adduint32(ctxt *Link, s *Symbol, v uint32) int64 {
-	return adduintxx(ctxt, s, uint64(v), 4)
-}
-
-func Adduint64(ctxt *Link, s *Symbol, v uint64) int64 {
-	return adduintxx(ctxt, s, v, 8)
-}
-
-func adduint(ctxt *Link, s *Symbol, v uint64) int64 {
-	return adduintxx(ctxt, s, v, SysArch.IntSize)
-}
-
-func setuint8(ctxt *Link, s *Symbol, r int64, v uint8) int64 {
-	return setuintxx(ctxt, s, r, uint64(v), 1)
-}
-
-func setuint32(ctxt *Link, s *Symbol, r int64, v uint32) int64 {
-	return setuintxx(ctxt, s, r, uint64(v), 4)
-}
-
-func Addaddrplus(ctxt *Link, s *Symbol, t *Symbol, add int64) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	i := s.Size
-	s.Size += int64(ctxt.Arch.PtrSize)
-	Symgrow(s, s.Size)
-	r := Addrel(s)
-	r.Sym = t
-	r.Off = int32(i)
-	r.Siz = uint8(ctxt.Arch.PtrSize)
-	r.Type = obj.R_ADDR
-	r.Add = add
-	return i + int64(r.Siz)
-}
-
-func Addpcrelplus(ctxt *Link, s *Symbol, t *Symbol, add int64) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	i := s.Size
-	s.Size += 4
-	Symgrow(s, s.Size)
-	r := Addrel(s)
-	r.Sym = t
-	r.Off = int32(i)
-	r.Add = add
-	r.Type = obj.R_PCREL
-	r.Siz = 4
-	if SysArch.Family == sys.S390X {
-		r.Variant = RV_390_DBL
-	}
-	return i + int64(r.Siz)
-}
-
-func Addaddr(ctxt *Link, s *Symbol, t *Symbol) int64 {
-	return Addaddrplus(ctxt, s, t, 0)
-}
-
-func setaddrplus(ctxt *Link, s *Symbol, off int64, t *Symbol, add int64) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	if off+int64(ctxt.Arch.PtrSize) > s.Size {
-		s.Size = off + int64(ctxt.Arch.PtrSize)
-		Symgrow(s, s.Size)
-	}
-
-	r := Addrel(s)
-	r.Sym = t
-	r.Off = int32(off)
-	r.Siz = uint8(ctxt.Arch.PtrSize)
-	r.Type = obj.R_ADDR
-	r.Add = add
-	return off + int64(r.Siz)
-}
-
-func setaddr(ctxt *Link, s *Symbol, off int64, t *Symbol) int64 {
-	return setaddrplus(ctxt, s, off, t, 0)
-}
-
-func addsize(ctxt *Link, s *Symbol, t *Symbol) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	i := s.Size
-	s.Size += int64(ctxt.Arch.PtrSize)
-	Symgrow(s, s.Size)
-	r := Addrel(s)
-	r.Sym = t
-	r.Off = int32(i)
-	r.Siz = uint8(ctxt.Arch.PtrSize)
-	r.Type = obj.R_SIZE
-	return i + int64(r.Siz)
-}
-
-func addaddrplus4(ctxt *Link, s *Symbol, t *Symbol, add int64) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SDATA
-	}
-	s.Attr |= AttrReachable
-	i := s.Size
-	s.Size += 4
-	Symgrow(s, s.Size)
-	r := Addrel(s)
-	r.Sym = t
-	r.Off = int32(i)
-	r.Siz = 4
-	r.Type = obj.R_ADDR
-	r.Add = add
-	return i + int64(r.Siz)
-}
-
-/*
- * divide-and-conquer list-link (by Sub) sort of Symbol* by Value.
- * Used for sub-symbols when loading host objects (see e.g. ldelf.go).
- */
-
-func listsort(l *Symbol) *Symbol {
-	if l == nil || l.Sub == nil {
-		return l
-	}
-
-	l1 := l
-	l2 := l
-	for {
-		l2 = l2.Sub
-		if l2 == nil {
-			break
-		}
-		l2 = l2.Sub
-		if l2 == nil {
-			break
-		}
-		l1 = l1.Sub
-	}
-
-	l2 = l1.Sub
-	l1.Sub = nil
-	l1 = listsort(l)
-	l2 = listsort(l2)
-
-	/* set up lead element */
-	if l1.Value < l2.Value {
-		l = l1
-		l1 = l1.Sub
-	} else {
-		l = l2
-		l2 = l2.Sub
-	}
-
-	le := l
-
-	for {
-		if l1 == nil {
-			for l2 != nil {
-				le.Sub = l2
-				le = l2
-				l2 = l2.Sub
-			}
-
-			le.Sub = nil
-			break
-		}
-
-		if l2 == nil {
-			for l1 != nil {
-				le.Sub = l1
-				le = l1
-				l1 = l1.Sub
-			}
-
-			break
-		}
-
-		if l1.Value < l2.Value {
-			le.Sub = l1
-			le = l1
-			l1 = l1.Sub
-		} else {
-			le.Sub = l2
-			le = l2
-			l2 = l2.Sub
-		}
-	}
-
-	le.Sub = nil
-	return l
-}
-
-// isRuntimeDepPkg returns whether pkg is the runtime package or its dependency
-func isRuntimeDepPkg(pkg string) bool {
-	switch pkg {
-	case "runtime",
-		"sync/atomic": // runtime may call to sync/atomic, due to go:linkname
-		return true
-	}
-	return strings.HasPrefix(pkg, "runtime/internal/") && !strings.HasSuffix(pkg, "_test")
-}
-
-// detect too-far jumps in function s, and add trampolines if necessary
-// ARM supports trampoline insertion for internal and external linking
-// PPC64 & PPC64LE support trampoline insertion for internal linking only
-func trampoline(ctxt *Link, s *Symbol) {
-	if Thearch.Trampoline == nil {
-		return // no need or no support of trampolines on this arch
-	}
-
-	if Linkmode == LinkExternal && SysArch.Family == sys.PPC64 {
-		return
-	}
-
-	for ri := range s.R {
-		r := &s.R[ri]
-		if !r.Type.IsDirectJump() {
-			continue
-		}
-		if Symaddr(r.Sym) == 0 && r.Sym.Type != obj.SDYNIMPORT {
-			if r.Sym.File != s.File {
-				if !isRuntimeDepPkg(s.File) || !isRuntimeDepPkg(r.Sym.File) {
-					Errorf(s, "unresolved inter-package jump to %s(%s)", r.Sym, r.Sym.File)
-				}
-				// The runtime and its dependent packages may call each other;
-				// that is fine, as they will be laid down together.
-			}
-			continue
-		}
-
-		Thearch.Trampoline(ctxt, r, s)
-	}
-
-}
-
-// resolve relocations in s.
-func relocsym(ctxt *Link, s *Symbol) {
-	var r *Reloc
-	var rs *Symbol
-	var i16 int16
-	var off int32
-	var siz int32
-	var fl int32
-	var o int64
-
-	for ri := int32(0); ri < int32(len(s.R)); ri++ {
-		r = &s.R[ri]
-
-		r.Done = 1
-		off = r.Off
-		siz = int32(r.Siz)
-		if off < 0 || off+siz > int32(len(s.P)) {
-			rname := ""
-			if r.Sym != nil {
-				rname = r.Sym.Name
-			}
-			Errorf(s, "invalid relocation %s: %d+%d not in [%d,%d)", rname, off, siz, 0, len(s.P))
-			continue
-		}
-
-		if r.Sym != nil && (r.Sym.Type&(obj.SMASK|obj.SHIDDEN) == 0 || r.Sym.Type&obj.SMASK == obj.SXREF) {
-			// When putting the runtime but not main into a shared library
-			// these symbols are undefined and that's OK.
-			if Buildmode == BuildmodeShared {
-				if r.Sym.Name == "main.main" || r.Sym.Name == "main.init" {
-					r.Sym.Type = obj.SDYNIMPORT
-				} else if strings.HasPrefix(r.Sym.Name, "go.info.") {
-					// Skip go.info symbols. They are only needed to communicate
-					// DWARF info between the compiler and linker.
-					continue
-				}
-			} else {
-				Errorf(s, "relocation target %s not defined", r.Sym.Name)
-				continue
-			}
-		}
-
-		if r.Type >= 256 {
-			continue
-		}
-		if r.Siz == 0 { // informational relocation - no work to do
-			continue
-		}
-
-		// We need to be able to reference dynimport symbols when linking against
-		// shared libraries, and Solaris needs it always
-		if Headtype != obj.Hsolaris && r.Sym != nil && r.Sym.Type == obj.SDYNIMPORT && !ctxt.DynlinkingGo() {
-			if !(SysArch.Family == sys.PPC64 && Linkmode == LinkExternal && r.Sym.Name == ".TOC.") {
-				Errorf(s, "unhandled relocation for %s (type %d rtype %d)", r.Sym.Name, r.Sym.Type, r.Type)
-			}
-		}
-		if r.Sym != nil && r.Sym.Type != obj.STLSBSS && r.Type != obj.R_WEAKADDROFF && !r.Sym.Attr.Reachable() {
-			Errorf(s, "unreachable sym in relocation: %s", r.Sym.Name)
-		}
-
-		// TODO(mundaym): remove this special case - see issue 14218.
-		if SysArch.Family == sys.S390X {
-			switch r.Type {
-			case obj.R_PCRELDBL:
-				r.Type = obj.R_PCREL
-				r.Variant = RV_390_DBL
-			case obj.R_CALL:
-				r.Variant = RV_390_DBL
-			}
-		}
-
-		switch r.Type {
-		default:
-			switch siz {
-			default:
-				Errorf(s, "bad reloc size %#x for %s", uint32(siz), r.Sym.Name)
-			case 1:
-				o = int64(s.P[off])
-			case 2:
-				o = int64(ctxt.Arch.ByteOrder.Uint16(s.P[off:]))
-			case 4:
-				o = int64(ctxt.Arch.ByteOrder.Uint32(s.P[off:]))
-			case 8:
-				o = int64(ctxt.Arch.ByteOrder.Uint64(s.P[off:]))
-			}
-			if Thearch.Archreloc(ctxt, r, s, &o) < 0 {
-				Errorf(s, "unknown reloc to %v: %v", r.Sym.Name, r.Type)
-			}
-
-		case obj.R_TLS_LE:
-			isAndroidX86 := obj.GOOS == "android" && (SysArch.InFamily(sys.AMD64, sys.I386))
-
-			if Linkmode == LinkExternal && Iself && Headtype != obj.Hopenbsd && !isAndroidX86 {
-				r.Done = 0
-				if r.Sym == nil {
-					r.Sym = ctxt.Tlsg
-				}
-				r.Xsym = r.Sym
-				r.Xadd = r.Add
-				o = 0
-				if SysArch.Family != sys.AMD64 {
-					o = r.Add
-				}
-				break
-			}
-
-			if Iself && SysArch.Family == sys.ARM {
-				// On ELF ARM, the thread pointer is 8 bytes before
-				// the start of the thread-local data block, so add 8
-				// to the actual TLS offset (r->sym->value).
-				// This 8 seems to be a fundamental constant of
-				// ELF on ARM (or maybe Glibc on ARM); it is not
-				// related to the fact that our own TLS storage happens
-				// to take up 8 bytes.
-				o = 8 + r.Sym.Value
-			} else if Iself || Headtype == obj.Hplan9 || Headtype == obj.Hdarwin || isAndroidX86 {
-				o = int64(ctxt.Tlsoffset) + r.Add
-			} else if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-				o = r.Add
-			} else {
-				log.Fatalf("unexpected R_TLS_LE relocation for %v", Headtype)
-			}
-
-		case obj.R_TLS_IE:
-			isAndroidX86 := obj.GOOS == "android" && (SysArch.InFamily(sys.AMD64, sys.I386))
-
-			if Linkmode == LinkExternal && Iself && Headtype != obj.Hopenbsd && !isAndroidX86 {
-				r.Done = 0
-				if r.Sym == nil {
-					r.Sym = ctxt.Tlsg
-				}
-				r.Xsym = r.Sym
-				r.Xadd = r.Add
-				o = 0
-				if SysArch.Family != sys.AMD64 {
-					o = r.Add
-				}
-				break
-			}
-			if Buildmode == BuildmodePIE && Iself {
-				// We are linking the final executable, so we
-				// can optimize any TLS IE relocation to LE.
-				if Thearch.TLSIEtoLE == nil {
-					log.Fatalf("internal linking of TLS IE not supported on %v", SysArch.Family)
-				}
-				Thearch.TLSIEtoLE(s, int(off), int(r.Siz))
-				o = int64(ctxt.Tlsoffset)
-				// TODO: o += r.Add when SysArch.Family != sys.AMD64?
-				// Why do we treat r.Add differently on AMD64?
-				// Is the external linker using Xadd at all?
-			} else {
-				log.Fatalf("cannot handle R_TLS_IE (sym %s) when linking internally", s.Name)
-			}
-
-		case obj.R_ADDR:
-			if Linkmode == LinkExternal && r.Sym.Type != obj.SCONST {
-				r.Done = 0
-
-				// set up addend for eventual relocation via outer symbol.
-				rs = r.Sym
-
-				r.Xadd = r.Add
-				for rs.Outer != nil {
-					r.Xadd += Symaddr(rs) - Symaddr(rs.Outer)
-					rs = rs.Outer
-				}
-
-				if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-					Errorf(s, "missing section for relocation target %s", rs.Name)
-				}
-				r.Xsym = rs
-
-				o = r.Xadd
-				if Iself {
-					if SysArch.Family == sys.AMD64 {
-						o = 0
-					}
-				} else if Headtype == obj.Hdarwin {
-					// ld64 for arm64 has a bug where if the address pointed to by o exists in the
-					// symbol table (dynid >= 0), or is inside a symbol that exists in the symbol
-					// table, then it will add o twice into the relocated value.
-					// The workaround is that on arm64 we never add symaddr to o and always use
-					// an extern relocation by requiring rs->dynid >= 0.
-					if rs.Type != obj.SHOSTOBJ {
-						if SysArch.Family == sys.ARM64 && rs.Dynid < 0 {
-							Errorf(s, "R_ADDR reloc to %s+%d is not supported on darwin/arm64", rs.Name, o)
-						}
-						if SysArch.Family != sys.ARM64 {
-							o += Symaddr(rs)
-						}
-					}
-				} else if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-					// nothing to do
-				} else {
-					Errorf(s, "unhandled pcrel relocation to %s on %v", rs.Name, Headtype)
-				}
-
-				break
-			}
-
-			o = Symaddr(r.Sym) + r.Add
-
-			// On amd64, 4-byte offsets will be sign-extended, so it is impossible to
-			// access more than 2GB of static data; failing at link time is better than
-			// failing at runtime. See https://golang.org/issue/7980.
-			// Instead of special casing only amd64, we treat this as an error on all
-			// 64-bit architectures so as to be future-proof.
-			if int32(o) < 0 && SysArch.PtrSize > 4 && siz == 4 {
-				Errorf(s, "non-pc-relative relocation address for %s is too big: %#x (%#x + %#x)", r.Sym.Name, uint64(o), Symaddr(r.Sym), r.Add)
-				errorexit()
-			}
-
-		case obj.R_DWARFREF:
-			if r.Sym.Sect == nil {
-				Errorf(s, "missing DWARF section for relocation target %s", r.Sym.Name)
-			}
-			if Linkmode == LinkExternal {
-				r.Done = 0
-				r.Type = obj.R_ADDR
-
-				r.Xsym = ctxt.Syms.ROLookup(r.Sym.Sect.Name, 0)
-				r.Xadd = r.Add + Symaddr(r.Sym) - int64(r.Sym.Sect.Vaddr)
-				o = r.Xadd
-				rs = r.Xsym
-				if Iself && SysArch.Family == sys.AMD64 {
-					o = 0
-				}
-				break
-			}
-			o = Symaddr(r.Sym) + r.Add - int64(r.Sym.Sect.Vaddr)
-
-		case obj.R_WEAKADDROFF:
-			if !r.Sym.Attr.Reachable() {
-				continue
-			}
-			fallthrough
-		case obj.R_ADDROFF:
-			// The method offset tables using this relocation expect the offset to be relative
-			// to the start of the first text section, even if there are multiple.
-
-			if r.Sym.Sect.Name == ".text" {
-				o = Symaddr(r.Sym) - int64(Segtext.Sect.Vaddr) + r.Add
-			} else {
-				o = Symaddr(r.Sym) - int64(r.Sym.Sect.Vaddr) + r.Add
-			}
-
-			// r->sym can be null when CALL $(constant) is transformed from absolute PC to relative PC call.
-		case obj.R_GOTPCREL:
-			if ctxt.DynlinkingGo() && Headtype == obj.Hdarwin && r.Sym != nil && r.Sym.Type != obj.SCONST {
-				r.Done = 0
-				r.Xadd = r.Add
-				r.Xadd -= int64(r.Siz) // relative to address after the relocated chunk
-				r.Xsym = r.Sym
-
-				o = r.Xadd
-				o += int64(r.Siz)
-				break
-			}
-			fallthrough
-		case obj.R_CALL, obj.R_PCREL:
-			if Linkmode == LinkExternal && r.Sym != nil && r.Sym.Type != obj.SCONST && (r.Sym.Sect != s.Sect || r.Type == obj.R_GOTPCREL) {
-				r.Done = 0
-
-				// set up addend for eventual relocation via outer symbol.
-				rs = r.Sym
-
-				r.Xadd = r.Add
-				for rs.Outer != nil {
-					r.Xadd += Symaddr(rs) - Symaddr(rs.Outer)
-					rs = rs.Outer
-				}
-
-				r.Xadd -= int64(r.Siz) // relative to address after the relocated chunk
-				if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-					Errorf(s, "missing section for relocation target %s", rs.Name)
-				}
-				r.Xsym = rs
-
-				o = r.Xadd
-				if Iself {
-					if SysArch.Family == sys.AMD64 {
-						o = 0
-					}
-				} else if Headtype == obj.Hdarwin {
-					if r.Type == obj.R_CALL {
-						if rs.Type != obj.SHOSTOBJ {
-							o += int64(uint64(Symaddr(rs)) - rs.Sect.Vaddr)
-						}
-						o -= int64(r.Off) // relative to section offset, not symbol
-					} else if SysArch.Family == sys.ARM {
-						// see ../arm/asm.go:/machoreloc1
-						o += Symaddr(rs) - int64(s.Value) - int64(r.Off)
-					} else {
-						o += int64(r.Siz)
-					}
-				} else if (Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui) && SysArch.Family == sys.AMD64 { // only amd64 needs PCREL
-					// PE/COFF's PC32 relocation uses the address after the relocated
-					// bytes as the base. Compensate by skewing the addend.
-					o += int64(r.Siz)
-					// GNU ld always adds the VirtualAddress of the .text section to the
-					// relocated address; compensate for that.
-					o -= int64(s.Sect.Vaddr - PEBASE)
-				} else {
-					Errorf(s, "unhandled pcrel relocation to %s on %v", rs.Name, Headtype)
-				}
-
-				break
-			}
-
-			o = 0
-			if r.Sym != nil {
-				o += Symaddr(r.Sym)
-			}
-
-			o += r.Add - (s.Value + int64(r.Off) + int64(r.Siz))
-
-		case obj.R_SIZE:
-			o = r.Sym.Size + r.Add
-		}
-
-		if r.Variant != RV_NONE {
-			o = Thearch.Archrelocvariant(ctxt, r, s, o)
-		}
-
-		if false {
-			nam := "<nil>"
-			if r.Sym != nil {
-				nam = r.Sym.Name
-			}
-			fmt.Printf("relocate %s %#x (%#x+%#x, size %d) => %s %#x +%#x [type %d/%d, %x]\n", s.Name, s.Value+int64(off), s.Value, r.Off, r.Siz, nam, Symaddr(r.Sym), r.Add, r.Type, r.Variant, o)
-		}
-		switch siz {
-		default:
-			Errorf(s, "bad reloc size %#x for %s", uint32(siz), r.Sym.Name)
-			fallthrough
-
-			// TODO(rsc): Remove.
-		case 1:
-			s.P[off] = byte(int8(o))
-
-		case 2:
-			if o != int64(int16(o)) {
-				Errorf(s, "relocation address for %s is too big: %#x", r.Sym.Name, o)
-			}
-			i16 = int16(o)
-			ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(i16))
-
-		case 4:
-			if r.Type == obj.R_PCREL || r.Type == obj.R_CALL {
-				if o != int64(int32(o)) {
-					Errorf(s, "pc-relative relocation address for %s is too big: %#x", r.Sym.Name, o)
-				}
-			} else {
-				if o != int64(int32(o)) && o != int64(uint32(o)) {
-					Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", r.Sym.Name, uint64(o))
-				}
-			}
-
-			fl = int32(o)
-			ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(fl))
-
-		case 8:
-			ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(o))
-		}
-	}
-}
-
-func (ctxt *Link) reloc() {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f reloc\n", obj.Cputime())
-	}
-
-	for _, s := range ctxt.Textp {
-		relocsym(ctxt, s)
-	}
-	for _, sym := range datap {
-		relocsym(ctxt, sym)
-	}
-	for _, s := range dwarfp {
-		relocsym(ctxt, s)
-	}
-}
-
-func dynrelocsym(ctxt *Link, s *Symbol) {
-	if (Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui) && Linkmode != LinkExternal {
-		rel := ctxt.Syms.Lookup(".rel", 0)
-		if s == rel {
-			return
-		}
-		for ri := 0; ri < len(s.R); ri++ {
-			r := &s.R[ri]
-			targ := r.Sym
-			if targ == nil {
-				continue
-			}
-			if !targ.Attr.Reachable() {
-				if r.Type == obj.R_WEAKADDROFF {
-					continue
-				}
-				Errorf(s, "dynamic relocation to unreachable symbol %s", targ.Name)
-			}
-			if r.Sym.Plt == -2 && r.Sym.Got != -2 { // make dynimport JMP table for PE object files.
-				targ.Plt = int32(rel.Size)
-				r.Sym = rel
-				r.Add = int64(targ.Plt)
-
-				// jmp *addr
-				if SysArch.Family == sys.I386 {
-					Adduint8(ctxt, rel, 0xff)
-					Adduint8(ctxt, rel, 0x25)
-					Addaddr(ctxt, rel, targ)
-					Adduint8(ctxt, rel, 0x90)
-					Adduint8(ctxt, rel, 0x90)
-				} else {
-					Adduint8(ctxt, rel, 0xff)
-					Adduint8(ctxt, rel, 0x24)
-					Adduint8(ctxt, rel, 0x25)
-					addaddrplus4(ctxt, rel, targ, 0)
-					Adduint8(ctxt, rel, 0x90)
-				}
-			} else if r.Sym.Plt >= 0 {
-				r.Sym = rel
-				r.Add = int64(targ.Plt)
-			}
-		}
-
-		return
-	}
-
-	for ri := 0; ri < len(s.R); ri++ {
-		r := &s.R[ri]
-		if Buildmode == BuildmodePIE && Linkmode == LinkInternal {
-			// It's expected that some relocations will be done
-			// later by relocsym (R_TLS_LE, R_ADDROFF), so
-			// don't worry if Adddynrel returns false.
-			Thearch.Adddynrel(ctxt, s, r)
-			continue
-		}
-		if r.Sym != nil && r.Sym.Type == obj.SDYNIMPORT || r.Type >= 256 {
-			if r.Sym != nil && !r.Sym.Attr.Reachable() {
-				Errorf(s, "dynamic relocation to unreachable symbol %s", r.Sym.Name)
-			}
-			if !Thearch.Adddynrel(ctxt, s, r) {
-				Errorf(s, "unsupported dynamic relocation for symbol %s (type=%d stype=%d)", r.Sym.Name, r.Type, r.Sym.Type)
-			}
-		}
-	}
-}
-
-func dynreloc(ctxt *Link, data *[obj.SXREF][]*Symbol) {
-	// -d suppresses dynamic loader format, so we may as well not
-	// compute these sections or mark their symbols as reachable.
-	if *FlagD && Headtype != obj.Hwindows && Headtype != obj.Hwindowsgui {
-		return
-	}
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f reloc\n", obj.Cputime())
-	}
-
-	for _, s := range ctxt.Textp {
-		dynrelocsym(ctxt, s)
-	}
-	for _, syms := range data {
-		for _, sym := range syms {
-			dynrelocsym(ctxt, sym)
-		}
-	}
-	if Iself {
-		elfdynhash(ctxt)
-	}
-}
-
-func Codeblk(ctxt *Link, addr int64, size int64) {
-	CodeblkPad(ctxt, addr, size, zeros[:])
-}
-func CodeblkPad(ctxt *Link, addr int64, size int64, pad []byte) {
-	if *flagA {
-		ctxt.Logf("codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, coutbuf.Offset())
-	}
-
-	blk(ctxt, ctxt.Textp, addr, size, pad)
-
-	/* again for printing */
-	if !*flagA {
-		return
-	}
-
-	syms := ctxt.Textp
-	for i, sym := range syms {
-		if !sym.Attr.Reachable() {
-			continue
-		}
-		if sym.Value >= addr {
-			syms = syms[i:]
-			break
-		}
-	}
-
-	eaddr := addr + size
-	var q []byte
-	for _, sym := range syms {
-		if !sym.Attr.Reachable() {
-			continue
-		}
-		if sym.Value >= eaddr {
-			break
-		}
-
-		if addr < sym.Value {
-			ctxt.Logf("%-20s %.8x|", "_", uint64(addr))
-			for ; addr < sym.Value; addr++ {
-				ctxt.Logf(" %.2x", 0)
-			}
-			ctxt.Logf("\n")
-		}
-
-		ctxt.Logf("%.6x\t%-20s\n", uint64(addr), sym.Name)
-		q = sym.P
-
-		for len(q) >= 16 {
-			ctxt.Logf("%.6x\t% x\n", uint64(addr), q[:16])
-			addr += 16
-			q = q[16:]
-		}
-
-		if len(q) > 0 {
-			ctxt.Logf("%.6x\t% x\n", uint64(addr), q)
-			addr += int64(len(q))
-		}
-	}
-
-	if addr < eaddr {
-		ctxt.Logf("%-20s %.8x|", "_", uint64(addr))
-		for ; addr < eaddr; addr++ {
-			ctxt.Logf(" %.2x", 0)
-		}
-	}
-}
-
-func blk(ctxt *Link, syms []*Symbol, addr, size int64, pad []byte) {
-	for i, s := range syms {
-		if s.Type&obj.SSUB == 0 && s.Value >= addr {
-			syms = syms[i:]
-			break
-		}
-	}
-
-	eaddr := addr + size
-	for _, s := range syms {
-		if s.Type&obj.SSUB != 0 {
-			continue
-		}
-		if s.Value >= eaddr {
-			break
-		}
-		if s.Value < addr {
-			Errorf(s, "phase error: addr=%#x but sym=%#x type=%d", addr, s.Value, s.Type)
-			errorexit()
-		}
-		if addr < s.Value {
-			strnputPad("", int(s.Value-addr), pad)
-			addr = s.Value
-		}
-		Cwrite(s.P)
-		addr += int64(len(s.P))
-		if addr < s.Value+s.Size {
-			strnputPad("", int(s.Value+s.Size-addr), pad)
-			addr = s.Value + s.Size
-		}
-		if addr != s.Value+s.Size {
-			Errorf(s, "phase error: addr=%#x value+size=%#x", addr, s.Value+s.Size)
-			errorexit()
-		}
-		if s.Value+s.Size >= eaddr {
-			break
-		}
-	}
-
-	if addr < eaddr {
-		strnputPad("", int(eaddr-addr), pad)
-	}
-	Cflush()
-}
-
-func Datblk(ctxt *Link, addr int64, size int64) {
-	if *flagA {
-		ctxt.Logf("datblk [%#x,%#x) at offset %#x\n", addr, addr+size, coutbuf.Offset())
-	}
-
-	blk(ctxt, datap, addr, size, zeros[:])
-
-	/* again for printing */
-	if !*flagA {
-		return
-	}
-
-	syms := datap
-	for i, sym := range syms {
-		if sym.Value >= addr {
-			syms = syms[i:]
-			break
-		}
-	}
-
-	eaddr := addr + size
-	for _, sym := range syms {
-		if sym.Value >= eaddr {
-			break
-		}
-		if addr < sym.Value {
-			ctxt.Logf("\t%.8x| 00 ...\n", uint64(addr))
-			addr = sym.Value
-		}
-
-		ctxt.Logf("%s\n\t%.8x|", sym.Name, uint64(addr))
-		for i, b := range sym.P {
-			if i > 0 && i%16 == 0 {
-				ctxt.Logf("\n\t%.8x|", uint64(addr)+uint64(i))
-			}
-			ctxt.Logf(" %.2x", b)
-		}
-
-		addr += int64(len(sym.P))
-		for ; addr < sym.Value+sym.Size; addr++ {
-			ctxt.Logf(" %.2x", 0)
-		}
-		ctxt.Logf("\n")
-
-		if Linkmode != LinkExternal {
-			continue
-		}
-		for _, r := range sym.R {
-			rsname := ""
-			if r.Sym != nil {
-				rsname = r.Sym.Name
-			}
-			typ := "?"
-			switch r.Type {
-			case obj.R_ADDR:
-				typ = "addr"
-			case obj.R_PCREL:
-				typ = "pcrel"
-			case obj.R_CALL:
-				typ = "call"
-			}
-			ctxt.Logf("\treloc %.8x/%d %s %s+%#x [%#x]\n", uint(sym.Value+int64(r.Off)), r.Siz, typ, rsname, r.Add, r.Sym.Value+r.Add)
-		}
-	}
-
-	if addr < eaddr {
-		ctxt.Logf("\t%.8x| 00 ...\n", uint(addr))
-	}
-	ctxt.Logf("\t%.8x|\n", uint(eaddr))
-}
-
-func Dwarfblk(ctxt *Link, addr int64, size int64) {
-	if *flagA {
-		ctxt.Logf("dwarfblk [%#x,%#x) at offset %#x\n", addr, addr+size, coutbuf.Offset())
-	}
-
-	blk(ctxt, dwarfp, addr, size, zeros[:])
-}
-
-var zeros [512]byte
-
-// strnput writes the first n bytes of s.
-// If n is larger than len(s),
-// it is padded with NUL bytes.
-func strnput(s string, n int) {
-	strnputPad(s, n, zeros[:])
-}
-
-// strnputPad writes the first n bytes of s.
-// If n is larger than len(s),
-// it is padded with the bytes in pad (repeated as needed).
-func strnputPad(s string, n int, pad []byte) {
-	if len(s) >= n {
-		Cwritestring(s[:n])
-	} else {
-		Cwritestring(s)
-		n -= len(s)
-		for n > len(pad) {
-			Cwrite(pad)
-			n -= len(pad)
-
-		}
-		Cwrite(pad[:n])
-	}
-}
-
-var strdata []*Symbol
-
-func addstrdata1(ctxt *Link, arg string) {
-	eq := strings.Index(arg, "=")
-	dot := strings.LastIndex(arg[:eq+1], ".")
-	if eq < 0 || dot < 0 {
-		Exitf("-X flag requires argument of the form importpath.name=value")
-	}
-	addstrdata(ctxt, pathtoprefix(arg[:dot])+arg[dot:eq], arg[eq+1:])
-}
-
-func addstrdata(ctxt *Link, name string, value string) {
-	p := fmt.Sprintf("%s.str", name)
-	sp := ctxt.Syms.Lookup(p, 0)
-
-	Addstring(sp, value)
-	sp.Type = obj.SRODATA
-
-	s := ctxt.Syms.Lookup(name, 0)
-	s.Size = 0
-	s.Attr |= AttrDuplicateOK
-	reachable := s.Attr.Reachable()
-	Addaddr(ctxt, s, sp)
-	adduintxx(ctxt, s, uint64(len(value)), SysArch.PtrSize)
-
-	// addstring, addaddr, etc., mark the symbols as reachable.
-	// In this case that is not necessarily true, so stick to what
-	// we know before entering this function.
-	s.Attr.Set(AttrReachable, reachable)
-
-	strdata = append(strdata, s)
-
-	sp.Attr.Set(AttrReachable, reachable)
-}
-
-func (ctxt *Link) checkstrdata() {
-	for _, s := range strdata {
-		if s.Type == obj.STEXT {
-			Errorf(s, "cannot use -X with text symbol")
-		} else if s.Gotype != nil && s.Gotype.Name != "type.string" {
-			Errorf(s, "cannot use -X with non-string symbol")
-		}
-	}
-}
-
-func Addstring(s *Symbol, str string) int64 {
-	if s.Type == 0 {
-		s.Type = obj.SNOPTRDATA
-	}
-	s.Attr |= AttrReachable
-	r := s.Size
-	if s.Name == ".shstrtab" {
-		elfsetstring(s, str, int(r))
-	}
-	s.P = append(s.P, str...)
-	s.P = append(s.P, 0)
-	s.Size = int64(len(s.P))
-	return r
-}
-
-// addgostring adds str, as a Go string value, to s. symname is the name of the
-// symbol used to define the string data and must be unique per linked object.
-func addgostring(ctxt *Link, s *Symbol, symname, str string) {
-	sym := ctxt.Syms.Lookup(symname, 0)
-	if sym.Type != obj.Sxxx {
-		Errorf(s, "duplicate symname in addgostring: %s", symname)
-	}
-	sym.Attr |= AttrReachable
-	sym.Attr |= AttrLocal
-	sym.Type = obj.SRODATA
-	sym.Size = int64(len(str))
-	sym.P = []byte(str)
-	Addaddr(ctxt, s, sym)
-	adduint(ctxt, s, uint64(len(str)))
-}
-
-func addinitarrdata(ctxt *Link, s *Symbol) {
-	p := s.Name + ".ptr"
-	sp := ctxt.Syms.Lookup(p, 0)
-	sp.Type = obj.SINITARR
-	sp.Size = 0
-	sp.Attr |= AttrDuplicateOK
-	Addaddr(ctxt, sp, s)
-}
-
-func dosymtype(ctxt *Link) {
-	for _, s := range ctxt.Syms.Allsym {
-		if len(s.P) > 0 {
-			if s.Type == obj.SBSS {
-				s.Type = obj.SDATA
-			}
-			if s.Type == obj.SNOPTRBSS {
-				s.Type = obj.SNOPTRDATA
-			}
-		}
-		// Create a new entry in the .init_array section that points to the
-		// library initializer function.
-		switch Buildmode {
-		case BuildmodeCArchive, BuildmodeCShared:
-			if s.Name == *flagEntrySymbol {
-				addinitarrdata(ctxt, s)
-			}
-		}
-	}
-}
-
-// symalign returns the required alignment for the given symbol s.
-func symalign(s *Symbol) int32 {
-	min := int32(Thearch.Minalign)
-	if s.Align >= min {
-		return s.Align
-	} else if s.Align != 0 {
-		return min
-	}
-	if strings.HasPrefix(s.Name, "go.string.") || strings.HasPrefix(s.Name, "type..namedata.") {
-		// String data is just bytes.
-		// If we align it, we waste a lot of space to padding.
-		return min
-	}
-	align := int32(Thearch.Maxalign)
-	for int64(align) > s.Size && align > min {
-		align >>= 1
-	}
-	return align
-}
-
-func aligndatsize(datsize int64, s *Symbol) int64 {
-	return Rnd(datsize, int64(symalign(s)))
-}
-
-const debugGCProg = false
-
-type GCProg struct {
-	ctxt *Link
-	sym  *Symbol
-	w    gcprog.Writer
-}
-
-func (p *GCProg) Init(ctxt *Link, name string) {
-	p.ctxt = ctxt
-	p.sym = ctxt.Syms.Lookup(name, 0)
-	p.w.Init(p.writeByte(ctxt))
-	if debugGCProg {
-		fmt.Fprintf(os.Stderr, "ld: start GCProg %s\n", name)
-		p.w.Debug(os.Stderr)
-	}
-}
-
-func (p *GCProg) writeByte(ctxt *Link) func(x byte) {
-	return func(x byte) {
-		Adduint8(ctxt, p.sym, x)
-	}
-}
-
-func (p *GCProg) End(size int64) {
-	p.w.ZeroUntil(size / int64(SysArch.PtrSize))
-	p.w.End()
-	if debugGCProg {
-		fmt.Fprintf(os.Stderr, "ld: end GCProg\n")
-	}
-}
-
-func (p *GCProg) AddSym(s *Symbol) {
-	typ := s.Gotype
-	// Things without pointers should be in SNOPTRDATA or SNOPTRBSS;
-	// everything we see should have pointers and should therefore have a type.
-	if typ == nil {
-		switch s.Name {
-		case "runtime.data", "runtime.edata", "runtime.bss", "runtime.ebss":
-			// Ignore special symbols that are sometimes laid out
-			// as real symbols. See comment about dyld on darwin in
-			// the address function.
-			return
-		}
-		Errorf(s, "missing Go type information for global symbol: size %d", s.Size)
-		return
-	}
-
-	ptrsize := int64(SysArch.PtrSize)
-	nptr := decodetypePtrdata(p.ctxt.Arch, typ) / ptrsize
-
-	if debugGCProg {
-		fmt.Fprintf(os.Stderr, "gcprog sym: %s at %d (ptr=%d+%d)\n", s.Name, s.Value, s.Value/ptrsize, nptr)
-	}
-
-	if decodetypeUsegcprog(typ) == 0 {
-		// Copy pointers from mask into program.
-		mask := decodetypeGcmask(p.ctxt, typ)
-		for i := int64(0); i < nptr; i++ {
-			if (mask[i/8]>>uint(i%8))&1 != 0 {
-				p.w.Ptr(s.Value/ptrsize + i)
-			}
-		}
-		return
-	}
-
-	// Copy program.
-	prog := decodetypeGcprog(p.ctxt, typ)
-	p.w.ZeroUntil(s.Value / ptrsize)
-	p.w.Append(prog[4:], nptr)
-}
-
-// dataSortKey is used to sort a slice of data symbol *Symbol pointers.
-// The sort keys are kept inline to improve cache behavior while sorting.
-type dataSortKey struct {
-	size int64
-	name string
-	sym  *Symbol
-}
-
-type bySizeAndName []dataSortKey
-
-func (d bySizeAndName) Len() int      { return len(d) }
-func (d bySizeAndName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
-func (d bySizeAndName) Less(i, j int) bool {
-	s1, s2 := d[i], d[j]
-	if s1.size != s2.size {
-		return s1.size < s2.size
-	}
-	return s1.name < s2.name
-}
-
-const cutoff int64 = 2e9 // 2 GB (or so; looks better in errors than 2^31)
-
-func checkdatsize(ctxt *Link, datsize int64, symn obj.SymKind) {
-	if datsize > cutoff {
-		Errorf(nil, "too much data in section %v (over %d bytes)", symn, cutoff)
-	}
-}
-
-// datap is a collection of reachable data symbols in address order.
-// Generated by dodata.
-var datap []*Symbol
-
-func (ctxt *Link) dodata() {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f dodata\n", obj.Cputime())
-	}
-
-	if ctxt.DynlinkingGo() && Headtype == obj.Hdarwin {
-		// The values in moduledata are filled out by relocations
-		// pointing to the addresses of these special symbols.
-		// Typically these symbols have no size and are not laid
-		// out with their matching section.
-		//
-		// However on darwin, dyld will find the special symbol
-		// in the first loaded module, even though it is local.
-		//
-		// (A hypothesis, formed without looking in the dyld sources:
-		// these special symbols have no size, so their address
-		// matches a real symbol. The dynamic linker assumes we
-		// want the normal symbol with the same address and finds
-		// it in the other module.)
-		//
-		// To work around this, we lay out the symbols whose
-		// addresses are vital for multi-module programs to work
-		// as normal symbols, and give them a little size.
-		bss := ctxt.Syms.Lookup("runtime.bss", 0)
-		bss.Size = 8
-		bss.Attr.Set(AttrSpecial, false)
-
-		ctxt.Syms.Lookup("runtime.ebss", 0).Attr.Set(AttrSpecial, false)
-
-		data := ctxt.Syms.Lookup("runtime.data", 0)
-		data.Size = 8
-		data.Attr.Set(AttrSpecial, false)
-
-		ctxt.Syms.Lookup("runtime.edata", 0).Attr.Set(AttrSpecial, false)
-
-		types := ctxt.Syms.Lookup("runtime.types", 0)
-		types.Type = obj.STYPE
-		types.Size = 8
-		types.Attr.Set(AttrSpecial, false)
-
-		etypes := ctxt.Syms.Lookup("runtime.etypes", 0)
-		etypes.Type = obj.SFUNCTAB
-		etypes.Attr.Set(AttrSpecial, false)
-	}
-
-	// Collect data symbols by type into data.
-	var data [obj.SXREF][]*Symbol
-	for _, s := range ctxt.Syms.Allsym {
-		if !s.Attr.Reachable() || s.Attr.Special() {
-			continue
-		}
-		if s.Type <= obj.STEXT || s.Type >= obj.SXREF {
-			continue
-		}
-		data[s.Type] = append(data[s.Type], s)
-	}
-
-	// Now that we have the data symbols, but before we start
-	// to assign addresses, record all the necessary
-	// dynamic relocations. These will grow the relocation
-	// symbol, which is itself data.
-	//
-	// On darwin, we need the symbol table numbers for dynreloc.
-	if Headtype == obj.Hdarwin {
-		machosymorder(ctxt)
-	}
-	dynreloc(ctxt, &data)
-
-	if UseRelro() {
-		// "read only" data with relocations needs to go in its own section
-		// when building a shared library. We do this by boosting objects of
-		// type SXXX with relocations to type SXXXRELRO.
-		for _, symnro := range obj.ReadOnly {
-			symnrelro := obj.RelROMap[symnro]
-
-			ro := []*Symbol{}
-			relro := data[symnrelro]
-
-			for _, s := range data[symnro] {
-				isRelro := len(s.R) > 0
-				switch s.Type {
-				case obj.STYPE, obj.STYPERELRO, obj.SGOFUNCRELRO:
-					// Symbols are not sorted yet, so it is possible
-					// that an Outer symbol has been changed to a
-					// relro Type before it reaches here.
-					isRelro = true
-				}
-				if isRelro {
-					s.Type = symnrelro
-					if s.Outer != nil {
-						s.Outer.Type = s.Type
-					}
-					relro = append(relro, s)
-				} else {
-					ro = append(ro, s)
-				}
-			}
-
-			// Check that we haven't made two symbols with the same .Outer into
-			// different types (because references to symbols with non-nil Outer
-			// become references to the outer symbol + offset, it is vital that the
-			// symbol and the outer end up in the same section).
-			for _, s := range relro {
-				if s.Outer != nil && s.Outer.Type != s.Type {
-					Errorf(s, "inconsistent types for symbol and its Outer %s (%v != %v)",
-						s.Outer.Name, s.Type, s.Outer.Type)
-				}
-			}
-
-			data[symnro] = ro
-			data[symnrelro] = relro
-		}
-	}
-
-	// Sort symbols.
-	var dataMaxAlign [obj.SXREF]int32
-	var wg sync.WaitGroup
-	for symn := range data {
-		symn := obj.SymKind(symn)
-		wg.Add(1)
-		go func() {
-			data[symn], dataMaxAlign[symn] = dodataSect(ctxt, symn, data[symn])
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-
-	// Allocate sections.
-	// Data is processed before segtext, because we need
-	// to see all symbols in the .data and .bss sections in order
-	// to generate garbage collection information.
-	datsize := int64(0)
-
-	// Writable data sections that do not need any specialized handling.
-	writable := []obj.SymKind{
-		obj.SELFSECT,
-		obj.SMACHO,
-		obj.SMACHOGOT,
-		obj.SWINDOWS,
-	}
-	for _, symn := range writable {
-		for _, s := range data[symn] {
-			sect := addsection(&Segdata, s.Name, 06)
-			sect.Align = symalign(s)
-			datsize = Rnd(datsize, int64(sect.Align))
-			sect.Vaddr = uint64(datsize)
-			s.Sect = sect
-			s.Type = obj.SDATA
-			s.Value = int64(uint64(datsize) - sect.Vaddr)
-			datsize += s.Size
-			sect.Length = uint64(datsize) - sect.Vaddr
-		}
-		checkdatsize(ctxt, datsize, symn)
-	}
-
-	// .got (and .toc on ppc64)
-	if len(data[obj.SELFGOT]) > 0 {
-		sect := addsection(&Segdata, ".got", 06)
-		sect.Align = dataMaxAlign[obj.SELFGOT]
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		var toc *Symbol
-		for _, s := range data[obj.SELFGOT] {
-			datsize = aligndatsize(datsize, s)
-			s.Sect = sect
-			s.Type = obj.SDATA
-			s.Value = int64(uint64(datsize) - sect.Vaddr)
-
-			// Resolve .TOC. symbol for this object file (ppc64)
-			toc = ctxt.Syms.ROLookup(".TOC.", int(s.Version))
-			if toc != nil {
-				toc.Sect = sect
-				toc.Outer = s
-				toc.Sub = s.Sub
-				s.Sub = toc
-
-				toc.Value = 0x8000
-			}
-
-			datsize += s.Size
-		}
-		checkdatsize(ctxt, datsize, obj.SELFGOT)
-		sect.Length = uint64(datsize) - sect.Vaddr
-	}
-
-	/* pointer-free data */
-	sect := addsection(&Segdata, ".noptrdata", 06)
-	sect.Align = dataMaxAlign[obj.SNOPTRDATA]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.noptrdata", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.enoptrdata", 0).Sect = sect
-	for _, s := range data[obj.SNOPTRDATA] {
-		datsize = aligndatsize(datsize, s)
-		s.Sect = sect
-		s.Type = obj.SDATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-	}
-	checkdatsize(ctxt, datsize, obj.SNOPTRDATA)
-	sect.Length = uint64(datsize) - sect.Vaddr
-
-	hasinitarr := *FlagLinkshared
-
-	/* shared library initializer */
-	switch Buildmode {
-	case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared, BuildmodePlugin:
-		hasinitarr = true
-	}
-	if hasinitarr {
-		sect := addsection(&Segdata, ".init_array", 06)
-		sect.Align = dataMaxAlign[obj.SINITARR]
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		for _, s := range data[obj.SINITARR] {
-			datsize = aligndatsize(datsize, s)
-			s.Sect = sect
-			s.Value = int64(uint64(datsize) - sect.Vaddr)
-			datsize += s.Size
-		}
-		sect.Length = uint64(datsize) - sect.Vaddr
-		checkdatsize(ctxt, datsize, obj.SINITARR)
-	}
-
-	/* data */
-	sect = addsection(&Segdata, ".data", 06)
-	sect.Align = dataMaxAlign[obj.SDATA]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.data", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.edata", 0).Sect = sect
-	var gc GCProg
-	gc.Init(ctxt, "runtime.gcdata")
-	for _, s := range data[obj.SDATA] {
-		s.Sect = sect
-		s.Type = obj.SDATA
-		datsize = aligndatsize(datsize, s)
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		gc.AddSym(s)
-		datsize += s.Size
-	}
-	checkdatsize(ctxt, datsize, obj.SDATA)
-	sect.Length = uint64(datsize) - sect.Vaddr
-	gc.End(int64(sect.Length))
-
-	/* bss */
-	sect = addsection(&Segdata, ".bss", 06)
-	sect.Align = dataMaxAlign[obj.SBSS]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.bss", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.ebss", 0).Sect = sect
-	gc = GCProg{}
-	gc.Init(ctxt, "runtime.gcbss")
-	for _, s := range data[obj.SBSS] {
-		s.Sect = sect
-		datsize = aligndatsize(datsize, s)
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		gc.AddSym(s)
-		datsize += s.Size
-	}
-	checkdatsize(ctxt, datsize, obj.SBSS)
-	sect.Length = uint64(datsize) - sect.Vaddr
-	gc.End(int64(sect.Length))
-
-	/* pointer-free bss */
-	sect = addsection(&Segdata, ".noptrbss", 06)
-	sect.Align = dataMaxAlign[obj.SNOPTRBSS]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.noptrbss", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.enoptrbss", 0).Sect = sect
-	for _, s := range data[obj.SNOPTRBSS] {
-		datsize = aligndatsize(datsize, s)
-		s.Sect = sect
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-	}
-
-	sect.Length = uint64(datsize) - sect.Vaddr
-	ctxt.Syms.Lookup("runtime.end", 0).Sect = sect
-	checkdatsize(ctxt, datsize, obj.SNOPTRBSS)
-
-	if len(data[obj.STLSBSS]) > 0 {
-		var sect *Section
-		if Iself && (Linkmode == LinkExternal || !*FlagD) && Headtype != obj.Hopenbsd {
-			sect = addsection(&Segdata, ".tbss", 06)
-			sect.Align = int32(SysArch.PtrSize)
-			sect.Vaddr = 0
-		}
-		datsize = 0
-
-		for _, s := range data[obj.STLSBSS] {
-			datsize = aligndatsize(datsize, s)
-			s.Sect = sect
-			s.Value = datsize
-			datsize += s.Size
-		}
-		checkdatsize(ctxt, datsize, obj.STLSBSS)
-
-		if sect != nil {
-			sect.Length = uint64(datsize)
-		}
-	}
-
-	/*
-	 * We finished data, begin read-only data.
-	 * Not all systems support a separate read-only non-executable data section.
-	 * ELF systems do.
-	 * OS X and Plan 9 do not.
-	 * Windows PE may, but if so we have not implemented it.
-	 * And if we're using external linking mode, the point is moot,
-	 * since it's not our decision; that code expects the sections in
-	 * segtext.
-	 */
-	var segro *Segment
-	if Iself && Linkmode == LinkInternal {
-		segro = &Segrodata
-	} else {
-		segro = &Segtext
-	}
-
-	datsize = 0
-
-	/* read-only executable ELF, Mach-O sections */
-	if len(data[obj.STEXT]) != 0 {
-		Errorf(nil, "dodata found an STEXT symbol: %s", data[obj.STEXT][0].Name)
-	}
-	for _, s := range data[obj.SELFRXSECT] {
-		sect := addsection(&Segtext, s.Name, 04)
-		sect.Align = symalign(s)
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-		sect.Length = uint64(datsize) - sect.Vaddr
-		checkdatsize(ctxt, datsize, obj.SELFRXSECT)
-	}
-
-	/* read-only data */
-	sect = addsection(segro, ".rodata", 04)
-
-	sect.Vaddr = 0
-	ctxt.Syms.Lookup("runtime.rodata", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.erodata", 0).Sect = sect
-	if !UseRelro() {
-		ctxt.Syms.Lookup("runtime.types", 0).Sect = sect
-		ctxt.Syms.Lookup("runtime.etypes", 0).Sect = sect
-	}
-	for _, symn := range obj.ReadOnly {
-		align := dataMaxAlign[symn]
-		if sect.Align < align {
-			sect.Align = align
-		}
-	}
-	datsize = Rnd(datsize, int64(sect.Align))
-	for _, symn := range obj.ReadOnly {
-		for _, s := range data[symn] {
-			datsize = aligndatsize(datsize, s)
-			s.Sect = sect
-			s.Type = obj.SRODATA
-			s.Value = int64(uint64(datsize) - sect.Vaddr)
-			datsize += s.Size
-		}
-		checkdatsize(ctxt, datsize, symn)
-	}
-	sect.Length = uint64(datsize) - sect.Vaddr
-
-	/* read-only ELF, Mach-O sections */
-	for _, s := range data[obj.SELFROSECT] {
-		sect = addsection(segro, s.Name, 04)
-		sect.Align = symalign(s)
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-		sect.Length = uint64(datsize) - sect.Vaddr
-	}
-	checkdatsize(ctxt, datsize, obj.SELFROSECT)
-
-	for _, s := range data[obj.SMACHOPLT] {
-		sect = addsection(segro, s.Name, 04)
-		sect.Align = symalign(s)
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-		sect.Length = uint64(datsize) - sect.Vaddr
-	}
-	checkdatsize(ctxt, datsize, obj.SMACHOPLT)
-
-	// There is some data that is conceptually read-only but is written to by
-	// relocations. On GNU systems, we can arrange for the dynamic linker to
-	// mprotect sections after relocations are applied by giving them write
-	// permissions in the object file and calling them ".data.rel.ro.FOO". We
-	// divide the .rodata section between actual .rodata and .data.rel.ro.rodata,
-	// but for the other sections that this applies to, we just write a read-only
-	// .FOO section or a read-write .data.rel.ro.FOO section depending on the
-	// situation.
-	// TODO(mwhudson): It would make sense to do this more widely, but it makes
-	// the system linker segfault on darwin.
-	addrelrosection := func(suffix string) *Section {
-		return addsection(segro, suffix, 04)
-	}
-
-	if UseRelro() {
-		addrelrosection = func(suffix string) *Section {
-			seg := &Segrelrodata
-			if Linkmode == LinkExternal {
-				// Using a separate segment with an external
-				// linker results in some programs moving
-				// their data sections unexpectedly, which
-				// corrupts the moduledata. So we use the
-				// rodata segment and let the external linker
-				// sort out a rel.ro segment.
-				seg = &Segrodata
-			}
-			return addsection(seg, ".data.rel.ro"+suffix, 06)
-		}
-		/* data only written by relocations */
-		sect = addrelrosection("")
-
-		sect.Vaddr = 0
-		ctxt.Syms.Lookup("runtime.types", 0).Sect = sect
-		ctxt.Syms.Lookup("runtime.etypes", 0).Sect = sect
-		for _, symnro := range obj.ReadOnly {
-			symn := obj.RelROMap[symnro]
-			align := dataMaxAlign[symn]
-			if sect.Align < align {
-				sect.Align = align
-			}
-		}
-		datsize = Rnd(datsize, int64(sect.Align))
-		for _, symnro := range obj.ReadOnly {
-			symn := obj.RelROMap[symnro]
-			for _, s := range data[symn] {
-				datsize = aligndatsize(datsize, s)
-				if s.Outer != nil && s.Outer.Sect != nil && s.Outer.Sect != sect {
-					Errorf(s, "s.Outer (%s) in different section from s, %s != %s", s.Outer.Name, s.Outer.Sect.Name, sect.Name)
-				}
-				s.Sect = sect
-				s.Type = obj.SRODATA
-				s.Value = int64(uint64(datsize) - sect.Vaddr)
-				datsize += s.Size
-			}
-			checkdatsize(ctxt, datsize, symn)
-		}
-
-		sect.Length = uint64(datsize) - sect.Vaddr
-	}
-
-	/* typelink */
-	sect = addrelrosection(".typelink")
-	sect.Align = dataMaxAlign[obj.STYPELINK]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	typelink := ctxt.Syms.Lookup("runtime.typelink", 0)
-	typelink.Sect = sect
-	typelink.Type = obj.RODATA
-	datsize += typelink.Size
-	checkdatsize(ctxt, datsize, obj.STYPELINK)
-	sect.Length = uint64(datsize) - sect.Vaddr
-
-	/* itablink */
-	sect = addrelrosection(".itablink")
-	sect.Align = dataMaxAlign[obj.SITABLINK]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.itablink", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.eitablink", 0).Sect = sect
-	for _, s := range data[obj.SITABLINK] {
-		datsize = aligndatsize(datsize, s)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-	}
-	checkdatsize(ctxt, datsize, obj.SITABLINK)
-	sect.Length = uint64(datsize) - sect.Vaddr
-
-	/* gosymtab */
-	sect = addrelrosection(".gosymtab")
-	sect.Align = dataMaxAlign[obj.SSYMTAB]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.symtab", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.esymtab", 0).Sect = sect
-	for _, s := range data[obj.SSYMTAB] {
-		datsize = aligndatsize(datsize, s)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-	}
-	checkdatsize(ctxt, datsize, obj.SSYMTAB)
-	sect.Length = uint64(datsize) - sect.Vaddr
-
-	/* gopclntab */
-	sect = addrelrosection(".gopclntab")
-	sect.Align = dataMaxAlign[obj.SPCLNTAB]
-	datsize = Rnd(datsize, int64(sect.Align))
-	sect.Vaddr = uint64(datsize)
-	ctxt.Syms.Lookup("runtime.pclntab", 0).Sect = sect
-	ctxt.Syms.Lookup("runtime.epclntab", 0).Sect = sect
-	for _, s := range data[obj.SPCLNTAB] {
-		datsize = aligndatsize(datsize, s)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-	}
-	checkdatsize(ctxt, datsize, obj.SRODATA)
-	sect.Length = uint64(datsize) - sect.Vaddr
-
-	// 6g uses 4-byte relocation offsets, so the entire segment must fit in 32 bits.
-	if datsize != int64(uint32(datsize)) {
-		Errorf(nil, "read-only data segment too large: %d", datsize)
-	}
-
-	for symn := obj.SELFRXSECT; symn < obj.SXREF; symn++ {
-		datap = append(datap, data[symn]...)
-	}
-
-	dwarfgeneratedebugsyms(ctxt)
-
-	var s *Symbol
-	var i int
-	for i, s = range dwarfp {
-		if s.Type != obj.SDWARFSECT {
-			break
-		}
-		sect = addsection(&Segdwarf, s.Name, 04)
-		sect.Align = 1
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		s.Sect = sect
-		s.Type = obj.SRODATA
-		s.Value = int64(uint64(datsize) - sect.Vaddr)
-		datsize += s.Size
-		sect.Length = uint64(datsize) - sect.Vaddr
-	}
-	checkdatsize(ctxt, datsize, obj.SDWARFSECT)
-
-	if i < len(dwarfp) {
-		sect = addsection(&Segdwarf, ".debug_info", 04)
-		sect.Align = 1
-		datsize = Rnd(datsize, int64(sect.Align))
-		sect.Vaddr = uint64(datsize)
-		for _, s := range dwarfp[i:] {
-			if s.Type != obj.SDWARFINFO {
-				break
-			}
-			s.Sect = sect
-			s.Type = obj.SRODATA
-			s.Value = int64(uint64(datsize) - sect.Vaddr)
-			s.Attr |= AttrLocal
-			datsize += s.Size
-		}
-		sect.Length = uint64(datsize) - sect.Vaddr
-		checkdatsize(ctxt, datsize, obj.SDWARFINFO)
-	}
-
-	/* number the sections */
-	n := int32(1)
-
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		sect.Extnum = int16(n)
-		n++
-	}
-	for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
-		sect.Extnum = int16(n)
-		n++
-	}
-	for sect := Segrelrodata.Sect; sect != nil; sect = sect.Next {
-		sect.Extnum = int16(n)
-		n++
-	}
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		sect.Extnum = int16(n)
-		n++
-	}
-	for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-		sect.Extnum = int16(n)
-		n++
-	}
-}
-
-func dodataSect(ctxt *Link, symn obj.SymKind, syms []*Symbol) (result []*Symbol, maxAlign int32) {
-	if Headtype == obj.Hdarwin {
-		// Some symbols may no longer belong in syms
-		// due to movement in machosymorder.
-		newSyms := make([]*Symbol, 0, len(syms))
-		for _, s := range syms {
-			if s.Type == symn {
-				newSyms = append(newSyms, s)
-			}
-		}
-		syms = newSyms
-	}
-
-	var head, tail *Symbol
-	symsSort := make([]dataSortKey, 0, len(syms))
-	for _, s := range syms {
-		if s.Attr.OnList() {
-			log.Fatalf("symbol %s listed multiple times", s.Name)
-		}
-		s.Attr |= AttrOnList
-		switch {
-		case s.Size < int64(len(s.P)):
-			Errorf(s, "initialize bounds (%d < %d)", s.Size, len(s.P))
-		case s.Size < 0:
-			Errorf(s, "negative size (%d bytes)", s.Size)
-		case s.Size > cutoff:
-			Errorf(s, "symbol too large (%d bytes)", s.Size)
-		}
-
-		// If the usually-special section-marker symbols are being laid
-		// out as regular symbols, put them either at the beginning or
-		// end of their section.
-		if ctxt.DynlinkingGo() && Headtype == obj.Hdarwin {
-			switch s.Name {
-			case "runtime.text", "runtime.bss", "runtime.data", "runtime.types":
-				head = s
-				continue
-			case "runtime.etext", "runtime.ebss", "runtime.edata", "runtime.etypes":
-				tail = s
-				continue
-			}
-		}
-
-		key := dataSortKey{
-			size: s.Size,
-			name: s.Name,
-			sym:  s,
-		}
-
-		switch s.Type {
-		case obj.SELFGOT:
-			// For ppc64, we want to interleave the .got and .toc sections
-			// from input files. Both are type SELFGOT, so in that case
-			// we skip size comparison and fall through to the name
-			// comparison (conveniently, .got sorts before .toc).
-			key.size = 0
-		}
-
-		symsSort = append(symsSort, key)
-	}
-
-	sort.Sort(bySizeAndName(symsSort))
-
-	off := 0
-	if head != nil {
-		syms[0] = head
-		off++
-	}
-	for i, symSort := range symsSort {
-		syms[i+off] = symSort.sym
-		align := symalign(symSort.sym)
-		if maxAlign < align {
-			maxAlign = align
-		}
-	}
-	if tail != nil {
-		syms[len(syms)-1] = tail
-	}
-
-	if Iself && symn == obj.SELFROSECT {
-		// Make .rela and .rela.plt contiguous; the ELF ABI requires this
-		// and Solaris actually cares.
-		reli, plti := -1, -1
-		for i, s := range syms {
-			switch s.Name {
-			case ".rel.plt", ".rela.plt":
-				plti = i
-			case ".rel", ".rela":
-				reli = i
-			}
-		}
-		if reli >= 0 && plti >= 0 && plti != reli+1 {
-			var first, second int
-			if plti > reli {
-				first, second = reli, plti
-			} else {
-				first, second = plti, reli
-			}
-			rel, plt := syms[reli], syms[plti]
-			copy(syms[first+2:], syms[first+1:second])
-			syms[first+0] = rel
-			syms[first+1] = plt
-
-			// Make sure alignment doesn't introduce a gap.
-			// Setting the alignment explicitly prevents
-			// symalign from basing it on the size and
-			// getting it wrong.
-			rel.Align = int32(SysArch.RegSize)
-			plt.Align = int32(SysArch.RegSize)
-		}
-	}
-
-	return syms, maxAlign
-}
-
-// Add buildid to beginning of text segment, on non-ELF systems.
-// Non-ELF binary formats are not always flexible enough to
-// give us a place to put the Go build ID. On those systems, we put it
-// at the very beginning of the text segment.
-// This ``header'' is read by cmd/go.
-func (ctxt *Link) textbuildid() {
-	if Iself || Buildmode == BuildmodePlugin || *flagBuildid == "" {
-		return
-	}
-
-	sym := ctxt.Syms.Lookup("go.buildid", 0)
-	sym.Attr |= AttrReachable
-	// The \xff is invalid UTF-8, meant to make it less likely
-	// to find one of these accidentally.
-	data := "\xff Go build ID: " + strconv.Quote(*flagBuildid) + "\n \xff"
-	sym.Type = obj.STEXT
-	sym.P = []byte(data)
-	sym.Size = int64(len(sym.P))
-
-	ctxt.Textp = append(ctxt.Textp, nil)
-	copy(ctxt.Textp[1:], ctxt.Textp)
-	ctxt.Textp[0] = sym
-}
-
-// assign addresses to text
-func (ctxt *Link) textaddress() {
-	addsection(&Segtext, ".text", 05)
-
-	// Assign PCs in text segment.
-	// Could parallelize, by assigning to text
-	// and then letting threads copy down, but probably not worth it.
-	sect := Segtext.Sect
-
-	sect.Align = int32(Funcalign)
-
-	text := ctxt.Syms.Lookup("runtime.text", 0)
-	text.Sect = sect
-
-	if ctxt.DynlinkingGo() && Headtype == obj.Hdarwin {
-		etext := ctxt.Syms.Lookup("runtime.etext", 0)
-		etext.Sect = sect
-
-		ctxt.Textp = append(ctxt.Textp, etext, nil)
-		copy(ctxt.Textp[1:], ctxt.Textp)
-		ctxt.Textp[0] = text
-	}
-
-	if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-		ctxt.Syms.Lookup(".text", 0).Sect = sect
-	}
-	va := uint64(*FlagTextAddr)
-	n := 1
-	sect.Vaddr = va
-	ntramps := 0
-	for _, sym := range ctxt.Textp {
-		sect, n, va = assignAddress(ctxt, sect, n, sym, va)
-
-		trampoline(ctxt, sym) // resolve jumps, may add trampolines if jump too far
-
-		// lay down trampolines after each function
-		for ; ntramps < len(ctxt.tramps); ntramps++ {
-			tramp := ctxt.tramps[ntramps]
-			sect, n, va = assignAddress(ctxt, sect, n, tramp, va)
-		}
-	}
-
-	sect.Length = va - sect.Vaddr
-	ctxt.Syms.Lookup("runtime.etext", 0).Sect = sect
-
-	// merge tramps into Textp, keeping Textp in address order
-	if ntramps != 0 {
-		newtextp := make([]*Symbol, 0, len(ctxt.Textp)+ntramps)
-		i := 0
-		for _, sym := range ctxt.Textp {
-			for ; i < ntramps && ctxt.tramps[i].Value < sym.Value; i++ {
-				newtextp = append(newtextp, ctxt.tramps[i])
-			}
-			newtextp = append(newtextp, sym)
-		}
-		newtextp = append(newtextp, ctxt.tramps[i:ntramps]...)
-
-		ctxt.Textp = newtextp
-	}
-}
-
-// assignAddress assigns an address to the text symbol sym and returns the (possibly new) section, its number, and the address.
-// Note: once we have trampoline insertion support for external linking, this function
-// will no longer need to create new text sections, and so will not need to return sect and n.
-func assignAddress(ctxt *Link, sect *Section, n int, sym *Symbol, va uint64) (*Section, int, uint64) {
-	sym.Sect = sect
-	if sym.Type&obj.SSUB != 0 {
-		return sect, n, va
-	}
-	if sym.Align != 0 {
-		va = uint64(Rnd(int64(va), int64(sym.Align)))
-	} else {
-		va = uint64(Rnd(int64(va), int64(Funcalign)))
-	}
-	sym.Value = 0
-	for sub := sym; sub != nil; sub = sub.Sub {
-		sub.Value += int64(va)
-	}
-
-	funcsize := uint64(MINFUNC) // spacing required for findfunctab
-	if sym.Size > MINFUNC {
-		funcsize = uint64(sym.Size)
-	}
-
-	// On ppc64x a text section should not be larger than 2^26 bytes due to the size of
-	// the call target offset field in the bl instruction. Splitting into text sections
-	// smaller than this limit allows the GNU linker to modify the long calls
-	// appropriately.  The limit allows for the space needed for tables inserted by the linker.
-
-	// If this function doesn't fit in the current text section, then create a new one.
-
-	// Only break at outermost syms.
-
-	if SysArch.InFamily(sys.PPC64) && sym.Outer == nil && Iself && Linkmode == LinkExternal && va-sect.Vaddr+funcsize > 0x1c00000 {
-
-		// Set the length for the previous text section
-		sect.Length = va - sect.Vaddr
-
-		// Create new section, set the starting Vaddr
-		sect = addsection(&Segtext, ".text", 05)
-		sect.Vaddr = va
-		sym.Sect = sect
-
-		// Create a symbol for the start of the secondary text sections
-		ctxt.Syms.Lookup(fmt.Sprintf("runtime.text.%d", n), 0).Sect = sect
-		n++
-	}
-	va += funcsize
-
-	return sect, n, va
-}
-
-// assign addresses
-func (ctxt *Link) address() {
-	va := uint64(*FlagTextAddr)
-	Segtext.Rwx = 05
-	Segtext.Vaddr = va
-	Segtext.Fileoff = uint64(HEADR)
-	for s := Segtext.Sect; s != nil; s = s.Next {
-		va = uint64(Rnd(int64(va), int64(s.Align)))
-		s.Vaddr = va
-		va += s.Length
-	}
-
-	Segtext.Length = va - uint64(*FlagTextAddr)
-	Segtext.Filelen = Segtext.Length
-	if Headtype == obj.Hnacl {
-		va += 32 // room for the "halt sled"
-	}
-
-	if Segrodata.Sect != nil {
-		// align to page boundary so as not to mix
-		// rodata and executable text.
-		//
-		// Note: gold or GNU ld will reduce the size of the executable
-		// file by arranging for the relro segment to end at a page
-		// boundary, and overlap the end of the text segment with the
-		// start of the relro segment in the file.  The PT_LOAD segments
-		// will be such that the last page of the text segment will be
-		// mapped twice, once r-x and once starting out rw- and, after
-		// relocation processing, changed to r--.
-		//
-		// Ideally the last page of the text segment would not be
-		// writable even for this short period.
-		va = uint64(Rnd(int64(va), int64(*FlagRound)))
-
-		Segrodata.Rwx = 04
-		Segrodata.Vaddr = va
-		Segrodata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
-		Segrodata.Filelen = 0
-		for s := Segrodata.Sect; s != nil; s = s.Next {
-			va = uint64(Rnd(int64(va), int64(s.Align)))
-			s.Vaddr = va
-			va += s.Length
-		}
-
-		Segrodata.Length = va - Segrodata.Vaddr
-		Segrodata.Filelen = Segrodata.Length
-	}
-	if Segrelrodata.Sect != nil {
-		// align to page boundary so as not to mix
-		// rodata, rel-ro data, and executable text.
-		va = uint64(Rnd(int64(va), int64(*FlagRound)))
-
-		Segrelrodata.Rwx = 06
-		Segrelrodata.Vaddr = va
-		Segrelrodata.Fileoff = va - Segrodata.Vaddr + Segrodata.Fileoff
-		Segrelrodata.Filelen = 0
-		for s := Segrelrodata.Sect; s != nil; s = s.Next {
-			va = uint64(Rnd(int64(va), int64(s.Align)))
-			s.Vaddr = va
-			va += s.Length
-		}
-
-		Segrelrodata.Length = va - Segrelrodata.Vaddr
-		Segrelrodata.Filelen = Segrelrodata.Length
-	}
-
-	va = uint64(Rnd(int64(va), int64(*FlagRound)))
-	Segdata.Rwx = 06
-	Segdata.Vaddr = va
-	Segdata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
-	Segdata.Filelen = 0
-	if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-		Segdata.Fileoff = Segtext.Fileoff + uint64(Rnd(int64(Segtext.Length), PEFILEALIGN))
-	}
-	if Headtype == obj.Hplan9 {
-		Segdata.Fileoff = Segtext.Fileoff + Segtext.Filelen
-	}
-	var data *Section
-	var noptr *Section
-	var bss *Section
-	var noptrbss *Section
-	var vlen int64
-	for s := Segdata.Sect; s != nil; s = s.Next {
-		if Iself && s.Name == ".tbss" {
-			continue
-		}
-		vlen = int64(s.Length)
-		if s.Next != nil && !(Iself && s.Next.Name == ".tbss") {
-			vlen = int64(s.Next.Vaddr - s.Vaddr)
-		}
-		s.Vaddr = va
-		va += uint64(vlen)
-		Segdata.Length = va - Segdata.Vaddr
-		if s.Name == ".data" {
-			data = s
-		}
-		if s.Name == ".noptrdata" {
-			noptr = s
-		}
-		if s.Name == ".bss" {
-			bss = s
-		}
-		if s.Name == ".noptrbss" {
-			noptrbss = s
-		}
-	}
-
-	Segdata.Filelen = bss.Vaddr - Segdata.Vaddr
-
-	va = uint64(Rnd(int64(va), int64(*FlagRound)))
-	Segdwarf.Rwx = 06
-	Segdwarf.Vaddr = va
-	Segdwarf.Fileoff = Segdata.Fileoff + uint64(Rnd(int64(Segdata.Filelen), int64(*FlagRound)))
-	Segdwarf.Filelen = 0
-	if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-		Segdwarf.Fileoff = Segdata.Fileoff + uint64(Rnd(int64(Segdata.Filelen), int64(PEFILEALIGN)))
-	}
-	for s := Segdwarf.Sect; s != nil; s = s.Next {
-		vlen = int64(s.Length)
-		if s.Next != nil {
-			vlen = int64(s.Next.Vaddr - s.Vaddr)
-		}
-		s.Vaddr = va
-		va += uint64(vlen)
-		if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-			va = uint64(Rnd(int64(va), PEFILEALIGN))
-		}
-		Segdwarf.Length = va - Segdwarf.Vaddr
-	}
-
-	Segdwarf.Filelen = va - Segdwarf.Vaddr
-
-	var (
-		text     = Segtext.Sect
-		rodata   = ctxt.Syms.Lookup("runtime.rodata", 0).Sect
-		itablink = ctxt.Syms.Lookup("runtime.itablink", 0).Sect
-		symtab   = ctxt.Syms.Lookup("runtime.symtab", 0).Sect
-		pclntab  = ctxt.Syms.Lookup("runtime.pclntab", 0).Sect
-		types    = ctxt.Syms.Lookup("runtime.types", 0).Sect
-	)
-	lasttext := text
-	// Could be multiple .text sections
-	for sect := text.Next; sect != nil && sect.Name == ".text"; sect = sect.Next {
-		lasttext = sect
-	}
-
-	for _, s := range datap {
-		if s.Sect != nil {
-			s.Value += int64(s.Sect.Vaddr)
-		}
-		for sub := s.Sub; sub != nil; sub = sub.Sub {
-			sub.Value += s.Value
-		}
-	}
-
-	for _, sym := range dwarfp {
-		if sym.Sect != nil {
-			sym.Value += int64(sym.Sect.Vaddr)
-		}
-		for sub := sym.Sub; sub != nil; sub = sub.Sub {
-			sub.Value += sym.Value
-		}
-	}
-
-	if Buildmode == BuildmodeShared {
-		s := ctxt.Syms.Lookup("go.link.abihashbytes", 0)
-		sectSym := ctxt.Syms.Lookup(".note.go.abihash", 0)
-		s.Sect = sectSym.Sect
-		s.Value = int64(sectSym.Sect.Vaddr + 16)
-	}
-
-	ctxt.xdefine("runtime.text", obj.STEXT, int64(text.Vaddr))
-	ctxt.xdefine("runtime.etext", obj.STEXT, int64(lasttext.Vaddr+lasttext.Length))
-	if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-		ctxt.xdefine(".text", obj.STEXT, int64(text.Vaddr))
-	}
-
-	// If there are multiple text sections, create a runtime.text.n symbol
-	// for each secondary section's Vaddr, using n as the index.
-	n := 1
-	for sect := Segtext.Sect.Next; sect != nil && sect.Name == ".text"; sect = sect.Next {
-		symname := fmt.Sprintf("runtime.text.%d", n)
-		ctxt.xdefine(symname, obj.STEXT, int64(sect.Vaddr))
-		n++
-	}
-
-	ctxt.xdefine("runtime.rodata", obj.SRODATA, int64(rodata.Vaddr))
-	ctxt.xdefine("runtime.erodata", obj.SRODATA, int64(rodata.Vaddr+rodata.Length))
-	ctxt.xdefine("runtime.types", obj.SRODATA, int64(types.Vaddr))
-	ctxt.xdefine("runtime.etypes", obj.SRODATA, int64(types.Vaddr+types.Length))
-	ctxt.xdefine("runtime.itablink", obj.SRODATA, int64(itablink.Vaddr))
-	ctxt.xdefine("runtime.eitablink", obj.SRODATA, int64(itablink.Vaddr+itablink.Length))
-
-	sym := ctxt.Syms.Lookup("runtime.gcdata", 0)
-	sym.Attr |= AttrLocal
-	ctxt.xdefine("runtime.egcdata", obj.SRODATA, Symaddr(sym)+sym.Size)
-	ctxt.Syms.Lookup("runtime.egcdata", 0).Sect = sym.Sect
-
-	sym = ctxt.Syms.Lookup("runtime.gcbss", 0)
-	sym.Attr |= AttrLocal
-	ctxt.xdefine("runtime.egcbss", obj.SRODATA, Symaddr(sym)+sym.Size)
-	ctxt.Syms.Lookup("runtime.egcbss", 0).Sect = sym.Sect
-
-	ctxt.xdefine("runtime.symtab", obj.SRODATA, int64(symtab.Vaddr))
-	ctxt.xdefine("runtime.esymtab", obj.SRODATA, int64(symtab.Vaddr+symtab.Length))
-	ctxt.xdefine("runtime.pclntab", obj.SRODATA, int64(pclntab.Vaddr))
-	ctxt.xdefine("runtime.epclntab", obj.SRODATA, int64(pclntab.Vaddr+pclntab.Length))
-	ctxt.xdefine("runtime.noptrdata", obj.SNOPTRDATA, int64(noptr.Vaddr))
-	ctxt.xdefine("runtime.enoptrdata", obj.SNOPTRDATA, int64(noptr.Vaddr+noptr.Length))
-	ctxt.xdefine("runtime.bss", obj.SBSS, int64(bss.Vaddr))
-	ctxt.xdefine("runtime.ebss", obj.SBSS, int64(bss.Vaddr+bss.Length))
-	ctxt.xdefine("runtime.data", obj.SDATA, int64(data.Vaddr))
-	ctxt.xdefine("runtime.edata", obj.SDATA, int64(data.Vaddr+data.Length))
-	ctxt.xdefine("runtime.noptrbss", obj.SNOPTRBSS, int64(noptrbss.Vaddr))
-	ctxt.xdefine("runtime.enoptrbss", obj.SNOPTRBSS, int64(noptrbss.Vaddr+noptrbss.Length))
-	ctxt.xdefine("runtime.end", obj.SBSS, int64(Segdata.Vaddr+Segdata.Length))
-}
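
The tail of address above walks each segment's sections, placing every section at the running virtual address and rounding between segments with Rnd. A minimal, self-contained sketch of that layout step, using a toy section type and a hypothetical rnd helper rather than the linker's own types:

package main

import "fmt"

// section is a stand-in for the linker's Section type.
type section struct {
	name   string
	length uint64
	vaddr  uint64
}

// rnd rounds v up to a multiple of r.
func rnd(v, r uint64) uint64 {
	if r == 0 {
		return v
	}
	return (v + r - 1) / r * r
}

func main() {
	const base, align = 0x401000, 0x1000
	sects := []*section{
		{name: ".text", length: 0x2345},
		{name: ".rodata", length: 0x800},
		{name: ".data", length: 0x123},
	}
	va := uint64(base)
	for _, s := range sects {
		s.vaddr = va
		va += s.length
	}
	// A following segment would start at the next aligned address,
	// mirroring the Rnd(..., *FlagRound) calls in the code above.
	next := rnd(va, align)
	for _, s := range sects {
		fmt.Printf("%-8s vaddr=%#x end=%#x\n", s.name, s.vaddr, s.vaddr+s.length)
	}
	fmt.Printf("next segment would start at %#x\n", next)
}
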
-
-// add a trampoline with symbol s (to be laid down after the current function)
-func (ctxt *Link) AddTramp(s *Symbol) {
-	s.Type = obj.STEXT
-	s.Attr |= AttrReachable
-	s.Attr |= AttrOnList
-	ctxt.tramps = append(ctxt.tramps, s)
-	if *FlagDebugTramp > 0 && ctxt.Debugvlog > 0 {
-		ctxt.Logf("trampoline %s inserted\n", s)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/deadcode.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/deadcode.go
deleted file mode 100644
index 2c49b11..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/deadcode.go
+++ /dev/null
@@ -1,366 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/deadcode.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/deadcode.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"strings"
-	"unicode"
-)
-
-// deadcode marks all reachable symbols.
-//
-// The basis of the dead code elimination is a flood fill of symbols,
-// following their relocations, beginning at *flagEntrySymbol.
-//
-// This flood fill is wrapped in logic for pruning unused methods.
-// All methods are mentioned by relocations on their receiver's *rtype.
-// These relocations are specially defined as R_METHODOFF by the compiler
-// so we can detect and manipulate them here.
-//
-// There are three ways a method of a reachable type can be invoked:
-//
-//	1. direct call
-//	2. through a reachable interface type
-//	3. reflect.Value.Call, .Method, or reflect.Method.Func
-//
-// The first case is handled by the flood fill: a directly called method
-// is marked as reachable.
-//
-// The second case is handled by decomposing all reachable interface
-// types into method signatures. Each encountered method is compared
-// against the interface method signatures; if it matches, it is marked
-// as reachable. This is extremely conservative, but easy and correct.
-//
-// The third case is handled by looking to see if any of:
-//	- reflect.Value.Call is reachable
-//	- reflect.Value.Method is reachable
-// 	- reflect.Type.Method or MethodByName is called.
-// If any of these happen, all bets are off and all exported methods
-// of reachable types are marked reachable.
-//
-// Any unreached text symbols are removed from ctxt.Textp.
-func deadcode(ctxt *Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f deadcode\n", obj.Cputime())
-	}
-
-	d := &deadcodepass{
-		ctxt:        ctxt,
-		ifaceMethod: make(map[methodsig]bool),
-	}
-
-	// First, flood fill any symbols directly reachable in the call
-	// graph from *flagEntrySymbol. Ignore all methods not directly called.
-	d.init()
-	d.flood()
-
-	callSym := ctxt.Syms.ROLookup("reflect.Value.Call", 0)
-	methSym := ctxt.Syms.ROLookup("reflect.Value.Method", 0)
-	reflectSeen := false
-
-	if ctxt.DynlinkingGo() {
-		// Exported methods may satisfy interfaces we don't know
-		// about yet when dynamically linking.
-		reflectSeen = true
-	}
-
-	for {
-		if !reflectSeen {
-			if d.reflectMethod || (callSym != nil && callSym.Attr.Reachable()) || (methSym != nil && methSym.Attr.Reachable()) {
-				// Methods might be called via reflection. Give up on
-				// static analysis, mark all exported methods of
-				// all reachable types as reachable.
-				reflectSeen = true
-			}
-		}
-
-		// Mark all methods that could satisfy a discovered
-		// interface as reachable. We recheck old marked interfaces
-		// as new types (with new methods) may have been discovered
-		// in the last pass.
-		var rem []methodref
-		for _, m := range d.markableMethods {
-			if (reflectSeen && m.isExported()) || d.ifaceMethod[m.m] {
-				d.markMethod(m)
-			} else {
-				rem = append(rem, m)
-			}
-		}
-		d.markableMethods = rem
-
-		if len(d.markQueue) == 0 {
-			// No new work was discovered. Done.
-			break
-		}
-		d.flood()
-	}
-
-	// Remove all remaining unreached R_METHODOFF relocations.
-	for _, m := range d.markableMethods {
-		for _, r := range m.r {
-			d.cleanupReloc(r)
-		}
-	}
-
-	if Buildmode != BuildmodeShared {
-		// Keep an itablink if the symbol it points at is being kept.
-		// (When BuildmodeShared, always keep itablinks.)
-		for _, s := range ctxt.Syms.Allsym {
-			if strings.HasPrefix(s.Name, "go.itablink.") {
-				s.Attr.Set(AttrReachable, len(s.R) == 1 && s.R[0].Sym.Attr.Reachable())
-			}
-		}
-	}
-
-	// Remove dead text but keep file information (z symbols).
-	textp := make([]*Symbol, 0, len(ctxt.Textp))
-	for _, s := range ctxt.Textp {
-		if s.Attr.Reachable() {
-			textp = append(textp, s)
-		}
-	}
-	ctxt.Textp = textp
-}
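
The flood fill described in the comments above is, at its core, a breadth-first reachability walk over symbols and their relocations. A toy sketch of that core loop, with simplified stand-ins for Symbol and Reloc rather than the linker's real types:

package main

import "fmt"

// sym is a toy stand-in for a linker symbol with outgoing relocations.
type sym struct {
	name      string
	relocs    []*sym
	reachable bool
}

// flood marks everything reachable from the roots via relocations.
func flood(roots []*sym) {
	queue := append([]*sym(nil), roots...)
	for _, r := range roots {
		r.reachable = true
	}
	for len(queue) > 0 {
		s := queue[0]
		queue = queue[1:]
		for _, t := range s.relocs {
			if !t.reachable {
				t.reachable = true
				queue = append(queue, t)
			}
		}
	}
}

func main() {
	a := &sym{name: "main.main"}
	b := &sym{name: "fmt.Println"}
	c := &sym{name: "unused.Helper"}
	a.relocs = []*sym{b}
	flood([]*sym{a})
	for _, s := range []*sym{a, b, c} {
		fmt.Println(s.name, "reachable:", s.reachable)
	}
}
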
-
-var markextra = []string{
-	"runtime.morestack",
-	"runtime.morestackx",
-	"runtime.morestack00",
-	"runtime.morestack10",
-	"runtime.morestack01",
-	"runtime.morestack11",
-	"runtime.morestack8",
-	"runtime.morestack16",
-	"runtime.morestack24",
-	"runtime.morestack32",
-	"runtime.morestack40",
-	"runtime.morestack48",
-
-	// on arm, lock in the div/mod helpers too
-	"_div",
-	"_divu",
-	"_mod",
-	"_modu",
-}
-
-// methodref holds the relocations from a receiver type symbol to its
-// method. There are three relocations, one for each of the fields in
-// the reflect.method struct: mtyp, ifn, and tfn.
-type methodref struct {
-	m   methodsig
-	src *Symbol   // receiver type symbol
-	r   [3]*Reloc // R_METHODOFF relocations to fields of runtime.method
-}
-
-func (m methodref) ifn() *Symbol { return m.r[1].Sym }
-
-func (m methodref) isExported() bool {
-	for _, r := range m.m {
-		return unicode.IsUpper(r)
-	}
-	panic("methodref has no signature")
-}
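
isExported above inspects only the first rune of the method signature. The same check, written as a standalone sketch over a plain string:

package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// isExported reports whether a method signature starts with an
// upper-case rune, the same test methodref.isExported applies.
func isExported(sig string) bool {
	r, _ := utf8.DecodeRuneInString(sig)
	return unicode.IsUpper(r)
}

func main() {
	fmt.Println(isExported("Visit(ast.Node) (ast.Visitor)")) // true
	fmt.Println(isExported("visit(ast.Node) (ast.Visitor)")) // false
}
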
-
-// deadcodepass holds state for the deadcode flood fill.
-type deadcodepass struct {
-	ctxt            *Link
-	markQueue       []*Symbol          // symbols to flood fill in next pass
-	ifaceMethod     map[methodsig]bool // methods declared in reached interfaces
-	markableMethods []methodref        // methods of reached types
-	reflectMethod   bool
-}
-
-func (d *deadcodepass) cleanupReloc(r *Reloc) {
-	if r.Sym.Attr.Reachable() {
-		r.Type = obj.R_ADDROFF
-	} else {
-		if d.ctxt.Debugvlog > 1 {
-			d.ctxt.Logf("removing method %s\n", r.Sym.Name)
-		}
-		r.Sym = nil
-		r.Siz = 0
-	}
-}
-
-// mark appends a symbol to the mark queue for flood filling.
-func (d *deadcodepass) mark(s, parent *Symbol) {
-	if s == nil || s.Attr.Reachable() {
-		return
-	}
-	if s.Attr.ReflectMethod() {
-		d.reflectMethod = true
-	}
-	if *flagDumpDep {
-		p := "_"
-		if parent != nil {
-			p = parent.Name
-		}
-		fmt.Printf("%s -> %s\n", p, s.Name)
-	}
-	s.Attr |= AttrReachable
-	s.Reachparent = parent
-	d.markQueue = append(d.markQueue, s)
-}
-
-// markMethod marks a method as reachable.
-func (d *deadcodepass) markMethod(m methodref) {
-	for _, r := range m.r {
-		d.mark(r.Sym, m.src)
-		r.Type = obj.R_ADDROFF
-	}
-}
-
-// init marks all initial symbols as reachable.
-// In a typical binary, this is *flagEntrySymbol.
-func (d *deadcodepass) init() {
-	var names []string
-
-	if SysArch.Family == sys.ARM {
-		// mark some functions that are only referenced after linker code editing
-		if obj.GOARM == 5 {
-			names = append(names, "_sfloat")
-		}
-		names = append(names, "runtime.read_tls_fallback")
-	}
-
-	if Buildmode == BuildmodeShared {
-		// Mark all symbols defined in this library as reachable when
-		// building a shared library.
-		for _, s := range d.ctxt.Syms.Allsym {
-			if s.Type != 0 && s.Type != obj.SDYNIMPORT {
-				d.mark(s, nil)
-			}
-		}
-	} else {
-		// In a normal binary, start at main.main and the init
-		// functions and mark what is reachable from there.
-		names = append(names, *flagEntrySymbol)
-		if *FlagLinkshared && (Buildmode == BuildmodeExe || Buildmode == BuildmodePIE) {
-			names = append(names, "main.main", "main.init")
-		} else if Buildmode == BuildmodePlugin {
-			names = append(names, *flagPluginPath+".init", *flagPluginPath+".main", "go.plugin.tabs")
-
-			// We don't keep the go.plugin.exports symbol,
-			// but we do keep the symbols it refers to.
-			exports := d.ctxt.Syms.ROLookup("go.plugin.exports", 0)
-			if exports != nil {
-				for _, r := range exports.R {
-					d.mark(r.Sym, nil)
-				}
-			}
-		}
-		for _, name := range markextra {
-			names = append(names, name)
-		}
-		for _, s := range dynexp {
-			d.mark(s, nil)
-		}
-	}
-
-	for _, name := range names {
-		d.mark(d.ctxt.Syms.ROLookup(name, 0), nil)
-	}
-}
-
-// flood flood fills symbols reachable from the markQueue symbols.
-// As it goes, it collects methodref and interface method declarations.
-func (d *deadcodepass) flood() {
-	for len(d.markQueue) > 0 {
-		s := d.markQueue[0]
-		d.markQueue = d.markQueue[1:]
-		if s.Type == obj.STEXT {
-			if d.ctxt.Debugvlog > 1 {
-				d.ctxt.Logf("marktext %s\n", s.Name)
-			}
-			if s.FuncInfo != nil {
-				for _, a := range s.FuncInfo.Autom {
-					d.mark(a.Gotype, s)
-				}
-			}
-
-		}
-
-		if strings.HasPrefix(s.Name, "type.") && s.Name[5] != '.' {
-			if len(s.P) == 0 {
-				// Probably a bug. The undefined symbol check
-				// later will give a better error than deadcode.
-				continue
-			}
-			if decodetypeKind(s)&kindMask == kindInterface {
-				for _, sig := range decodeIfaceMethods(d.ctxt.Arch, s) {
-					if d.ctxt.Debugvlog > 1 {
-						d.ctxt.Logf("reached iface method: %s\n", sig)
-					}
-					d.ifaceMethod[sig] = true
-				}
-			}
-		}
-
-		mpos := 0 // 0-3, the R_METHODOFF relocs of runtime.uncommontype
-		var methods []methodref
-		for i := 0; i < len(s.R); i++ {
-			r := &s.R[i]
-			if r.Sym == nil {
-				continue
-			}
-			if r.Type == obj.R_WEAKADDROFF {
-				// An R_WEAKADDROFF relocation is not reason
-				// enough to mark the pointed-to symbol as
-				// reachable.
-				continue
-			}
-			if r.Type != obj.R_METHODOFF {
-				d.mark(r.Sym, s)
-				continue
-			}
-			// Collect rtype pointers to methods for
-			// later processing in deadcode.
-			if mpos == 0 {
-				m := methodref{src: s}
-				m.r[0] = r
-				methods = append(methods, m)
-			} else {
-				methods[len(methods)-1].r[mpos] = r
-			}
-			mpos++
-			if mpos == len(methodref{}.r) {
-				mpos = 0
-			}
-		}
-		if len(methods) > 0 {
-			// Decode runtime type information for type methods
-			// to help work out which methods can be called
-			// dynamically via interfaces.
-			methodsigs := decodetypeMethods(d.ctxt.Arch, s)
-			if len(methods) != len(methodsigs) {
-				panic(fmt.Sprintf("%q has %d method relocations for %d methods", s.Name, len(methods), len(methodsigs)))
-			}
-			for i, m := range methodsigs {
-				name := string(m)
-				name = name[:strings.Index(name, "(")]
-				if !strings.HasSuffix(methods[i].ifn().Name, name) {
-					panic(fmt.Sprintf("%q relocation for %q does not match method %q", s.Name, methods[i].ifn().Name, name))
-				}
-				methods[i].m = m
-			}
-			d.markableMethods = append(d.markableMethods, methods...)
-		}
-
-		if s.FuncInfo != nil {
-			for i := range s.FuncInfo.Funcdata {
-				d.mark(s.FuncInfo.Funcdata[i], s)
-			}
-		}
-		d.mark(s.Gotype, s)
-		d.mark(s.Sub, s)
-		d.mark(s.Outer, s)
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/decodesym.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/decodesym.go
deleted file mode 100644
index a18fccd..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/decodesym.go
+++ /dev/null
@@ -1,372 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/decodesym.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/decodesym.go:1
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bytes"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"debug/elf"
-	"fmt"
-)
-
-// Decoding the type.* symbols. This has to be in sync with
-// ../../runtime/type.go, or more specifically, with what
-// ../gc/reflect.c stuffs in these.
-
-// tflag is documented in reflect/type.go.
-//
-// tflag values must be kept in sync with copies in:
-//	cmd/compile/internal/gc/reflect.go
-//	cmd/link/internal/ld/decodesym.go
-//	reflect/type.go
-//	runtime/type.go
-const (
-	tflagUncommon  = 1 << 0
-	tflagExtraStar = 1 << 1
-)
-
-func decodeReloc(s *Symbol, off int32) *Reloc {
-	for i := range s.R {
-		if s.R[i].Off == off {
-			return &s.R[i]
-		}
-	}
-	return nil
-}
-
-func decodeRelocSym(s *Symbol, off int32) *Symbol {
-	r := decodeReloc(s, off)
-	if r == nil {
-		return nil
-	}
-	return r.Sym
-}
-
-func decodeInuxi(arch *sys.Arch, p []byte, sz int) uint64 {
-	switch sz {
-	case 2:
-		return uint64(arch.ByteOrder.Uint16(p))
-	case 4:
-		return uint64(arch.ByteOrder.Uint32(p))
-	case 8:
-		return arch.ByteOrder.Uint64(p)
-	default:
-		Exitf("dwarf: decode inuxi %d", sz)
-		panic("unreachable")
-	}
-}
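
decodeInuxi dispatches on operand size and the target's byte order. For reference, a self-contained equivalent built directly on encoding/binary (illustrative only):

package main

import (
	"encoding/binary"
	"fmt"
)

// decode reads a 2-, 4- or 8-byte unsigned integer from p in the given byte order.
func decode(order binary.ByteOrder, p []byte, sz int) uint64 {
	switch sz {
	case 2:
		return uint64(order.Uint16(p))
	case 4:
		return uint64(order.Uint32(p))
	case 8:
		return order.Uint64(p)
	}
	panic("unsupported size")
}

func main() {
	p := []byte{0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0}
	fmt.Printf("%#x\n", decode(binary.LittleEndian, p, 4)) // 0x12345678
	fmt.Printf("%#x\n", decode(binary.BigEndian, p, 2))    // 0x7856
}
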
-
-func commonsize() int      { return 4*SysArch.PtrSize + 8 + 8 } // runtime._type
-func structfieldSize() int { return 3 * SysArch.PtrSize }       // runtime.structfield
-func uncommonSize() int    { return 4 + 2 + 2 + 4 + 4 }         // runtime.uncommontype
-
-// Type.commonType.kind
-func decodetypeKind(s *Symbol) uint8 {
-	return s.P[2*SysArch.PtrSize+7] & obj.KindMask //  0x13 / 0x1f
-}
-
-// Type.commonType.kind
-func decodetypeUsegcprog(s *Symbol) uint8 {
-	return s.P[2*SysArch.PtrSize+7] & obj.KindGCProg //  0x13 / 0x1f
-}
-
-// Type.commonType.size
-func decodetypeSize(arch *sys.Arch, s *Symbol) int64 {
-	return int64(decodeInuxi(arch, s.P, SysArch.PtrSize)) // 0x8 / 0x10
-}
-
-// Type.commonType.ptrdata
-func decodetypePtrdata(arch *sys.Arch, s *Symbol) int64 {
-	return int64(decodeInuxi(arch, s.P[SysArch.PtrSize:], SysArch.PtrSize)) // 0x8 / 0x10
-}
-
-// Type.commonType.tflag
-func decodetypeHasUncommon(s *Symbol) bool {
-	return s.P[2*SysArch.PtrSize+4]&tflagUncommon != 0
-}
-
-// Find the elf.Section of a given shared library that contains a given address.
-func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section {
-	for _, shlib := range ctxt.Shlibs {
-		if shlib.Path == path {
-			for _, sect := range shlib.File.Sections {
-				if sect.Addr <= addr && addr <= sect.Addr+sect.Size {
-					return sect
-				}
-			}
-		}
-	}
-	return nil
-}
-
-// Type.commonType.gc
-func decodetypeGcprog(ctxt *Link, s *Symbol) []byte {
-	if s.Type == obj.SDYNIMPORT {
-		addr := decodetypeGcprogShlib(ctxt, s)
-		sect := findShlibSection(ctxt, s.File, addr)
-		if sect != nil {
-			// A gcprog is a 4-byte uint32 indicating length, followed by
-			// the actual program.
-			progsize := make([]byte, 4)
-			sect.ReadAt(progsize, int64(addr-sect.Addr))
-			progbytes := make([]byte, ctxt.Arch.ByteOrder.Uint32(progsize))
-			sect.ReadAt(progbytes, int64(addr-sect.Addr+4))
-			return append(progsize, progbytes...)
-		}
-		Exitf("cannot find gcprog for %s", s.Name)
-		return nil
-	}
-	return decodeRelocSym(s, 2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize)).P
-}
-
-func decodetypeGcprogShlib(ctxt *Link, s *Symbol) uint64 {
-	if SysArch.Family == sys.ARM64 {
-		for _, shlib := range ctxt.Shlibs {
-			if shlib.Path == s.File {
-				return shlib.gcdataAddresses[s]
-			}
-		}
-		return 0
-	}
-	return decodeInuxi(ctxt.Arch, s.P[2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize):], SysArch.PtrSize)
-}
-
-func decodetypeGcmask(ctxt *Link, s *Symbol) []byte {
-	if s.Type == obj.SDYNIMPORT {
-		addr := decodetypeGcprogShlib(ctxt, s)
-		ptrdata := decodetypePtrdata(ctxt.Arch, s)
-		sect := findShlibSection(ctxt, s.File, addr)
-		if sect != nil {
-			r := make([]byte, ptrdata/int64(SysArch.PtrSize))
-			sect.ReadAt(r, int64(addr-sect.Addr))
-			return r
-		}
-		Exitf("cannot find gcmask for %s", s.Name)
-		return nil
-	}
-	mask := decodeRelocSym(s, 2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize))
-	return mask.P
-}
-
-// Type.ArrayType.elem and Type.SliceType.Elem
-func decodetypeArrayElem(s *Symbol) *Symbol {
-	return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30
-}
-
-func decodetypeArrayLen(arch *sys.Arch, s *Symbol) int64 {
-	return int64(decodeInuxi(arch, s.P[commonsize()+2*SysArch.PtrSize:], SysArch.PtrSize))
-}
-
-// Type.PtrType.elem
-func decodetypePtrElem(s *Symbol) *Symbol {
-	return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30
-}
-
-// Type.MapType.key, elem
-func decodetypeMapKey(s *Symbol) *Symbol {
-	return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30
-}
-
-func decodetypeMapValue(s *Symbol) *Symbol {
-	return decodeRelocSym(s, int32(commonsize())+int32(SysArch.PtrSize)) // 0x20 / 0x38
-}
-
-// Type.ChanType.elem
-func decodetypeChanElem(s *Symbol) *Symbol {
-	return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30
-}
-
-// Type.FuncType.dotdotdot
-func decodetypeFuncDotdotdot(arch *sys.Arch, s *Symbol) bool {
-	return uint16(decodeInuxi(arch, s.P[commonsize()+2:], 2))&(1<<15) != 0
-}
-
-// Type.FuncType.inCount
-func decodetypeFuncInCount(arch *sys.Arch, s *Symbol) int {
-	return int(decodeInuxi(arch, s.P[commonsize():], 2))
-}
-
-func decodetypeFuncOutCount(arch *sys.Arch, s *Symbol) int {
-	return int(uint16(decodeInuxi(arch, s.P[commonsize()+2:], 2)) & (1<<15 - 1))
-}
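
The two helpers above assume a funcType layout in which bit 15 of the out-count word is the variadic ("dotdotdot") flag and the low 15 bits hold the result count. A small sketch of unpacking that word:

package main

import "fmt"

// unpack splits the 16-bit out-count word used above: bit 15 is the
// variadic flag, the low 15 bits are the result count.
func unpack(w uint16) (outCount int, variadic bool) {
	return int(w & (1<<15 - 1)), w&(1<<15) != 0
}

func main() {
	w := uint16(1<<15 | 2) // variadic, two results
	n, v := unpack(w)
	fmt.Println("outCount:", n, "variadic:", v)
}
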
-
-func decodetypeFuncInType(s *Symbol, i int) *Symbol {
-	uadd := commonsize() + 4
-	if SysArch.PtrSize == 8 {
-		uadd += 4
-	}
-	if decodetypeHasUncommon(s) {
-		uadd += uncommonSize()
-	}
-	return decodeRelocSym(s, int32(uadd+i*SysArch.PtrSize))
-}
-
-func decodetypeFuncOutType(arch *sys.Arch, s *Symbol, i int) *Symbol {
-	return decodetypeFuncInType(s, i+decodetypeFuncInCount(arch, s))
-}
-
-// Type.StructType.fields.Slice::length
-func decodetypeStructFieldCount(arch *sys.Arch, s *Symbol) int {
-	return int(decodeInuxi(arch, s.P[commonsize()+2*SysArch.PtrSize:], SysArch.IntSize))
-}
-
-func decodetypeStructFieldArrayOff(s *Symbol, i int) int {
-	off := commonsize() + 2*SysArch.PtrSize + 2*SysArch.IntSize
-	if decodetypeHasUncommon(s) {
-		off += uncommonSize()
-	}
-	off += i * structfieldSize()
-	return off
-}
-
-// decodetypeStr returns the contents of an rtype's str field (a nameOff).
-func decodetypeStr(s *Symbol) string {
-	str := decodetypeName(s, 4*SysArch.PtrSize+8)
-	if s.P[2*SysArch.PtrSize+4]&tflagExtraStar != 0 {
-		return str[1:]
-	}
-	return str
-}
-
-// decodetypeName decodes the name from a reflect.name.
-func decodetypeName(s *Symbol, off int) string {
-	r := decodeReloc(s, int32(off))
-	if r == nil {
-		return ""
-	}
-
-	data := r.Sym.P
-	namelen := int(uint16(data[1])<<8 | uint16(data[2]))
-	return string(data[3 : 3+namelen])
-}
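
decodetypeName reads the reflect name encoding as laid out above: a flag byte, a two-byte length, then the name bytes. A standalone sketch of just that decoding step, with the layout taken from the code above rather than from any exported API:

package main

import "fmt"

// decodeName extracts the string from the name encoding read above:
// data[0] is a flag byte, data[1:3] is a big-endian length, data[3:] the bytes.
func decodeName(data []byte) string {
	namelen := int(uint16(data[1])<<8 | uint16(data[2]))
	return string(data[3 : 3+namelen])
}

func main() {
	data := append([]byte{0, 0, 5}, []byte("Hello")...)
	fmt.Println(decodeName(data)) // Hello
}
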
-
-func decodetypeStructFieldName(s *Symbol, i int) string {
-	off := decodetypeStructFieldArrayOff(s, i)
-	return decodetypeName(s, off)
-}
-
-func decodetypeStructFieldType(s *Symbol, i int) *Symbol {
-	off := decodetypeStructFieldArrayOff(s, i)
-	return decodeRelocSym(s, int32(off+SysArch.PtrSize))
-}
-
-func decodetypeStructFieldOffs(arch *sys.Arch, s *Symbol, i int) int64 {
-	off := decodetypeStructFieldArrayOff(s, i)
-	return int64(decodeInuxi(arch, s.P[off+2*SysArch.PtrSize:], SysArch.IntSize))
-}
-
-// InterfaceType.methods.length
-func decodetypeIfaceMethodCount(arch *sys.Arch, s *Symbol) int64 {
-	return int64(decodeInuxi(arch, s.P[commonsize()+2*SysArch.PtrSize:], SysArch.IntSize))
-}
-
-// methodsig is a fully qualified typed method signature, like
-// "Visit(type.go/ast.Node) (type.go/ast.Visitor)".
-type methodsig string
-
-// Matches runtime/typekind.go and reflect.Kind.
-const (
-	kindArray     = 17
-	kindChan      = 18
-	kindFunc      = 19
-	kindInterface = 20
-	kindMap       = 21
-	kindPtr       = 22
-	kindSlice     = 23
-	kindStruct    = 25
-	kindMask      = (1 << 5) - 1
-)
-
-// decodeMethodSig decodes an array of method signature information.
-// Each element of the array is size bytes. The first 4 bytes is a
-// nameOff for the method name, and the next 4 bytes is a typeOff for
-// the function type.
-//
-// Conveniently this is the layout of both runtime.method and runtime.imethod.
-func decodeMethodSig(arch *sys.Arch, s *Symbol, off, size, count int) []methodsig {
-	var buf bytes.Buffer
-	var methods []methodsig
-	for i := 0; i < count; i++ {
-		buf.WriteString(decodetypeName(s, off))
-		mtypSym := decodeRelocSym(s, int32(off+4))
-
-		buf.WriteRune('(')
-		inCount := decodetypeFuncInCount(arch, mtypSym)
-		for i := 0; i < inCount; i++ {
-			if i > 0 {
-				buf.WriteString(", ")
-			}
-			buf.WriteString(decodetypeFuncInType(mtypSym, i).Name)
-		}
-		buf.WriteString(") (")
-		outCount := decodetypeFuncOutCount(arch, mtypSym)
-		for i := 0; i < outCount; i++ {
-			if i > 0 {
-				buf.WriteString(", ")
-			}
-			buf.WriteString(decodetypeFuncOutType(arch, mtypSym, i).Name)
-		}
-		buf.WriteRune(')')
-
-		off += size
-		methods = append(methods, methodsig(buf.String()))
-		buf.Reset()
-	}
-	return methods
-}
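
decodeMethodSig renders each method as Name(inputs) (outputs). The string-building part, separated from the symbol decoding, looks roughly like this (types replaced by plain strings for illustration):

package main

import (
	"bytes"
	"fmt"
)

// formatSig builds the "Name(in, ...) (out, ...)" form used for methodsig values.
func formatSig(name string, in, out []string) string {
	var buf bytes.Buffer
	buf.WriteString(name)
	buf.WriteRune('(')
	for i, t := range in {
		if i > 0 {
			buf.WriteString(", ")
		}
		buf.WriteString(t)
	}
	buf.WriteString(") (")
	for i, t := range out {
		if i > 0 {
			buf.WriteString(", ")
		}
		buf.WriteString(t)
	}
	buf.WriteRune(')')
	return buf.String()
}

func main() {
	fmt.Println(formatSig("Visit", []string{"type.go/ast.Node"}, []string{"type.go/ast.Visitor"}))
}
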
-
-func decodeIfaceMethods(arch *sys.Arch, s *Symbol) []methodsig {
-	if decodetypeKind(s)&kindMask != kindInterface {
-		panic(fmt.Sprintf("symbol %q is not an interface", s.Name))
-	}
-	r := decodeReloc(s, int32(commonsize()+SysArch.PtrSize))
-	if r == nil {
-		return nil
-	}
-	if r.Sym != s {
-		panic(fmt.Sprintf("imethod slice pointer in %q leads to a different symbol", s.Name))
-	}
-	off := int(r.Add) // array of reflect.imethod values
-	numMethods := int(decodetypeIfaceMethodCount(arch, s))
-	sizeofIMethod := 4 + 4
-	return decodeMethodSig(arch, s, off, sizeofIMethod, numMethods)
-}
-
-func decodetypeMethods(arch *sys.Arch, s *Symbol) []methodsig {
-	if !decodetypeHasUncommon(s) {
-		panic(fmt.Sprintf("no methods on %q", s.Name))
-	}
-	off := commonsize() // reflect.rtype
-	switch decodetypeKind(s) & kindMask {
-	case kindStruct: // reflect.structType
-		off += 2*SysArch.PtrSize + 2*SysArch.IntSize
-	case kindPtr: // reflect.ptrType
-		off += SysArch.PtrSize
-	case kindFunc: // reflect.funcType
-		off += SysArch.PtrSize // 4 bytes, pointer aligned
-	case kindSlice: // reflect.sliceType
-		off += SysArch.PtrSize
-	case kindArray: // reflect.arrayType
-		off += 3 * SysArch.PtrSize
-	case kindChan: // reflect.chanType
-		off += 2 * SysArch.PtrSize
-	case kindMap: // reflect.mapType
-		off += 4*SysArch.PtrSize + 8
-	case kindInterface: // reflect.interfaceType
-		off += SysArch.PtrSize + 2*SysArch.IntSize
-	default:
-		// just Sizeof(rtype)
-	}
-
-	mcount := int(decodeInuxi(arch, s.P[off+4:], 2))
-	moff := int(decodeInuxi(arch, s.P[off+4+2+2:], 4))
-	off += moff                // offset to array of reflect.method values
-	const sizeofMethod = 4 * 4 // sizeof reflect.method in program
-	return decodeMethodSig(arch, s, off, sizeofMethod, mcount)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/dwarf.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/dwarf.go
deleted file mode 100644
index 4d6397f..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/dwarf.go
+++ /dev/null
@@ -1,1617 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/dwarf.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/dwarf.go:1
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// TODO/NICETOHAVE:
-//   - eliminate DW_CLS_ if not used
-//   - package info in compilation units
-//   - assign global variables and types to their packages
-//   - gdb uses c syntax, meaning clumsy quoting is needed for go identifiers. eg
-//     ptype struct '[]uint8' and qualifiers need to be quoted away
-//   - lexical scoping is lost, so gdb gets confused as to which 'main.i' you mean.
-//   - file:line info for variables
-//   - make strings a typedef so prettyprinters can see the underlying string type
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/dwarf"
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"log"
-	"os"
-	"strings"
-)
-
-type dwctxt struct {
-	linkctxt *Link
-}
-
-func (c dwctxt) PtrSize() int {
-	return SysArch.PtrSize
-}
-func (c dwctxt) AddInt(s dwarf.Sym, size int, i int64) {
-	ls := s.(*Symbol)
-	adduintxx(c.linkctxt, ls, uint64(i), size)
-}
-func (c dwctxt) AddBytes(s dwarf.Sym, b []byte) {
-	ls := s.(*Symbol)
-	Addbytes(ls, b)
-}
-func (c dwctxt) AddString(s dwarf.Sym, v string) {
-	Addstring(s.(*Symbol), v)
-}
-func (c dwctxt) SymValue(s dwarf.Sym) int64 {
-	return s.(*Symbol).Value
-}
-
-func (c dwctxt) AddAddress(s dwarf.Sym, data interface{}, value int64) {
-	if value != 0 {
-		value -= (data.(*Symbol)).Value
-	}
-	Addaddrplus(c.linkctxt, s.(*Symbol), data.(*Symbol), value)
-}
-
-func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) {
-	ls := s.(*Symbol)
-	switch size {
-	default:
-		Errorf(ls, "invalid size %d in adddwarfref\n", size)
-		fallthrough
-	case SysArch.PtrSize:
-		Addaddr(c.linkctxt, ls, t.(*Symbol))
-	case 4:
-		addaddrplus4(c.linkctxt, ls, t.(*Symbol), 0)
-	}
-	r := &ls.R[len(ls.R)-1]
-	r.Type = obj.R_DWARFREF
-	r.Add = ofs
-}
-
-/*
- * Offsets and sizes of the debug_* sections in the cout file.
- */
-var abbrevsym *Symbol
-var arangessec *Symbol
-var framesec *Symbol
-var infosec *Symbol
-var linesec *Symbol
-
-var gdbscript string
-
-var dwarfp []*Symbol
-
-func writeabbrev(ctxt *Link, syms []*Symbol) []*Symbol {
-	s := ctxt.Syms.Lookup(".debug_abbrev", 0)
-	s.Type = obj.SDWARFSECT
-	abbrevsym = s
-	Addbytes(s, dwarf.GetAbbrev())
-	return append(syms, s)
-}
-
-/*
- * Root DIEs for compilation units, types and global variables.
- */
-var dwroot dwarf.DWDie
-
-var dwtypes dwarf.DWDie
-
-var dwglobals dwarf.DWDie
-
-func newattr(die *dwarf.DWDie, attr uint16, cls int, value int64, data interface{}) *dwarf.DWAttr {
-	a := new(dwarf.DWAttr)
-	a.Link = die.Attr
-	die.Attr = a
-	a.Atr = attr
-	a.Cls = uint8(cls)
-	a.Value = value
-	a.Data = data
-	return a
-}
-
-// Each DIE (except the root ones) has at least 1 attribute: its
-// name. getattr moves the desired one to the front so
-// frequently searched ones are found faster.
-func getattr(die *dwarf.DWDie, attr uint16) *dwarf.DWAttr {
-	if die.Attr.Atr == attr {
-		return die.Attr
-	}
-
-	a := die.Attr
-	b := a.Link
-	for b != nil {
-		if b.Atr == attr {
-			a.Link = b.Link
-			b.Link = die.Attr
-			die.Attr = b
-			return b
-		}
-
-		a = b
-		b = b.Link
-	}
-
-	return nil
-}
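
getattr is a move-to-front search over the singly linked attribute list, which is why frequently requested attributes become cheaper to find over time. The same pattern on a simplified node type, as an illustrative sketch:

package main

import "fmt"

// node is a simplified singly linked list cell keyed by an attribute id.
type node struct {
	key  uint16
	next *node
}

// get finds key in the list headed at *head and moves the hit to the front,
// so frequently requested keys are found faster on later lookups.
func get(head **node, key uint16) *node {
	if *head == nil {
		return nil
	}
	if (*head).key == key {
		return *head
	}
	prev := *head
	for cur := prev.next; cur != nil; prev, cur = cur, cur.next {
		if cur.key == key {
			prev.next = cur.next
			cur.next = *head
			*head = cur
			return cur
		}
	}
	return nil
}

func main() {
	list := &node{key: 1, next: &node{key: 2, next: &node{key: 3}}}
	get(&list, 3)
	for n := list; n != nil; n = n.next {
		fmt.Print(n.key, " ") // 3 1 2
	}
	fmt.Println()
}
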
-
-// Every DIE has at least an AT_name attribute (but it will only be
-// written out if it is listed in the abbrev).
-func newdie(ctxt *Link, parent *dwarf.DWDie, abbrev int, name string, version int) *dwarf.DWDie {
-	die := new(dwarf.DWDie)
-	die.Abbrev = abbrev
-	die.Link = parent.Child
-	parent.Child = die
-
-	newattr(die, dwarf.DW_AT_name, dwarf.DW_CLS_STRING, int64(len(name)), name)
-
-	if name != "" && (abbrev <= dwarf.DW_ABRV_VARIABLE || abbrev >= dwarf.DW_ABRV_NULLTYPE) {
-		if abbrev != dwarf.DW_ABRV_VARIABLE || version == 0 {
-			sym := ctxt.Syms.Lookup(dwarf.InfoPrefix+name, version)
-			sym.Attr |= AttrHidden
-			sym.Type = obj.SDWARFINFO
-			die.Sym = sym
-		}
-	}
-
-	return die
-}
-
-func walktypedef(die *dwarf.DWDie) *dwarf.DWDie {
-	if die == nil {
-		return nil
-	}
-	// Resolve typedef if present.
-	if die.Abbrev == dwarf.DW_ABRV_TYPEDECL {
-		for attr := die.Attr; attr != nil; attr = attr.Link {
-			if attr.Atr == dwarf.DW_AT_type && attr.Cls == dwarf.DW_CLS_REFERENCE && attr.Data != nil {
-				return attr.Data.(*dwarf.DWDie)
-			}
-		}
-	}
-
-	return die
-}
-
-func walksymtypedef(ctxt *Link, s *Symbol) *Symbol {
-	if t := ctxt.Syms.ROLookup(s.Name+"..def", int(s.Version)); t != nil {
-		return t
-	}
-	return s
-}
-
-// Find child by AT_name using hashtable if available or linear scan
-// if not.
-func findchild(die *dwarf.DWDie, name string) *dwarf.DWDie {
-	var prev *dwarf.DWDie
-	for ; die != prev; prev, die = die, walktypedef(die) {
-		for a := die.Child; a != nil; a = a.Link {
-			if name == getattr(a, dwarf.DW_AT_name).Data {
-				return a
-			}
-		}
-		continue
-	}
-	return nil
-}
-
-// Used to avoid string allocation when looking up dwarf symbols
-var prefixBuf = []byte(dwarf.InfoPrefix)
-
-func find(ctxt *Link, name string) *Symbol {
-	n := append(prefixBuf, name...)
-	// The string allocation below is optimized away because it is only used in a map lookup.
-	s := ctxt.Syms.ROLookup(string(n), 0)
-	prefixBuf = n[:len(dwarf.InfoPrefix)]
-	if s != nil && s.Type == obj.SDWARFINFO {
-		return s
-	}
-	return nil
-}
-
-func mustFind(ctxt *Link, name string) *Symbol {
-	r := find(ctxt, name)
-	if r == nil {
-		Exitf("dwarf find: cannot find %s", name)
-	}
-	return r
-}
-
-func adddwarfref(ctxt *Link, s *Symbol, t *Symbol, size int) int64 {
-	var result int64
-	switch size {
-	default:
-		Errorf(s, "invalid size %d in adddwarfref\n", size)
-		fallthrough
-	case SysArch.PtrSize:
-		result = Addaddr(ctxt, s, t)
-	case 4:
-		result = addaddrplus4(ctxt, s, t, 0)
-	}
-	r := &s.R[len(s.R)-1]
-	r.Type = obj.R_DWARFREF
-	return result
-}
-
-func newrefattr(die *dwarf.DWDie, attr uint16, ref *Symbol) *dwarf.DWAttr {
-	if ref == nil {
-		return nil
-	}
-	return newattr(die, attr, dwarf.DW_CLS_REFERENCE, 0, ref)
-}
-
-func putdies(linkctxt *Link, ctxt dwarf.Context, syms []*Symbol, die *dwarf.DWDie) []*Symbol {
-	for ; die != nil; die = die.Link {
-		syms = putdie(linkctxt, ctxt, syms, die)
-	}
-	Adduint8(linkctxt, syms[len(syms)-1], 0)
-
-	return syms
-}
-
-func dtolsym(s dwarf.Sym) *Symbol {
-	if s == nil {
-		return nil
-	}
-	return s.(*Symbol)
-}
-
-func putdie(linkctxt *Link, ctxt dwarf.Context, syms []*Symbol, die *dwarf.DWDie) []*Symbol {
-	s := dtolsym(die.Sym)
-	if s == nil {
-		s = syms[len(syms)-1]
-	} else {
-		if s.Attr.OnList() {
-			log.Fatalf("symbol %s listed multiple times", s.Name)
-		}
-		s.Attr |= AttrOnList
-		syms = append(syms, s)
-	}
-	dwarf.Uleb128put(ctxt, s, int64(die.Abbrev))
-	dwarf.PutAttrs(ctxt, s, die.Abbrev, die.Attr)
-	if dwarf.HasChildren(die) {
-		return putdies(linkctxt, ctxt, syms, die.Child)
-	}
-	return syms
-}
-
-func reverselist(list **dwarf.DWDie) {
-	curr := *list
-	var prev *dwarf.DWDie
-	for curr != nil {
-		var next *dwarf.DWDie = curr.Link
-		curr.Link = prev
-		prev = curr
-		curr = next
-	}
-
-	*list = prev
-}
-
-func reversetree(list **dwarf.DWDie) {
-	reverselist(list)
-	for die := *list; die != nil; die = die.Link {
-		if dwarf.HasChildren(die) {
-			reversetree(&die.Child)
-		}
-	}
-}
-
-func newmemberoffsetattr(die *dwarf.DWDie, offs int32) {
-	var block [20]byte
-	b := append(block[:0], dwarf.DW_OP_plus_uconst)
-	b = dwarf.AppendUleb128(b, uint64(offs))
-	newattr(die, dwarf.DW_AT_data_member_location, dwarf.DW_CLS_BLOCK, int64(len(b)), b)
-}
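
newmemberoffsetattr emits DW_OP_plus_uconst followed by the offset in unsigned LEB128. A minimal ULEB128 encoder for reference; this is a sketch of the encoding itself, not the dwarf package's own AppendUleb128:

package main

import "fmt"

// appendUleb128 appends the unsigned LEB128 encoding of v to b:
// seven payload bits per byte, high bit set on all but the last byte.
func appendUleb128(b []byte, v uint64) []byte {
	for {
		c := byte(v & 0x7f)
		v >>= 7
		if v != 0 {
			c |= 0x80
		}
		b = append(b, c)
		if c&0x80 == 0 {
			return b
		}
	}
}

func main() {
	fmt.Printf("% x\n", appendUleb128(nil, 8))   // 08
	fmt.Printf("% x\n", appendUleb128(nil, 624)) // f0 04
}
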
-
-// GDB doesn't like FORM_addr for AT_location, so emit a
-// location expression that evals to a const.
-func newabslocexprattr(die *dwarf.DWDie, addr int64, sym *Symbol) {
-	newattr(die, dwarf.DW_AT_location, dwarf.DW_CLS_ADDRESS, addr, sym)
-	// below
-}
-
-// Lookup predefined types
-func lookupOrDiag(ctxt *Link, n string) *Symbol {
-	s := ctxt.Syms.ROLookup(n, 0)
-	if s == nil || s.Size == 0 {
-		Exitf("dwarf: missing type: %s", n)
-	}
-
-	return s
-}
-
-func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) {
-	// Only emit typedefs for real names.
-	if strings.HasPrefix(name, "map[") {
-		return
-	}
-	if strings.HasPrefix(name, "struct {") {
-		return
-	}
-	if strings.HasPrefix(name, "chan ") {
-		return
-	}
-	if name[0] == '[' || name[0] == '*' {
-		return
-	}
-	if def == nil {
-		Errorf(nil, "dwarf: bad def in dotypedef")
-	}
-
-	sym := ctxt.Syms.Lookup(dtolsym(def.Sym).Name+"..def", 0)
-	sym.Attr |= AttrHidden
-	sym.Type = obj.SDWARFINFO
-	def.Sym = sym
-
-	// The typedef entry must be created after the def,
-	// so that future lookups will find the typedef instead
-	// of the real definition. This hooks the typedef into any
-	// circular definition loops, so that gdb can understand them.
-	die := newdie(ctxt, parent, dwarf.DW_ABRV_TYPEDECL, name, 0)
-
-	newrefattr(die, dwarf.DW_AT_type, sym)
-}
-
-// Define gotype, for composite ones recurse into constituents.
-func defgotype(ctxt *Link, gotype *Symbol) *Symbol {
-	if gotype == nil {
-		return mustFind(ctxt, "<unspecified>")
-	}
-
-	if !strings.HasPrefix(gotype.Name, "type.") {
-		Errorf(gotype, "dwarf: type name doesn't start with \"type.\"")
-		return mustFind(ctxt, "<unspecified>")
-	}
-
-	name := gotype.Name[5:] // could also decode from Type.string
-
-	sdie := find(ctxt, name)
-
-	if sdie != nil {
-		return sdie
-	}
-
-	return newtype(ctxt, gotype).Sym.(*Symbol)
-}
-
-func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie {
-	name := gotype.Name[5:] // could also decode from Type.string
-	kind := decodetypeKind(gotype)
-	bytesize := decodetypeSize(ctxt.Arch, gotype)
-
-	var die *dwarf.DWDie
-	switch kind {
-	case obj.KindBool:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0)
-		newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_boolean, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-
-	case obj.KindInt,
-		obj.KindInt8,
-		obj.KindInt16,
-		obj.KindInt32,
-		obj.KindInt64:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0)
-		newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_signed, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-
-	case obj.KindUint,
-		obj.KindUint8,
-		obj.KindUint16,
-		obj.KindUint32,
-		obj.KindUint64,
-		obj.KindUintptr:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0)
-		newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-
-	case obj.KindFloat32,
-		obj.KindFloat64:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0)
-		newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_float, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-
-	case obj.KindComplex64,
-		obj.KindComplex128:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0)
-		newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_complex_float, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-
-	case obj.KindArray:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_ARRAYTYPE, name, 0)
-		dotypedef(ctxt, &dwtypes, name, die)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-		s := decodetypeArrayElem(gotype)
-		newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s))
-		fld := newdie(ctxt, die, dwarf.DW_ABRV_ARRAYRANGE, "range", 0)
-
-		// use actual length not upper bound; correct for 0-length arrays.
-		newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, decodetypeArrayLen(ctxt.Arch, gotype), 0)
-
-		newrefattr(fld, dwarf.DW_AT_type, mustFind(ctxt, "uintptr"))
-
-	case obj.KindChan:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_CHANTYPE, name, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-		s := decodetypeChanElem(gotype)
-		newrefattr(die, dwarf.DW_AT_go_elem, defgotype(ctxt, s))
-		// Save elem type for synthesizechantypes. We could synthesize here
-		// but that would change the order of DIEs we output.
-		newrefattr(die, dwarf.DW_AT_type, s)
-
-	case obj.KindFunc:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_FUNCTYPE, name, 0)
-		dotypedef(ctxt, &dwtypes, name, die)
-		newrefattr(die, dwarf.DW_AT_type, mustFind(ctxt, "void"))
-		nfields := decodetypeFuncInCount(ctxt.Arch, gotype)
-		var fld *dwarf.DWDie
-		var s *Symbol
-		for i := 0; i < nfields; i++ {
-			s = decodetypeFuncInType(gotype, i)
-			fld = newdie(ctxt, die, dwarf.DW_ABRV_FUNCTYPEPARAM, s.Name[5:], 0)
-			newrefattr(fld, dwarf.DW_AT_type, defgotype(ctxt, s))
-		}
-
-		if decodetypeFuncDotdotdot(ctxt.Arch, gotype) {
-			newdie(ctxt, die, dwarf.DW_ABRV_DOTDOTDOT, "...", 0)
-		}
-		nfields = decodetypeFuncOutCount(ctxt.Arch, gotype)
-		for i := 0; i < nfields; i++ {
-			s = decodetypeFuncOutType(ctxt.Arch, gotype, i)
-			fld = newdie(ctxt, die, dwarf.DW_ABRV_FUNCTYPEPARAM, s.Name[5:], 0)
-			newrefattr(fld, dwarf.DW_AT_type, defptrto(ctxt, defgotype(ctxt, s)))
-		}
-
-	case obj.KindInterface:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_IFACETYPE, name, 0)
-		dotypedef(ctxt, &dwtypes, name, die)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-		nfields := int(decodetypeIfaceMethodCount(ctxt.Arch, gotype))
-		var s *Symbol
-		if nfields == 0 {
-			s = lookupOrDiag(ctxt, "type.runtime.eface")
-		} else {
-			s = lookupOrDiag(ctxt, "type.runtime.iface")
-		}
-		newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s))
-
-	case obj.KindMap:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_MAPTYPE, name, 0)
-		s := decodetypeMapKey(gotype)
-		newrefattr(die, dwarf.DW_AT_go_key, defgotype(ctxt, s))
-		s = decodetypeMapValue(gotype)
-		newrefattr(die, dwarf.DW_AT_go_elem, defgotype(ctxt, s))
-		// Save gotype for use in synthesizemaptypes. We could synthesize here,
-		// but that would change the order of the DIEs.
-		newrefattr(die, dwarf.DW_AT_type, gotype)
-
-	case obj.KindPtr:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_PTRTYPE, name, 0)
-		dotypedef(ctxt, &dwtypes, name, die)
-		s := decodetypePtrElem(gotype)
-		newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s))
-
-	case obj.KindSlice:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_SLICETYPE, name, 0)
-		dotypedef(ctxt, &dwtypes, name, die)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-		s := decodetypeArrayElem(gotype)
-		elem := defgotype(ctxt, s)
-		newrefattr(die, dwarf.DW_AT_go_elem, elem)
-
-	case obj.KindString:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_STRINGTYPE, name, 0)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-
-	case obj.KindStruct:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_STRUCTTYPE, name, 0)
-		dotypedef(ctxt, &dwtypes, name, die)
-		newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
-		nfields := decodetypeStructFieldCount(ctxt.Arch, gotype)
-		var f string
-		var fld *dwarf.DWDie
-		var s *Symbol
-		for i := 0; i < nfields; i++ {
-			f = decodetypeStructFieldName(gotype, i)
-			s = decodetypeStructFieldType(gotype, i)
-			if f == "" {
-				f = s.Name[5:] // skip "type."
-			}
-			fld = newdie(ctxt, die, dwarf.DW_ABRV_STRUCTFIELD, f, 0)
-			newrefattr(fld, dwarf.DW_AT_type, defgotype(ctxt, s))
-			newmemberoffsetattr(fld, int32(decodetypeStructFieldOffs(ctxt.Arch, gotype, i)))
-		}
-
-	case obj.KindUnsafePointer:
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BARE_PTRTYPE, name, 0)
-
-	default:
-		Errorf(gotype, "dwarf: definition of unknown kind %d", kind)
-		die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_TYPEDECL, name, 0)
-		newrefattr(die, dwarf.DW_AT_type, mustFind(ctxt, "<unspecified>"))
-	}
-
-	newattr(die, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, int64(kind), 0)
-
-	if _, ok := prototypedies[gotype.Name]; ok {
-		prototypedies[gotype.Name] = die
-	}
-
-	return die
-}
-
-func nameFromDIESym(dwtype *Symbol) string {
-	return strings.TrimSuffix(dwtype.Name[len(dwarf.InfoPrefix):], "..def")
-}
-
-// Find or construct *T given T.
-func defptrto(ctxt *Link, dwtype *Symbol) *Symbol {
-	ptrname := "*" + nameFromDIESym(dwtype)
-	die := find(ctxt, ptrname)
-	if die == nil {
-		pdie := newdie(ctxt, &dwtypes, dwarf.DW_ABRV_PTRTYPE, ptrname, 0)
-		newrefattr(pdie, dwarf.DW_AT_type, dwtype)
-		return dtolsym(pdie.Sym)
-	}
-
-	return die
-}
-
-// Copies src's children into dst. Copies attributes by value.
-// DWAttr.data is copied as pointer only. If except is one of
-// the top-level children, it will not be copied.
-func copychildrenexcept(ctxt *Link, dst *dwarf.DWDie, src *dwarf.DWDie, except *dwarf.DWDie) {
-	for src = src.Child; src != nil; src = src.Link {
-		if src == except {
-			continue
-		}
-		c := newdie(ctxt, dst, src.Abbrev, getattr(src, dwarf.DW_AT_name).Data.(string), 0)
-		for a := src.Attr; a != nil; a = a.Link {
-			newattr(c, a.Atr, int(a.Cls), a.Value, a.Data)
-		}
-		copychildrenexcept(ctxt, c, src, nil)
-	}
-
-	reverselist(&dst.Child)
-}
-
-func copychildren(ctxt *Link, dst *dwarf.DWDie, src *dwarf.DWDie) {
-	copychildrenexcept(ctxt, dst, src, nil)
-}
-
-// Search children (assumed to have TAG_member) for the one named
-// field and set its AT_type to dwtype
-func substitutetype(structdie *dwarf.DWDie, field string, dwtype *Symbol) {
-	child := findchild(structdie, field)
-	if child == nil {
-		Exitf("dwarf substitutetype: %s does not have member %s",
-			getattr(structdie, dwarf.DW_AT_name).Data, field)
-		return
-	}
-
-	a := getattr(child, dwarf.DW_AT_type)
-	if a != nil {
-		a.Data = dwtype
-	} else {
-		newrefattr(child, dwarf.DW_AT_type, dwtype)
-	}
-}
-
-func findprotodie(ctxt *Link, name string) *dwarf.DWDie {
-	die, ok := prototypedies[name]
-	if ok && die == nil {
-		defgotype(ctxt, lookupOrDiag(ctxt, name))
-		die = prototypedies[name]
-	}
-	return die
-}
-
-func synthesizestringtypes(ctxt *Link, die *dwarf.DWDie) {
-	prototype := walktypedef(findprotodie(ctxt, "type.runtime.stringStructDWARF"))
-	if prototype == nil {
-		return
-	}
-
-	for ; die != nil; die = die.Link {
-		if die.Abbrev != dwarf.DW_ABRV_STRINGTYPE {
-			continue
-		}
-		copychildren(ctxt, die, prototype)
-	}
-}
-
-func synthesizeslicetypes(ctxt *Link, die *dwarf.DWDie) {
-	prototype := walktypedef(findprotodie(ctxt, "type.runtime.slice"))
-	if prototype == nil {
-		return
-	}
-
-	for ; die != nil; die = die.Link {
-		if die.Abbrev != dwarf.DW_ABRV_SLICETYPE {
-			continue
-		}
-		copychildren(ctxt, die, prototype)
-		elem := getattr(die, dwarf.DW_AT_go_elem).Data.(*Symbol)
-		substitutetype(die, "array", defptrto(ctxt, elem))
-	}
-}
-
-func mkinternaltypename(base string, arg1 string, arg2 string) string {
-	var buf string
-
-	if arg2 == "" {
-		buf = fmt.Sprintf("%s<%s>", base, arg1)
-	} else {
-		buf = fmt.Sprintf("%s<%s,%s>", base, arg1, arg2)
-	}
-	n := buf
-	return n
-}
-
-// synthesizemaptypes is way too closely married to runtime/hashmap.c
-const (
-	MaxKeySize = 128
-	MaxValSize = 128
-	BucketSize = 8
-)
-
-func mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valname string, f func(*dwarf.DWDie)) *Symbol {
-	name := mkinternaltypename(typename, keyname, valname)
-	symname := dwarf.InfoPrefix + name
-	s := ctxt.Syms.ROLookup(symname, 0)
-	if s != nil && s.Type == obj.SDWARFINFO {
-		return s
-	}
-	die := newdie(ctxt, &dwtypes, abbrev, name, 0)
-	f(die)
-	return dtolsym(die.Sym)
-}
-
-func synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
-	hash := walktypedef(findprotodie(ctxt, "type.runtime.hmap"))
-	bucket := walktypedef(findprotodie(ctxt, "type.runtime.bmap"))
-
-	if hash == nil {
-		return
-	}
-
-	for ; die != nil; die = die.Link {
-		if die.Abbrev != dwarf.DW_ABRV_MAPTYPE {
-			continue
-		}
-		gotype := getattr(die, dwarf.DW_AT_type).Data.(*Symbol)
-		keytype := decodetypeMapKey(gotype)
-		valtype := decodetypeMapValue(gotype)
-		keysize, valsize := decodetypeSize(ctxt.Arch, keytype), decodetypeSize(ctxt.Arch, valtype)
-		keytype, valtype = walksymtypedef(ctxt, defgotype(ctxt, keytype)), walksymtypedef(ctxt, defgotype(ctxt, valtype))
-
-		// compute size info like hashmap.c does.
-		indirectKey, indirectVal := false, false
-		if keysize > MaxKeySize {
-			keysize = int64(SysArch.PtrSize)
-			indirectKey = true
-		}
-		if valsize > MaxValSize {
-			valsize = int64(SysArch.PtrSize)
-			indirectVal = true
-		}
-
-		// Construct type to represent an array of BucketSize keys
-		keyname := nameFromDIESym(keytype)
-		dwhks := mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
-			newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize*keysize, 0)
-			t := keytype
-			if indirectKey {
-				t = defptrto(ctxt, keytype)
-			}
-			newrefattr(dwhk, dwarf.DW_AT_type, t)
-			fld := newdie(ctxt, dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size", 0)
-			newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, BucketSize, 0)
-			newrefattr(fld, dwarf.DW_AT_type, mustFind(ctxt, "uintptr"))
-		})
-
-		// Construct type to represent an array of BucketSize values
-		valname := nameFromDIESym(valtype)
-		dwhvs := mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
-			newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize*valsize, 0)
-			t := valtype
-			if indirectVal {
-				t = defptrto(ctxt, valtype)
-			}
-			newrefattr(dwhv, dwarf.DW_AT_type, t)
-			fld := newdie(ctxt, dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size", 0)
-			newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, BucketSize, 0)
-			newrefattr(fld, dwarf.DW_AT_type, mustFind(ctxt, "uintptr"))
-		})
-
-		// Construct bucket<K,V>
-		dwhbs := mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
-			// Copy over all fields except the field "data" from the generic
-			// bucket. "data" will be replaced with keys/values below.
-			copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
-
-			fld := newdie(ctxt, dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys", 0)
-			newrefattr(fld, dwarf.DW_AT_type, dwhks)
-			newmemberoffsetattr(fld, BucketSize)
-			fld = newdie(ctxt, dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values", 0)
-			newrefattr(fld, dwarf.DW_AT_type, dwhvs)
-			newmemberoffsetattr(fld, BucketSize+BucketSize*int32(keysize))
-			fld = newdie(ctxt, dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow", 0)
-			newrefattr(fld, dwarf.DW_AT_type, defptrto(ctxt, dtolsym(dwhb.Sym)))
-			newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize)))
-			if SysArch.RegSize > SysArch.PtrSize {
-				fld = newdie(ctxt, dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad", 0)
-				newrefattr(fld, dwarf.DW_AT_type, mustFind(ctxt, "uintptr"))
-				newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(SysArch.PtrSize))
-			}
-
-			newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize+BucketSize*keysize+BucketSize*valsize+int64(SysArch.RegSize), 0)
-		})
-
-		// Construct hash<K,V>
-		dwhs := mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
-			copychildren(ctxt, dwh, hash)
-			substitutetype(dwh, "buckets", defptrto(ctxt, dwhbs))
-			substitutetype(dwh, "oldbuckets", defptrto(ctxt, dwhbs))
-			newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
-		})
-
-		// make map type a pointer to hash<K,V>
-		newrefattr(die, dwarf.DW_AT_type, defptrto(ctxt, dwhs))
-	}
-}
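
The member offsets above assume the runtime's bucket layout for this release: BucketSize tophash bytes, then BucketSize keys, then BucketSize values, then the overflow pointer. The offset arithmetic in isolation (the sizes below are made up for illustration):

package main

import "fmt"

const bucketSize = 8 // entries per bucket, matching BucketSize above

// bucketOffsets returns the byte offsets of the keys array, values array and
// overflow pointer inside a bucket, given key/value sizes in bytes.
func bucketOffsets(keysize, valsize int64) (keys, values, overflow int64) {
	keys = bucketSize // after the tophash bytes
	values = keys + bucketSize*keysize
	overflow = values + bucketSize*valsize
	return
}

func main() {
	k, v, o := bucketOffsets(8, 16) // e.g. 8-byte keys, 16-byte values
	fmt.Println("keys at", k, "values at", v, "overflow at", o)
}
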
-
-func synthesizechantypes(ctxt *Link, die *dwarf.DWDie) {
-	sudog := walktypedef(findprotodie(ctxt, "type.runtime.sudog"))
-	waitq := walktypedef(findprotodie(ctxt, "type.runtime.waitq"))
-	hchan := walktypedef(findprotodie(ctxt, "type.runtime.hchan"))
-	if sudog == nil || waitq == nil || hchan == nil {
-		return
-	}
-
-	sudogsize := int(getattr(sudog, dwarf.DW_AT_byte_size).Value)
-
-	for ; die != nil; die = die.Link {
-		if die.Abbrev != dwarf.DW_ABRV_CHANTYPE {
-			continue
-		}
-		elemgotype := getattr(die, dwarf.DW_AT_type).Data.(*Symbol)
-		elemsize := decodetypeSize(ctxt.Arch, elemgotype)
-		elemname := elemgotype.Name[5:]
-		elemtype := walksymtypedef(ctxt, defgotype(ctxt, elemgotype))
-
-		// sudog<T>
-		dwss := mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "sudog", elemname, "", func(dws *dwarf.DWDie) {
-			copychildren(ctxt, dws, sudog)
-			substitutetype(dws, "elem", elemtype)
-			if elemsize > 8 {
-				elemsize -= 8
-			} else {
-				elemsize = 0
-			}
-			newattr(dws, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(sudogsize)+elemsize, nil)
-		})
-
-		// waitq<T>
-		dwws := mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "waitq", elemname, "", func(dww *dwarf.DWDie) {
-
-			copychildren(ctxt, dww, waitq)
-			substitutetype(dww, "first", defptrto(ctxt, dwss))
-			substitutetype(dww, "last", defptrto(ctxt, dwss))
-			newattr(dww, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(waitq, dwarf.DW_AT_byte_size).Value, nil)
-		})
-
-		// hchan<T>
-		dwhs := mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hchan", elemname, "", func(dwh *dwarf.DWDie) {
-			copychildren(ctxt, dwh, hchan)
-			substitutetype(dwh, "recvq", dwws)
-			substitutetype(dwh, "sendq", dwws)
-			newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hchan, dwarf.DW_AT_byte_size).Value, nil)
-		})
-
-		newrefattr(die, dwarf.DW_AT_type, defptrto(ctxt, dwhs))
-	}
-}
-
-// For use with pass.c::genasmsym
-func defdwsymb(ctxt *Link, sym *Symbol, s string, t SymbolType, v int64, gotype *Symbol) {
-	if strings.HasPrefix(s, "go.string.") {
-		return
-	}
-	if strings.HasPrefix(s, "runtime.gcbits.") {
-		return
-	}
-
-	if strings.HasPrefix(s, "type.") && s != "type.*" && !strings.HasPrefix(s, "type..") {
-		defgotype(ctxt, sym)
-		return
-	}
-
-	var dv *dwarf.DWDie
-
-	var dt *Symbol
-	switch t {
-	default:
-		return
-
-	case DataSym, BSSSym:
-		dv = newdie(ctxt, &dwglobals, dwarf.DW_ABRV_VARIABLE, s, int(sym.Version))
-		newabslocexprattr(dv, v, sym)
-		if sym.Version == 0 {
-			newattr(dv, dwarf.DW_AT_external, dwarf.DW_CLS_FLAG, 1, 0)
-		}
-		fallthrough
-
-	case AutoSym, ParamSym:
-		dt = defgotype(ctxt, gotype)
-	}
-
-	if dv != nil {
-		newrefattr(dv, dwarf.DW_AT_type, dt)
-	}
-}
-
-func movetomodule(parent *dwarf.DWDie) {
-	die := dwroot.Child.Child
-	if die == nil {
-		dwroot.Child.Child = parent.Child
-		return
-	}
-	for die.Link != nil {
-		die = die.Link
-	}
-	die.Link = parent.Child
-}
-
-// If the pcln table contains runtime/runtime.go, use that to set gdbscript path.
-func finddebugruntimepath(s *Symbol) {
-	if gdbscript != "" {
-		return
-	}
-
-	for i := range s.FuncInfo.File {
-		f := s.FuncInfo.File[i]
-		if i := strings.Index(f.Name, "runtime/runtime.go"); i >= 0 {
-			gdbscript = f.Name[:i] + "runtime/runtime-gdb.py"
-			break
-		}
-	}
-}
-
-/*
- * Generate a sequence of opcodes that is as short as possible.
- * See section 6.2.5
- */
-const (
-	LINE_BASE   = -4
-	LINE_RANGE  = 10
-	PC_RANGE    = (255 - OPCODE_BASE) / LINE_RANGE
-	OPCODE_BASE = 10
-)
-
-func putpclcdelta(linkctxt *Link, ctxt dwarf.Context, s *Symbol, deltaPC uint64, deltaLC int64) {
-	// Choose a special opcode that minimizes the number of bytes needed to
-	// encode the remaining PC delta and LC delta.
-	var opcode int64
-	if deltaLC < LINE_BASE {
-		if deltaPC >= PC_RANGE {
-			opcode = OPCODE_BASE + (LINE_RANGE * PC_RANGE)
-		} else {
-			opcode = OPCODE_BASE + (LINE_RANGE * int64(deltaPC))
-		}
-	} else if deltaLC < LINE_BASE+LINE_RANGE {
-		if deltaPC >= PC_RANGE {
-			opcode = OPCODE_BASE + (deltaLC - LINE_BASE) + (LINE_RANGE * PC_RANGE)
-			if opcode > 255 {
-				opcode -= LINE_RANGE
-			}
-		} else {
-			opcode = OPCODE_BASE + (deltaLC - LINE_BASE) + (LINE_RANGE * int64(deltaPC))
-		}
-	} else {
-		if deltaPC <= PC_RANGE {
-			opcode = OPCODE_BASE + (LINE_RANGE - 1) + (LINE_RANGE * int64(deltaPC))
-			if opcode > 255 {
-				opcode = 255
-			}
-		} else {
-			// Use opcode 249 (pc+=23, lc+=5) or 255 (pc+=24, lc+=1).
-			//
-			// Let x=deltaPC-PC_RANGE.  If we use opcode 255, x will be the remaining
-			// deltaPC that we need to encode separately before emitting 255.  If we
-			// use opcode 249, we will need to encode x+1.  If x+1 takes one more
-			// byte to encode than x, then we use opcode 255.
-			//
-			// In all other cases x and x+1 take the same number of bytes to encode,
-			// so we use opcode 249, which may save us a byte in encoding deltaLC,
-			// for similar reasons.
-			switch deltaPC - PC_RANGE {
-			// PC_RANGE is the largest deltaPC we can encode in one byte, using
-			// DW_LNS_const_add_pc.
-			//
-			// (1<<16)-1 is the largest deltaPC we can encode in three bytes, using
-			// DW_LNS_fixed_advance_pc.
-			//
-			// (1<<(7n))-1 is the largest deltaPC we can encode in n+1 bytes for
-			// n=1,3,4,5,..., using DW_LNS_advance_pc.
-			case PC_RANGE, (1 << 7) - 1, (1 << 16) - 1, (1 << 21) - 1, (1 << 28) - 1,
-				(1 << 35) - 1, (1 << 42) - 1, (1 << 49) - 1, (1 << 56) - 1, (1 << 63) - 1:
-				opcode = 255
-			default:
-				opcode = OPCODE_BASE + LINE_RANGE*PC_RANGE - 1 // 249
-			}
-		}
-	}
-	if opcode < OPCODE_BASE || opcode > 255 {
-		panic(fmt.Sprintf("produced invalid special opcode %d", opcode))
-	}
-
-	// Subtract from deltaPC and deltaLC the amounts that the opcode will add.
-	deltaPC -= uint64((opcode - OPCODE_BASE) / LINE_RANGE)
-	deltaLC -= int64((opcode-OPCODE_BASE)%LINE_RANGE + LINE_BASE)
-
-	// Encode deltaPC.
-	if deltaPC != 0 {
-		if deltaPC <= PC_RANGE {
-			// Adjust the opcode so that we can use the 1-byte DW_LNS_const_add_pc
-			// instruction.
-			opcode -= LINE_RANGE * int64(PC_RANGE-deltaPC)
-			if opcode < OPCODE_BASE {
-				panic(fmt.Sprintf("produced invalid special opcode %d", opcode))
-			}
-			Adduint8(linkctxt, s, dwarf.DW_LNS_const_add_pc)
-		} else if (1<<14) <= deltaPC && deltaPC < (1<<16) {
-			Adduint8(linkctxt, s, dwarf.DW_LNS_fixed_advance_pc)
-			Adduint16(linkctxt, s, uint16(deltaPC))
-		} else {
-			Adduint8(linkctxt, s, dwarf.DW_LNS_advance_pc)
-			dwarf.Uleb128put(ctxt, s, int64(deltaPC))
-		}
-	}
-
-	// Encode deltaLC.
-	if deltaLC != 0 {
-		Adduint8(linkctxt, s, dwarf.DW_LNS_advance_line)
-		dwarf.Sleb128put(ctxt, s, deltaLC)
-	}
-
-	// Output the special opcode.
-	Adduint8(linkctxt, s, uint8(opcode))
-}
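
With OPCODE_BASE 10, LINE_BASE -4 and LINE_RANGE 10 as defined above, a DWARF special opcode folds a small (pc, line) advance into a single byte. A sketch of the forward formula and its inverse, matching the adjustments putpclcdelta applies after choosing an opcode:

package main

import "fmt"

const (
	lineBase   = -4
	lineRange  = 10
	opcodeBase = 10
)

// specialOpcode returns the one-byte special opcode for the given deltas,
// valid only when the result lands in [opcodeBase, 255].
func specialOpcode(deltaPC uint64, deltaLC int64) int64 {
	return opcodeBase + (deltaLC - lineBase) + lineRange*int64(deltaPC)
}

// decode recovers the pc and line advances a special opcode applies.
func decode(opcode int64) (deltaPC uint64, deltaLC int64) {
	adj := opcode - opcodeBase
	return uint64(adj / lineRange), adj%lineRange + lineBase
}

func main() {
	op := specialOpcode(3, 2) // advance pc by 3, line by 2
	pc, lc := decode(op)
	fmt.Println("opcode:", op, "decodes to pc+=", pc, "line+=", lc)
}
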
-
-/*
- * Walk prog table, emit line program and build DIE tree.
- */
-
-func getCompilationDir() string {
-	if dir, err := os.Getwd(); err == nil {
-		return dir
-	}
-	return "/"
-}
-
-func writelines(ctxt *Link, syms []*Symbol) ([]*Symbol, []*Symbol) {
-	var dwarfctxt dwarf.Context = dwctxt{ctxt}
-	if linesec == nil {
-		linesec = ctxt.Syms.Lookup(".debug_line", 0)
-	}
-	linesec.Type = obj.SDWARFSECT
-	linesec.R = linesec.R[:0]
-
-	ls := linesec
-	syms = append(syms, ls)
-	var funcs []*Symbol
-
-	unitstart := int64(-1)
-	headerstart := int64(-1)
-	headerend := int64(-1)
-	epc := int64(0)
-	var epcs *Symbol
-	var dwinfo *dwarf.DWDie
-
-	lang := dwarf.DW_LANG_Go
-
-	s := ctxt.Textp[0]
-	if ctxt.DynlinkingGo() && Headtype == obj.Hdarwin {
-		s = ctxt.Textp[1] // skip runtime.text
-	}
-
-	dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, "go", 0)
-	newattr(dwinfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(lang), 0)
-	newattr(dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, 0, linesec)
-	newattr(dwinfo, dwarf.DW_AT_low_pc, dwarf.DW_CLS_ADDRESS, s.Value, s)
-	// OS X linker requires compilation dir or absolute path in comp unit name to output debug info.
-	compDir := getCompilationDir()
-	newattr(dwinfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir)
-
-	// Write .debug_line Line Number Program Header (sec 6.2.4)
-	// Fields marked with (*) must be changed for 64-bit dwarf
-	unitLengthOffset := ls.Size
-	Adduint32(ctxt, ls, 0) // unit_length (*), filled in at end.
-	unitstart = ls.Size
-	Adduint16(ctxt, ls, 2) // dwarf version (appendix F)
-	headerLengthOffset := ls.Size
-	Adduint32(ctxt, ls, 0) // header_length (*), filled in at end.
-	headerstart = ls.Size
-
-	// cpos == unitstart + 4 + 2 + 4
-	Adduint8(ctxt, ls, 1)              // minimum_instruction_length
-	Adduint8(ctxt, ls, 1)              // default_is_stmt
-	Adduint8(ctxt, ls, LINE_BASE&0xFF) // line_base
-	Adduint8(ctxt, ls, LINE_RANGE)     // line_range
-	Adduint8(ctxt, ls, OPCODE_BASE)    // opcode_base
-	Adduint8(ctxt, ls, 0)              // standard_opcode_lengths[1]
-	Adduint8(ctxt, ls, 1)              // standard_opcode_lengths[2]
-	Adduint8(ctxt, ls, 1)              // standard_opcode_lengths[3]
-	Adduint8(ctxt, ls, 1)              // standard_opcode_lengths[4]
-	Adduint8(ctxt, ls, 1)              // standard_opcode_lengths[5]
-	Adduint8(ctxt, ls, 0)              // standard_opcode_lengths[6]
-	Adduint8(ctxt, ls, 0)              // standard_opcode_lengths[7]
-	Adduint8(ctxt, ls, 0)              // standard_opcode_lengths[8]
-	Adduint8(ctxt, ls, 1)              // standard_opcode_lengths[9]
-	Adduint8(ctxt, ls, 0)              // include_directories  (empty)
-
-	for _, f := range ctxt.Filesyms {
-		Addstring(ls, f.Name)
-		Adduint8(ctxt, ls, 0)
-		Adduint8(ctxt, ls, 0)
-		Adduint8(ctxt, ls, 0)
-	}
-
-	// 4 zeros: the string termination + 3 fields.
-	Adduint8(ctxt, ls, 0)
-	// terminate file_names.
-	headerend = ls.Size
-
-	Adduint8(ctxt, ls, 0) // start extended opcode
-	dwarf.Uleb128put(dwarfctxt, ls, 1+int64(SysArch.PtrSize))
-	Adduint8(ctxt, ls, dwarf.DW_LNE_set_address)
-
-	pc := s.Value
-	line := 1
-	file := 1
-	Addaddr(ctxt, ls, s)
-
-	var pcfile Pciter
-	var pcline Pciter
-	for _, s := range ctxt.Textp {
-
-		epc = s.Value + s.Size
-		epcs = s
-
-		dsym := ctxt.Syms.Lookup(dwarf.InfoPrefix+s.Name, int(s.Version))
-		dsym.Attr |= AttrHidden
-		dsym.Type = obj.SDWARFINFO
-		for _, r := range dsym.R {
-			if r.Type == obj.R_DWARFREF && r.Sym.Size == 0 {
-				if Buildmode == BuildmodeShared {
-					// These type symbols may not be present in BuildmodeShared. Skip.
-					continue
-				}
-				n := nameFromDIESym(r.Sym)
-				defgotype(ctxt, ctxt.Syms.Lookup("type."+n, 0))
-			}
-		}
-		funcs = append(funcs, dsym)
-
-		if s.FuncInfo == nil {
-			continue
-		}
-
-		finddebugruntimepath(s)
-
-		pciterinit(ctxt, &pcfile, &s.FuncInfo.Pcfile)
-		pciterinit(ctxt, &pcline, &s.FuncInfo.Pcline)
-		epc = pc
-		for pcfile.done == 0 && pcline.done == 0 {
-			if epc-s.Value >= int64(pcfile.nextpc) {
-				pciternext(&pcfile)
-				continue
-			}
-
-			if epc-s.Value >= int64(pcline.nextpc) {
-				pciternext(&pcline)
-				continue
-			}
-
-			if int32(file) != pcfile.value {
-				Adduint8(ctxt, ls, dwarf.DW_LNS_set_file)
-				dwarf.Uleb128put(dwarfctxt, ls, int64(pcfile.value))
-				file = int(pcfile.value)
-			}
-
-			putpclcdelta(ctxt, dwarfctxt, ls, uint64(s.Value+int64(pcline.pc)-pc), int64(pcline.value)-int64(line))
-
-			pc = s.Value + int64(pcline.pc)
-			line = int(pcline.value)
-			if pcfile.nextpc < pcline.nextpc {
-				epc = int64(pcfile.nextpc)
-			} else {
-				epc = int64(pcline.nextpc)
-			}
-			epc += s.Value
-		}
-	}
-
-	Adduint8(ctxt, ls, 0) // start extended opcode
-	dwarf.Uleb128put(dwarfctxt, ls, 1)
-	Adduint8(ctxt, ls, dwarf.DW_LNE_end_sequence)
-
-	newattr(dwinfo, dwarf.DW_AT_high_pc, dwarf.DW_CLS_ADDRESS, epc+1, epcs)
-
-	setuint32(ctxt, ls, unitLengthOffset, uint32(ls.Size-unitstart))
-	setuint32(ctxt, ls, headerLengthOffset, uint32(headerend-headerstart))
-
-	return syms, funcs
-}
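The header emitted above follows the DWARF v2 line number program header layout (sec 6.2.4). A sketch of the same field order as a Go struct, for reference only; the field names are descriptive inventions, and the linker writes these fields directly with Adduint8/16/32 rather than through a struct:

// Reference layout of the .debug_line program header emitted by writelines
// (DWARF v2, sec 6.2.4). Field names here are descriptive only.
type lineProgramHeader struct {
	UnitLength           uint32   // unit_length (*): patched at the end via setuint32
	Version              uint16   // 2
	HeaderLength         uint32   // header_length (*): patched at the end via setuint32
	MinInstructionLength uint8    // 1
	DefaultIsStmt        uint8    // 1
	LineBase             int8     // LINE_BASE (written as LINE_BASE&0xFF)
	LineRange            uint8    // LINE_RANGE
	OpcodeBase           uint8    // OPCODE_BASE
	StdOpcodeLengths     [9]uint8 // operand counts for the OPCODE_BASE-1 standard opcodes
	// include_directories: empty list, terminated by a single 0 byte.
	// file_names: one entry per ctxt.Filesyms (NUL-terminated name plus three
	// zero ULEB fields: dir index, mtime, length), terminated by a 0 byte.
}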
-
-/*
- *  Emit .debug_frame
- */
-const (
-	dataAlignmentFactor = -4
-)
-
-// appendPCDeltaCFA appends per-PC CFA deltas to b and returns the final slice.
-func appendPCDeltaCFA(b []byte, deltapc, cfa int64) []byte {
-	b = append(b, dwarf.DW_CFA_def_cfa_offset_sf)
-	b = dwarf.AppendSleb128(b, cfa/dataAlignmentFactor)
-
-	switch {
-	case deltapc < 0x40:
-		b = append(b, uint8(dwarf.DW_CFA_advance_loc+deltapc))
-	case deltapc < 0x100:
-		b = append(b, dwarf.DW_CFA_advance_loc1)
-		b = append(b, uint8(deltapc))
-	case deltapc < 0x10000:
-		b = append(b, dwarf.DW_CFA_advance_loc2)
-		b = Thearch.Append16(b, uint16(deltapc))
-	default:
-		b = append(b, dwarf.DW_CFA_advance_loc4)
-		b = Thearch.Append32(b, uint32(deltapc))
-	}
-	return b
-}
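A small worked example of the factored offsets used with dataAlignmentFactor above (illustrative only; ptrSize = 8 is an assumption about the target, and the CIE emitted in writeframes below stores -PtrSize/dataAlignmentFactor in exactly this way):

package main

import "fmt"

const dataAlignmentFactor = -4 // same value as the constant defined above

// The factored CFA opcodes store offset/dataAlignmentFactor in .debug_frame;
// a consumer multiplies the stored value back by dataAlignmentFactor to
// recover the byte offset from the CFA.
func encodeFactored(offset int64) int64 { return offset / dataAlignmentFactor }
func decodeFactored(v int64) int64      { return v * dataAlignmentFactor }

func main() {
	const ptrSize = 8                     // assumed 64-bit target, for illustration
	fmt.Println(encodeFactored(-ptrSize)) // 2: what the CIE stores for the return address slot
	fmt.Println(decodeFactored(2))        // -8: that slot sits at CFA-8
	fmt.Println(encodeFactored(24))       // -6: a CFA of SP+24 under DW_CFA_def_cfa_offset_sf
}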
-
-func writeframes(ctxt *Link, syms []*Symbol) []*Symbol {
-	var dwarfctxt dwarf.Context = dwctxt{ctxt}
-	if framesec == nil {
-		framesec = ctxt.Syms.Lookup(".debug_frame", 0)
-	}
-	framesec.Type = obj.SDWARFSECT
-	framesec.R = framesec.R[:0]
-	fs := framesec
-	syms = append(syms, fs)
-
-	// Emit the CIE, Section 6.4.1
-	cieReserve := uint32(16)
-	if haslinkregister(ctxt) {
-		cieReserve = 32
-	}
-	Adduint32(ctxt, fs, cieReserve)                            // initial length, must be multiple of thearch.ptrsize
-	Adduint32(ctxt, fs, 0xffffffff)                            // cid.
-	Adduint8(ctxt, fs, 3)                                      // dwarf version (appendix F)
-	Adduint8(ctxt, fs, 0)                                      // augmentation ""
-	dwarf.Uleb128put(dwarfctxt, fs, 1)                         // code_alignment_factor
-	dwarf.Sleb128put(dwarfctxt, fs, dataAlignmentFactor)       // all CFI offset calculations include multiplication with this factor
-	dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr)) // return_address_register
-
-	Adduint8(ctxt, fs, dwarf.DW_CFA_def_cfa)                   // Set the current frame address...
-	dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfregsp)) // ...to use the value in the platform's SP register (defined in l.go)...
-	if haslinkregister(ctxt) {
-		dwarf.Uleb128put(dwarfctxt, fs, int64(0)) // ...plus a 0 offset.
-
-		Adduint8(ctxt, fs, dwarf.DW_CFA_same_value) // The platform's link register is unchanged during the prologue.
-		dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr))
-
-		Adduint8(ctxt, fs, dwarf.DW_CFA_val_offset)                // The previous value...
-		dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfregsp)) // ...of the platform's SP register...
-		dwarf.Uleb128put(dwarfctxt, fs, int64(0))                  // ...is CFA+0.
-	} else {
-		dwarf.Uleb128put(dwarfctxt, fs, int64(SysArch.PtrSize)) // ...plus the word size (because the call instruction implicitly adds one word to the frame).
-
-		Adduint8(ctxt, fs, dwarf.DW_CFA_offset_extended)                             // The previous value...
-		dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr))                   // ...of the return address...
-		dwarf.Uleb128put(dwarfctxt, fs, int64(-SysArch.PtrSize)/dataAlignmentFactor) // ...is saved at [CFA - PtrSize] (written as the factored offset PtrSize/4).
-	}
-
-	// 4 is to exclude the length field.
-	pad := int64(cieReserve) + 4 - fs.Size
-
-	if pad < 0 {
-		Exitf("dwarf: cieReserve too small by %d bytes.", -pad)
-	}
-
-	Addbytes(fs, zeros[:pad])
-
-	var deltaBuf []byte
-	var pcsp Pciter
-	for _, s := range ctxt.Textp {
-		if s.FuncInfo == nil {
-			continue
-		}
-
-		// Emit a FDE, Section 6.4.1.
-		// First build the section contents into a byte buffer.
-		deltaBuf = deltaBuf[:0]
-		for pciterinit(ctxt, &pcsp, &s.FuncInfo.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
-			nextpc := pcsp.nextpc
-
-			// pciterinit goes up to the end of the function,
-			// but DWARF expects us to stop just before the end.
-			if int64(nextpc) == s.Size {
-				nextpc--
-				if nextpc < pcsp.pc {
-					continue
-				}
-			}
-
-			if haslinkregister(ctxt) {
-				// TODO(bryanpkc): This is imprecise. In general, the instruction
-				// that stores the return address to the stack frame is not the
-				// same one that allocates the frame.
-				if pcsp.value > 0 {
-					// The return address is preserved at (CFA-frame_size)
-					// after a stack frame has been allocated.
-					deltaBuf = append(deltaBuf, dwarf.DW_CFA_offset_extended_sf)
-					deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(Thearch.Dwarfreglr))
-					deltaBuf = dwarf.AppendSleb128(deltaBuf, -int64(pcsp.value)/dataAlignmentFactor)
-				} else {
-					// The return address is restored into the link register
-					// when a stack frame has been de-allocated.
-					deltaBuf = append(deltaBuf, dwarf.DW_CFA_same_value)
-					deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(Thearch.Dwarfreglr))
-				}
-				deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(pcsp.value))
-			} else {
-				deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(SysArch.PtrSize)+int64(pcsp.value))
-			}
-		}
-		pad := int(Rnd(int64(len(deltaBuf)), int64(SysArch.PtrSize))) - len(deltaBuf)
-		deltaBuf = append(deltaBuf, zeros[:pad]...)
-
-		// Emit the FDE header, Section 6.4.1.
-		//	4 bytes: length, must be multiple of thearch.ptrsize
-		//	4 bytes: Pointer to the CIE above, at offset 0
-		//	ptrsize: initial location
-		//	ptrsize: address range
-		Adduint32(ctxt, fs, uint32(4+2*SysArch.PtrSize+len(deltaBuf))) // length (excludes itself)
-		if Linkmode == LinkExternal {
-			adddwarfref(ctxt, fs, framesec, 4)
-		} else {
-			Adduint32(ctxt, fs, 0) // CIE offset
-		}
-		Addaddr(ctxt, fs, s)
-		adduintxx(ctxt, fs, uint64(s.Size), SysArch.PtrSize) // address range
-		Addbytes(fs, deltaBuf)
-	}
-	return syms
-}
-
-/*
- *  Walk the DWARF debug info entries (DIEs) and emit .debug_info
- */
-const (
-	COMPUNITHEADERSIZE = 4 + 2 + 4 + 1
-)
-
-func writeinfo(ctxt *Link, syms []*Symbol, funcs []*Symbol) []*Symbol {
-	if infosec == nil {
-		infosec = ctxt.Syms.Lookup(".debug_info", 0)
-	}
-	infosec.R = infosec.R[:0]
-	infosec.Type = obj.SDWARFINFO
-	infosec.Attr |= AttrReachable
-	syms = append(syms, infosec)
-
-	if arangessec == nil {
-		arangessec = ctxt.Syms.Lookup(".dwarfaranges", 0)
-	}
-	arangessec.R = arangessec.R[:0]
-
-	var dwarfctxt dwarf.Context = dwctxt{ctxt}
-
-	for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link {
-		s := dtolsym(compunit.Sym)
-
-		// Write .debug_info Compilation Unit Header (sec 7.5.1)
-		// Fields marked with (*) must be changed for 64-bit dwarf
-		// This must match COMPUNITHEADERSIZE above.
-		Adduint32(ctxt, s, 0) // unit_length (*), will be filled in later.
-		Adduint16(ctxt, s, 2) // dwarf version (appendix F)
-
-		// debug_abbrev_offset (*)
-		adddwarfref(ctxt, s, abbrevsym, 4)
-
-		Adduint8(ctxt, s, uint8(SysArch.PtrSize)) // address_size
-
-		dwarf.Uleb128put(dwarfctxt, s, int64(compunit.Abbrev))
-		dwarf.PutAttrs(dwarfctxt, s, compunit.Abbrev, compunit.Attr)
-
-		cu := []*Symbol{s}
-		if funcs != nil {
-			cu = append(cu, funcs...)
-			funcs = nil
-		}
-		cu = putdies(ctxt, dwarfctxt, cu, compunit.Child)
-		var cusize int64
-		for _, child := range cu {
-			cusize += child.Size
-		}
-		cusize -= 4 // exclude the length field.
-		setuint32(ctxt, s, 0, uint32(cusize))
-		newattr(compunit, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, cusize, 0)
-		syms = append(syms, cu...)
-	}
-	return syms
-}
-
-/*
- *  Emit .debug_pubnames/_types.  .debug_info must have been written before,
- *  because we need each DIE's offset and each compilation unit's size.
- */
-func ispubname(die *dwarf.DWDie) bool {
-	switch die.Abbrev {
-	case dwarf.DW_ABRV_FUNCTION, dwarf.DW_ABRV_VARIABLE:
-		a := getattr(die, dwarf.DW_AT_external)
-		return a != nil && a.Value != 0
-	}
-
-	return false
-}
-
-func ispubtype(die *dwarf.DWDie) bool {
-	return die.Abbrev >= dwarf.DW_ABRV_NULLTYPE
-}
-
-func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*Symbol) []*Symbol {
-	s := ctxt.Syms.Lookup(sname, 0)
-	s.Type = obj.SDWARFSECT
-	syms = append(syms, s)
-
-	for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link {
-		sectionstart := s.Size
-		culength := uint32(getattr(compunit, dwarf.DW_AT_byte_size).Value) + 4
-
-		// Write .debug_pubnames/types	Header (sec 6.1.1)
-		Adduint32(ctxt, s, 0)                          // unit_length (*), will be filled in later.
-		Adduint16(ctxt, s, 2)                          // dwarf version (appendix F)
-		adddwarfref(ctxt, s, dtolsym(compunit.Sym), 4) // debug_info_offset (of the Comp unit Header)
-		Adduint32(ctxt, s, culength)                   // debug_info_length
-
-		for die := compunit.Child; die != nil; die = die.Link {
-			if !ispub(die) {
-				continue
-			}
-			dwa := getattr(die, dwarf.DW_AT_name)
-			name := dwa.Data.(string)
-			if die.Sym == nil {
-				fmt.Println("Missing sym for ", name)
-			}
-			adddwarfref(ctxt, s, dtolsym(die.Sym), 4)
-			Addstring(s, name)
-		}
-
-		Adduint32(ctxt, s, 0)
-
-		setuint32(ctxt, s, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field.
-	}
-
-	return syms
-}
-
-/*
- *  Emit .debug_aranges.  .debug_info must have been written before,
- *  because we need the offset and pc range of each compilation unit.
- */
-func writearanges(ctxt *Link, syms []*Symbol) []*Symbol {
-	s := ctxt.Syms.Lookup(".debug_aranges", 0)
-	s.Type = obj.SDWARFSECT
-	// The first tuple is aligned to a multiple of the size of a single tuple
-	// (twice the size of an address)
-	headersize := int(Rnd(4+2+4+1+1, int64(SysArch.PtrSize*2))) // don't count unit_length field itself
-
-	for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link {
-		b := getattr(compunit, dwarf.DW_AT_low_pc)
-		if b == nil {
-			continue
-		}
-		e := getattr(compunit, dwarf.DW_AT_high_pc)
-		if e == nil {
-			continue
-		}
-
-		// Write .debug_aranges	 Header + entry	 (sec 6.1.2)
-		unitlength := uint32(headersize) + 4*uint32(SysArch.PtrSize) - 4
-		Adduint32(ctxt, s, unitlength) // unit_length (*)
-		Adduint16(ctxt, s, 2)          // dwarf version (appendix F)
-
-		adddwarfref(ctxt, s, dtolsym(compunit.Sym), 4)
-
-		Adduint8(ctxt, s, uint8(SysArch.PtrSize)) // address_size
-		Adduint8(ctxt, s, 0)                      // segment_size
-		padding := headersize - (4 + 2 + 4 + 1 + 1)
-		for i := 0; i < padding; i++ {
-			Adduint8(ctxt, s, 0)
-		}
-
-		Addaddrplus(ctxt, s, b.Data.(*Symbol), b.Value-(b.Data.(*Symbol)).Value)
-		adduintxx(ctxt, s, uint64(e.Value-b.Value), SysArch.PtrSize)
-		adduintxx(ctxt, s, 0, SysArch.PtrSize)
-		adduintxx(ctxt, s, 0, SysArch.PtrSize)
-	}
-	if s.Size > 0 {
-		syms = append(syms, s)
-	}
-	return syms
-}
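A quick check of the alignment arithmetic in writearanges, assuming an 8-byte pointer size (the rnd helper below mirrors what Rnd does for positive multiples and is defined only so the sketch stands alone):

package main

import "fmt"

// rnd rounds v up to a multiple of r, for positive r.
func rnd(v, r int64) int64 { return (v + r - 1) / r * r }

func main() {
	const ptrSize = 8                           // assumed 64-bit target, for illustration
	headersize := rnd(4+2+4+1+1, ptrSize*2)     // header rounded to the tuple alignment: 16
	padding := headersize - (4 + 2 + 4 + 1 + 1) // zero bytes written after segment_size: 4
	unitLength := headersize + 4*ptrSize - 4    // one range tuple plus terminator, minus the length field: 44
	fmt.Println(headersize, padding, unitLength) // 16 4 44
}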
-
-func writegdbscript(ctxt *Link, syms []*Symbol) []*Symbol {
-
-	if gdbscript != "" {
-		s := ctxt.Syms.Lookup(".debug_gdb_scripts", 0)
-		s.Type = obj.SDWARFSECT
-		syms = append(syms, s)
-		Adduint8(ctxt, s, 1) // magic 1 byte?
-		Addstring(s, gdbscript)
-	}
-
-	return syms
-}
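The leading 1 written above appears to be the entry-kind byte of a .debug_gdb_scripts entry; per GDB's auto-load documentation, kind 1 introduces a NUL-terminated script file name, and Addstring supplies the terminating NUL. A minimal sketch of the resulting section contents (gdbScriptsSection is a made-up helper for illustration):

// Sketch of a .debug_gdb_scripts section holding a single entry, assuming
// kind byte 1 means "NUL-terminated script file name".
func gdbScriptsSection(script string) []byte {
	b := []byte{1}           // entry kind: script file name
	b = append(b, script...) // path of the script
	return append(b, 0)      // NUL terminator (Addstring adds this in the linker)
}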
-
-var prototypedies map[string]*dwarf.DWDie
-
-/*
- * This is the main entry point for generating dwarf.  After emitting
- * the mandatory debug_abbrev section, it calls writelines() to set up
- * the per-compilation unit part of the DIE tree, while simultaneously
- * emitting the debug_line section.  When the final tree contains
- * forward references, it will write the debug_info section in 2
- * passes.
- *
- */
-func dwarfgeneratedebugsyms(ctxt *Link) {
-	if *FlagW { // disable dwarf
-		return
-	}
-	if *FlagS && Headtype != obj.Hdarwin {
-		return
-	}
-	if Headtype == obj.Hplan9 {
-		return
-	}
-
-	if Linkmode == LinkExternal {
-		if !Iself && Headtype != obj.Hdarwin {
-			return
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
-	}
-
-	// For diagnostic messages.
-	newattr(&dwtypes, dwarf.DW_AT_name, dwarf.DW_CLS_STRING, int64(len("dwtypes")), "dwtypes")
-
-	// Some types that must exist to define other ones.
-	newdie(ctxt, &dwtypes, dwarf.DW_ABRV_NULLTYPE, "<unspecified>", 0)
-
-	newdie(ctxt, &dwtypes, dwarf.DW_ABRV_NULLTYPE, "void", 0)
-	newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer", 0)
-
-	die := newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, "uintptr", 0) // needed for array size
-	newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0)
-	newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(SysArch.PtrSize), 0)
-	newattr(die, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, obj.KindUintptr, 0)
-
-	// Prototypes needed for type synthesis.
-	prototypedies = map[string]*dwarf.DWDie{
-		"type.runtime.stringStructDWARF": nil,
-		"type.runtime.slice":             nil,
-		"type.runtime.hmap":              nil,
-		"type.runtime.bmap":              nil,
-		"type.runtime.sudog":             nil,
-		"type.runtime.waitq":             nil,
-		"type.runtime.hchan":             nil,
-	}
-
-	// Needed by the prettyprinter code for interface inspection.
-	defgotype(ctxt, lookupOrDiag(ctxt, "type.runtime._type"))
-
-	defgotype(ctxt, lookupOrDiag(ctxt, "type.runtime.interfacetype"))
-	defgotype(ctxt, lookupOrDiag(ctxt, "type.runtime.itab"))
-
-	genasmsym(ctxt, defdwsymb)
-
-	syms := writeabbrev(ctxt, nil)
-	syms, funcs := writelines(ctxt, syms)
-	syms = writeframes(ctxt, syms)
-
-	synthesizestringtypes(ctxt, dwtypes.Child)
-	synthesizeslicetypes(ctxt, dwtypes.Child)
-	synthesizemaptypes(ctxt, dwtypes.Child)
-	synthesizechantypes(ctxt, dwtypes.Child)
-
-	reversetree(&dwroot.Child)
-	reversetree(&dwtypes.Child)
-	reversetree(&dwglobals.Child)
-
-	movetomodule(&dwtypes)
-	movetomodule(&dwglobals)
-
-	// Need to reorder symbols so SDWARFINFO is after all SDWARFSECT
-	// (but we need to generate dies before writepub)
-	infosyms := writeinfo(ctxt, nil, funcs)
-
-	syms = writepub(ctxt, ".debug_pubnames", ispubname, syms)
-	syms = writepub(ctxt, ".debug_pubtypes", ispubtype, syms)
-	syms = writearanges(ctxt, syms)
-	syms = writegdbscript(ctxt, syms)
-	syms = append(syms, infosyms...)
-	dwarfp = syms
-}
-
-/*
- *  Elf.
- */
-func dwarfaddshstrings(ctxt *Link, shstrtab *Symbol) {
-	if *FlagW { // disable dwarf
-		return
-	}
-
-	Addstring(shstrtab, ".debug_abbrev")
-	Addstring(shstrtab, ".debug_aranges")
-	Addstring(shstrtab, ".debug_frame")
-	Addstring(shstrtab, ".debug_info")
-	Addstring(shstrtab, ".debug_line")
-	Addstring(shstrtab, ".debug_pubnames")
-	Addstring(shstrtab, ".debug_pubtypes")
-	Addstring(shstrtab, ".debug_gdb_scripts")
-	if Linkmode == LinkExternal {
-		Addstring(shstrtab, elfRelType+".debug_info")
-		Addstring(shstrtab, elfRelType+".debug_aranges")
-		Addstring(shstrtab, elfRelType+".debug_line")
-		Addstring(shstrtab, elfRelType+".debug_frame")
-		Addstring(shstrtab, elfRelType+".debug_pubnames")
-		Addstring(shstrtab, elfRelType+".debug_pubtypes")
-	}
-}
-
-// Add section symbols for DWARF debug info.  This is called before
-// dwarfaddelfheaders.
-func dwarfaddelfsectionsyms(ctxt *Link) {
-	if *FlagW { // disable dwarf
-		return
-	}
-	if Linkmode != LinkExternal {
-		return
-	}
-	sym := ctxt.Syms.Lookup(".debug_info", 0)
-	putelfsectionsym(sym, sym.Sect.Elfsect.shnum)
-	sym = ctxt.Syms.Lookup(".debug_abbrev", 0)
-	putelfsectionsym(sym, sym.Sect.Elfsect.shnum)
-	sym = ctxt.Syms.Lookup(".debug_line", 0)
-	putelfsectionsym(sym, sym.Sect.Elfsect.shnum)
-	sym = ctxt.Syms.Lookup(".debug_frame", 0)
-	putelfsectionsym(sym, sym.Sect.Elfsect.shnum)
-}
-
-/*
- * Windows PE
- */
-func dwarfaddpeheaders(ctxt *Link) {
-	if *FlagW { // disable dwarf
-		return
-	}
-	for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-		h := newPEDWARFSection(ctxt, sect.Name, int64(sect.Length))
-		fileoff := sect.Vaddr - Segdwarf.Vaddr + Segdwarf.Fileoff
-		if uint64(h.PointerToRawData) != fileoff {
-			Exitf("%s.PointerToRawData = %#x, want %#x", sect.Name, h.PointerToRawData, fileoff)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/elf.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/elf.go
deleted file mode 100644
index 8e891c6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/elf.go
+++ /dev/null
@@ -1,2857 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/elf.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/elf.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"crypto/sha1"
-	"encoding/binary"
-	"encoding/hex"
-	"io"
-	"path/filepath"
-	"sort"
-	"strings"
-)
-
-/*
- * Derived from:
- * $FreeBSD: src/sys/sys/elf32.h,v 1.8.14.1 2005/12/30 22:13:58 marcel Exp $
- * $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.1 2005/12/30 22:13:58 marcel Exp $
- * $FreeBSD: src/sys/sys/elf_common.h,v 1.15.8.1 2005/12/30 22:13:58 marcel Exp $
- * $FreeBSD: src/sys/alpha/include/elf.h,v 1.14 2003/09/25 01:10:22 peter Exp $
- * $FreeBSD: src/sys/amd64/include/elf.h,v 1.18 2004/08/03 08:21:48 dfr Exp $
- * $FreeBSD: src/sys/arm/include/elf.h,v 1.5.2.1 2006/06/30 21:42:52 cognet Exp $
- * $FreeBSD: src/sys/i386/include/elf.h,v 1.16 2004/08/02 19:12:17 dfr Exp $
- * $FreeBSD: src/sys/powerpc/include/elf.h,v 1.7 2004/11/02 09:47:01 ssouhlal Exp $
- * $FreeBSD: src/sys/sparc64/include/elf.h,v 1.12 2003/09/25 01:10:26 peter Exp $
- *
- * Copyright (c) 1996-1998 John D. Polstra.  All rights reserved.
- * Copyright (c) 2001 David E. O'Brien
- * Portions Copyright 2009 The Go Authors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- */
-
-/*
- * ELF definitions that are independent of architecture or word size.
- */
-
-/*
- * Note header.  The ".note" section contains an array of notes.  Each
- * begins with this header, aligned to a word boundary.  Immediately
- * following the note header is n_namesz bytes of name, padded to the
- * next word boundary.  Then comes n_descsz bytes of descriptor, again
- * padded to a word boundary.  The values of n_namesz and n_descsz do
- * not include the padding.
- */
-type elfNote struct {
-	nNamesz uint32
-	nDescsz uint32
-	nType   uint32
-}
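A sketch of serializing one note record as described in the comment above; appendNote is a made-up helper, and little-endian byte order is assumed for brevity:

// appendNote appends a single ELF note record: the three 32-bit header words,
// then the name and the descriptor, each padded to a 4-byte boundary.
// n_namesz and n_descsz exclude the padding.
func appendNote(b []byte, name string, desc []byte, typ uint32) []byte {
	put32 := func(v uint32) {
		b = append(b, byte(v), byte(v>>8), byte(v>>16), byte(v>>24))
	}
	pad := func() {
		for len(b)%4 != 0 {
			b = append(b, 0)
		}
	}
	put32(uint32(len(name) + 1)) // n_namesz, counting the trailing NUL
	put32(uint32(len(desc)))     // n_descsz
	put32(typ)                   // n_type
	b = append(b, name...)
	b = append(b, 0)
	pad()
	b = append(b, desc...)
	pad()
	return b
}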
-
-const (
-	EI_MAG0              = 0
-	EI_MAG1              = 1
-	EI_MAG2              = 2
-	EI_MAG3              = 3
-	EI_CLASS             = 4
-	EI_DATA              = 5
-	EI_VERSION           = 6
-	EI_OSABI             = 7
-	EI_ABIVERSION        = 8
-	OLD_EI_BRAND         = 8
-	EI_PAD               = 9
-	EI_NIDENT            = 16
-	ELFMAG0              = 0x7f
-	ELFMAG1              = 'E'
-	ELFMAG2              = 'L'
-	ELFMAG3              = 'F'
-	SELFMAG              = 4
-	EV_NONE              = 0
-	EV_CURRENT           = 1
-	ELFCLASSNONE         = 0
-	ELFCLASS32           = 1
-	ELFCLASS64           = 2
-	ELFDATANONE          = 0
-	ELFDATA2LSB          = 1
-	ELFDATA2MSB          = 2
-	ELFOSABI_NONE        = 0
-	ELFOSABI_HPUX        = 1
-	ELFOSABI_NETBSD      = 2
-	ELFOSABI_LINUX       = 3
-	ELFOSABI_HURD        = 4
-	ELFOSABI_86OPEN      = 5
-	ELFOSABI_SOLARIS     = 6
-	ELFOSABI_AIX         = 7
-	ELFOSABI_IRIX        = 8
-	ELFOSABI_FREEBSD     = 9
-	ELFOSABI_TRU64       = 10
-	ELFOSABI_MODESTO     = 11
-	ELFOSABI_OPENBSD     = 12
-	ELFOSABI_OPENVMS     = 13
-	ELFOSABI_NSK         = 14
-	ELFOSABI_ARM         = 97
-	ELFOSABI_STANDALONE  = 255
-	ELFOSABI_SYSV        = ELFOSABI_NONE
-	ELFOSABI_MONTEREY    = ELFOSABI_AIX
-	ET_NONE              = 0
-	ET_REL               = 1
-	ET_EXEC              = 2
-	ET_DYN               = 3
-	ET_CORE              = 4
-	ET_LOOS              = 0xfe00
-	ET_HIOS              = 0xfeff
-	ET_LOPROC            = 0xff00
-	ET_HIPROC            = 0xffff
-	EM_NONE              = 0
-	EM_M32               = 1
-	EM_SPARC             = 2
-	EM_386               = 3
-	EM_68K               = 4
-	EM_88K               = 5
-	EM_860               = 7
-	EM_MIPS              = 8
-	EM_S370              = 9
-	EM_MIPS_RS3_LE       = 10
-	EM_PARISC            = 15
-	EM_VPP500            = 17
-	EM_SPARC32PLUS       = 18
-	EM_960               = 19
-	EM_PPC               = 20
-	EM_PPC64             = 21
-	EM_S390              = 22
-	EM_V800              = 36
-	EM_FR20              = 37
-	EM_RH32              = 38
-	EM_RCE               = 39
-	EM_ARM               = 40
-	EM_SH                = 42
-	EM_SPARCV9           = 43
-	EM_TRICORE           = 44
-	EM_ARC               = 45
-	EM_H8_300            = 46
-	EM_H8_300H           = 47
-	EM_H8S               = 48
-	EM_H8_500            = 49
-	EM_IA_64             = 50
-	EM_MIPS_X            = 51
-	EM_COLDFIRE          = 52
-	EM_68HC12            = 53
-	EM_MMA               = 54
-	EM_PCP               = 55
-	EM_NCPU              = 56
-	EM_NDR1              = 57
-	EM_STARCORE          = 58
-	EM_ME16              = 59
-	EM_ST100             = 60
-	EM_TINYJ             = 61
-	EM_X86_64            = 62
-	EM_AARCH64           = 183
-	EM_486               = 6
-	EM_MIPS_RS4_BE       = 10
-	EM_ALPHA_STD         = 41
-	EM_ALPHA             = 0x9026
-	SHN_UNDEF            = 0
-	SHN_LORESERVE        = 0xff00
-	SHN_LOPROC           = 0xff00
-	SHN_HIPROC           = 0xff1f
-	SHN_LOOS             = 0xff20
-	SHN_HIOS             = 0xff3f
-	SHN_ABS              = 0xfff1
-	SHN_COMMON           = 0xfff2
-	SHN_XINDEX           = 0xffff
-	SHN_HIRESERVE        = 0xffff
-	SHT_NULL             = 0
-	SHT_PROGBITS         = 1
-	SHT_SYMTAB           = 2
-	SHT_STRTAB           = 3
-	SHT_RELA             = 4
-	SHT_HASH             = 5
-	SHT_DYNAMIC          = 6
-	SHT_NOTE             = 7
-	SHT_NOBITS           = 8
-	SHT_REL              = 9
-	SHT_SHLIB            = 10
-	SHT_DYNSYM           = 11
-	SHT_INIT_ARRAY       = 14
-	SHT_FINI_ARRAY       = 15
-	SHT_PREINIT_ARRAY    = 16
-	SHT_GROUP            = 17
-	SHT_SYMTAB_SHNDX     = 18
-	SHT_LOOS             = 0x60000000
-	SHT_HIOS             = 0x6fffffff
-	SHT_GNU_VERDEF       = 0x6ffffffd
-	SHT_GNU_VERNEED      = 0x6ffffffe
-	SHT_GNU_VERSYM       = 0x6fffffff
-	SHT_LOPROC           = 0x70000000
-	SHT_ARM_ATTRIBUTES   = 0x70000003
-	SHT_HIPROC           = 0x7fffffff
-	SHT_LOUSER           = 0x80000000
-	SHT_HIUSER           = 0xffffffff
-	SHF_WRITE            = 0x1
-	SHF_ALLOC            = 0x2
-	SHF_EXECINSTR        = 0x4
-	SHF_MERGE            = 0x10
-	SHF_STRINGS          = 0x20
-	SHF_INFO_LINK        = 0x40
-	SHF_LINK_ORDER       = 0x80
-	SHF_OS_NONCONFORMING = 0x100
-	SHF_GROUP            = 0x200
-	SHF_TLS              = 0x400
-	SHF_MASKOS           = 0x0ff00000
-	SHF_MASKPROC         = 0xf0000000
-	PT_NULL              = 0
-	PT_LOAD              = 1
-	PT_DYNAMIC           = 2
-	PT_INTERP            = 3
-	PT_NOTE              = 4
-	PT_SHLIB             = 5
-	PT_PHDR              = 6
-	PT_TLS               = 7
-	PT_LOOS              = 0x60000000
-	PT_HIOS              = 0x6fffffff
-	PT_LOPROC            = 0x70000000
-	PT_HIPROC            = 0x7fffffff
-	PT_GNU_STACK         = 0x6474e551
-	PT_GNU_RELRO         = 0x6474e552
-	PT_PAX_FLAGS         = 0x65041580
-	PT_SUNWSTACK         = 0x6ffffffb
-	PF_X                 = 0x1
-	PF_W                 = 0x2
-	PF_R                 = 0x4
-	PF_MASKOS            = 0x0ff00000
-	PF_MASKPROC          = 0xf0000000
-	DT_NULL              = 0
-	DT_NEEDED            = 1
-	DT_PLTRELSZ          = 2
-	DT_PLTGOT            = 3
-	DT_HASH              = 4
-	DT_STRTAB            = 5
-	DT_SYMTAB            = 6
-	DT_RELA              = 7
-	DT_RELASZ            = 8
-	DT_RELAENT           = 9
-	DT_STRSZ             = 10
-	DT_SYMENT            = 11
-	DT_INIT              = 12
-	DT_FINI              = 13
-	DT_SONAME            = 14
-	DT_RPATH             = 15
-	DT_SYMBOLIC          = 16
-	DT_REL               = 17
-	DT_RELSZ             = 18
-	DT_RELENT            = 19
-	DT_PLTREL            = 20
-	DT_DEBUG             = 21
-	DT_TEXTREL           = 22
-	DT_JMPREL            = 23
-	DT_BIND_NOW          = 24
-	DT_INIT_ARRAY        = 25
-	DT_FINI_ARRAY        = 26
-	DT_INIT_ARRAYSZ      = 27
-	DT_FINI_ARRAYSZ      = 28
-	DT_RUNPATH           = 29
-	DT_FLAGS             = 30
-	DT_ENCODING          = 32
-	DT_PREINIT_ARRAY     = 32
-	DT_PREINIT_ARRAYSZ   = 33
-	DT_LOOS              = 0x6000000d
-	DT_HIOS              = 0x6ffff000
-	DT_LOPROC            = 0x70000000
-	DT_HIPROC            = 0x7fffffff
-	DT_VERNEED           = 0x6ffffffe
-	DT_VERNEEDNUM        = 0x6fffffff
-	DT_VERSYM            = 0x6ffffff0
-	DT_PPC64_GLINK       = DT_LOPROC + 0
-	DT_PPC64_OPT         = DT_LOPROC + 3
-	DF_ORIGIN            = 0x0001
-	DF_SYMBOLIC          = 0x0002
-	DF_TEXTREL           = 0x0004
-	DF_BIND_NOW          = 0x0008
-	DF_STATIC_TLS        = 0x0010
-	NT_PRSTATUS          = 1
-	NT_FPREGSET          = 2
-	NT_PRPSINFO          = 3
-	STB_LOCAL            = 0
-	STB_GLOBAL           = 1
-	STB_WEAK             = 2
-	STB_LOOS             = 10
-	STB_HIOS             = 12
-	STB_LOPROC           = 13
-	STB_HIPROC           = 15
-	STT_NOTYPE           = 0
-	STT_OBJECT           = 1
-	STT_FUNC             = 2
-	STT_SECTION          = 3
-	STT_FILE             = 4
-	STT_COMMON           = 5
-	STT_TLS              = 6
-	STT_LOOS             = 10
-	STT_HIOS             = 12
-	STT_LOPROC           = 13
-	STT_HIPROC           = 15
-	STV_DEFAULT          = 0x0
-	STV_INTERNAL         = 0x1
-	STV_HIDDEN           = 0x2
-	STV_PROTECTED        = 0x3
-	STN_UNDEF            = 0
-)
-
-/* For accessing the fields of r_info. */
-
-/* For constructing r_info from field values. */
-
-/*
- * Relocation types.
- */
-const (
-	R_X86_64_NONE           = 0
-	R_X86_64_64             = 1
-	R_X86_64_PC32           = 2
-	R_X86_64_GOT32          = 3
-	R_X86_64_PLT32          = 4
-	R_X86_64_COPY           = 5
-	R_X86_64_GLOB_DAT       = 6
-	R_X86_64_JMP_SLOT       = 7
-	R_X86_64_RELATIVE       = 8
-	R_X86_64_GOTPCREL       = 9
-	R_X86_64_32             = 10
-	R_X86_64_32S            = 11
-	R_X86_64_16             = 12
-	R_X86_64_PC16           = 13
-	R_X86_64_8              = 14
-	R_X86_64_PC8            = 15
-	R_X86_64_DTPMOD64       = 16
-	R_X86_64_DTPOFF64       = 17
-	R_X86_64_TPOFF64        = 18
-	R_X86_64_TLSGD          = 19
-	R_X86_64_TLSLD          = 20
-	R_X86_64_DTPOFF32       = 21
-	R_X86_64_GOTTPOFF       = 22
-	R_X86_64_TPOFF32        = 23
-	R_X86_64_PC64           = 24
-	R_X86_64_GOTOFF64       = 25
-	R_X86_64_GOTPC32        = 26
-	R_X86_64_GOT64          = 27
-	R_X86_64_GOTPCREL64     = 28
-	R_X86_64_GOTPC64        = 29
-	R_X86_64_GOTPLT64       = 30
-	R_X86_64_PLTOFF64       = 31
-	R_X86_64_SIZE32         = 32
-	R_X86_64_SIZE64         = 33
-	R_X86_64_GOTPC32_TLSDEC = 34
-	R_X86_64_TLSDESC_CALL   = 35
-	R_X86_64_TLSDESC        = 36
-	R_X86_64_IRELATIVE      = 37
-	R_X86_64_PC32_BND       = 40
-	R_X86_64_GOTPCRELX      = 41
-	R_X86_64_REX_GOTPCRELX  = 42
-
-	R_AARCH64_ABS64                       = 257
-	R_AARCH64_ABS32                       = 258
-	R_AARCH64_CALL26                      = 283
-	R_AARCH64_ADR_PREL_PG_HI21            = 275
-	R_AARCH64_ADD_ABS_LO12_NC             = 277
-	R_AARCH64_LDST8_ABS_LO12_NC           = 278
-	R_AARCH64_LDST16_ABS_LO12_NC          = 284
-	R_AARCH64_LDST32_ABS_LO12_NC          = 285
-	R_AARCH64_LDST64_ABS_LO12_NC          = 286
-	R_AARCH64_ADR_GOT_PAGE                = 311
-	R_AARCH64_LD64_GOT_LO12_NC            = 312
-	R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21   = 541
-	R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC = 542
-	R_AARCH64_TLSLE_MOVW_TPREL_G0         = 547
-
-	R_ALPHA_NONE           = 0
-	R_ALPHA_REFLONG        = 1
-	R_ALPHA_REFQUAD        = 2
-	R_ALPHA_GPREL32        = 3
-	R_ALPHA_LITERAL        = 4
-	R_ALPHA_LITUSE         = 5
-	R_ALPHA_GPDISP         = 6
-	R_ALPHA_BRADDR         = 7
-	R_ALPHA_HINT           = 8
-	R_ALPHA_SREL16         = 9
-	R_ALPHA_SREL32         = 10
-	R_ALPHA_SREL64         = 11
-	R_ALPHA_OP_PUSH        = 12
-	R_ALPHA_OP_STORE       = 13
-	R_ALPHA_OP_PSUB        = 14
-	R_ALPHA_OP_PRSHIFT     = 15
-	R_ALPHA_GPVALUE        = 16
-	R_ALPHA_GPRELHIGH      = 17
-	R_ALPHA_GPRELLOW       = 18
-	R_ALPHA_IMMED_GP_16    = 19
-	R_ALPHA_IMMED_GP_HI32  = 20
-	R_ALPHA_IMMED_SCN_HI32 = 21
-	R_ALPHA_IMMED_BR_HI32  = 22
-	R_ALPHA_IMMED_LO32     = 23
-	R_ALPHA_COPY           = 24
-	R_ALPHA_GLOB_DAT       = 25
-	R_ALPHA_JMP_SLOT       = 26
-	R_ALPHA_RELATIVE       = 27
-
-	R_ARM_NONE          = 0
-	R_ARM_PC24          = 1
-	R_ARM_ABS32         = 2
-	R_ARM_REL32         = 3
-	R_ARM_PC13          = 4
-	R_ARM_ABS16         = 5
-	R_ARM_ABS12         = 6
-	R_ARM_THM_ABS5      = 7
-	R_ARM_ABS8          = 8
-	R_ARM_SBREL32       = 9
-	R_ARM_THM_PC22      = 10
-	R_ARM_THM_PC8       = 11
-	R_ARM_AMP_VCALL9    = 12
-	R_ARM_SWI24         = 13
-	R_ARM_THM_SWI8      = 14
-	R_ARM_XPC25         = 15
-	R_ARM_THM_XPC22     = 16
-	R_ARM_COPY          = 20
-	R_ARM_GLOB_DAT      = 21
-	R_ARM_JUMP_SLOT     = 22
-	R_ARM_RELATIVE      = 23
-	R_ARM_GOTOFF        = 24
-	R_ARM_GOTPC         = 25
-	R_ARM_GOT32         = 26
-	R_ARM_PLT32         = 27
-	R_ARM_CALL          = 28
-	R_ARM_JUMP24        = 29
-	R_ARM_V4BX          = 40
-	R_ARM_GOT_PREL      = 96
-	R_ARM_GNU_VTENTRY   = 100
-	R_ARM_GNU_VTINHERIT = 101
-	R_ARM_TLS_IE32      = 107
-	R_ARM_TLS_LE32      = 108
-	R_ARM_RSBREL32      = 250
-	R_ARM_THM_RPC22     = 251
-	R_ARM_RREL32        = 252
-	R_ARM_RABS32        = 253
-	R_ARM_RPC24         = 254
-	R_ARM_RBASE         = 255
-
-	R_386_NONE          = 0
-	R_386_32            = 1
-	R_386_PC32          = 2
-	R_386_GOT32         = 3
-	R_386_PLT32         = 4
-	R_386_COPY          = 5
-	R_386_GLOB_DAT      = 6
-	R_386_JMP_SLOT      = 7
-	R_386_RELATIVE      = 8
-	R_386_GOTOFF        = 9
-	R_386_GOTPC         = 10
-	R_386_TLS_TPOFF     = 14
-	R_386_TLS_IE        = 15
-	R_386_TLS_GOTIE     = 16
-	R_386_TLS_LE        = 17
-	R_386_TLS_GD        = 18
-	R_386_TLS_LDM       = 19
-	R_386_TLS_GD_32     = 24
-	R_386_TLS_GD_PUSH   = 25
-	R_386_TLS_GD_CALL   = 26
-	R_386_TLS_GD_POP    = 27
-	R_386_TLS_LDM_32    = 28
-	R_386_TLS_LDM_PUSH  = 29
-	R_386_TLS_LDM_CALL  = 30
-	R_386_TLS_LDM_POP   = 31
-	R_386_TLS_LDO_32    = 32
-	R_386_TLS_IE_32     = 33
-	R_386_TLS_LE_32     = 34
-	R_386_TLS_DTPMOD32  = 35
-	R_386_TLS_DTPOFF32  = 36
-	R_386_TLS_TPOFF32   = 37
-	R_386_TLS_GOTDESC   = 39
-	R_386_TLS_DESC_CALL = 40
-	R_386_TLS_DESC      = 41
-	R_386_IRELATIVE     = 42
-	R_386_GOT32X        = 43
-
-	R_MIPS_NONE            = 0
-	R_MIPS_16              = 1
-	R_MIPS_32              = 2
-	R_MIPS_REL32           = 3
-	R_MIPS_26              = 4
-	R_MIPS_HI16            = 5
-	R_MIPS_LO16            = 6
-	R_MIPS_GPREL16         = 7
-	R_MIPS_LITERAL         = 8
-	R_MIPS_GOT16           = 9
-	R_MIPS_PC16            = 10
-	R_MIPS_CALL16          = 11
-	R_MIPS_GPREL32         = 12
-	R_MIPS_SHIFT5          = 16
-	R_MIPS_SHIFT6          = 17
-	R_MIPS_64              = 18
-	R_MIPS_GOT_DISP        = 19
-	R_MIPS_GOT_PAGE        = 20
-	R_MIPS_GOT_OFST        = 21
-	R_MIPS_GOT_HI16        = 22
-	R_MIPS_GOT_LO16        = 23
-	R_MIPS_SUB             = 24
-	R_MIPS_INSERT_A        = 25
-	R_MIPS_INSERT_B        = 26
-	R_MIPS_DELETE          = 27
-	R_MIPS_HIGHER          = 28
-	R_MIPS_HIGHEST         = 29
-	R_MIPS_CALL_HI16       = 30
-	R_MIPS_CALL_LO16       = 31
-	R_MIPS_SCN_DISP        = 32
-	R_MIPS_REL16           = 33
-	R_MIPS_ADD_IMMEDIATE   = 34
-	R_MIPS_PJUMP           = 35
-	R_MIPS_RELGOT          = 36
-	R_MIPS_JALR            = 37
-	R_MIPS_TLS_DTPMOD32    = 38
-	R_MIPS_TLS_DTPREL32    = 39
-	R_MIPS_TLS_DTPMOD64    = 40
-	R_MIPS_TLS_DTPREL64    = 41
-	R_MIPS_TLS_GD          = 42
-	R_MIPS_TLS_LDM         = 43
-	R_MIPS_TLS_DTPREL_HI16 = 44
-	R_MIPS_TLS_DTPREL_LO16 = 45
-	R_MIPS_TLS_GOTTPREL    = 46
-	R_MIPS_TLS_TPREL32     = 47
-	R_MIPS_TLS_TPREL64     = 48
-	R_MIPS_TLS_TPREL_HI16  = 49
-	R_MIPS_TLS_TPREL_LO16  = 50
-
-	R_PPC_NONE            = 0
-	R_PPC_ADDR32          = 1
-	R_PPC_ADDR24          = 2
-	R_PPC_ADDR16          = 3
-	R_PPC_ADDR16_LO       = 4
-	R_PPC_ADDR16_HI       = 5
-	R_PPC_ADDR16_HA       = 6
-	R_PPC_ADDR14          = 7
-	R_PPC_ADDR14_BRTAKEN  = 8
-	R_PPC_ADDR14_BRNTAKEN = 9
-	R_PPC_REL24           = 10
-	R_PPC_REL14           = 11
-	R_PPC_REL14_BRTAKEN   = 12
-	R_PPC_REL14_BRNTAKEN  = 13
-	R_PPC_GOT16           = 14
-	R_PPC_GOT16_LO        = 15
-	R_PPC_GOT16_HI        = 16
-	R_PPC_GOT16_HA        = 17
-	R_PPC_PLTREL24        = 18
-	R_PPC_COPY            = 19
-	R_PPC_GLOB_DAT        = 20
-	R_PPC_JMP_SLOT        = 21
-	R_PPC_RELATIVE        = 22
-	R_PPC_LOCAL24PC       = 23
-	R_PPC_UADDR32         = 24
-	R_PPC_UADDR16         = 25
-	R_PPC_REL32           = 26
-	R_PPC_PLT32           = 27
-	R_PPC_PLTREL32        = 28
-	R_PPC_PLT16_LO        = 29
-	R_PPC_PLT16_HI        = 30
-	R_PPC_PLT16_HA        = 31
-	R_PPC_SDAREL16        = 32
-	R_PPC_SECTOFF         = 33
-	R_PPC_SECTOFF_LO      = 34
-	R_PPC_SECTOFF_HI      = 35
-	R_PPC_SECTOFF_HA      = 36
-	R_PPC_TLS             = 67
-	R_PPC_DTPMOD32        = 68
-	R_PPC_TPREL16         = 69
-	R_PPC_TPREL16_LO      = 70
-	R_PPC_TPREL16_HI      = 71
-	R_PPC_TPREL16_HA      = 72
-	R_PPC_TPREL32         = 73
-	R_PPC_DTPREL16        = 74
-	R_PPC_DTPREL16_LO     = 75
-	R_PPC_DTPREL16_HI     = 76
-	R_PPC_DTPREL16_HA     = 77
-	R_PPC_DTPREL32        = 78
-	R_PPC_GOT_TLSGD16     = 79
-	R_PPC_GOT_TLSGD16_LO  = 80
-	R_PPC_GOT_TLSGD16_HI  = 81
-	R_PPC_GOT_TLSGD16_HA  = 82
-	R_PPC_GOT_TLSLD16     = 83
-	R_PPC_GOT_TLSLD16_LO  = 84
-	R_PPC_GOT_TLSLD16_HI  = 85
-	R_PPC_GOT_TLSLD16_HA  = 86
-	R_PPC_GOT_TPREL16     = 87
-	R_PPC_GOT_TPREL16_LO  = 88
-	R_PPC_GOT_TPREL16_HI  = 89
-	R_PPC_GOT_TPREL16_HA  = 90
-	R_PPC_EMB_NADDR32     = 101
-	R_PPC_EMB_NADDR16     = 102
-	R_PPC_EMB_NADDR16_LO  = 103
-	R_PPC_EMB_NADDR16_HI  = 104
-	R_PPC_EMB_NADDR16_HA  = 105
-	R_PPC_EMB_SDAI16      = 106
-	R_PPC_EMB_SDA2I16     = 107
-	R_PPC_EMB_SDA2REL     = 108
-	R_PPC_EMB_SDA21       = 109
-	R_PPC_EMB_MRKREF      = 110
-	R_PPC_EMB_RELSEC16    = 111
-	R_PPC_EMB_RELST_LO    = 112
-	R_PPC_EMB_RELST_HI    = 113
-	R_PPC_EMB_RELST_HA    = 114
-	R_PPC_EMB_BIT_FLD     = 115
-	R_PPC_EMB_RELSDA      = 116
-
-	R_PPC64_ADDR32            = R_PPC_ADDR32
-	R_PPC64_ADDR16_LO         = R_PPC_ADDR16_LO
-	R_PPC64_ADDR16_HA         = R_PPC_ADDR16_HA
-	R_PPC64_REL24             = R_PPC_REL24
-	R_PPC64_GOT16_HA          = R_PPC_GOT16_HA
-	R_PPC64_JMP_SLOT          = R_PPC_JMP_SLOT
-	R_PPC64_TPREL16           = R_PPC_TPREL16
-	R_PPC64_ADDR64            = 38
-	R_PPC64_TOC16             = 47
-	R_PPC64_TOC16_LO          = 48
-	R_PPC64_TOC16_HI          = 49
-	R_PPC64_TOC16_HA          = 50
-	R_PPC64_ADDR16_LO_DS      = 57
-	R_PPC64_GOT16_LO_DS       = 59
-	R_PPC64_TOC16_DS          = 63
-	R_PPC64_TOC16_LO_DS       = 64
-	R_PPC64_TLS               = 67
-	R_PPC64_GOT_TPREL16_LO_DS = 88
-	R_PPC64_GOT_TPREL16_HA    = 90
-	R_PPC64_REL16_LO          = 250
-	R_PPC64_REL16_HI          = 251
-	R_PPC64_REL16_HA          = 252
-
-	R_SPARC_NONE     = 0
-	R_SPARC_8        = 1
-	R_SPARC_16       = 2
-	R_SPARC_32       = 3
-	R_SPARC_DISP8    = 4
-	R_SPARC_DISP16   = 5
-	R_SPARC_DISP32   = 6
-	R_SPARC_WDISP30  = 7
-	R_SPARC_WDISP22  = 8
-	R_SPARC_HI22     = 9
-	R_SPARC_22       = 10
-	R_SPARC_13       = 11
-	R_SPARC_LO10     = 12
-	R_SPARC_GOT10    = 13
-	R_SPARC_GOT13    = 14
-	R_SPARC_GOT22    = 15
-	R_SPARC_PC10     = 16
-	R_SPARC_PC22     = 17
-	R_SPARC_WPLT30   = 18
-	R_SPARC_COPY     = 19
-	R_SPARC_GLOB_DAT = 20
-	R_SPARC_JMP_SLOT = 21
-	R_SPARC_RELATIVE = 22
-	R_SPARC_UA32     = 23
-	R_SPARC_PLT32    = 24
-	R_SPARC_HIPLT22  = 25
-	R_SPARC_LOPLT10  = 26
-	R_SPARC_PCPLT32  = 27
-	R_SPARC_PCPLT22  = 28
-	R_SPARC_PCPLT10  = 29
-	R_SPARC_10       = 30
-	R_SPARC_11       = 31
-	R_SPARC_64       = 32
-	R_SPARC_OLO10    = 33
-	R_SPARC_HH22     = 34
-	R_SPARC_HM10     = 35
-	R_SPARC_LM22     = 36
-	R_SPARC_PC_HH22  = 37
-	R_SPARC_PC_HM10  = 38
-	R_SPARC_PC_LM22  = 39
-	R_SPARC_WDISP16  = 40
-	R_SPARC_WDISP19  = 41
-	R_SPARC_GLOB_JMP = 42
-	R_SPARC_7        = 43
-	R_SPARC_5        = 44
-	R_SPARC_6        = 45
-	R_SPARC_DISP64   = 46
-	R_SPARC_PLT64    = 47
-	R_SPARC_HIX22    = 48
-	R_SPARC_LOX10    = 49
-	R_SPARC_H44      = 50
-	R_SPARC_M44      = 51
-	R_SPARC_L44      = 52
-	R_SPARC_REGISTER = 53
-	R_SPARC_UA64     = 54
-	R_SPARC_UA16     = 55
-
-	R_390_NONE        = 0
-	R_390_8           = 1
-	R_390_12          = 2
-	R_390_16          = 3
-	R_390_32          = 4
-	R_390_PC32        = 5
-	R_390_GOT12       = 6
-	R_390_GOT32       = 7
-	R_390_PLT32       = 8
-	R_390_COPY        = 9
-	R_390_GLOB_DAT    = 10
-	R_390_JMP_SLOT    = 11
-	R_390_RELATIVE    = 12
-	R_390_GOTOFF      = 13
-	R_390_GOTPC       = 14
-	R_390_GOT16       = 15
-	R_390_PC16        = 16
-	R_390_PC16DBL     = 17
-	R_390_PLT16DBL    = 18
-	R_390_PC32DBL     = 19
-	R_390_PLT32DBL    = 20
-	R_390_GOTPCDBL    = 21
-	R_390_64          = 22
-	R_390_PC64        = 23
-	R_390_GOT64       = 24
-	R_390_PLT64       = 25
-	R_390_GOTENT      = 26
-	R_390_GOTOFF16    = 27
-	R_390_GOTOFF64    = 28
-	R_390_GOTPLT12    = 29
-	R_390_GOTPLT16    = 30
-	R_390_GOTPLT32    = 31
-	R_390_GOTPLT64    = 32
-	R_390_GOTPLTENT   = 33
-	R_390_GOTPLTOFF16 = 34
-	R_390_GOTPLTOFF32 = 35
-	R_390_GOTPLTOFF64 = 36
-	R_390_TLS_LOAD    = 37
-	R_390_TLS_GDCALL  = 38
-	R_390_TLS_LDCALL  = 39
-	R_390_TLS_GD32    = 40
-	R_390_TLS_GD64    = 41
-	R_390_TLS_GOTIE12 = 42
-	R_390_TLS_GOTIE32 = 43
-	R_390_TLS_GOTIE64 = 44
-	R_390_TLS_LDM32   = 45
-	R_390_TLS_LDM64   = 46
-	R_390_TLS_IE32    = 47
-	R_390_TLS_IE64    = 48
-	R_390_TLS_IEENT   = 49
-	R_390_TLS_LE32    = 50
-	R_390_TLS_LE64    = 51
-	R_390_TLS_LDO32   = 52
-	R_390_TLS_LDO64   = 53
-	R_390_TLS_DTPMOD  = 54
-	R_390_TLS_DTPOFF  = 55
-	R_390_TLS_TPOFF   = 56
-	R_390_20          = 57
-	R_390_GOT20       = 58
-	R_390_GOTPLT20    = 59
-	R_390_TLS_GOTIE20 = 60
-
-	ARM_MAGIC_TRAMP_NUMBER = 0x5c000003
-)
-
-/*
- * Symbol table entries.
- */
-
-/* For accessing the fields of st_info. */
-
-/* For constructing st_info from field values. */
-
-/* For accessing the fields of st_other. */
-
-/*
- * ELF header.
- */
-type ElfEhdr struct {
-	ident     [EI_NIDENT]uint8
-	type_     uint16
-	machine   uint16
-	version   uint32
-	entry     uint64
-	phoff     uint64
-	shoff     uint64
-	flags     uint32
-	ehsize    uint16
-	phentsize uint16
-	phnum     uint16
-	shentsize uint16
-	shnum     uint16
-	shstrndx  uint16
-}
-
-/*
- * Section header.
- */
-type ElfShdr struct {
-	name      uint32
-	type_     uint32
-	flags     uint64
-	addr      uint64
-	off       uint64
-	size      uint64
-	link      uint32
-	info      uint32
-	addralign uint64
-	entsize   uint64
-	shnum     int
-	secsym    *Symbol
-}
-
-/*
- * Program header.
- */
-type ElfPhdr struct {
-	type_  uint32
-	flags  uint32
-	off    uint64
-	vaddr  uint64
-	paddr  uint64
-	filesz uint64
-	memsz  uint64
-	align  uint64
-}
-
-/* For accessing the fields of r_info. */
-
-/* For constructing r_info from field values. */
-
-/*
- * Symbol table entries.
- */
-
-/* For accessing the fields of st_info. */
-
-/* For constructing st_info from field values. */
-
-/* For accessing the fields of st_other. */
-
-/*
- * Go linker interface
- */
-const (
-	ELF64HDRSIZE  = 64
-	ELF64PHDRSIZE = 56
-	ELF64SHDRSIZE = 64
-	ELF64RELSIZE  = 16
-	ELF64RELASIZE = 24
-	ELF64SYMSIZE  = 24
-	ELF32HDRSIZE  = 52
-	ELF32PHDRSIZE = 32
-	ELF32SHDRSIZE = 40
-	ELF32SYMSIZE  = 16
-	ELF32RELSIZE  = 8
-)
-
-/*
- * The interface uses the 64-bit structures always,
- * to avoid code duplication.  The writers know how to
- * marshal a 32-bit representation from the 64-bit structure.
- */
-
-var Elfstrdat []byte
-
-/*
- * Total amount of space to reserve at the start of the file
- * for Header, PHeaders, SHeaders, and interp.
- * May waste some.
- * On FreeBSD, cannot be larger than a page.
- */
-const (
-	ELFRESERVE = 4096
-)
-
-/*
- * We use the 64-bit data structures on both 32- and 64-bit machines
- * in order to write the code just once.  The 64-bit data structure is
- * written in the 32-bit format on the 32-bit machines.
- */
-const (
-	NSECT = 400
-)
-
-var (
-	Iself bool
-
-	Nelfsym int = 1
-
-	elf64 bool
-	// Either ".rel" or ".rela" depending on which type of relocation the
-	// target platform uses.
-	elfRelType string
-
-	ehdr ElfEhdr
-	phdr [NSECT]*ElfPhdr
-	shdr [NSECT]*ElfShdr
-
-	interp string
-)
-
-type Elfstring struct {
-	s   string
-	off int
-}
-
-var elfstr [100]Elfstring
-
-var nelfstr int
-
-var buildinfo []byte
-
-/*
- Initialize the global variable that describes the ELF header. It will be updated as
- we write section and prog headers.
-*/
-func Elfinit(ctxt *Link) {
-	Iself = true
-
-	if SysArch.InFamily(sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.S390X) {
-		elfRelType = ".rela"
-	} else {
-		elfRelType = ".rel"
-	}
-
-	switch SysArch.Family {
-	// 64-bit architectures
-	case sys.PPC64, sys.S390X:
-		if ctxt.Arch.ByteOrder == binary.BigEndian {
-			ehdr.flags = 1 /* Version 1 ABI */
-		} else {
-			ehdr.flags = 2 /* Version 2 ABI */
-		}
-		fallthrough
-	case sys.AMD64, sys.ARM64, sys.MIPS64:
-		if SysArch.Family == sys.MIPS64 {
-			ehdr.flags = 0x20000000 /* MIPS 3 */
-		}
-		elf64 = true
-
-		ehdr.phoff = ELF64HDRSIZE      /* Must be ELF64HDRSIZE: first PHdr must follow ELF header */
-		ehdr.shoff = ELF64HDRSIZE      /* Will move as we add PHeaders */
-		ehdr.ehsize = ELF64HDRSIZE     /* Must be ELF64HDRSIZE */
-		ehdr.phentsize = ELF64PHDRSIZE /* Must be ELF64PHDRSIZE */
-		ehdr.shentsize = ELF64SHDRSIZE /* Must be ELF64SHDRSIZE */
-
-	// 32-bit architectures
-	case sys.ARM, sys.MIPS:
-		if SysArch.Family == sys.ARM {
-			// we use EABI on linux/arm, freebsd/arm, netbsd/arm.
-			if Headtype == obj.Hlinux || Headtype == obj.Hfreebsd || Headtype == obj.Hnetbsd {
-				// We set a value here that makes no indication of which
-				// float ABI the object uses, because this is information
-				// used by the dynamic linker to compare executables and
-				// shared libraries -- so it only matters for cgo calls, and
-				// the information properly comes from the object files
-				// produced by the host C compiler. parseArmAttributes in
-				// ldelf.go reads that information and updates this field as
-				// appropriate.
-				ehdr.flags = 0x5000002 // has entry point, Version5 EABI
-			}
-		} else if SysArch.Family == sys.MIPS {
-			ehdr.flags = 0x50001004 /* MIPS 32 CPIC O32*/
-		}
-		fallthrough
-	default:
-		ehdr.phoff = ELF32HDRSIZE
-		/* Must be ELF32HDRSIZE: first PHdr must follow ELF header */
-		ehdr.shoff = ELF32HDRSIZE      /* Will move as we add PHeaders */
-		ehdr.ehsize = ELF32HDRSIZE     /* Must be ELF32HDRSIZE */
-		ehdr.phentsize = ELF32PHDRSIZE /* Must be ELF32PHDRSIZE */
-		ehdr.shentsize = ELF32SHDRSIZE /* Must be ELF32SHDRSIZE */
-	}
-}
-
-// Make sure PT_LOAD is aligned properly and
-// that there is no gap. Correct ELF loaders
-// will do this implicitly, but buggy ELF
-// loaders, like the ones in some versions of
-// QEMU and UPX, won't.
-func fixElfPhdr(e *ElfPhdr) {
-	frag := int(e.vaddr & (e.align - 1))
-
-	e.off -= uint64(frag)
-	e.vaddr -= uint64(frag)
-	e.paddr -= uint64(frag)
-	e.filesz += uint64(frag)
-	e.memsz += uint64(frag)
-}
-
-func elf64phdr(e *ElfPhdr) {
-	if e.type_ == PT_LOAD {
-		fixElfPhdr(e)
-	}
-
-	Thearch.Lput(e.type_)
-	Thearch.Lput(e.flags)
-	Thearch.Vput(e.off)
-	Thearch.Vput(e.vaddr)
-	Thearch.Vput(e.paddr)
-	Thearch.Vput(e.filesz)
-	Thearch.Vput(e.memsz)
-	Thearch.Vput(e.align)
-}
-
-func elf32phdr(e *ElfPhdr) {
-	if e.type_ == PT_LOAD {
-		fixElfPhdr(e)
-	}
-
-	Thearch.Lput(e.type_)
-	Thearch.Lput(uint32(e.off))
-	Thearch.Lput(uint32(e.vaddr))
-	Thearch.Lput(uint32(e.paddr))
-	Thearch.Lput(uint32(e.filesz))
-	Thearch.Lput(uint32(e.memsz))
-	Thearch.Lput(e.flags)
-	Thearch.Lput(uint32(e.align))
-}
-
-func elf64shdr(e *ElfShdr) {
-	Thearch.Lput(e.name)
-	Thearch.Lput(e.type_)
-	Thearch.Vput(e.flags)
-	Thearch.Vput(e.addr)
-	Thearch.Vput(e.off)
-	Thearch.Vput(e.size)
-	Thearch.Lput(e.link)
-	Thearch.Lput(e.info)
-	Thearch.Vput(e.addralign)
-	Thearch.Vput(e.entsize)
-}
-
-func elf32shdr(e *ElfShdr) {
-	Thearch.Lput(e.name)
-	Thearch.Lput(e.type_)
-	Thearch.Lput(uint32(e.flags))
-	Thearch.Lput(uint32(e.addr))
-	Thearch.Lput(uint32(e.off))
-	Thearch.Lput(uint32(e.size))
-	Thearch.Lput(e.link)
-	Thearch.Lput(e.info)
-	Thearch.Lput(uint32(e.addralign))
-	Thearch.Lput(uint32(e.entsize))
-}
-
-func elfwriteshdrs() uint32 {
-	if elf64 {
-		for i := 0; i < int(ehdr.shnum); i++ {
-			elf64shdr(shdr[i])
-		}
-		return uint32(ehdr.shnum) * ELF64SHDRSIZE
-	}
-
-	for i := 0; i < int(ehdr.shnum); i++ {
-		elf32shdr(shdr[i])
-	}
-	return uint32(ehdr.shnum) * ELF32SHDRSIZE
-}
-
-func elfsetstring(s *Symbol, str string, off int) {
-	if nelfstr >= len(elfstr) {
-		Errorf(s, "too many elf strings")
-		errorexit()
-	}
-
-	elfstr[nelfstr].s = str
-	elfstr[nelfstr].off = off
-	nelfstr++
-}
-
-func elfwritephdrs() uint32 {
-	if elf64 {
-		for i := 0; i < int(ehdr.phnum); i++ {
-			elf64phdr(phdr[i])
-		}
-		return uint32(ehdr.phnum) * ELF64PHDRSIZE
-	}
-
-	for i := 0; i < int(ehdr.phnum); i++ {
-		elf32phdr(phdr[i])
-	}
-	return uint32(ehdr.phnum) * ELF32PHDRSIZE
-}
-
-func newElfPhdr() *ElfPhdr {
-	e := new(ElfPhdr)
-	if ehdr.phnum >= NSECT {
-		Errorf(nil, "too many phdrs")
-	} else {
-		phdr[ehdr.phnum] = e
-		ehdr.phnum++
-	}
-	if elf64 {
-		ehdr.shoff += ELF64PHDRSIZE
-	} else {
-		ehdr.shoff += ELF32PHDRSIZE
-	}
-	return e
-}
-
-func newElfShdr(name int64) *ElfShdr {
-	e := new(ElfShdr)
-	e.name = uint32(name)
-	e.shnum = int(ehdr.shnum)
-	if ehdr.shnum >= NSECT {
-		Errorf(nil, "too many shdrs")
-	} else {
-		shdr[ehdr.shnum] = e
-		ehdr.shnum++
-	}
-
-	return e
-}
-
-func getElfEhdr() *ElfEhdr {
-	return &ehdr
-}
-
-func elf64writehdr() uint32 {
-	for i := 0; i < EI_NIDENT; i++ {
-		Cput(ehdr.ident[i])
-	}
-	Thearch.Wput(ehdr.type_)
-	Thearch.Wput(ehdr.machine)
-	Thearch.Lput(ehdr.version)
-	Thearch.Vput(ehdr.entry)
-	Thearch.Vput(ehdr.phoff)
-	Thearch.Vput(ehdr.shoff)
-	Thearch.Lput(ehdr.flags)
-	Thearch.Wput(ehdr.ehsize)
-	Thearch.Wput(ehdr.phentsize)
-	Thearch.Wput(ehdr.phnum)
-	Thearch.Wput(ehdr.shentsize)
-	Thearch.Wput(ehdr.shnum)
-	Thearch.Wput(ehdr.shstrndx)
-	return ELF64HDRSIZE
-}
-
-func elf32writehdr() uint32 {
-	for i := 0; i < EI_NIDENT; i++ {
-		Cput(ehdr.ident[i])
-	}
-	Thearch.Wput(ehdr.type_)
-	Thearch.Wput(ehdr.machine)
-	Thearch.Lput(ehdr.version)
-	Thearch.Lput(uint32(ehdr.entry))
-	Thearch.Lput(uint32(ehdr.phoff))
-	Thearch.Lput(uint32(ehdr.shoff))
-	Thearch.Lput(ehdr.flags)
-	Thearch.Wput(ehdr.ehsize)
-	Thearch.Wput(ehdr.phentsize)
-	Thearch.Wput(ehdr.phnum)
-	Thearch.Wput(ehdr.shentsize)
-	Thearch.Wput(ehdr.shnum)
-	Thearch.Wput(ehdr.shstrndx)
-	return ELF32HDRSIZE
-}
-
-func elfwritehdr() uint32 {
-	if elf64 {
-		return elf64writehdr()
-	}
-	return elf32writehdr()
-}
-
-/* Taken directly from the definition document for ELF64 */
-func elfhash(name string) uint32 {
-	var h uint32
-	for i := 0; i < len(name); i++ {
-		h = (h << 4) + uint32(name[i])
-		if g := h & 0xf0000000; g != 0 {
-			h ^= g >> 24
-		}
-		h &= 0x0fffffff
-	}
-	return h
-}
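A sketch of how this hash value feeds the classic SysV .hash table that elfdynhash builds below: buckets[h % nbucket] holds the first symbol index for a bucket and chain links the rest, with index 0 terminating the walk. symName is a hypothetical accessor used only for illustration:

// hashLookup walks the SysV hash table laid out by elfdynhash to find the
// dynamic symbol with the given name, returning its index.
func hashLookup(name string, buckets, chain []uint32, symName func(uint32) string) (uint32, bool) {
	nbucket := uint32(len(buckets))
	for i := buckets[elfhash(name)%nbucket]; i != 0; i = chain[i] {
		if symName(i) == name {
			return i, true
		}
	}
	return 0, false
}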
-
-func Elfwritedynent(ctxt *Link, s *Symbol, tag int, val uint64) {
-	if elf64 {
-		Adduint64(ctxt, s, uint64(tag))
-		Adduint64(ctxt, s, val)
-	} else {
-		Adduint32(ctxt, s, uint32(tag))
-		Adduint32(ctxt, s, uint32(val))
-	}
-}
-
-func elfwritedynentsym(ctxt *Link, s *Symbol, tag int, t *Symbol) {
-	Elfwritedynentsymplus(ctxt, s, tag, t, 0)
-}
-
-func Elfwritedynentsymplus(ctxt *Link, s *Symbol, tag int, t *Symbol, add int64) {
-	if elf64 {
-		Adduint64(ctxt, s, uint64(tag))
-	} else {
-		Adduint32(ctxt, s, uint32(tag))
-	}
-	Addaddrplus(ctxt, s, t, add)
-}
-
-func elfwritedynentsymsize(ctxt *Link, s *Symbol, tag int, t *Symbol) {
-	if elf64 {
-		Adduint64(ctxt, s, uint64(tag))
-	} else {
-		Adduint32(ctxt, s, uint32(tag))
-	}
-	addsize(ctxt, s, t)
-}
-
-func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int {
-	interp = p
-	n := len(interp) + 1
-	sh.addr = startva + resoff - uint64(n)
-	sh.off = resoff - uint64(n)
-	sh.size = uint64(n)
-
-	return n
-}
-
-func elfwriteinterp() int {
-	sh := elfshname(".interp")
-	Cseek(int64(sh.off))
-	coutbuf.WriteString(interp)
-	Cput(0)
-	return int(sh.size)
-}
-
-func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int, alloc bool) int {
-	n := 3*4 + uint64(sz) + resoff%4
-
-	sh.type_ = SHT_NOTE
-	if alloc {
-		sh.flags = SHF_ALLOC
-	}
-	sh.addralign = 4
-	sh.addr = startva + resoff - n
-	sh.off = resoff - n
-	sh.size = n - resoff%4
-
-	return int(n)
-}
-
-func elfwritenotehdr(str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr {
-	sh := elfshname(str)
-
-	// Write Elf_Note header.
-	Cseek(int64(sh.off))
-
-	Thearch.Lput(namesz)
-	Thearch.Lput(descsz)
-	Thearch.Lput(tag)
-
-	return sh
-}
-
-// NetBSD Signature (as per sys/exec_elf.h)
-const (
-	ELF_NOTE_NETBSD_NAMESZ  = 7
-	ELF_NOTE_NETBSD_DESCSZ  = 4
-	ELF_NOTE_NETBSD_TAG     = 1
-	ELF_NOTE_NETBSD_VERSION = 599000000 /* NetBSD 5.99 */
-)
-
-var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00")
-
-func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
-	n := int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
-	return elfnote(sh, startva, resoff, n, true)
-}
-
-func elfwritenetbsdsig() int {
-	// Write Elf_Note header.
-	sh := elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
-
-	if sh == nil {
-		return 0
-	}
-
-	// Followed by NetBSD string and version.
-	Cwrite(ELF_NOTE_NETBSD_NAME)
-	Cput(0)
-
-	Thearch.Lput(ELF_NOTE_NETBSD_VERSION)
-
-	return int(sh.size)
-}
-
-// OpenBSD Signature
-const (
-	ELF_NOTE_OPENBSD_NAMESZ  = 8
-	ELF_NOTE_OPENBSD_DESCSZ  = 4
-	ELF_NOTE_OPENBSD_TAG     = 1
-	ELF_NOTE_OPENBSD_VERSION = 0
-)
-
-var ELF_NOTE_OPENBSD_NAME = []byte("OpenBSD\x00")
-
-func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
-	n := ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
-	return elfnote(sh, startva, resoff, n, true)
-}
-
-func elfwriteopenbsdsig() int {
-	// Write Elf_Note header.
-	sh := elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
-
-	if sh == nil {
-		return 0
-	}
-
-	// Followed by OpenBSD string and version.
-	Cwrite(ELF_NOTE_OPENBSD_NAME)
-
-	Thearch.Lput(ELF_NOTE_OPENBSD_VERSION)
-
-	return int(sh.size)
-}
-
-func addbuildinfo(val string) {
-	if !strings.HasPrefix(val, "0x") {
-		Exitf("-B argument must start with 0x: %s", val)
-	}
-
-	ov := val
-	val = val[2:]
-
-	const maxLen = 32
-	if hex.DecodedLen(len(val)) > maxLen {
-		Exitf("-B option too long (max %d digits): %s", maxLen, ov)
-	}
-
-	b, err := hex.DecodeString(val)
-	if err != nil {
-		if err == hex.ErrLength {
-			Exitf("-B argument must have even number of digits: %s", ov)
-		}
-		if inv, ok := err.(hex.InvalidByteError); ok {
-			Exitf("-B argument contains invalid hex digit %c: %s", byte(inv), ov)
-		}
-		Exitf("-B argument contains invalid hex: %s", ov)
-	}
-
-	buildinfo = b
-}
-
-// Build info note
-const (
-	ELF_NOTE_BUILDINFO_NAMESZ = 4
-	ELF_NOTE_BUILDINFO_TAG    = 3
-)
-
-var ELF_NOTE_BUILDINFO_NAME = []byte("GNU\x00")
-
-func elfbuildinfo(sh *ElfShdr, startva uint64, resoff uint64) int {
-	n := int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
-	return elfnote(sh, startva, resoff, n, true)
-}
-
-func elfgobuildid(sh *ElfShdr, startva uint64, resoff uint64) int {
-	n := len(ELF_NOTE_GO_NAME) + int(Rnd(int64(len(*flagBuildid)), 4))
-	return elfnote(sh, startva, resoff, n, true)
-}
-
-func elfwritebuildinfo() int {
-	sh := elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
-	if sh == nil {
-		return 0
-	}
-
-	Cwrite(ELF_NOTE_BUILDINFO_NAME)
-	Cwrite(buildinfo)
-	var zero = make([]byte, 4)
-	Cwrite(zero[:int(Rnd(int64(len(buildinfo)), 4)-int64(len(buildinfo)))])
-
-	return int(sh.size)
-}
-
-func elfwritegobuildid() int {
-	sh := elfwritenotehdr(".note.go.buildid", uint32(len(ELF_NOTE_GO_NAME)), uint32(len(*flagBuildid)), ELF_NOTE_GOBUILDID_TAG)
-	if sh == nil {
-		return 0
-	}
-
-	Cwrite(ELF_NOTE_GO_NAME)
-	Cwrite([]byte(*flagBuildid))
-	var zero = make([]byte, 4)
-	Cwrite(zero[:int(Rnd(int64(len(*flagBuildid)), 4)-int64(len(*flagBuildid)))])
-
-	return int(sh.size)
-}
-
-// Go specific notes
-const (
-	ELF_NOTE_GOPKGLIST_TAG = 1
-	ELF_NOTE_GOABIHASH_TAG = 2
-	ELF_NOTE_GODEPS_TAG    = 3
-	ELF_NOTE_GOBUILDID_TAG = 4
-)
-
-var ELF_NOTE_GO_NAME = []byte("Go\x00\x00")
-
-var elfverneed int
-
-type Elfaux struct {
-	next *Elfaux
-	num  int
-	vers string
-}
-
-type Elflib struct {
-	next *Elflib
-	aux  *Elfaux
-	file string
-}
-
-func addelflib(list **Elflib, file string, vers string) *Elfaux {
-	var lib *Elflib
-
-	for lib = *list; lib != nil; lib = lib.next {
-		if lib.file == file {
-			goto havelib
-		}
-	}
-	lib = new(Elflib)
-	lib.next = *list
-	lib.file = file
-	*list = lib
-
-havelib:
-	for aux := lib.aux; aux != nil; aux = aux.next {
-		if aux.vers == vers {
-			return aux
-		}
-	}
-	aux := new(Elfaux)
-	aux.next = lib.aux
-	aux.vers = vers
-	lib.aux = aux
-
-	return aux
-}
-
-func elfdynhash(ctxt *Link) {
-	if !Iself {
-		return
-	}
-
-	nsym := Nelfsym
-	s := ctxt.Syms.Lookup(".hash", 0)
-	s.Type = obj.SELFROSECT
-	s.Attr |= AttrReachable
-
-	i := nsym
-	nbucket := 1
-	for i > 0 {
-		nbucket++
-		i >>= 1
-	}
-
-	var needlib *Elflib
-	need := make([]*Elfaux, nsym)
-	chain := make([]uint32, nsym)
-	buckets := make([]uint32, nbucket)
-
-	var b int
-	for _, sy := range ctxt.Syms.Allsym {
-		if sy.Dynid <= 0 {
-			continue
-		}
-
-		if sy.Dynimpvers != "" {
-			need[sy.Dynid] = addelflib(&needlib, sy.Dynimplib, sy.Dynimpvers)
-		}
-
-		name := sy.Extname
-		hc := elfhash(name)
-
-		b = int(hc % uint32(nbucket))
-		chain[sy.Dynid] = buckets[b]
-		buckets[b] = uint32(sy.Dynid)
-	}
-
-	// s390x (ELF64) hash table entries are 8 bytes
-	if SysArch.Family == sys.S390X {
-		Adduint64(ctxt, s, uint64(nbucket))
-		Adduint64(ctxt, s, uint64(nsym))
-		for i := 0; i < nbucket; i++ {
-			Adduint64(ctxt, s, uint64(buckets[i]))
-		}
-		for i := 0; i < nsym; i++ {
-			Adduint64(ctxt, s, uint64(chain[i]))
-		}
-	} else {
-		Adduint32(ctxt, s, uint32(nbucket))
-		Adduint32(ctxt, s, uint32(nsym))
-		for i := 0; i < nbucket; i++ {
-			Adduint32(ctxt, s, buckets[i])
-		}
-		for i := 0; i < nsym; i++ {
-			Adduint32(ctxt, s, chain[i])
-		}
-	}
-
-	// version symbols
-	dynstr := ctxt.Syms.Lookup(".dynstr", 0)
-
-	s = ctxt.Syms.Lookup(".gnu.version_r", 0)
-	i = 2
-	nfile := 0
-	var j int
-	var x *Elfaux
-	for l := needlib; l != nil; l = l.next {
-		nfile++
-
-		// header
-		Adduint16(ctxt, s, 1) // table version
-		j = 0
-		for x = l.aux; x != nil; x = x.next {
-			j++
-		}
-		Adduint16(ctxt, s, uint16(j))                         // aux count
-		Adduint32(ctxt, s, uint32(Addstring(dynstr, l.file))) // file string offset
-		Adduint32(ctxt, s, 16)                                // offset from header to first aux
-		if l.next != nil {
-			Adduint32(ctxt, s, 16+uint32(j)*16) // offset from this header to next
-		} else {
-			Adduint32(ctxt, s, 0)
-		}
-
-		for x = l.aux; x != nil; x = x.next {
-			x.num = i
-			i++
-
-			// aux struct
-			Adduint32(ctxt, s, elfhash(x.vers))                   // hash
-			Adduint16(ctxt, s, 0)                                 // flags
-			Adduint16(ctxt, s, uint16(x.num))                     // other - index we refer to this by
-			Adduint32(ctxt, s, uint32(Addstring(dynstr, x.vers))) // version string offset
-			if x.next != nil {
-				Adduint32(ctxt, s, 16) // offset from this aux to next
-			} else {
-				Adduint32(ctxt, s, 0)
-			}
-		}
-	}
-
-	// version references
-	s = ctxt.Syms.Lookup(".gnu.version", 0)
-
-	for i := 0; i < nsym; i++ {
-		if i == 0 {
-			Adduint16(ctxt, s, 0) // first entry - no symbol
-		} else if need[i] == nil {
-			Adduint16(ctxt, s, 1) // global
-		} else {
-			Adduint16(ctxt, s, uint16(need[i].num))
-		}
-	}
-
-	s = ctxt.Syms.Lookup(".dynamic", 0)
-	elfverneed = nfile
-	if elfverneed != 0 {
-		elfwritedynentsym(ctxt, s, DT_VERNEED, ctxt.Syms.Lookup(".gnu.version_r", 0))
-		Elfwritedynent(ctxt, s, DT_VERNEEDNUM, uint64(nfile))
-		elfwritedynentsym(ctxt, s, DT_VERSYM, ctxt.Syms.Lookup(".gnu.version", 0))
-	}
-
-	sy := ctxt.Syms.Lookup(elfRelType+".plt", 0)
-	if sy.Size > 0 {
-		if elfRelType == ".rela" {
-			Elfwritedynent(ctxt, s, DT_PLTREL, DT_RELA)
-		} else {
-			Elfwritedynent(ctxt, s, DT_PLTREL, DT_REL)
-		}
-		elfwritedynentsymsize(ctxt, s, DT_PLTRELSZ, sy)
-		elfwritedynentsym(ctxt, s, DT_JMPREL, sy)
-	}
-
-	Elfwritedynent(ctxt, s, DT_NULL, 0)
-}
-
-func elfphload(seg *Segment) *ElfPhdr {
-	ph := newElfPhdr()
-	ph.type_ = PT_LOAD
-	if seg.Rwx&4 != 0 {
-		ph.flags |= PF_R
-	}
-	if seg.Rwx&2 != 0 {
-		ph.flags |= PF_W
-	}
-	if seg.Rwx&1 != 0 {
-		ph.flags |= PF_X
-	}
-	ph.vaddr = seg.Vaddr
-	ph.paddr = seg.Vaddr
-	ph.memsz = seg.Length
-	ph.off = seg.Fileoff
-	ph.filesz = seg.Filelen
-	ph.align = uint64(*FlagRound)
-
-	return ph
-}
-
-func elfphrelro(seg *Segment) {
-	ph := newElfPhdr()
-	ph.type_ = PT_GNU_RELRO
-	ph.vaddr = seg.Vaddr
-	ph.paddr = seg.Vaddr
-	ph.memsz = seg.Length
-	ph.off = seg.Fileoff
-	ph.filesz = seg.Filelen
-	ph.align = uint64(*FlagRound)
-}
-
-func elfshname(name string) *ElfShdr {
-	var off int
-	var sh *ElfShdr
-
-	for i := 0; i < nelfstr; i++ {
-		if name == elfstr[i].s {
-			off = elfstr[i].off
-			for i = 0; i < int(ehdr.shnum); i++ {
-				sh = shdr[i]
-				if sh.name == uint32(off) {
-					return sh
-				}
-			}
-
-			sh = newElfShdr(int64(off))
-			return sh
-		}
-	}
-
-	Exitf("cannot find elf name %s", name)
-	return nil
-}
-
-// Create an ElfShdr for the section with name.
-// Create a duplicate if one already exists with that name
-func elfshnamedup(name string) *ElfShdr {
-	var off int
-	var sh *ElfShdr
-
-	for i := 0; i < nelfstr; i++ {
-		if name == elfstr[i].s {
-			off = elfstr[i].off
-			sh = newElfShdr(int64(off))
-			return sh
-		}
-	}
-
-	Errorf(nil, "cannot find elf name %s", name)
-	errorexit()
-	return nil
-}
-
-func elfshalloc(sect *Section) *ElfShdr {
-	sh := elfshname(sect.Name)
-	sect.Elfsect = sh
-	return sh
-}
-
-func elfshbits(sect *Section) *ElfShdr {
-	var sh *ElfShdr
-
-	if sect.Name == ".text" {
-		if sect.Elfsect == nil {
-			sect.Elfsect = elfshnamedup(sect.Name)
-		}
-		sh = sect.Elfsect
-	} else {
-		sh = elfshalloc(sect)
-	}
-
-	// If this section has already been set up as a note, we assume type_ and
-	// flags are already correct, but the other fields still need filling in.
-	if sh.type_ == SHT_NOTE {
-		if Linkmode != LinkExternal {
-			// TODO(mwhudson): the approach here will work OK when
-			// linking internally for notes that we want to be included
-			// in a loadable segment (e.g. the abihash note) but not for
-			// notes that we do not want to be mapped (e.g. the package
-			// list note). The real fix is probably to define new values
-			// for Symbol.Type corresponding to mapped and unmapped notes
-			// and handle them in dodata().
-			Errorf(nil, "sh.type_ == SHT_NOTE in elfshbits when linking internally")
-		}
-		sh.addralign = uint64(sect.Align)
-		sh.size = sect.Length
-		sh.off = sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr
-		return sh
-	}
-	if sh.type_ > 0 {
-		return sh
-	}
-
-	if sect.Vaddr < sect.Seg.Vaddr+sect.Seg.Filelen {
-		sh.type_ = SHT_PROGBITS
-	} else {
-		sh.type_ = SHT_NOBITS
-	}
-	sh.flags = SHF_ALLOC
-	if sect.Rwx&1 != 0 {
-		sh.flags |= SHF_EXECINSTR
-	}
-	if sect.Rwx&2 != 0 {
-		sh.flags |= SHF_WRITE
-	}
-	if sect.Name == ".tbss" {
-		sh.flags |= SHF_TLS
-		sh.type_ = SHT_NOBITS
-	}
-	if strings.HasPrefix(sect.Name, ".debug") {
-		sh.flags = 0
-	}
-
-	if Linkmode != LinkExternal {
-		sh.addr = sect.Vaddr
-	}
-	sh.addralign = uint64(sect.Align)
-	sh.size = sect.Length
-	if sect.Name != ".tbss" {
-		sh.off = sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr
-	}
-
-	return sh
-}
-
-func elfshreloc(sect *Section) *ElfShdr {
-	// If main section is SHT_NOBITS, nothing to relocate.
-	// Also nothing to relocate in .shstrtab or notes.
-	if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
-		return nil
-	}
-	if sect.Name == ".shstrtab" || sect.Name == ".tbss" {
-		return nil
-	}
-	if sect.Elfsect.type_ == SHT_NOTE {
-		return nil
-	}
-
-	var typ int
-	if elfRelType == ".rela" {
-		typ = SHT_RELA
-	} else {
-		typ = SHT_REL
-	}
-
-	sh := elfshname(elfRelType + sect.Name)
-	// There could be multiple text sections but each needs
-	// its own .rela.text.
-
-	if sect.Name == ".text" {
-		if sh.info != 0 && sh.info != uint32(sect.Elfsect.shnum) {
-			sh = elfshnamedup(elfRelType + sect.Name)
-		}
-	}
-
-	sh.type_ = uint32(typ)
-	sh.entsize = uint64(SysArch.RegSize) * 2
-	if typ == SHT_RELA {
-		sh.entsize += uint64(SysArch.RegSize)
-	}
-	sh.link = uint32(elfshname(".symtab").shnum)
-	sh.info = uint32(sect.Elfsect.shnum)
-	sh.off = sect.Reloff
-	sh.size = sect.Rellen
-	sh.addralign = uint64(SysArch.RegSize)
-	return sh
-}
-
-func elfrelocsect(ctxt *Link, sect *Section, syms []*Symbol) {
-	// If main section is SHT_NOBITS, nothing to relocate.
-	// Also nothing to relocate in .shstrtab.
-	if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
-		return
-	}
-	if sect.Name == ".shstrtab" {
-		return
-	}
-
-	sect.Reloff = uint64(coutbuf.Offset())
-	for i, s := range syms {
-		if !s.Attr.Reachable() {
-			continue
-		}
-		if uint64(s.Value) >= sect.Vaddr {
-			syms = syms[i:]
-			break
-		}
-	}
-
-	eaddr := int32(sect.Vaddr + sect.Length)
-	for _, sym := range syms {
-		if !sym.Attr.Reachable() {
-			continue
-		}
-		if sym.Value >= int64(eaddr) {
-			break
-		}
-		for ri := 0; ri < len(sym.R); ri++ {
-			r := &sym.R[ri]
-			if r.Done != 0 {
-				continue
-			}
-			if r.Xsym == nil {
-				Errorf(sym, "missing xsym in relocation")
-				continue
-			}
-			if r.Xsym.ElfsymForReloc() == 0 {
-				Errorf(sym, "reloc %d to non-elf symbol %s (outer=%s) %d", r.Type, r.Sym.Name, r.Xsym.Name, r.Sym.Type)
-			}
-			if !r.Xsym.Attr.Reachable() {
-				Errorf(sym, "unreachable reloc %v target %v", r.Type, r.Xsym.Name)
-			}
-			if Thearch.Elfreloc1(ctxt, r, int64(uint64(sym.Value+int64(r.Off))-sect.Vaddr)) < 0 {
-				Errorf(sym, "unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name)
-			}
-		}
-	}
-
-	sect.Rellen = uint64(coutbuf.Offset()) - sect.Reloff
-}
-
-func Elfemitreloc(ctxt *Link) {
-	for coutbuf.Offset()&7 != 0 {
-		Cput(0)
-	}
-
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		if sect.Name == ".text" {
-			elfrelocsect(ctxt, sect, ctxt.Textp)
-		} else {
-			elfrelocsect(ctxt, sect, datap)
-		}
-	}
-
-	for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
-		elfrelocsect(ctxt, sect, datap)
-	}
-	for sect := Segrelrodata.Sect; sect != nil; sect = sect.Next {
-		elfrelocsect(ctxt, sect, datap)
-	}
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		elfrelocsect(ctxt, sect, datap)
-	}
-	for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-		elfrelocsect(ctxt, sect, dwarfp)
-	}
-}
-
-func addgonote(ctxt *Link, sectionName string, tag uint32, desc []byte) {
-	s := ctxt.Syms.Lookup(sectionName, 0)
-	s.Attr |= AttrReachable
-	s.Type = obj.SELFROSECT
-	// namesz
-	Adduint32(ctxt, s, uint32(len(ELF_NOTE_GO_NAME)))
-	// descsz
-	Adduint32(ctxt, s, uint32(len(desc)))
-	// tag
-	Adduint32(ctxt, s, tag)
-	// name + padding
-	s.P = append(s.P, ELF_NOTE_GO_NAME...)
-	for len(s.P)%4 != 0 {
-		s.P = append(s.P, 0)
-	}
-	// desc + padding
-	s.P = append(s.P, desc...)
-	for len(s.P)%4 != 0 {
-		s.P = append(s.P, 0)
-	}
-	s.Size = int64(len(s.P))
-}
-
-func (ctxt *Link) doelf() {
-	if !Iself {
-		return
-	}
-
-	/* predefine strings we need for section headers */
-	shstrtab := ctxt.Syms.Lookup(".shstrtab", 0)
-
-	shstrtab.Type = obj.SELFROSECT
-	shstrtab.Attr |= AttrReachable
-
-	Addstring(shstrtab, "")
-	Addstring(shstrtab, ".text")
-	Addstring(shstrtab, ".noptrdata")
-	Addstring(shstrtab, ".data")
-	Addstring(shstrtab, ".bss")
-	Addstring(shstrtab, ".noptrbss")
-
-	// generate .tbss section (except for OpenBSD where it's not supported)
-	// for dynamic internal linker or external linking, so that various
-	// binutils could correctly calculate PT_TLS size.
-	// see https://golang.org/issue/5200.
-	if Headtype != obj.Hopenbsd {
-		if !*FlagD || Linkmode == LinkExternal {
-			Addstring(shstrtab, ".tbss")
-		}
-	}
-	if Headtype == obj.Hnetbsd {
-		Addstring(shstrtab, ".note.netbsd.ident")
-	}
-	if Headtype == obj.Hopenbsd {
-		Addstring(shstrtab, ".note.openbsd.ident")
-	}
-	if len(buildinfo) > 0 {
-		Addstring(shstrtab, ".note.gnu.build-id")
-	}
-	if *flagBuildid != "" {
-		Addstring(shstrtab, ".note.go.buildid")
-	}
-	Addstring(shstrtab, ".elfdata")
-	Addstring(shstrtab, ".rodata")
-	// See the comment about data.rel.ro.FOO section names in data.go.
-	relro_prefix := ""
-	if UseRelro() {
-		Addstring(shstrtab, ".data.rel.ro")
-		relro_prefix = ".data.rel.ro"
-	}
-	Addstring(shstrtab, relro_prefix+".typelink")
-	Addstring(shstrtab, relro_prefix+".itablink")
-	Addstring(shstrtab, relro_prefix+".gosymtab")
-	Addstring(shstrtab, relro_prefix+".gopclntab")
-
-	if Linkmode == LinkExternal {
-		*FlagD = true
-
-		Addstring(shstrtab, elfRelType+".text")
-		Addstring(shstrtab, elfRelType+".rodata")
-		Addstring(shstrtab, elfRelType+relro_prefix+".typelink")
-		Addstring(shstrtab, elfRelType+relro_prefix+".itablink")
-		Addstring(shstrtab, elfRelType+relro_prefix+".gosymtab")
-		Addstring(shstrtab, elfRelType+relro_prefix+".gopclntab")
-		Addstring(shstrtab, elfRelType+".noptrdata")
-		Addstring(shstrtab, elfRelType+".data")
-		if UseRelro() {
-			Addstring(shstrtab, elfRelType+".data.rel.ro")
-		}
-
-		// add a .note.GNU-stack section to mark the stack as non-executable
-		Addstring(shstrtab, ".note.GNU-stack")
-
-		if Buildmode == BuildmodeShared {
-			Addstring(shstrtab, ".note.go.abihash")
-			Addstring(shstrtab, ".note.go.pkg-list")
-			Addstring(shstrtab, ".note.go.deps")
-		}
-	}
-
-	hasinitarr := *FlagLinkshared
-
-	/* shared library initializer */
-	switch Buildmode {
-	case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared, BuildmodePlugin:
-		hasinitarr = true
-	}
-
-	if hasinitarr {
-		Addstring(shstrtab, ".init_array")
-		Addstring(shstrtab, elfRelType+".init_array")
-	}
-
-	if !*FlagS {
-		Addstring(shstrtab, ".symtab")
-		Addstring(shstrtab, ".strtab")
-		dwarfaddshstrings(ctxt, shstrtab)
-	}
-
-	Addstring(shstrtab, ".shstrtab")
-
-	if !*FlagD { /* -d suppresses dynamic loader format */
-		Addstring(shstrtab, ".interp")
-		Addstring(shstrtab, ".hash")
-		Addstring(shstrtab, ".got")
-		if SysArch.Family == sys.PPC64 {
-			Addstring(shstrtab, ".glink")
-		}
-		Addstring(shstrtab, ".got.plt")
-		Addstring(shstrtab, ".dynamic")
-		Addstring(shstrtab, ".dynsym")
-		Addstring(shstrtab, ".dynstr")
-		Addstring(shstrtab, elfRelType)
-		Addstring(shstrtab, elfRelType+".plt")
-
-		Addstring(shstrtab, ".plt")
-		Addstring(shstrtab, ".gnu.version")
-		Addstring(shstrtab, ".gnu.version_r")
-
-		/* dynamic symbol table - first entry all zeros */
-		s := ctxt.Syms.Lookup(".dynsym", 0)
-
-		s.Type = obj.SELFROSECT
-		s.Attr |= AttrReachable
-		if elf64 {
-			s.Size += ELF64SYMSIZE
-		} else {
-			s.Size += ELF32SYMSIZE
-		}
-
-		/* dynamic string table */
-		s = ctxt.Syms.Lookup(".dynstr", 0)
-
-		s.Type = obj.SELFROSECT
-		s.Attr |= AttrReachable
-		if s.Size == 0 {
-			Addstring(s, "")
-		}
-		dynstr := s
-
-		/* relocation table */
-		s = ctxt.Syms.Lookup(elfRelType, 0)
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFROSECT
-
-		/* global offset table */
-		s = ctxt.Syms.Lookup(".got", 0)
-
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFGOT // writable
-
-		/* ppc64 glink resolver */
-		if SysArch.Family == sys.PPC64 {
-			s := ctxt.Syms.Lookup(".glink", 0)
-			s.Attr |= AttrReachable
-			s.Type = obj.SELFRXSECT
-		}
-
-		/* hash */
-		s = ctxt.Syms.Lookup(".hash", 0)
-
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFROSECT
-
-		s = ctxt.Syms.Lookup(".got.plt", 0)
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFSECT // writable
-
-		s = ctxt.Syms.Lookup(".plt", 0)
-
-		s.Attr |= AttrReachable
-		if SysArch.Family == sys.PPC64 {
-			// In the ppc64 ABI, .plt is a data section
-			// written by the dynamic linker.
-			s.Type = obj.SELFSECT
-		} else {
-			s.Type = obj.SELFRXSECT
-		}
-
-		Thearch.Elfsetupplt(ctxt)
-
-		s = ctxt.Syms.Lookup(elfRelType+".plt", 0)
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFROSECT
-
-		s = ctxt.Syms.Lookup(".gnu.version", 0)
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFROSECT
-
-		s = ctxt.Syms.Lookup(".gnu.version_r", 0)
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFROSECT
-
-		/* define dynamic elf table */
-		s = ctxt.Syms.Lookup(".dynamic", 0)
-
-		s.Attr |= AttrReachable
-		s.Type = obj.SELFSECT // writable
-
-		/*
-		 * .dynamic table
-		 */
-		elfwritedynentsym(ctxt, s, DT_HASH, ctxt.Syms.Lookup(".hash", 0))
-
-		elfwritedynentsym(ctxt, s, DT_SYMTAB, ctxt.Syms.Lookup(".dynsym", 0))
-		if elf64 {
-			Elfwritedynent(ctxt, s, DT_SYMENT, ELF64SYMSIZE)
-		} else {
-			Elfwritedynent(ctxt, s, DT_SYMENT, ELF32SYMSIZE)
-		}
-		elfwritedynentsym(ctxt, s, DT_STRTAB, ctxt.Syms.Lookup(".dynstr", 0))
-		elfwritedynentsymsize(ctxt, s, DT_STRSZ, ctxt.Syms.Lookup(".dynstr", 0))
-		if elfRelType == ".rela" {
-			elfwritedynentsym(ctxt, s, DT_RELA, ctxt.Syms.Lookup(".rela", 0))
-			elfwritedynentsymsize(ctxt, s, DT_RELASZ, ctxt.Syms.Lookup(".rela", 0))
-			Elfwritedynent(ctxt, s, DT_RELAENT, ELF64RELASIZE)
-		} else {
-			elfwritedynentsym(ctxt, s, DT_REL, ctxt.Syms.Lookup(".rel", 0))
-			elfwritedynentsymsize(ctxt, s, DT_RELSZ, ctxt.Syms.Lookup(".rel", 0))
-			Elfwritedynent(ctxt, s, DT_RELENT, ELF32RELSIZE)
-		}
-
-		if rpath.val != "" {
-			Elfwritedynent(ctxt, s, DT_RUNPATH, uint64(Addstring(dynstr, rpath.val)))
-		}
-
-		if SysArch.Family == sys.PPC64 {
-			elfwritedynentsym(ctxt, s, DT_PLTGOT, ctxt.Syms.Lookup(".plt", 0))
-		} else if SysArch.Family == sys.S390X {
-			elfwritedynentsym(ctxt, s, DT_PLTGOT, ctxt.Syms.Lookup(".got", 0))
-		} else {
-			elfwritedynentsym(ctxt, s, DT_PLTGOT, ctxt.Syms.Lookup(".got.plt", 0))
-		}
-
-		if SysArch.Family == sys.PPC64 {
-			Elfwritedynent(ctxt, s, DT_PPC64_OPT, 0)
-		}
-
-		// Solaris dynamic linker can't handle an empty .rela.plt if
-		// DT_JMPREL is emitted so we have to defer generation of DT_PLTREL,
-		// DT_PLTRELSZ, and DT_JMPREL dynamic entries until after we know the
-		// size of .rel(a).plt section.
-		Elfwritedynent(ctxt, s, DT_DEBUG, 0)
-	}
-
-	if Buildmode == BuildmodeShared {
-		// The go.link.abihashbytes symbol will be pointed at the appropriate
-		// part of the .note.go.abihash section in data.go:func address().
-		s := ctxt.Syms.Lookup("go.link.abihashbytes", 0)
-		s.Attr |= AttrLocal
-		s.Type = obj.SRODATA
-		s.Attr |= AttrSpecial
-		s.Attr |= AttrReachable
-		s.Size = int64(sha1.Size)
-
-		sort.Sort(byPkg(ctxt.Library))
-		h := sha1.New()
-		for _, l := range ctxt.Library {
-			io.WriteString(h, l.hash)
-		}
-		addgonote(ctxt, ".note.go.abihash", ELF_NOTE_GOABIHASH_TAG, h.Sum([]byte{}))
-		addgonote(ctxt, ".note.go.pkg-list", ELF_NOTE_GOPKGLIST_TAG, pkglistfornote)
-		var deplist []string
-		for _, shlib := range ctxt.Shlibs {
-			deplist = append(deplist, filepath.Base(shlib.Path))
-		}
-		addgonote(ctxt, ".note.go.deps", ELF_NOTE_GODEPS_TAG, []byte(strings.Join(deplist, "\n")))
-	}
-
-	if Linkmode == LinkExternal && *flagBuildid != "" {
-		addgonote(ctxt, ".note.go.buildid", ELF_NOTE_GOBUILDID_TAG, []byte(*flagBuildid))
-	}
-}
-
-// Do not write DT_NULL.  elfdynhash will finish it.
-func shsym(sh *ElfShdr, s *Symbol) {
-	addr := Symaddr(s)
-	if sh.flags&SHF_ALLOC != 0 {
-		sh.addr = uint64(addr)
-	}
-	sh.off = uint64(datoff(s, addr))
-	sh.size = uint64(s.Size)
-}
-
-func phsh(ph *ElfPhdr, sh *ElfShdr) {
-	ph.vaddr = sh.addr
-	ph.paddr = ph.vaddr
-	ph.off = sh.off
-	ph.filesz = sh.size
-	ph.memsz = sh.size
-	ph.align = sh.addralign
-}
-
-func Asmbelfsetup() {
-	/* This null SHdr must appear before all others */
-	elfshname("")
-
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		// There could be multiple .text sections. Instead check the Elfsect
-		// field to determine if already has an ElfShdr and if not, create one.
-		if sect.Name == ".text" {
-			if sect.Elfsect == nil {
-				sect.Elfsect = elfshnamedup(sect.Name)
-			}
-		} else {
-			elfshalloc(sect)
-		}
-	}
-	for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
-		elfshalloc(sect)
-	}
-	for sect := Segrelrodata.Sect; sect != nil; sect = sect.Next {
-		elfshalloc(sect)
-	}
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		elfshalloc(sect)
-	}
-	for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-		elfshalloc(sect)
-	}
-}
-
-func Asmbelf(ctxt *Link, symo int64) {
-	eh := getElfEhdr()
-	switch SysArch.Family {
-	default:
-		Exitf("unknown architecture in asmbelf: %v", SysArch.Family)
-	case sys.MIPS, sys.MIPS64:
-		eh.machine = EM_MIPS
-	case sys.ARM:
-		eh.machine = EM_ARM
-	case sys.AMD64:
-		eh.machine = EM_X86_64
-	case sys.ARM64:
-		eh.machine = EM_AARCH64
-	case sys.I386:
-		eh.machine = EM_386
-	case sys.PPC64:
-		eh.machine = EM_PPC64
-	case sys.S390X:
-		eh.machine = EM_S390
-	}
-
-	elfreserve := int64(ELFRESERVE)
-
-	numtext := int64(0)
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		if sect.Name == ".text" {
-			numtext++
-		}
-	}
-
-	// If there are multiple text sections, extra space is needed
-	// in the elfreserve for the additional .text and .rela.text
-	// section headers.  It can handle 4 extra now. Headers are
-	// 64 bytes.
-
-	if numtext > 4 {
-		elfreserve += elfreserve + numtext*64*2
-	}
-
-	startva := *FlagTextAddr - int64(HEADR)
-	resoff := elfreserve
-
-	var pph *ElfPhdr
-	var pnote *ElfPhdr
-	if Linkmode == LinkExternal {
-		/* skip program headers */
-		eh.phoff = 0
-
-		eh.phentsize = 0
-
-		if Buildmode == BuildmodeShared {
-			sh := elfshname(".note.go.pkg-list")
-			sh.type_ = SHT_NOTE
-			sh = elfshname(".note.go.abihash")
-			sh.type_ = SHT_NOTE
-			sh.flags = SHF_ALLOC
-			sh = elfshname(".note.go.deps")
-			sh.type_ = SHT_NOTE
-		}
-
-		if *flagBuildid != "" {
-			sh := elfshname(".note.go.buildid")
-			sh.type_ = SHT_NOTE
-			sh.flags = SHF_ALLOC
-		}
-
-		goto elfobj
-	}
-
-	/* program header info */
-	pph = newElfPhdr()
-
-	pph.type_ = PT_PHDR
-	pph.flags = PF_R
-	pph.off = uint64(eh.ehsize)
-	pph.vaddr = uint64(*FlagTextAddr) - uint64(HEADR) + pph.off
-	pph.paddr = uint64(*FlagTextAddr) - uint64(HEADR) + pph.off
-	pph.align = uint64(*FlagRound)
-
-	/*
-	 * PHDR must be in a loaded segment. Adjust the text
-	 * segment boundaries downwards to include it.
-	 * Except on NaCl where it must not be loaded.
-	 */
-	if Headtype != obj.Hnacl {
-		o := int64(Segtext.Vaddr - pph.vaddr)
-		Segtext.Vaddr -= uint64(o)
-		Segtext.Length += uint64(o)
-		o = int64(Segtext.Fileoff - pph.off)
-		Segtext.Fileoff -= uint64(o)
-		Segtext.Filelen += uint64(o)
-	}
-
-	if !*FlagD { /* -d suppresses dynamic loader format */
-		/* interpreter */
-		sh := elfshname(".interp")
-
-		sh.type_ = SHT_PROGBITS
-		sh.flags = SHF_ALLOC
-		sh.addralign = 1
-		if interpreter == "" {
-			switch Headtype {
-			case obj.Hlinux:
-				interpreter = Thearch.Linuxdynld
-
-			case obj.Hfreebsd:
-				interpreter = Thearch.Freebsddynld
-
-			case obj.Hnetbsd:
-				interpreter = Thearch.Netbsddynld
-
-			case obj.Hopenbsd:
-				interpreter = Thearch.Openbsddynld
-
-			case obj.Hdragonfly:
-				interpreter = Thearch.Dragonflydynld
-
-			case obj.Hsolaris:
-				interpreter = Thearch.Solarisdynld
-			}
-		}
-
-		resoff -= int64(elfinterp(sh, uint64(startva), uint64(resoff), interpreter))
-
-		ph := newElfPhdr()
-		ph.type_ = PT_INTERP
-		ph.flags = PF_R
-		phsh(ph, sh)
-	}
-
-	pnote = nil
-	if Headtype == obj.Hnetbsd || Headtype == obj.Hopenbsd {
-		var sh *ElfShdr
-		switch Headtype {
-		case obj.Hnetbsd:
-			sh = elfshname(".note.netbsd.ident")
-			resoff -= int64(elfnetbsdsig(sh, uint64(startva), uint64(resoff)))
-
-		case obj.Hopenbsd:
-			sh = elfshname(".note.openbsd.ident")
-			resoff -= int64(elfopenbsdsig(sh, uint64(startva), uint64(resoff)))
-		}
-
-		pnote = newElfPhdr()
-		pnote.type_ = PT_NOTE
-		pnote.flags = PF_R
-		phsh(pnote, sh)
-	}
-
-	if len(buildinfo) > 0 {
-		sh := elfshname(".note.gnu.build-id")
-		resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff)))
-
-		if pnote == nil {
-			pnote = newElfPhdr()
-			pnote.type_ = PT_NOTE
-			pnote.flags = PF_R
-		}
-
-		phsh(pnote, sh)
-	}
-
-	if *flagBuildid != "" {
-		sh := elfshname(".note.go.buildid")
-		resoff -= int64(elfgobuildid(sh, uint64(startva), uint64(resoff)))
-
-		pnote := newElfPhdr()
-		pnote.type_ = PT_NOTE
-		pnote.flags = PF_R
-		phsh(pnote, sh)
-	}
-
-	// Additions to the reserved area must be above this line.
-
-	elfphload(&Segtext)
-	if Segrodata.Sect != nil {
-		elfphload(&Segrodata)
-	}
-	if Segrelrodata.Sect != nil {
-		elfphload(&Segrelrodata)
-		elfphrelro(&Segrelrodata)
-	}
-	elfphload(&Segdata)
-
-	/* Dynamic linking sections */
-	if !*FlagD {
-		sh := elfshname(".dynsym")
-		sh.type_ = SHT_DYNSYM
-		sh.flags = SHF_ALLOC
-		if elf64 {
-			sh.entsize = ELF64SYMSIZE
-		} else {
-			sh.entsize = ELF32SYMSIZE
-		}
-		sh.addralign = uint64(SysArch.RegSize)
-		sh.link = uint32(elfshname(".dynstr").shnum)
-
-		// sh->info = index of first non-local symbol (number of local symbols)
-		shsym(sh, ctxt.Syms.Lookup(".dynsym", 0))
-
-		sh = elfshname(".dynstr")
-		sh.type_ = SHT_STRTAB
-		sh.flags = SHF_ALLOC
-		sh.addralign = 1
-		shsym(sh, ctxt.Syms.Lookup(".dynstr", 0))
-
-		if elfverneed != 0 {
-			sh := elfshname(".gnu.version")
-			sh.type_ = SHT_GNU_VERSYM
-			sh.flags = SHF_ALLOC
-			sh.addralign = 2
-			sh.link = uint32(elfshname(".dynsym").shnum)
-			sh.entsize = 2
-			shsym(sh, ctxt.Syms.Lookup(".gnu.version", 0))
-
-			sh = elfshname(".gnu.version_r")
-			sh.type_ = SHT_GNU_VERNEED
-			sh.flags = SHF_ALLOC
-			sh.addralign = uint64(SysArch.RegSize)
-			sh.info = uint32(elfverneed)
-			sh.link = uint32(elfshname(".dynstr").shnum)
-			shsym(sh, ctxt.Syms.Lookup(".gnu.version_r", 0))
-		}
-
-		if elfRelType == ".rela" {
-			sh := elfshname(".rela.plt")
-			sh.type_ = SHT_RELA
-			sh.flags = SHF_ALLOC
-			sh.entsize = ELF64RELASIZE
-			sh.addralign = uint64(SysArch.RegSize)
-			sh.link = uint32(elfshname(".dynsym").shnum)
-			sh.info = uint32(elfshname(".plt").shnum)
-			shsym(sh, ctxt.Syms.Lookup(".rela.plt", 0))
-
-			sh = elfshname(".rela")
-			sh.type_ = SHT_RELA
-			sh.flags = SHF_ALLOC
-			sh.entsize = ELF64RELASIZE
-			sh.addralign = 8
-			sh.link = uint32(elfshname(".dynsym").shnum)
-			shsym(sh, ctxt.Syms.Lookup(".rela", 0))
-		} else {
-			sh := elfshname(".rel.plt")
-			sh.type_ = SHT_REL
-			sh.flags = SHF_ALLOC
-			sh.entsize = ELF32RELSIZE
-			sh.addralign = 4
-			sh.link = uint32(elfshname(".dynsym").shnum)
-			shsym(sh, ctxt.Syms.Lookup(".rel.plt", 0))
-
-			sh = elfshname(".rel")
-			sh.type_ = SHT_REL
-			sh.flags = SHF_ALLOC
-			sh.entsize = ELF32RELSIZE
-			sh.addralign = 4
-			sh.link = uint32(elfshname(".dynsym").shnum)
-			shsym(sh, ctxt.Syms.Lookup(".rel", 0))
-		}
-
-		if eh.machine == EM_PPC64 {
-			sh := elfshname(".glink")
-			sh.type_ = SHT_PROGBITS
-			sh.flags = SHF_ALLOC + SHF_EXECINSTR
-			sh.addralign = 4
-			shsym(sh, ctxt.Syms.Lookup(".glink", 0))
-		}
-
-		sh = elfshname(".plt")
-		sh.type_ = SHT_PROGBITS
-		sh.flags = SHF_ALLOC + SHF_EXECINSTR
-		if eh.machine == EM_X86_64 {
-			sh.entsize = 16
-		} else if eh.machine == EM_S390 {
-			sh.entsize = 32
-		} else if eh.machine == EM_PPC64 {
-			// On ppc64, this is just a table of addresses
-			// filled by the dynamic linker
-			sh.type_ = SHT_NOBITS
-
-			sh.flags = SHF_ALLOC + SHF_WRITE
-			sh.entsize = 8
-		} else {
-			sh.entsize = 4
-		}
-		sh.addralign = sh.entsize
-		shsym(sh, ctxt.Syms.Lookup(".plt", 0))
-
-		// On ppc64, .got comes from the input files, so don't
-		// create it here, and .got.plt is not used.
-		if eh.machine != EM_PPC64 {
-			sh := elfshname(".got")
-			sh.type_ = SHT_PROGBITS
-			sh.flags = SHF_ALLOC + SHF_WRITE
-			sh.entsize = uint64(SysArch.RegSize)
-			sh.addralign = uint64(SysArch.RegSize)
-			shsym(sh, ctxt.Syms.Lookup(".got", 0))
-
-			sh = elfshname(".got.plt")
-			sh.type_ = SHT_PROGBITS
-			sh.flags = SHF_ALLOC + SHF_WRITE
-			sh.entsize = uint64(SysArch.RegSize)
-			sh.addralign = uint64(SysArch.RegSize)
-			shsym(sh, ctxt.Syms.Lookup(".got.plt", 0))
-		}
-
-		sh = elfshname(".hash")
-		sh.type_ = SHT_HASH
-		sh.flags = SHF_ALLOC
-		sh.entsize = 4
-		sh.addralign = uint64(SysArch.RegSize)
-		sh.link = uint32(elfshname(".dynsym").shnum)
-		shsym(sh, ctxt.Syms.Lookup(".hash", 0))
-
-		/* sh and PT_DYNAMIC for .dynamic section */
-		sh = elfshname(".dynamic")
-
-		sh.type_ = SHT_DYNAMIC
-		sh.flags = SHF_ALLOC + SHF_WRITE
-		sh.entsize = 2 * uint64(SysArch.RegSize)
-		sh.addralign = uint64(SysArch.RegSize)
-		sh.link = uint32(elfshname(".dynstr").shnum)
-		shsym(sh, ctxt.Syms.Lookup(".dynamic", 0))
-		ph := newElfPhdr()
-		ph.type_ = PT_DYNAMIC
-		ph.flags = PF_R + PF_W
-		phsh(ph, sh)
-
-		/*
-		 * Thread-local storage segment (really just size).
-		 */
-		// Do not emit PT_TLS for OpenBSD since ld.so(1) does
-		// not currently support it. This is handled
-		// appropriately in runtime/cgo.
-		if Headtype != obj.Hopenbsd {
-			tlssize := uint64(0)
-			for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-				if sect.Name == ".tbss" {
-					tlssize = sect.Length
-				}
-			}
-			if tlssize != 0 {
-				ph := newElfPhdr()
-				ph.type_ = PT_TLS
-				ph.flags = PF_R
-				ph.memsz = tlssize
-				ph.align = uint64(SysArch.RegSize)
-			}
-		}
-	}
-
-	if Headtype == obj.Hlinux {
-		ph := newElfPhdr()
-		ph.type_ = PT_GNU_STACK
-		ph.flags = PF_W + PF_R
-		ph.align = uint64(SysArch.RegSize)
-
-		ph = newElfPhdr()
-		ph.type_ = PT_PAX_FLAGS
-		ph.flags = 0x2a00 // mprotect, randexec, emutramp disabled
-		ph.align = uint64(SysArch.RegSize)
-	} else if Headtype == obj.Hsolaris {
-		ph := newElfPhdr()
-		ph.type_ = PT_SUNWSTACK
-		ph.flags = PF_W + PF_R
-	}
-
-elfobj:
-	sh := elfshname(".shstrtab")
-	sh.type_ = SHT_STRTAB
-	sh.addralign = 1
-	shsym(sh, ctxt.Syms.Lookup(".shstrtab", 0))
-	eh.shstrndx = uint16(sh.shnum)
-
-	// put these sections early in the list
-	if !*FlagS {
-		elfshname(".symtab")
-		elfshname(".strtab")
-	}
-
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		elfshbits(sect)
-	}
-	for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
-		elfshbits(sect)
-	}
-	for sect := Segrelrodata.Sect; sect != nil; sect = sect.Next {
-		elfshbits(sect)
-	}
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		elfshbits(sect)
-	}
-	for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-		elfshbits(sect)
-	}
-
-	if Linkmode == LinkExternal {
-		for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-			elfshreloc(sect)
-		}
-		for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
-			elfshreloc(sect)
-		}
-		for sect := Segrelrodata.Sect; sect != nil; sect = sect.Next {
-			elfshreloc(sect)
-		}
-		for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-			elfshreloc(sect)
-		}
-		for _, s := range dwarfp {
-			if len(s.R) > 0 || s.Type == obj.SDWARFINFO {
-				elfshreloc(s.Sect)
-			}
-			if s.Type == obj.SDWARFINFO {
-				break
-			}
-		}
-		// add a .note.GNU-stack section to mark the stack as non-executable
-		sh := elfshname(".note.GNU-stack")
-
-		sh.type_ = SHT_PROGBITS
-		sh.addralign = 1
-		sh.flags = 0
-	}
-
-	if !*FlagS {
-		sh := elfshname(".symtab")
-		sh.type_ = SHT_SYMTAB
-		sh.off = uint64(symo)
-		sh.size = uint64(Symsize)
-		sh.addralign = uint64(SysArch.RegSize)
-		sh.entsize = 8 + 2*uint64(SysArch.RegSize)
-		sh.link = uint32(elfshname(".strtab").shnum)
-		sh.info = uint32(elfglobalsymndx)
-
-		sh = elfshname(".strtab")
-		sh.type_ = SHT_STRTAB
-		sh.off = uint64(symo) + uint64(Symsize)
-		sh.size = uint64(len(Elfstrdat))
-		sh.addralign = 1
-	}
-
-	/* Main header */
-	eh.ident[EI_MAG0] = '\177'
-
-	eh.ident[EI_MAG1] = 'E'
-	eh.ident[EI_MAG2] = 'L'
-	eh.ident[EI_MAG3] = 'F'
-	if Headtype == obj.Hfreebsd {
-		eh.ident[EI_OSABI] = ELFOSABI_FREEBSD
-	} else if Headtype == obj.Hnetbsd {
-		eh.ident[EI_OSABI] = ELFOSABI_NETBSD
-	} else if Headtype == obj.Hopenbsd {
-		eh.ident[EI_OSABI] = ELFOSABI_OPENBSD
-	} else if Headtype == obj.Hdragonfly {
-		eh.ident[EI_OSABI] = ELFOSABI_NONE
-	}
-	if elf64 {
-		eh.ident[EI_CLASS] = ELFCLASS64
-	} else {
-		eh.ident[EI_CLASS] = ELFCLASS32
-	}
-	if ctxt.Arch.ByteOrder == binary.BigEndian {
-		eh.ident[EI_DATA] = ELFDATA2MSB
-	} else {
-		eh.ident[EI_DATA] = ELFDATA2LSB
-	}
-	eh.ident[EI_VERSION] = EV_CURRENT
-
-	if Linkmode == LinkExternal {
-		eh.type_ = ET_REL
-	} else if Buildmode == BuildmodePIE {
-		eh.type_ = ET_DYN
-	} else {
-		eh.type_ = ET_EXEC
-	}
-
-	if Linkmode != LinkExternal {
-		eh.entry = uint64(Entryvalue(ctxt))
-	}
-
-	eh.version = EV_CURRENT
-
-	if pph != nil {
-		pph.filesz = uint64(eh.phnum) * uint64(eh.phentsize)
-		pph.memsz = pph.filesz
-	}
-
-	Cseek(0)
-	a := int64(0)
-	a += int64(elfwritehdr())
-	a += int64(elfwritephdrs())
-	a += int64(elfwriteshdrs())
-	if !*FlagD {
-		a += int64(elfwriteinterp())
-	}
-	if Linkmode != LinkExternal {
-		if Headtype == obj.Hnetbsd {
-			a += int64(elfwritenetbsdsig())
-		}
-		if Headtype == obj.Hopenbsd {
-			a += int64(elfwriteopenbsdsig())
-		}
-		if len(buildinfo) > 0 {
-			a += int64(elfwritebuildinfo())
-		}
-		if *flagBuildid != "" {
-			a += int64(elfwritegobuildid())
-		}
-	}
-
-	if a > elfreserve {
-		Errorf(nil, "ELFRESERVE too small: %d > %d with %d text sections", a, elfreserve, numtext)
-	}
-}
-
-func Elfadddynsym(ctxt *Link, s *Symbol) {
-	if elf64 {
-		s.Dynid = int32(Nelfsym)
-		Nelfsym++
-
-		d := ctxt.Syms.Lookup(".dynsym", 0)
-
-		name := s.Extname
-		Adduint32(ctxt, d, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name)))
-
-		/* type */
-		t := STB_GLOBAL << 4
-
-		if s.Attr.CgoExport() && s.Type&obj.SMASK == obj.STEXT {
-			t |= STT_FUNC
-		} else {
-			t |= STT_OBJECT
-		}
-		Adduint8(ctxt, d, uint8(t))
-
-		/* reserved */
-		Adduint8(ctxt, d, 0)
-
-		/* section where symbol is defined */
-		if s.Type == obj.SDYNIMPORT {
-			Adduint16(ctxt, d, SHN_UNDEF)
-		} else {
-			Adduint16(ctxt, d, 1)
-		}
-
-		/* value */
-		if s.Type == obj.SDYNIMPORT {
-			Adduint64(ctxt, d, 0)
-		} else {
-			Addaddr(ctxt, d, s)
-		}
-
-		/* size of object */
-		Adduint64(ctxt, d, uint64(s.Size))
-
-		if SysArch.Family == sys.AMD64 && !s.Attr.CgoExportDynamic() && s.Dynimplib != "" && !seenlib[s.Dynimplib] {
-			Elfwritedynent(ctxt, ctxt.Syms.Lookup(".dynamic", 0), DT_NEEDED, uint64(Addstring(ctxt.Syms.Lookup(".dynstr", 0), s.Dynimplib)))
-		}
-	} else {
-		s.Dynid = int32(Nelfsym)
-		Nelfsym++
-
-		d := ctxt.Syms.Lookup(".dynsym", 0)
-
-		/* name */
-		name := s.Extname
-
-		Adduint32(ctxt, d, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name)))
-
-		/* value */
-		if s.Type == obj.SDYNIMPORT {
-			Adduint32(ctxt, d, 0)
-		} else {
-			Addaddr(ctxt, d, s)
-		}
-
-		/* size of object */
-		Adduint32(ctxt, d, uint32(s.Size))
-
-		/* type */
-		t := STB_GLOBAL << 4
-
-		// TODO(mwhudson): presumably the behavior should actually be the same on both arm and 386.
-		if SysArch.Family == sys.I386 && s.Attr.CgoExport() && s.Type&obj.SMASK == obj.STEXT {
-			t |= STT_FUNC
-		} else if SysArch.Family == sys.ARM && s.Attr.CgoExportDynamic() && s.Type&obj.SMASK == obj.STEXT {
-			t |= STT_FUNC
-		} else {
-			t |= STT_OBJECT
-		}
-		Adduint8(ctxt, d, uint8(t))
-		Adduint8(ctxt, d, 0)
-
-		/* shndx */
-		if s.Type == obj.SDYNIMPORT {
-			Adduint16(ctxt, d, SHN_UNDEF)
-		} else {
-			Adduint16(ctxt, d, 1)
-		}
-	}
-}
-
-func ELF32_R_SYM(info uint32) uint32 {
-	return info >> 8
-}
-
-func ELF32_R_TYPE(info uint32) uint32 {
-	return uint32(uint8(info))
-}
-
-func ELF32_R_INFO(sym uint32, type_ uint32) uint32 {
-	return sym<<8 | type_
-}
-
-func ELF32_ST_BIND(info uint8) uint8 {
-	return info >> 4
-}
-
-func ELF32_ST_TYPE(info uint8) uint8 {
-	return info & 0xf
-}
-
-func ELF32_ST_INFO(bind uint8, type_ uint8) uint8 {
-	return bind<<4 | type_&0xf
-}
-
-func ELF32_ST_VISIBILITY(oth uint8) uint8 {
-	return oth & 3
-}
-
-func ELF64_R_SYM(info uint64) uint32 {
-	return uint32(info >> 32)
-}
-
-func ELF64_R_TYPE(info uint64) uint32 {
-	return uint32(info)
-}
-
-func ELF64_R_INFO(sym uint32, type_ uint32) uint64 {
-	return uint64(sym)<<32 | uint64(type_)
-}
-
-func ELF64_ST_BIND(info uint8) uint8 {
-	return info >> 4
-}
-
-func ELF64_ST_TYPE(info uint8) uint8 {
-	return info & 0xf
-}
-
-func ELF64_ST_INFO(bind uint8, type_ uint8) uint8 {
-	return bind<<4 | type_&0xf
-}
-
-func ELF64_ST_VISIBILITY(oth uint8) uint8 {
-	return oth & 3
-}
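
The deleted elf.go hunk above revolves around ELF note records: addgonote and the elfwrite*sig helpers all emit a 12-byte namesz/descsz/type header followed by the name and the descriptor, each zero-padded to a 4-byte boundary. Below is a minimal standalone sketch of that layout, assuming little-endian output; the "Go\x00\x00" name and tag 4 mirror ELF_NOTE_GO_NAME and ELF_NOTE_GOBUILDID_TAG above, while the descriptor string is purely illustrative.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// buildNote lays out one ELF note record: 4-byte namesz, descsz and type,
// then the name and descriptor, each zero-padded to a 4-byte boundary.
func buildNote(name, desc []byte, tag uint32) []byte {
	var b bytes.Buffer
	pad := func() {
		for b.Len()%4 != 0 {
			b.WriteByte(0)
		}
	}
	binary.Write(&b, binary.LittleEndian, uint32(len(name))) // namesz
	binary.Write(&b, binary.LittleEndian, uint32(len(desc))) // descsz
	binary.Write(&b, binary.LittleEndian, tag)               // note type/tag
	b.Write(name)
	pad()
	b.Write(desc)
	pad()
	return b.Bytes()
}

func main() {
	// Descriptor is an example value, not taken from any real build.
	note := buildNote([]byte("Go\x00\x00"), []byte("example-build-id"), 4)
	fmt.Printf("% x\n", note)
}
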
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/go.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/go.go
deleted file mode 100644
index 8943d88..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/go.go
+++ /dev/null
@@ -1,424 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/go.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/go.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// go-specific code shared across loaders (5l, 6l, 8l).
-
-package ld
-
-import (
-	"bytes"
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"io"
-	"os"
-	"strings"
-)
-
-// go-specific code shared across loaders (5l, 6l, 8l).
-
-// replace all "". with pkg.
-func expandpkg(t0 string, pkg string) string {
-	return strings.Replace(t0, `"".`, pkg+".", -1)
-}
-
-// TODO:
-//	generate debugging section in binary.
-//	once the dust settles, try to move some code to
-//		libmach, so that other linkers and ar can share.
-
-func ldpkg(ctxt *Link, f *bio.Reader, pkg string, length int64, filename string, whence int) {
-	var p0, p1 int
-
-	if *flagG {
-		return
-	}
-
-	if int64(int(length)) != length {
-		fmt.Fprintf(os.Stderr, "%s: too much pkg data in %s\n", os.Args[0], filename)
-		if *flagU {
-			errorexit()
-		}
-		return
-	}
-
-	// In a __.PKGDEF, we only care about the package name.
-	// Don't read all the export data.
-	if length > 1000 && whence == Pkgdef {
-		length = 1000
-	}
-
-	bdata := make([]byte, length)
-	if _, err := io.ReadFull(f, bdata); err != nil {
-		fmt.Fprintf(os.Stderr, "%s: short pkg read %s\n", os.Args[0], filename)
-		if *flagU {
-			errorexit()
-		}
-		return
-	}
-	data := string(bdata)
-
-	// process header lines
-	isSafe := false
-	isMain := false
-	for data != "" {
-		var line string
-		if i := strings.Index(data, "\n"); i >= 0 {
-			line, data = data[:i], data[i+1:]
-		} else {
-			line, data = data, ""
-		}
-		if line == "safe" {
-			isSafe = true
-		}
-		if line == "main" {
-			isMain = true
-		}
-		if line == "" {
-			break
-		}
-	}
-
-	if whence == Pkgdef || whence == FileObj {
-		if pkg == "main" && !isMain {
-			Exitf("%s: not package main", filename)
-		}
-		if *flagU && whence != ArchiveObj && !isSafe {
-			Exitf("load of unsafe package %s", filename)
-		}
-	}
-
-	// __.PKGDEF has no cgo section - those are in the C compiler-generated object files.
-	if whence == Pkgdef {
-		return
-	}
-
-	// look for cgo section
-	p0 = strings.Index(data, "\n$$  // cgo")
-	if p0 >= 0 {
-		p0 += p1
-		i := strings.IndexByte(data[p0+1:], '\n')
-		if i < 0 {
-			fmt.Fprintf(os.Stderr, "%s: found $$ // cgo but no newline in %s\n", os.Args[0], filename)
-			if *flagU {
-				errorexit()
-			}
-			return
-		}
-		p0 += 1 + i
-
-		p1 = strings.Index(data[p0:], "\n$$")
-		if p1 < 0 {
-			p1 = strings.Index(data[p0:], "\n!\n")
-		}
-		if p1 < 0 {
-			fmt.Fprintf(os.Stderr, "%s: cannot find end of // cgo section in %s\n", os.Args[0], filename)
-			if *flagU {
-				errorexit()
-			}
-			return
-		}
-		p1 += p0
-
-		loadcgo(ctxt, filename, pkg, data[p0:p1])
-	}
-}
-
-func loadcgo(ctxt *Link, file string, pkg string, p string) {
-	var next string
-	var q string
-	var f []string
-	var local string
-	var remote string
-	var lib string
-	var s *Symbol
-
-	p0 := ""
-	for ; p != ""; p = next {
-		if i := strings.Index(p, "\n"); i >= 0 {
-			p, next = p[:i], p[i+1:]
-		} else {
-			next = ""
-		}
-
-		p0 = p // save for error message
-		f = tokenize(p)
-		if len(f) == 0 {
-			continue
-		}
-
-		if f[0] == "cgo_import_dynamic" {
-			if len(f) < 2 || len(f) > 4 {
-				goto err
-			}
-
-			local = f[1]
-			remote = local
-			if len(f) > 2 {
-				remote = f[2]
-			}
-			lib = ""
-			if len(f) > 3 {
-				lib = f[3]
-			}
-
-			if *FlagD {
-				fmt.Fprintf(os.Stderr, "%s: %s: cannot use dynamic imports with -d flag\n", os.Args[0], file)
-				nerrors++
-				return
-			}
-
-			if local == "_" && remote == "_" {
-				// allow #pragma dynimport _ _ "foo.so"
-				// to force a link of foo.so.
-				havedynamic = 1
-
-				if Headtype == obj.Hdarwin {
-					Machoadddynlib(lib)
-				} else {
-					dynlib = append(dynlib, lib)
-				}
-				continue
-			}
-
-			local = expandpkg(local, pkg)
-			q = ""
-			if i := strings.Index(remote, "#"); i >= 0 {
-				remote, q = remote[:i], remote[i+1:]
-			}
-			s = ctxt.Syms.Lookup(local, 0)
-			if local != f[1] {
-			}
-			if s.Type == 0 || s.Type == obj.SXREF || s.Type == obj.SHOSTOBJ {
-				s.Dynimplib = lib
-				s.Extname = remote
-				s.Dynimpvers = q
-				if s.Type != obj.SHOSTOBJ {
-					s.Type = obj.SDYNIMPORT
-				}
-				havedynamic = 1
-			}
-
-			continue
-		}
-
-		if f[0] == "cgo_import_static" {
-			if len(f) != 2 {
-				goto err
-			}
-			local = f[1]
-			s = ctxt.Syms.Lookup(local, 0)
-			s.Type = obj.SHOSTOBJ
-			s.Size = 0
-			continue
-		}
-
-		if f[0] == "cgo_export_static" || f[0] == "cgo_export_dynamic" {
-			if len(f) < 2 || len(f) > 3 {
-				goto err
-			}
-			local = f[1]
-			if len(f) > 2 {
-				remote = f[2]
-			} else {
-				remote = local
-			}
-			local = expandpkg(local, pkg)
-			s = ctxt.Syms.Lookup(local, 0)
-
-			switch Buildmode {
-			case BuildmodeCShared, BuildmodeCArchive, BuildmodePlugin:
-				if s == ctxt.Syms.Lookup("main", 0) {
-					continue
-				}
-			}
-
-			// export overrides import, for openbsd/cgo.
-			// see issue 4878.
-			if s.Dynimplib != "" {
-				s.Dynimplib = ""
-				s.Extname = ""
-				s.Dynimpvers = ""
-				s.Type = 0
-			}
-
-			if !s.Attr.CgoExport() {
-				s.Extname = remote
-				dynexp = append(dynexp, s)
-			} else if s.Extname != remote {
-				fmt.Fprintf(os.Stderr, "%s: conflicting cgo_export directives: %s as %s and %s\n", os.Args[0], s.Name, s.Extname, remote)
-				nerrors++
-				return
-			}
-
-			if f[0] == "cgo_export_static" {
-				s.Attr |= AttrCgoExportStatic
-			} else {
-				s.Attr |= AttrCgoExportDynamic
-			}
-			if local != f[1] {
-			}
-			continue
-		}
-
-		if f[0] == "cgo_dynamic_linker" {
-			if len(f) != 2 {
-				goto err
-			}
-
-			if *flagInterpreter == "" {
-				if interpreter != "" && interpreter != f[1] {
-					fmt.Fprintf(os.Stderr, "%s: conflict dynlinker: %s and %s\n", os.Args[0], interpreter, f[1])
-					nerrors++
-					return
-				}
-
-				interpreter = f[1]
-			}
-
-			continue
-		}
-
-		if f[0] == "cgo_ldflag" {
-			if len(f) != 2 {
-				goto err
-			}
-			ldflag = append(ldflag, f[1])
-			continue
-		}
-	}
-
-	return
-
-err:
-	fmt.Fprintf(os.Stderr, "%s: %s: invalid dynimport line: %s\n", os.Args[0], file, p0)
-	nerrors++
-}
-
-var seenlib = make(map[string]bool)
-
-func adddynlib(ctxt *Link, lib string) {
-	if seenlib[lib] || Linkmode == LinkExternal {
-		return
-	}
-	seenlib[lib] = true
-
-	if Iself {
-		s := ctxt.Syms.Lookup(".dynstr", 0)
-		if s.Size == 0 {
-			Addstring(s, "")
-		}
-		Elfwritedynent(ctxt, ctxt.Syms.Lookup(".dynamic", 0), DT_NEEDED, uint64(Addstring(s, lib)))
-	} else {
-		Errorf(nil, "adddynlib: unsupported binary format")
-	}
-}
-
-func Adddynsym(ctxt *Link, s *Symbol) {
-	if s.Dynid >= 0 || Linkmode == LinkExternal {
-		return
-	}
-
-	if Iself {
-		Elfadddynsym(ctxt, s)
-	} else if Headtype == obj.Hdarwin {
-		Errorf(s, "adddynsym: missed symbol (Extname=%s)", s.Extname)
-	} else if Headtype == obj.Hwindows {
-		// already taken care of
-	} else {
-		Errorf(s, "adddynsym: unsupported binary format")
-	}
-}
-
-func fieldtrack(ctxt *Link) {
-	// record field tracking references
-	var buf bytes.Buffer
-	for _, s := range ctxt.Syms.Allsym {
-		if strings.HasPrefix(s.Name, "go.track.") {
-			s.Attr |= AttrSpecial // do not lay out in data segment
-			s.Attr |= AttrHidden
-			if s.Attr.Reachable() {
-				buf.WriteString(s.Name[9:])
-				for p := s.Reachparent; p != nil; p = p.Reachparent {
-					buf.WriteString("\t")
-					buf.WriteString(p.Name)
-				}
-				buf.WriteString("\n")
-			}
-
-			s.Type = obj.SCONST
-			s.Value = 0
-		}
-	}
-
-	if *flagFieldTrack == "" {
-		return
-	}
-	s := ctxt.Syms.Lookup(*flagFieldTrack, 0)
-	if !s.Attr.Reachable() {
-		return
-	}
-	addstrdata(ctxt, *flagFieldTrack, buf.String())
-}
-
-func (ctxt *Link) addexport() {
-	if Headtype == obj.Hdarwin {
-		return
-	}
-
-	for _, exp := range dynexp {
-		Adddynsym(ctxt, exp)
-	}
-	for _, lib := range dynlib {
-		adddynlib(ctxt, lib)
-	}
-}
-
-type Pkg struct {
-	mark    bool
-	checked bool
-	path    string
-	impby   []*Pkg
-}
-
-var pkgall []*Pkg
-
-func (p *Pkg) cycle() *Pkg {
-	if p.checked {
-		return nil
-	}
-
-	if p.mark {
-		nerrors++
-		fmt.Printf("import cycle:\n")
-		fmt.Printf("\t%s\n", p.path)
-		return p
-	}
-
-	p.mark = true
-	for _, q := range p.impby {
-		if bad := q.cycle(); bad != nil {
-			p.mark = false
-			p.checked = true
-			fmt.Printf("\timports %s\n", p.path)
-			if bad == p {
-				return nil
-			}
-			return bad
-		}
-	}
-
-	p.checked = true
-	p.mark = false
-	return nil
-}
-
-func importcycles() {
-	for _, p := range pkgall {
-		p.cycle()
-	}
-}
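
The import-cycle check at the end of the deleted go.go hunk is a depth-first walk with two flags per package: mark for "currently on the DFS path" and checked for "already proven acyclic". A minimal standalone sketch of that walk follows; error counting is dropped and the package names are illustrative.

package main

import "fmt"

type pkg struct {
	name    string
	mark    bool // currently on the DFS path
	checked bool // already proven acyclic
	impby   []*pkg
}

// cycle returns a package on an import cycle reachable from p, or nil.
func (p *pkg) cycle() *pkg {
	if p.checked {
		return nil
	}
	if p.mark {
		fmt.Printf("import cycle:\n\t%s\n", p.name)
		return p
	}
	p.mark = true
	for _, q := range p.impby {
		if bad := q.cycle(); bad != nil {
			p.mark = false
			p.checked = true
			fmt.Printf("\timports %s\n", p.name)
			if bad == p {
				return nil // cycle fully reported
			}
			return bad
		}
	}
	p.checked = true
	p.mark = false
	return nil
}

func main() {
	a := &pkg{name: "a"}
	b := &pkg{name: "b", impby: []*pkg{a}}
	a.impby = []*pkg{b} // a and b import each other
	a.cycle()
}
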
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ld.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ld.go
deleted file mode 100644
index 4c73380..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ld.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ld.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ld.go:1
-// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"strconv"
-	"strings"
-)
-
-func addlib(ctxt *Link, src string, obj string, pathname string) *Library {
-	name := path.Clean(pathname)
-
-	// runtime.a -> runtime, runtime.6 -> runtime
-	pkg := name
-	if len(pkg) >= 2 && pkg[len(pkg)-2] == '.' {
-		pkg = pkg[:len(pkg)-2]
-	}
-
-	// already loaded?
-	for i := 0; i < len(ctxt.Library); i++ {
-		if ctxt.Library[i].Pkg == pkg {
-			return ctxt.Library[i]
-		}
-	}
-
-	var pname string
-	isshlib := false
-	if filepath.IsAbs(name) {
-		pname = name
-	} else {
-		// try dot, -L "libdir", and then goroot.
-		for _, dir := range ctxt.Libdir {
-			if *FlagLinkshared {
-				pname = dir + "/" + pkg + ".shlibname"
-				if _, err := os.Stat(pname); err == nil {
-					isshlib = true
-					break
-				}
-			}
-			pname = dir + "/" + name
-			if _, err := os.Stat(pname); err == nil {
-				break
-			}
-		}
-	}
-
-	pname = path.Clean(pname)
-
-	if ctxt.Debugvlog > 1 {
-		ctxt.Logf("%5.2f addlib: %s %s pulls in %s isshlib %v\n", elapsed(), obj, src, pname, isshlib)
-	}
-
-	if isshlib {
-		return addlibpath(ctxt, src, obj, "", pkg, pname)
-	}
-	return addlibpath(ctxt, src, obj, pname, pkg, "")
-}
-
-/*
- * add library to library list, return added library.
- *	srcref: src file referring to package
- *	objref: object file referring to package
- *	file: object file, e.g., /home/rsc/go/pkg/container/vector.a
- *	pkg: package import path, e.g. container/vector
- */
-func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg string, shlibnamefile string) *Library {
-	for i := 0; i < len(ctxt.Library); i++ {
-		if pkg == ctxt.Library[i].Pkg {
-			return ctxt.Library[i]
-		}
-	}
-
-	if ctxt.Debugvlog > 1 {
-		ctxt.Logf("%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s shlibnamefile: %s\n", obj.Cputime(), srcref, objref, file, pkg, shlibnamefile)
-	}
-
-	ctxt.Library = append(ctxt.Library, &Library{})
-	l := ctxt.Library[len(ctxt.Library)-1]
-	l.Objref = objref
-	l.Srcref = srcref
-	l.File = file
-	l.Pkg = pkg
-	if shlibnamefile != "" {
-		shlibbytes, err := ioutil.ReadFile(shlibnamefile)
-		if err != nil {
-			Errorf(nil, "cannot read %s: %v", shlibnamefile, err)
-		}
-		l.Shlib = strings.TrimSpace(string(shlibbytes))
-	}
-	return l
-}
-
-func atolwhex(s string) int64 {
-	n, _ := strconv.ParseInt(s, 0, 64)
-	return n
-}
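
The deleted ld.go hunk reduces library loading to a path search: addlib derives the package name by stripping a two-character suffix (runtime.a -> runtime), then, when linking shared, probes each -L directory for a pkg.shlibname stub before falling back to the archive file itself. A minimal standalone sketch of that lookup order, with an illustrative library directory; it is a simplification, not the linker's actual entry point.

package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

// resolveLib returns the file to load for a package reference like
// "runtime.a" and whether it is a .shlibname stub.
func resolveLib(libdirs []string, name string, linkshared bool) (string, bool) {
	pkg := name
	if len(pkg) >= 2 && pkg[len(pkg)-2] == '.' {
		pkg = pkg[:len(pkg)-2] // runtime.a -> runtime, runtime.6 -> runtime
	}
	if filepath.IsAbs(name) {
		return path.Clean(name), false
	}
	var pname string
	for _, dir := range libdirs {
		if linkshared {
			pname = dir + "/" + pkg + ".shlibname"
			if _, err := os.Stat(pname); err == nil {
				return path.Clean(pname), true
			}
		}
		pname = dir + "/" + name
		if _, err := os.Stat(pname); err == nil {
			break
		}
	}
	return path.Clean(pname), false
}

func main() {
	p, shlib := resolveLib([]string{"/tmp/gopkg"}, "runtime.a", false)
	fmt.Println(p, shlib)
}
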
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldelf.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldelf.go
deleted file mode 100644
index 4202c04..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldelf.go
+++ /dev/null
@@ -1,1218 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ldelf.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ldelf.go:1
-package ld
-
-import (
-	"bytes"
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"log"
-	"sort"
-	"strings"
-)
-
-/*
-Derived from Plan 9 from User Space's src/libmach/elf.h, elf.c
-http://code.swtch.com/plan9port/src/tip/src/libmach/
-
-	Copyright © 2004 Russ Cox.
-	Portions Copyright © 2008-2010 Google Inc.
-	Portions Copyright © 2010 The Go Authors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-*/
-const (
-	ElfClassNone = 0
-	ElfClass32   = 1
-	ElfClass64   = 2
-)
-
-const (
-	ElfDataNone = 0
-	ElfDataLsb  = 1
-	ElfDataMsb  = 2
-)
-
-const (
-	ElfTypeNone         = 0
-	ElfTypeRelocatable  = 1
-	ElfTypeExecutable   = 2
-	ElfTypeSharedObject = 3
-	ElfTypeCore         = 4
-)
-
-const (
-	ElfMachNone        = 0
-	ElfMach32100       = 1
-	ElfMachSparc       = 2
-	ElfMach386         = 3
-	ElfMach68000       = 4
-	ElfMach88000       = 5
-	ElfMach486         = 6
-	ElfMach860         = 7
-	ElfMachMips        = 8
-	ElfMachS370        = 9
-	ElfMachMipsLe      = 10
-	ElfMachParisc      = 15
-	ElfMachVpp500      = 17
-	ElfMachSparc32Plus = 18
-	ElfMach960         = 19
-	ElfMachPower       = 20
-	ElfMachPower64     = 21
-	ElfMachS390        = 22
-	ElfMachV800        = 36
-	ElfMachFr20        = 37
-	ElfMachRh32        = 38
-	ElfMachRce         = 39
-	ElfMachArm         = 40
-	ElfMachAlpha       = 41
-	ElfMachSH          = 42
-	ElfMachSparc9      = 43
-	ElfMachAmd64       = 62
-	ElfMachArm64       = 183
-)
-
-const (
-	ElfAbiNone     = 0
-	ElfAbiSystemV  = 0
-	ElfAbiHPUX     = 1
-	ElfAbiNetBSD   = 2
-	ElfAbiLinux    = 3
-	ElfAbiSolaris  = 6
-	ElfAbiAix      = 7
-	ElfAbiIrix     = 8
-	ElfAbiFreeBSD  = 9
-	ElfAbiTru64    = 10
-	ElfAbiModesto  = 11
-	ElfAbiOpenBSD  = 12
-	ElfAbiARM      = 97
-	ElfAbiEmbedded = 255
-)
-
-const (
-	ElfSectNone      = 0
-	ElfSectProgbits  = 1
-	ElfSectSymtab    = 2
-	ElfSectStrtab    = 3
-	ElfSectRela      = 4
-	ElfSectHash      = 5
-	ElfSectDynamic   = 6
-	ElfSectNote      = 7
-	ElfSectNobits    = 8
-	ElfSectRel       = 9
-	ElfSectShlib     = 10
-	ElfSectDynsym    = 11
-	ElfSectFlagWrite = 0x1
-	ElfSectFlagAlloc = 0x2
-	ElfSectFlagExec  = 0x4
-)
-
-const (
-	ElfSymBindLocal  = 0
-	ElfSymBindGlobal = 1
-	ElfSymBindWeak   = 2
-)
-
-const (
-	ElfSymTypeNone    = 0
-	ElfSymTypeObject  = 1
-	ElfSymTypeFunc    = 2
-	ElfSymTypeSection = 3
-	ElfSymTypeFile    = 4
-	ElfSymTypeCommon  = 5
-	ElfSymTypeTLS     = 6
-)
-
-const (
-	ElfSymShnNone   = 0
-	ElfSymShnAbs    = 0xFFF1
-	ElfSymShnCommon = 0xFFF2
-)
-
-const (
-	ElfProgNone      = 0
-	ElfProgLoad      = 1
-	ElfProgDynamic   = 2
-	ElfProgInterp    = 3
-	ElfProgNote      = 4
-	ElfProgShlib     = 5
-	ElfProgPhdr      = 6
-	ElfProgFlagExec  = 0x1
-	ElfProgFlagWrite = 0x2
-	ElfProgFlagRead  = 0x4
-)
-
-const (
-	ElfNotePrStatus     = 1
-	ElfNotePrFpreg      = 2
-	ElfNotePrPsinfo     = 3
-	ElfNotePrTaskstruct = 4
-	ElfNotePrAuxv       = 6
-	ElfNotePrXfpreg     = 0x46e62b7f
-)
-
-type ElfHdrBytes struct {
-	Ident     [16]uint8
-	Type      [2]uint8
-	Machine   [2]uint8
-	Version   [4]uint8
-	Entry     [4]uint8
-	Phoff     [4]uint8
-	Shoff     [4]uint8
-	Flags     [4]uint8
-	Ehsize    [2]uint8
-	Phentsize [2]uint8
-	Phnum     [2]uint8
-	Shentsize [2]uint8
-	Shnum     [2]uint8
-	Shstrndx  [2]uint8
-}
-
-type ElfSectBytes struct {
-	Name    [4]uint8
-	Type    [4]uint8
-	Flags   [4]uint8
-	Addr    [4]uint8
-	Off     [4]uint8
-	Size    [4]uint8
-	Link    [4]uint8
-	Info    [4]uint8
-	Align   [4]uint8
-	Entsize [4]uint8
-}
-
-type ElfProgBytes struct {
-}
-
-type ElfSymBytes struct {
-	Name  [4]uint8
-	Value [4]uint8
-	Size  [4]uint8
-	Info  uint8
-	Other uint8
-	Shndx [2]uint8
-}
-
-type ElfHdrBytes64 struct {
-	Ident     [16]uint8
-	Type      [2]uint8
-	Machine   [2]uint8
-	Version   [4]uint8
-	Entry     [8]uint8
-	Phoff     [8]uint8
-	Shoff     [8]uint8
-	Flags     [4]uint8
-	Ehsize    [2]uint8
-	Phentsize [2]uint8
-	Phnum     [2]uint8
-	Shentsize [2]uint8
-	Shnum     [2]uint8
-	Shstrndx  [2]uint8
-}
-
-type ElfSectBytes64 struct {
-	Name    [4]uint8
-	Type    [4]uint8
-	Flags   [8]uint8
-	Addr    [8]uint8
-	Off     [8]uint8
-	Size    [8]uint8
-	Link    [4]uint8
-	Info    [4]uint8
-	Align   [8]uint8
-	Entsize [8]uint8
-}
-
-type ElfProgBytes64 struct {
-}
-
-type ElfSymBytes64 struct {
-	Name  [4]uint8
-	Info  uint8
-	Other uint8
-	Shndx [2]uint8
-	Value [8]uint8
-	Size  [8]uint8
-}
-
-type ElfSect struct {
-	name    string
-	nameoff uint32
-	type_   uint32
-	flags   uint64
-	addr    uint64
-	off     uint64
-	size    uint64
-	link    uint32
-	info    uint32
-	align   uint64
-	entsize uint64
-	base    []byte
-	sym     *Symbol
-}
-
-type ElfObj struct {
-	f         *bio.Reader
-	base      int64 // offset in f where ELF begins
-	length    int64 // length of ELF
-	is64      int
-	name      string
-	e         binary.ByteOrder
-	sect      []ElfSect
-	nsect     uint
-	shstrtab  string
-	nsymtab   int
-	symtab    *ElfSect
-	symstr    *ElfSect
-	type_     uint32
-	machine   uint32
-	version   uint32
-	entry     uint64
-	phoff     uint64
-	shoff     uint64
-	flags     uint32
-	ehsize    uint32
-	phentsize uint32
-	phnum     uint32
-	shentsize uint32
-	shnum     uint32
-	shstrndx  uint32
-}
-
-type ElfSym struct {
-	name  string
-	value uint64
-	size  uint64
-	bind  uint8
-	type_ uint8
-	other uint8
-	shndx uint16
-	sym   *Symbol
-}
-
-var ElfMagic = [4]uint8{0x7F, 'E', 'L', 'F'}
-
-const (
-	TagFile               = 1
-	TagCPUName            = 4
-	TagCPURawName         = 5
-	TagCompatibility      = 32
-	TagNoDefaults         = 64
-	TagAlsoCompatibleWith = 65
-	TagABIVFPArgs         = 28
-)
-
-type elfAttribute struct {
-	tag  uint64
-	sval string
-	ival uint64
-}
-
-type elfAttributeList struct {
-	data []byte
-	err  error
-}
-
-func (a *elfAttributeList) string() string {
-	if a.err != nil {
-		return ""
-	}
-	nul := bytes.IndexByte(a.data, 0)
-	if nul < 0 {
-		a.err = io.EOF
-		return ""
-	}
-	s := string(a.data[:nul])
-	a.data = a.data[nul+1:]
-	return s
-}
-
-func (a *elfAttributeList) uleb128() uint64 {
-	if a.err != nil {
-		return 0
-	}
-	v, size := binary.Uvarint(a.data)
-	a.data = a.data[size:]
-	return v
-}
-
-// Read an elfAttribute from the list following the rules used on ARM systems.
-func (a *elfAttributeList) armAttr() elfAttribute {
-	attr := elfAttribute{tag: a.uleb128()}
-	switch {
-	case attr.tag == TagCompatibility:
-		attr.ival = a.uleb128()
-		attr.sval = a.string()
-
-	case attr.tag == 64: // Tag_nodefaults has no argument
-
-	case attr.tag == 65: // Tag_also_compatible_with
-		// Not really, but we don't actually care about this tag.
-		attr.sval = a.string()
-
-	// Tag with string argument
-	case attr.tag == TagCPUName || attr.tag == TagCPURawName || (attr.tag >= 32 && attr.tag&1 != 0):
-		attr.sval = a.string()
-
-	default: // Tag with integer argument
-		attr.ival = a.uleb128()
-	}
-	return attr
-}
-
-func (a *elfAttributeList) done() bool {
-	if a.err != nil || len(a.data) == 0 {
-		return true
-	}
-	return false
-}
-
-// Look for the attribute that indicates the object uses the hard-float ABI (a
-// file-level attribute with tag Tag_VFP_arch and value 1). Unfortunately the
-// format used means that we have to parse all of the file-level attributes to
-// find the one we are looking for. This format is slightly documented in "ELF
-// for the ARM Architecture" but mostly this is derived from reading the source
-// to gold and readelf.
-func parseArmAttributes(ctxt *Link, e binary.ByteOrder, data []byte) {
-	// We assume the soft-float ABI unless we see a tag indicating otherwise.
-	if ehdr.flags == 0x5000002 {
-		ehdr.flags = 0x5000202
-	}
-	if data[0] != 'A' {
-		// TODO(dfc) should this be ctxt.Diag ?
-		ctxt.Logf(".ARM.attributes has unexpected format %c\n", data[0])
-		return
-	}
-	data = data[1:]
-	for len(data) != 0 {
-		sectionlength := e.Uint32(data)
-		sectiondata := data[4:sectionlength]
-		data = data[sectionlength:]
-
-		nulIndex := bytes.IndexByte(sectiondata, 0)
-		if nulIndex < 0 {
-			// TODO(dfc) should this be ctxt.Diag ?
-			ctxt.Logf("corrupt .ARM.attributes (section name not NUL-terminated)\n")
-			return
-		}
-		name := string(sectiondata[:nulIndex])
-		sectiondata = sectiondata[nulIndex+1:]
-
-		if name != "aeabi" {
-			continue
-		}
-		for len(sectiondata) != 0 {
-			subsectiontag, sz := binary.Uvarint(sectiondata)
-			subsectionsize := e.Uint32(sectiondata[sz:])
-			subsectiondata := sectiondata[sz+4 : subsectionsize]
-			sectiondata = sectiondata[subsectionsize:]
-
-			if subsectiontag == TagFile {
-				attrList := elfAttributeList{data: subsectiondata}
-				for !attrList.done() {
-					attr := attrList.armAttr()
-					if attr.tag == TagABIVFPArgs && attr.ival == 1 {
-						ehdr.flags = 0x5000402 // has entry point, Version5 EABI, hard-float ABI
-					}
-				}
-				if attrList.err != nil {
-					// TODO(dfc) should this be ctxt.Diag ?
-					ctxt.Logf("could not parse .ARM.attributes\n")
-				}
-			}
-		}
-	}
-}
-
-func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f ldelf %s\n", obj.Cputime(), pn)
-	}
-
-	localSymVersion := ctxt.Syms.IncVersion()
-	base := f.Offset()
-
-	var add uint64
-	var e binary.ByteOrder
-	var elfobj *ElfObj
-	var err error
-	var flag int
-	var hdr *ElfHdrBytes
-	var hdrbuf [64]uint8
-	var info uint64
-	var is64 int
-	var j int
-	var n int
-	var name string
-	var p []byte
-	var r []Reloc
-	var rela int
-	var rp *Reloc
-	var rsect *ElfSect
-	var s *Symbol
-	var sect *ElfSect
-	var sym ElfSym
-	var symbols []*Symbol
-	if _, err := io.ReadFull(f, hdrbuf[:]); err != nil {
-		goto bad
-	}
-	hdr = new(ElfHdrBytes)
-	binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
-	if string(hdr.Ident[:4]) != "\x7FELF" {
-		goto bad
-	}
-	switch hdr.Ident[5] {
-	case ElfDataLsb:
-		e = binary.LittleEndian
-
-	case ElfDataMsb:
-		e = binary.BigEndian
-
-	default:
-		goto bad
-	}
-
-	// read header
-	elfobj = new(ElfObj)
-
-	elfobj.e = e
-	elfobj.f = f
-	elfobj.base = base
-	elfobj.length = length
-	elfobj.name = pn
-
-	is64 = 0
-	if hdr.Ident[4] == ElfClass64 {
-		is64 = 1
-		hdr := new(ElfHdrBytes64)
-		binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
-		elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
-		elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
-		elfobj.version = e.Uint32(hdr.Version[:])
-		elfobj.phoff = e.Uint64(hdr.Phoff[:])
-		elfobj.shoff = e.Uint64(hdr.Shoff[:])
-		elfobj.flags = e.Uint32(hdr.Flags[:])
-		elfobj.ehsize = uint32(e.Uint16(hdr.Ehsize[:]))
-		elfobj.phentsize = uint32(e.Uint16(hdr.Phentsize[:]))
-		elfobj.phnum = uint32(e.Uint16(hdr.Phnum[:]))
-		elfobj.shentsize = uint32(e.Uint16(hdr.Shentsize[:]))
-		elfobj.shnum = uint32(e.Uint16(hdr.Shnum[:]))
-		elfobj.shstrndx = uint32(e.Uint16(hdr.Shstrndx[:]))
-	} else {
-		elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
-		elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
-		elfobj.version = e.Uint32(hdr.Version[:])
-		elfobj.entry = uint64(e.Uint32(hdr.Entry[:]))
-		elfobj.phoff = uint64(e.Uint32(hdr.Phoff[:]))
-		elfobj.shoff = uint64(e.Uint32(hdr.Shoff[:]))
-		elfobj.flags = e.Uint32(hdr.Flags[:])
-		elfobj.ehsize = uint32(e.Uint16(hdr.Ehsize[:]))
-		elfobj.phentsize = uint32(e.Uint16(hdr.Phentsize[:]))
-		elfobj.phnum = uint32(e.Uint16(hdr.Phnum[:]))
-		elfobj.shentsize = uint32(e.Uint16(hdr.Shentsize[:]))
-		elfobj.shnum = uint32(e.Uint16(hdr.Shnum[:]))
-		elfobj.shstrndx = uint32(e.Uint16(hdr.Shstrndx[:]))
-	}
-
-	elfobj.is64 = is64
-
-	if uint32(hdr.Ident[6]) != elfobj.version {
-		goto bad
-	}
-
-	if e.Uint16(hdr.Type[:]) != ElfTypeRelocatable {
-		Errorf(nil, "%s: elf but not elf relocatable object", pn)
-		return
-	}
-
-	switch SysArch.Family {
-	default:
-		Errorf(nil, "%s: elf %s unimplemented", pn, SysArch.Name)
-		return
-
-	case sys.MIPS:
-		if elfobj.machine != ElfMachMips || hdr.Ident[4] != ElfClass32 {
-			Errorf(nil, "%s: elf object but not mips", pn)
-			return
-		}
-
-	case sys.MIPS64:
-		if elfobj.machine != ElfMachMips || hdr.Ident[4] != ElfClass64 {
-			Errorf(nil, "%s: elf object but not mips64", pn)
-			return
-		}
-
-	case sys.ARM:
-		if e != binary.LittleEndian || elfobj.machine != ElfMachArm || hdr.Ident[4] != ElfClass32 {
-			Errorf(nil, "%s: elf object but not arm", pn)
-			return
-		}
-
-	case sys.AMD64:
-		if e != binary.LittleEndian || elfobj.machine != ElfMachAmd64 || hdr.Ident[4] != ElfClass64 {
-			Errorf(nil, "%s: elf object but not amd64", pn)
-			return
-		}
-
-	case sys.ARM64:
-		if e != binary.LittleEndian || elfobj.machine != ElfMachArm64 || hdr.Ident[4] != ElfClass64 {
-			Errorf(nil, "%s: elf object but not arm64", pn)
-			return
-		}
-
-	case sys.I386:
-		if e != binary.LittleEndian || elfobj.machine != ElfMach386 || hdr.Ident[4] != ElfClass32 {
-			Errorf(nil, "%s: elf object but not 386", pn)
-			return
-		}
-
-	case sys.PPC64:
-		if elfobj.machine != ElfMachPower64 || hdr.Ident[4] != ElfClass64 {
-			Errorf(nil, "%s: elf object but not ppc64", pn)
-			return
-		}
-
-	case sys.S390X:
-		if elfobj.machine != ElfMachS390 || hdr.Ident[4] != ElfClass64 {
-			Errorf(nil, "%s: elf object but not s390x", pn)
-			return
-		}
-	}
-
-	// load section list into memory.
-	elfobj.sect = make([]ElfSect, elfobj.shnum)
-
-	elfobj.nsect = uint(elfobj.shnum)
-	for i := 0; uint(i) < elfobj.nsect; i++ {
-		if f.Seek(int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 {
-			goto bad
-		}
-		sect = &elfobj.sect[i]
-		if is64 != 0 {
-			var b ElfSectBytes64
-
-			if err = binary.Read(f, e, &b); err != nil {
-				goto bad
-			}
-
-			sect.nameoff = e.Uint32(b.Name[:])
-			sect.type_ = e.Uint32(b.Type[:])
-			sect.flags = e.Uint64(b.Flags[:])
-			sect.addr = e.Uint64(b.Addr[:])
-			sect.off = e.Uint64(b.Off[:])
-			sect.size = e.Uint64(b.Size[:])
-			sect.link = e.Uint32(b.Link[:])
-			sect.info = e.Uint32(b.Info[:])
-			sect.align = e.Uint64(b.Align[:])
-			sect.entsize = e.Uint64(b.Entsize[:])
-		} else {
-			var b ElfSectBytes
-
-			if err = binary.Read(f, e, &b); err != nil {
-				goto bad
-			}
-
-			sect.nameoff = e.Uint32(b.Name[:])
-			sect.type_ = e.Uint32(b.Type[:])
-			sect.flags = uint64(e.Uint32(b.Flags[:]))
-			sect.addr = uint64(e.Uint32(b.Addr[:]))
-			sect.off = uint64(e.Uint32(b.Off[:]))
-			sect.size = uint64(e.Uint32(b.Size[:]))
-			sect.link = e.Uint32(b.Link[:])
-			sect.info = e.Uint32(b.Info[:])
-			sect.align = uint64(e.Uint32(b.Align[:]))
-			sect.entsize = uint64(e.Uint32(b.Entsize[:]))
-		}
-	}
-
-	// read section string table and translate names
-	if elfobj.shstrndx >= uint32(elfobj.nsect) {
-		err = fmt.Errorf("shstrndx out of range %d >= %d", elfobj.shstrndx, elfobj.nsect)
-		goto bad
-	}
-
-	sect = &elfobj.sect[elfobj.shstrndx]
-	if err = elfmap(elfobj, sect); err != nil {
-		goto bad
-	}
-	for i := 0; uint(i) < elfobj.nsect; i++ {
-		if elfobj.sect[i].nameoff != 0 {
-			elfobj.sect[i].name = cstring(sect.base[elfobj.sect[i].nameoff:])
-		}
-	}
-
-	// load string table for symbols into memory.
-	elfobj.symtab = section(elfobj, ".symtab")
-
-	if elfobj.symtab == nil {
-		// our work is done here - no symbols means nothing can refer to this file
-		return
-	}
-
-	if elfobj.symtab.link <= 0 || elfobj.symtab.link >= uint32(elfobj.nsect) {
-		Errorf(nil, "%s: elf object has symbol table with invalid string table link", pn)
-		return
-	}
-
-	elfobj.symstr = &elfobj.sect[elfobj.symtab.link]
-	if is64 != 0 {
-		elfobj.nsymtab = int(elfobj.symtab.size / ELF64SYMSIZE)
-	} else {
-		elfobj.nsymtab = int(elfobj.symtab.size / ELF32SYMSIZE)
-	}
-
-	if err = elfmap(elfobj, elfobj.symtab); err != nil {
-		goto bad
-	}
-	if err = elfmap(elfobj, elfobj.symstr); err != nil {
-		goto bad
-	}
-
-	// load text and data segments into memory.
-	// they are not as small as the section lists, but we'll need
-	// the memory anyway for the symbol images, so we might
-	// as well use one large chunk.
-
-	// create symbols for elfmapped sections
-	for i := 0; uint(i) < elfobj.nsect; i++ {
-		sect = &elfobj.sect[i]
-		if sect.type_ == SHT_ARM_ATTRIBUTES && sect.name == ".ARM.attributes" {
-			if err = elfmap(elfobj, sect); err != nil {
-				goto bad
-			}
-			parseArmAttributes(ctxt, e, sect.base[:sect.size])
-		}
-		if (sect.type_ != ElfSectProgbits && sect.type_ != ElfSectNobits) || sect.flags&ElfSectFlagAlloc == 0 {
-			continue
-		}
-		if sect.type_ != ElfSectNobits {
-			if err = elfmap(elfobj, sect); err != nil {
-				goto bad
-			}
-		}
-
-		name = fmt.Sprintf("%s(%s)", pkg, sect.name)
-		s = ctxt.Syms.Lookup(name, localSymVersion)
-
-		switch int(sect.flags) & (ElfSectFlagAlloc | ElfSectFlagWrite | ElfSectFlagExec) {
-		default:
-			err = fmt.Errorf("unexpected flags for ELF section %s", sect.name)
-			goto bad
-
-		case ElfSectFlagAlloc:
-			s.Type = obj.SRODATA
-
-		case ElfSectFlagAlloc + ElfSectFlagWrite:
-			if sect.type_ == ElfSectNobits {
-				s.Type = obj.SNOPTRBSS
-			} else {
-				s.Type = obj.SNOPTRDATA
-			}
-
-		case ElfSectFlagAlloc + ElfSectFlagExec:
-			s.Type = obj.STEXT
-		}
-
-		if sect.name == ".got" || sect.name == ".toc" {
-			s.Type = obj.SELFGOT
-		}
-		if sect.type_ == ElfSectProgbits {
-			s.P = sect.base
-			s.P = s.P[:sect.size]
-		}
-
-		s.Size = int64(sect.size)
-		s.Align = int32(sect.align)
-		sect.sym = s
-	}
-
-	// enter sub-symbols into symbol table.
-	// symbol 0 is the null symbol.
-	symbols = make([]*Symbol, elfobj.nsymtab)
-
-	for i := 1; i < elfobj.nsymtab; i++ {
-		if err = readelfsym(ctxt, elfobj, i, &sym, 1, localSymVersion); err != nil {
-			goto bad
-		}
-		symbols[i] = sym.sym
-		if sym.type_ != ElfSymTypeFunc && sym.type_ != ElfSymTypeObject && sym.type_ != ElfSymTypeNone && sym.type_ != ElfSymTypeCommon {
-			continue
-		}
-		if sym.shndx == ElfSymShnCommon || sym.type_ == ElfSymTypeCommon {
-			s = sym.sym
-			if uint64(s.Size) < sym.size {
-				s.Size = int64(sym.size)
-			}
-			if s.Type == 0 || s.Type == obj.SXREF {
-				s.Type = obj.SNOPTRBSS
-			}
-			continue
-		}
-
-		if uint(sym.shndx) >= elfobj.nsect || sym.shndx == 0 {
-			continue
-		}
-
-		// even when we pass needSym == 1 to readelfsym, it might still return nil to skip some unwanted symbols
-		if sym.sym == nil {
-			continue
-		}
-		sect = &elfobj.sect[sym.shndx]
-		if sect.sym == nil {
-			if strings.HasPrefix(sym.name, ".Linfo_string") { // clang does this
-				continue
-			}
-
-			if sym.name == "" && sym.type_ == 0 && sect.name == ".debug_str" {
-				// This reportedly happens with clang 3.7 on ARM.
-				// See issue 13139.
-				continue
-			}
-
-			if strings.HasPrefix(sym.name, ".LASF") { // gcc on s390x does this
-				continue
-			}
-			Errorf(sym.sym, "%s: sym#%d: ignoring symbol in section %d (type %d)", pn, i, sym.shndx, sym.type_)
-			continue
-		}
-
-		s = sym.sym
-		if s.Outer != nil {
-			if s.Attr.DuplicateOK() {
-				continue
-			}
-			Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name)
-		}
-
-		s.Sub = sect.sym.Sub
-		sect.sym.Sub = s
-		s.Type = sect.sym.Type | s.Type&^obj.SMASK | obj.SSUB
-		if !s.Attr.CgoExportDynamic() {
-			s.Dynimplib = "" // satisfy dynimport
-		}
-		s.Value = int64(sym.value)
-		s.Size = int64(sym.size)
-		s.Outer = sect.sym
-		if sect.sym.Type == obj.STEXT {
-			if s.Attr.External() && !s.Attr.DuplicateOK() {
-				Errorf(s, "%s: duplicate symbol definition", pn)
-			}
-			s.Attr |= AttrExternal
-		}
-
-		if elfobj.machine == ElfMachPower64 {
-			flag = int(sym.other) >> 5
-			if 2 <= flag && flag <= 6 {
-				s.Localentry = 1 << uint(flag-2)
-			} else if flag == 7 {
-				Errorf(s, "%s: invalid sym.other 0x%x", pn, sym.other)
-			}
-		}
-	}
-
-	// Sort outer lists by address, adding to textp.
-	// This keeps textp in increasing address order.
-	for i := 0; uint(i) < elfobj.nsect; i++ {
-		s = elfobj.sect[i].sym
-		if s == nil {
-			continue
-		}
-		if s.Sub != nil {
-			s.Sub = listsort(s.Sub)
-		}
-		if s.Type == obj.STEXT {
-			if s.Attr.OnList() {
-				log.Fatalf("symbol %s listed multiple times", s.Name)
-			}
-			s.Attr |= AttrOnList
-			ctxt.Textp = append(ctxt.Textp, s)
-			for s = s.Sub; s != nil; s = s.Sub {
-				if s.Attr.OnList() {
-					log.Fatalf("symbol %s listed multiple times", s.Name)
-				}
-				s.Attr |= AttrOnList
-				ctxt.Textp = append(ctxt.Textp, s)
-			}
-		}
-	}
-
-	// load relocations
-	for i := 0; uint(i) < elfobj.nsect; i++ {
-		rsect = &elfobj.sect[i]
-		if rsect.type_ != ElfSectRela && rsect.type_ != ElfSectRel {
-			continue
-		}
-		if rsect.info >= uint32(elfobj.nsect) || elfobj.sect[rsect.info].base == nil {
-			continue
-		}
-		sect = &elfobj.sect[rsect.info]
-		if err = elfmap(elfobj, rsect); err != nil {
-			goto bad
-		}
-		rela = 0
-		if rsect.type_ == ElfSectRela {
-			rela = 1
-		}
-		n = int(rsect.size / uint64(4+4*is64) / uint64(2+rela))
-		r = make([]Reloc, n)
-		p = rsect.base
-		for j = 0; j < n; j++ {
-			add = 0
-			rp = &r[j]
-			if is64 != 0 {
-				// 64-bit rel/rela
-				rp.Off = int32(e.Uint64(p))
-
-				p = p[8:]
-				info = e.Uint64(p)
-				p = p[8:]
-				if rela != 0 {
-					add = e.Uint64(p)
-					p = p[8:]
-				}
-			} else {
-				// 32-bit rel/rela
-				rp.Off = int32(e.Uint32(p))
-
-				p = p[4:]
-				info = uint64(e.Uint32(p))
-				info = info>>8<<32 | info&0xff // convert to 64-bit info
-				p = p[4:]
-				if rela != 0 {
-					add = uint64(e.Uint32(p))
-					p = p[4:]
-				}
-			}
-
-			if info&0xffffffff == 0 { // skip R_*_NONE relocation
-				j--
-				n--
-				continue
-			}
-
-			if info>>32 == 0 { // absolute relocation, don't bother reading the null symbol
-				rp.Sym = nil
-			} else {
-				if err = readelfsym(ctxt, elfobj, int(info>>32), &sym, 0, 0); err != nil {
-					goto bad
-				}
-				sym.sym = symbols[info>>32]
-				if sym.sym == nil {
-					err = fmt.Errorf("%s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", sect.sym.Name, j, int(info>>32), sym.name, sym.shndx, sym.type_)
-					goto bad
-				}
-
-				rp.Sym = sym.sym
-			}
-
-			rp.Type = 256 + obj.RelocType(info)
-			rp.Siz = relSize(ctxt, pn, uint32(info))
-			if rela != 0 {
-				rp.Add = int64(add)
-			} else {
-				// load addend from image
-				if rp.Siz == 4 {
-					rp.Add = int64(e.Uint32(sect.base[rp.Off:]))
-				} else if rp.Siz == 8 {
-					rp.Add = int64(e.Uint64(sect.base[rp.Off:]))
-				} else {
-					Errorf(nil, "invalid rela size %d", rp.Siz)
-				}
-			}
-
-			if rp.Siz == 2 {
-				rp.Add = int64(int16(rp.Add))
-			}
-			if rp.Siz == 4 {
-				rp.Add = int64(int32(rp.Add))
-			}
-		}
-
-		//print("rel %s %d %d %s %#llx\n", sect->sym->name, rp->type, rp->siz, rp->sym->name, rp->add);
-		sort.Sort(rbyoff(r[:n]))
-		// just in case
-
-		s = sect.sym
-		s.R = r
-		s.R = s.R[:n]
-	}
-
-	return
-
-bad:
-	Errorf(nil, "%s: malformed elf file: %v", pn, err)
-}
-
-func section(elfobj *ElfObj, name string) *ElfSect {
-	for i := 0; uint(i) < elfobj.nsect; i++ {
-		if elfobj.sect[i].name != "" && name != "" && elfobj.sect[i].name == name {
-			return &elfobj.sect[i]
-		}
-	}
-	return nil
-}
-
-func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) {
-	if sect.base != nil {
-		return nil
-	}
-
-	if sect.off+sect.size > uint64(elfobj.length) {
-		err = fmt.Errorf("elf section past end of file")
-		return err
-	}
-
-	sect.base = make([]byte, sect.size)
-	if elfobj.f.Seek(int64(uint64(elfobj.base)+sect.off), 0) < 0 {
-		return fmt.Errorf("short read: seek not successful")
-	}
-	if _, err := io.ReadFull(elfobj.f, sect.base); err != nil {
-		return fmt.Errorf("short read: %v", err)
-	}
-
-	return nil
-}
-
-func readelfsym(ctxt *Link, elfobj *ElfObj, i int, sym *ElfSym, needSym int, localSymVersion int) (err error) {
-	if i >= elfobj.nsymtab || i < 0 {
-		err = fmt.Errorf("invalid elf symbol index")
-		return err
-	}
-
-	if i == 0 {
-		Errorf(nil, "readym: read null symbol!")
-	}
-
-	if elfobj.is64 != 0 {
-		b := new(ElfSymBytes64)
-		binary.Read(bytes.NewReader(elfobj.symtab.base[i*ELF64SYMSIZE:(i+1)*ELF64SYMSIZE]), elfobj.e, b)
-		sym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):])
-		sym.value = elfobj.e.Uint64(b.Value[:])
-		sym.size = elfobj.e.Uint64(b.Size[:])
-		sym.shndx = elfobj.e.Uint16(b.Shndx[:])
-		sym.bind = b.Info >> 4
-		sym.type_ = b.Info & 0xf
-		sym.other = b.Other
-	} else {
-		b := new(ElfSymBytes)
-		binary.Read(bytes.NewReader(elfobj.symtab.base[i*ELF32SYMSIZE:(i+1)*ELF32SYMSIZE]), elfobj.e, b)
-		sym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):])
-		sym.value = uint64(elfobj.e.Uint32(b.Value[:]))
-		sym.size = uint64(elfobj.e.Uint32(b.Size[:]))
-		sym.shndx = elfobj.e.Uint16(b.Shndx[:])
-		sym.bind = b.Info >> 4
-		sym.type_ = b.Info & 0xf
-		sym.other = b.Other
-	}
-
-	var s *Symbol
-	if sym.name == "_GLOBAL_OFFSET_TABLE_" {
-		sym.name = ".got"
-	}
-	if sym.name == ".TOC." {
-		// Magic symbol on ppc64.  Will be set to this object
-		// file's .got+0x8000.
-		sym.bind = ElfSymBindLocal
-	}
-
-	switch sym.type_ {
-	case ElfSymTypeSection:
-		s = elfobj.sect[sym.shndx].sym
-
-	case ElfSymTypeObject, ElfSymTypeFunc, ElfSymTypeNone, ElfSymTypeCommon:
-		switch sym.bind {
-		case ElfSymBindGlobal:
-			if needSym != 0 {
-				s = ctxt.Syms.Lookup(sym.name, 0)
-
-				// for global scoped hidden symbols we should insert it into
-				// symbol hash table, but mark them as hidden.
-				// __i686.get_pc_thunk.bx is allowed to be duplicated, to
-				// workaround that we set dupok.
-				// TODO(minux): correctly handle __i686.get_pc_thunk.bx without
-				// set dupok generally. See http://codereview.appspot.com/5823055/
-				// comment #5 for details.
-				if s != nil && sym.other == 2 {
-					s.Type |= obj.SHIDDEN
-					s.Attr |= AttrDuplicateOK
-				}
-			}
-
-		case ElfSymBindLocal:
-			if SysArch.Family == sys.ARM && (strings.HasPrefix(sym.name, "$a") || strings.HasPrefix(sym.name, "$d")) {
-				// binutils for arm generate these mapping
-				// symbols, ignore these
-				break
-			}
-
-			if sym.name == ".TOC." {
-				// We need to be able to look this up,
-				// so put it in the hash table.
-				if needSym != 0 {
-					s = ctxt.Syms.Lookup(sym.name, localSymVersion)
-					s.Type |= obj.SHIDDEN
-				}
-
-				break
-			}
-
-			if needSym != 0 {
-				// local names and hidden global names are unique
-				// and should only be referenced by their index, not name, so we
-				// don't bother to add them into the hash table
-				s = ctxt.Syms.newsym(sym.name, localSymVersion)
-
-				s.Type |= obj.SHIDDEN
-			}
-
-		case ElfSymBindWeak:
-			if needSym != 0 {
-				s = ctxt.Syms.Lookup(sym.name, 0)
-				if sym.other == 2 {
-					s.Type |= obj.SHIDDEN
-				}
-			}
-
-		default:
-			err = fmt.Errorf("%s: invalid symbol binding %d", sym.name, sym.bind)
-			return err
-		}
-	}
-
-	if s != nil && s.Type == 0 && sym.type_ != ElfSymTypeSection {
-		s.Type = obj.SXREF
-	}
-	sym.sym = s
-
-	return nil
-}
-
-type rbyoff []Reloc
-
-func (x rbyoff) Len() int {
-	return len(x)
-}
-
-func (x rbyoff) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x rbyoff) Less(i, j int) bool {
-	a := &x[i]
-	b := &x[j]
-	if a.Off < b.Off {
-		return true
-	}
-	if a.Off > b.Off {
-		return false
-	}
-	return false
-}
-
-func relSize(ctxt *Link, pn string, elftype uint32) uint8 {
-	// TODO(mdempsky): Replace this with a struct-valued switch statement
-	// once golang.org/issue/15164 is fixed or found to not impair cmd/link
-	// performance.
-
-	const (
-		AMD64 = uint32(sys.AMD64)
-		ARM   = uint32(sys.ARM)
-		I386  = uint32(sys.I386)
-		PPC64 = uint32(sys.PPC64)
-		S390X = uint32(sys.S390X)
-	)
-
-	switch uint32(SysArch.Family) | elftype<<24 {
-	default:
-		Errorf(nil, "%s: unknown relocation type %d; compiled without -fpic?", pn, elftype)
-		fallthrough
-
-	case S390X | R_390_8<<24:
-		return 1
-
-	case PPC64 | R_PPC64_TOC16<<24,
-		PPC64 | R_PPC64_TOC16_LO<<24,
-		PPC64 | R_PPC64_TOC16_HI<<24,
-		PPC64 | R_PPC64_TOC16_HA<<24,
-		PPC64 | R_PPC64_TOC16_DS<<24,
-		PPC64 | R_PPC64_TOC16_LO_DS<<24,
-		PPC64 | R_PPC64_REL16_LO<<24,
-		PPC64 | R_PPC64_REL16_HI<<24,
-		PPC64 | R_PPC64_REL16_HA<<24,
-		S390X | R_390_16<<24,
-		S390X | R_390_GOT16<<24,
-		S390X | R_390_PC16<<24,
-		S390X | R_390_PC16DBL<<24,
-		S390X | R_390_PLT16DBL<<24:
-		return 2
-
-	case ARM | R_ARM_ABS32<<24,
-		ARM | R_ARM_GOT32<<24,
-		ARM | R_ARM_PLT32<<24,
-		ARM | R_ARM_GOTOFF<<24,
-		ARM | R_ARM_GOTPC<<24,
-		ARM | R_ARM_THM_PC22<<24,
-		ARM | R_ARM_REL32<<24,
-		ARM | R_ARM_CALL<<24,
-		ARM | R_ARM_V4BX<<24,
-		ARM | R_ARM_GOT_PREL<<24,
-		ARM | R_ARM_PC24<<24,
-		ARM | R_ARM_JUMP24<<24,
-		AMD64 | R_X86_64_PC32<<24,
-		AMD64 | R_X86_64_PLT32<<24,
-		AMD64 | R_X86_64_GOTPCREL<<24,
-		AMD64 | R_X86_64_GOTPCRELX<<24,
-		AMD64 | R_X86_64_REX_GOTPCRELX<<24,
-		I386 | R_386_32<<24,
-		I386 | R_386_PC32<<24,
-		I386 | R_386_GOT32<<24,
-		I386 | R_386_PLT32<<24,
-		I386 | R_386_GOTOFF<<24,
-		I386 | R_386_GOTPC<<24,
-		I386 | R_386_GOT32X<<24,
-		PPC64 | R_PPC64_REL24<<24,
-		PPC64 | R_PPC_REL32<<24,
-		S390X | R_390_32<<24,
-		S390X | R_390_PC32<<24,
-		S390X | R_390_GOT32<<24,
-		S390X | R_390_PLT32<<24,
-		S390X | R_390_PC32DBL<<24,
-		S390X | R_390_PLT32DBL<<24,
-		S390X | R_390_GOTPCDBL<<24,
-		S390X | R_390_GOTENT<<24:
-		return 4
-
-	case AMD64 | R_X86_64_64<<24,
-		PPC64 | R_PPC64_ADDR64<<24,
-		S390X | R_390_GLOB_DAT<<24,
-		S390X | R_390_RELATIVE<<24,
-		S390X | R_390_GOTOFF<<24,
-		S390X | R_390_GOTPC<<24,
-		S390X | R_390_64<<24,
-		S390X | R_390_PC64<<24,
-		S390X | R_390_GOT64<<24,
-		S390X | R_390_PLT64<<24:
-		return 8
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldmacho.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldmacho.go
deleted file mode 100644
index e0043a9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldmacho.go
+++ /dev/null
@@ -1,907 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ldmacho.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ldmacho.go:1
-package ld
-
-import (
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"log"
-	"sort"
-)
-
-/*
-Derived from Plan 9 from User Space's src/libmach/elf.h, elf.c
-http://code.swtch.com/plan9port/src/tip/src/libmach/
-
-	Copyright © 2004 Russ Cox.
-	Portions Copyright © 2008-2010 Google Inc.
-	Portions Copyright © 2010 The Go Authors.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-*/
-const (
-	N_EXT  = 0x01
-	N_TYPE = 0x1e
-	N_STAB = 0xe0
-)
-
-type ldMachoObj struct {
-	f          *bio.Reader
-	base       int64 // off in f where Mach-O begins
-	length     int64 // length of Mach-O
-	is64       bool
-	name       string
-	e          binary.ByteOrder
-	cputype    uint
-	subcputype uint
-	filetype   uint32
-	flags      uint32
-	cmd        []ldMachoCmd
-	ncmd       uint
-}
-
-type ldMachoCmd struct {
-	type_ int
-	off   uint32
-	size  uint32
-	seg   ldMachoSeg
-	sym   ldMachoSymtab
-	dsym  ldMachoDysymtab
-}
-
-type ldMachoSeg struct {
-	name     string
-	vmaddr   uint64
-	vmsize   uint64
-	fileoff  uint32
-	filesz   uint32
-	maxprot  uint32
-	initprot uint32
-	nsect    uint32
-	flags    uint32
-	sect     []ldMachoSect
-}
-
-type ldMachoSect struct {
-	name    string
-	segname string
-	addr    uint64
-	size    uint64
-	off     uint32
-	align   uint32
-	reloff  uint32
-	nreloc  uint32
-	flags   uint32
-	res1    uint32
-	res2    uint32
-	sym     *Symbol
-	rel     []ldMachoRel
-}
-
-type ldMachoRel struct {
-	addr      uint32
-	symnum    uint32
-	pcrel     uint8
-	length    uint8
-	extrn     uint8
-	type_     uint8
-	scattered uint8
-	value     uint32
-}
-
-type ldMachoSymtab struct {
-	symoff  uint32
-	nsym    uint32
-	stroff  uint32
-	strsize uint32
-	str     []byte
-	sym     []ldMachoSym
-}
-
-type ldMachoSym struct {
-	name    string
-	type_   uint8
-	sectnum uint8
-	desc    uint16
-	kind    int8
-	value   uint64
-	sym     *Symbol
-}
-
-type ldMachoDysymtab struct {
-	ilocalsym      uint32
-	nlocalsym      uint32
-	iextdefsym     uint32
-	nextdefsym     uint32
-	iundefsym      uint32
-	nundefsym      uint32
-	tocoff         uint32
-	ntoc           uint32
-	modtaboff      uint32
-	nmodtab        uint32
-	extrefsymoff   uint32
-	nextrefsyms    uint32
-	indirectsymoff uint32
-	nindirectsyms  uint32
-	extreloff      uint32
-	nextrel        uint32
-	locreloff      uint32
-	nlocrel        uint32
-	indir          []uint32
-}
-
-const (
-	LdMachoCpuVax         = 1
-	LdMachoCpu68000       = 6
-	LdMachoCpu386         = 7
-	LdMachoCpuAmd64       = 0x1000007
-	LdMachoCpuMips        = 8
-	LdMachoCpu98000       = 10
-	LdMachoCpuHppa        = 11
-	LdMachoCpuArm         = 12
-	LdMachoCpu88000       = 13
-	LdMachoCpuSparc       = 14
-	LdMachoCpu860         = 15
-	LdMachoCpuAlpha       = 16
-	LdMachoCpuPower       = 18
-	LdMachoCmdSegment     = 1
-	LdMachoCmdSymtab      = 2
-	LdMachoCmdSymseg      = 3
-	LdMachoCmdThread      = 4
-	LdMachoCmdDysymtab    = 11
-	LdMachoCmdSegment64   = 25
-	LdMachoFileObject     = 1
-	LdMachoFileExecutable = 2
-	LdMachoFileFvmlib     = 3
-	LdMachoFileCore       = 4
-	LdMachoFilePreload    = 5
-)
-
-func unpackcmd(p []byte, m *ldMachoObj, c *ldMachoCmd, type_ uint, sz uint) int {
-	e4 := m.e.Uint32
-	e8 := m.e.Uint64
-
-	c.type_ = int(type_)
-	c.size = uint32(sz)
-	switch type_ {
-	default:
-		return -1
-
-	case LdMachoCmdSegment:
-		if sz < 56 {
-			return -1
-		}
-		c.seg.name = cstring(p[8:24])
-		c.seg.vmaddr = uint64(e4(p[24:]))
-		c.seg.vmsize = uint64(e4(p[28:]))
-		c.seg.fileoff = e4(p[32:])
-		c.seg.filesz = e4(p[36:])
-		c.seg.maxprot = e4(p[40:])
-		c.seg.initprot = e4(p[44:])
-		c.seg.nsect = e4(p[48:])
-		c.seg.flags = e4(p[52:])
-		c.seg.sect = make([]ldMachoSect, c.seg.nsect)
-		if uint32(sz) < 56+c.seg.nsect*68 {
-			return -1
-		}
-		p = p[56:]
-		var s *ldMachoSect
-		for i := 0; uint32(i) < c.seg.nsect; i++ {
-			s = &c.seg.sect[i]
-			s.name = cstring(p[0:16])
-			s.segname = cstring(p[16:32])
-			s.addr = uint64(e4(p[32:]))
-			s.size = uint64(e4(p[36:]))
-			s.off = e4(p[40:])
-			s.align = e4(p[44:])
-			s.reloff = e4(p[48:])
-			s.nreloc = e4(p[52:])
-			s.flags = e4(p[56:])
-			s.res1 = e4(p[60:])
-			s.res2 = e4(p[64:])
-			p = p[68:]
-		}
-
-	case LdMachoCmdSegment64:
-		if sz < 72 {
-			return -1
-		}
-		c.seg.name = cstring(p[8:24])
-		c.seg.vmaddr = e8(p[24:])
-		c.seg.vmsize = e8(p[32:])
-		c.seg.fileoff = uint32(e8(p[40:]))
-		c.seg.filesz = uint32(e8(p[48:]))
-		c.seg.maxprot = e4(p[56:])
-		c.seg.initprot = e4(p[60:])
-		c.seg.nsect = e4(p[64:])
-		c.seg.flags = e4(p[68:])
-		c.seg.sect = make([]ldMachoSect, c.seg.nsect)
-		if uint32(sz) < 72+c.seg.nsect*80 {
-			return -1
-		}
-		p = p[72:]
-		var s *ldMachoSect
-		for i := 0; uint32(i) < c.seg.nsect; i++ {
-			s = &c.seg.sect[i]
-			s.name = cstring(p[0:16])
-			s.segname = cstring(p[16:32])
-			s.addr = e8(p[32:])
-			s.size = e8(p[40:])
-			s.off = e4(p[48:])
-			s.align = e4(p[52:])
-			s.reloff = e4(p[56:])
-			s.nreloc = e4(p[60:])
-			s.flags = e4(p[64:])
-			s.res1 = e4(p[68:])
-			s.res2 = e4(p[72:])
-
-			// p+76 is reserved
-			p = p[80:]
-		}
-
-	case LdMachoCmdSymtab:
-		if sz < 24 {
-			return -1
-		}
-		c.sym.symoff = e4(p[8:])
-		c.sym.nsym = e4(p[12:])
-		c.sym.stroff = e4(p[16:])
-		c.sym.strsize = e4(p[20:])
-
-	case LdMachoCmdDysymtab:
-		if sz < 80 {
-			return -1
-		}
-		c.dsym.ilocalsym = e4(p[8:])
-		c.dsym.nlocalsym = e4(p[12:])
-		c.dsym.iextdefsym = e4(p[16:])
-		c.dsym.nextdefsym = e4(p[20:])
-		c.dsym.iundefsym = e4(p[24:])
-		c.dsym.nundefsym = e4(p[28:])
-		c.dsym.tocoff = e4(p[32:])
-		c.dsym.ntoc = e4(p[36:])
-		c.dsym.modtaboff = e4(p[40:])
-		c.dsym.nmodtab = e4(p[44:])
-		c.dsym.extrefsymoff = e4(p[48:])
-		c.dsym.nextrefsyms = e4(p[52:])
-		c.dsym.indirectsymoff = e4(p[56:])
-		c.dsym.nindirectsyms = e4(p[60:])
-		c.dsym.extreloff = e4(p[64:])
-		c.dsym.nextrel = e4(p[68:])
-		c.dsym.locreloff = e4(p[72:])
-		c.dsym.nlocrel = e4(p[76:])
-	}
-
-	return 0
-}
-
-func macholoadrel(m *ldMachoObj, sect *ldMachoSect) int {
-	if sect.rel != nil || sect.nreloc == 0 {
-		return 0
-	}
-	rel := make([]ldMachoRel, sect.nreloc)
-	n := int(sect.nreloc * 8)
-	buf := make([]byte, n)
-	if m.f.Seek(m.base+int64(sect.reloff), 0) < 0 {
-		return -1
-	}
-	if _, err := io.ReadFull(m.f, buf); err != nil {
-		return -1
-	}
-	var p []byte
-	var r *ldMachoRel
-	var v uint32
-	for i := 0; uint32(i) < sect.nreloc; i++ {
-		r = &rel[i]
-		p = buf[i*8:]
-		r.addr = m.e.Uint32(p)
-
-		// TODO(rsc): Wrong interpretation for big-endian bitfields?
-		if r.addr&0x80000000 != 0 {
-			// scatterbrained relocation
-			r.scattered = 1
-
-			v = r.addr >> 24
-			r.addr &= 0xFFFFFF
-			r.type_ = uint8(v & 0xF)
-			v >>= 4
-			r.length = 1 << (v & 3)
-			v >>= 2
-			r.pcrel = uint8(v & 1)
-			r.value = m.e.Uint32(p[4:])
-		} else {
-			v = m.e.Uint32(p[4:])
-			r.symnum = v & 0xFFFFFF
-			v >>= 24
-			r.pcrel = uint8(v & 1)
-			v >>= 1
-			r.length = 1 << (v & 3)
-			v >>= 2
-			r.extrn = uint8(v & 1)
-			v >>= 1
-			r.type_ = uint8(v)
-		}
-	}
-
-	sect.rel = rel
-	return 0
-}
-
-func macholoaddsym(m *ldMachoObj, d *ldMachoDysymtab) int {
-	n := int(d.nindirectsyms)
-
-	p := make([]byte, n*4)
-	if m.f.Seek(m.base+int64(d.indirectsymoff), 0) < 0 {
-		return -1
-	}
-	if _, err := io.ReadFull(m.f, p); err != nil {
-		return -1
-	}
-
-	d.indir = make([]uint32, n)
-	for i := 0; i < n; i++ {
-		d.indir[i] = m.e.Uint32(p[4*i:])
-	}
-	return 0
-}
-
-func macholoadsym(m *ldMachoObj, symtab *ldMachoSymtab) int {
-	if symtab.sym != nil {
-		return 0
-	}
-
-	strbuf := make([]byte, symtab.strsize)
-	if m.f.Seek(m.base+int64(symtab.stroff), 0) < 0 {
-		return -1
-	}
-	if _, err := io.ReadFull(m.f, strbuf); err != nil {
-		return -1
-	}
-
-	symsize := 12
-	if m.is64 {
-		symsize = 16
-	}
-	n := int(symtab.nsym * uint32(symsize))
-	symbuf := make([]byte, n)
-	if m.f.Seek(m.base+int64(symtab.symoff), 0) < 0 {
-		return -1
-	}
-	if _, err := io.ReadFull(m.f, symbuf); err != nil {
-		return -1
-	}
-	sym := make([]ldMachoSym, symtab.nsym)
-	p := symbuf
-	var s *ldMachoSym
-	var v uint32
-	for i := 0; uint32(i) < symtab.nsym; i++ {
-		s = &sym[i]
-		v = m.e.Uint32(p)
-		if v >= symtab.strsize {
-			return -1
-		}
-		s.name = cstring(strbuf[v:])
-		s.type_ = p[4]
-		s.sectnum = p[5]
-		s.desc = m.e.Uint16(p[6:])
-		if m.is64 {
-			s.value = m.e.Uint64(p[8:])
-		} else {
-			s.value = uint64(m.e.Uint32(p[8:]))
-		}
-		p = p[symsize:]
-	}
-
-	symtab.str = strbuf
-	symtab.sym = sym
-	return 0
-}
-
-func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) {
-	var err error
-	var j int
-	var is64 bool
-	var secaddr uint64
-	var hdr [7 * 4]uint8
-	var cmdp []byte
-	var dat []byte
-	var ncmd uint32
-	var cmdsz uint32
-	var ty uint32
-	var sz uint32
-	var off uint32
-	var m *ldMachoObj
-	var e binary.ByteOrder
-	var sect *ldMachoSect
-	var rel *ldMachoRel
-	var rpi int
-	var s *Symbol
-	var s1 *Symbol
-	var outer *Symbol
-	var c *ldMachoCmd
-	var symtab *ldMachoSymtab
-	var dsymtab *ldMachoDysymtab
-	var sym *ldMachoSym
-	var r []Reloc
-	var rp *Reloc
-	var name string
-
-	localSymVersion := ctxt.Syms.IncVersion()
-	base := f.Offset()
-	if _, err := io.ReadFull(f, hdr[:]); err != nil {
-		goto bad
-	}
-
-	if binary.BigEndian.Uint32(hdr[:])&^1 == 0xFEEDFACE {
-		e = binary.BigEndian
-	} else if binary.LittleEndian.Uint32(hdr[:])&^1 == 0xFEEDFACE {
-		e = binary.LittleEndian
-	} else {
-		err = fmt.Errorf("bad magic - not mach-o file")
-		goto bad
-	}
-
-	is64 = e.Uint32(hdr[:]) == 0xFEEDFACF
-	ncmd = e.Uint32(hdr[4*4:])
-	cmdsz = e.Uint32(hdr[5*4:])
-	if ncmd > 0x10000 || cmdsz >= 0x01000000 {
-		err = fmt.Errorf("implausible mach-o header ncmd=%d cmdsz=%d", ncmd, cmdsz)
-		goto bad
-	}
-
-	if is64 {
-		f.Seek(4, 1) // skip reserved word in header
-	}
-
-	m = new(ldMachoObj)
-
-	m.f = f
-	m.e = e
-	m.cputype = uint(e.Uint32(hdr[1*4:]))
-	m.subcputype = uint(e.Uint32(hdr[2*4:]))
-	m.filetype = e.Uint32(hdr[3*4:])
-	m.ncmd = uint(ncmd)
-	m.flags = e.Uint32(hdr[6*4:])
-	m.is64 = is64
-	m.base = base
-	m.length = length
-	m.name = pn
-
-	switch SysArch.Family {
-	default:
-		Errorf(nil, "%s: mach-o %s unimplemented", pn, SysArch.Name)
-		return
-
-	case sys.AMD64:
-		if e != binary.LittleEndian || m.cputype != LdMachoCpuAmd64 {
-			Errorf(nil, "%s: mach-o object but not amd64", pn)
-			return
-		}
-
-	case sys.I386:
-		if e != binary.LittleEndian || m.cputype != LdMachoCpu386 {
-			Errorf(nil, "%s: mach-o object but not 386", pn)
-			return
-		}
-	}
-
-	m.cmd = make([]ldMachoCmd, ncmd)
-	off = uint32(len(hdr))
-	cmdp = make([]byte, cmdsz)
-	if _, err2 := io.ReadFull(f, cmdp); err2 != nil {
-		err = fmt.Errorf("reading cmds: %v", err)
-		goto bad
-	}
-
-	// read and parse load commands
-	c = nil
-
-	symtab = nil
-	dsymtab = nil
-
-	for i := 0; uint32(i) < ncmd; i++ {
-		ty = e.Uint32(cmdp)
-		sz = e.Uint32(cmdp[4:])
-		m.cmd[i].off = off
-		unpackcmd(cmdp, m, &m.cmd[i], uint(ty), uint(sz))
-		cmdp = cmdp[sz:]
-		off += sz
-		if ty == LdMachoCmdSymtab {
-			if symtab != nil {
-				err = fmt.Errorf("multiple symbol tables")
-				goto bad
-			}
-
-			symtab = &m.cmd[i].sym
-			macholoadsym(m, symtab)
-		}
-
-		if ty == LdMachoCmdDysymtab {
-			dsymtab = &m.cmd[i].dsym
-			macholoaddsym(m, dsymtab)
-		}
-
-		if (is64 && ty == LdMachoCmdSegment64) || (!is64 && ty == LdMachoCmdSegment) {
-			if c != nil {
-				err = fmt.Errorf("multiple load commands")
-				goto bad
-			}
-
-			c = &m.cmd[i]
-		}
-	}
-
-	// load text and data segments into memory.
-	// they are not as small as the load commands, but we'll need
-	// the memory anyway for the symbol images, so we might
-	// as well use one large chunk.
-	if c == nil {
-		err = fmt.Errorf("no load command")
-		goto bad
-	}
-
-	if symtab == nil {
-		// our work is done here - no symbols means nothing can refer to this file
-		return
-	}
-
-	if int64(c.seg.fileoff+c.seg.filesz) >= length {
-		err = fmt.Errorf("load segment out of range")
-		goto bad
-	}
-
-	dat = make([]byte, c.seg.filesz)
-	if f.Seek(m.base+int64(c.seg.fileoff), 0) < 0 {
-		err = fmt.Errorf("cannot load object data: %v", err)
-		goto bad
-	}
-	if _, err2 := io.ReadFull(f, dat); err2 != nil {
-		err = fmt.Errorf("cannot load object data: %v", err)
-		goto bad
-	}
-
-	for i := 0; uint32(i) < c.seg.nsect; i++ {
-		sect = &c.seg.sect[i]
-		if sect.segname != "__TEXT" && sect.segname != "__DATA" {
-			continue
-		}
-		if sect.name == "__eh_frame" {
-			continue
-		}
-		name = fmt.Sprintf("%s(%s/%s)", pkg, sect.segname, sect.name)
-		s = ctxt.Syms.Lookup(name, localSymVersion)
-		if s.Type != 0 {
-			err = fmt.Errorf("duplicate %s/%s", sect.segname, sect.name)
-			goto bad
-		}
-
-		if sect.flags&0xff == 1 { // S_ZEROFILL
-			s.P = make([]byte, sect.size)
-		} else {
-			s.P = dat[sect.addr-c.seg.vmaddr:][:sect.size]
-		}
-		s.Size = int64(len(s.P))
-
-		if sect.segname == "__TEXT" {
-			if sect.name == "__text" {
-				s.Type = obj.STEXT
-			} else {
-				s.Type = obj.SRODATA
-			}
-		} else {
-			if sect.name == "__bss" {
-				s.Type = obj.SNOPTRBSS
-				s.P = s.P[:0]
-			} else {
-				s.Type = obj.SNOPTRDATA
-			}
-		}
-
-		sect.sym = s
-	}
-
-	// enter sub-symbols into symbol table.
-	// have to guess sizes from next symbol.
-	for i := 0; uint32(i) < symtab.nsym; i++ {
-		sym = &symtab.sym[i]
-		if sym.type_&N_STAB != 0 {
-			continue
-		}
-
-		// TODO: check sym->type against outer->type.
-		name = sym.name
-
-		if name[0] == '_' && name[1] != '\x00' {
-			name = name[1:]
-		}
-		v := 0
-		if sym.type_&N_EXT == 0 {
-			v = localSymVersion
-		}
-		s = ctxt.Syms.Lookup(name, v)
-		if sym.type_&N_EXT == 0 {
-			s.Attr |= AttrDuplicateOK
-		}
-		sym.sym = s
-		if sym.sectnum == 0 { // undefined
-			continue
-		}
-		if uint32(sym.sectnum) > c.seg.nsect {
-			err = fmt.Errorf("reference to invalid section %d", sym.sectnum)
-			goto bad
-		}
-
-		sect = &c.seg.sect[sym.sectnum-1]
-		outer = sect.sym
-		if outer == nil {
-			err = fmt.Errorf("reference to invalid section %s/%s", sect.segname, sect.name)
-			continue
-		}
-
-		if s.Outer != nil {
-			if s.Attr.DuplicateOK() {
-				continue
-			}
-			Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name)
-		}
-
-		s.Type = outer.Type | obj.SSUB
-		s.Sub = outer.Sub
-		outer.Sub = s
-		s.Outer = outer
-		s.Value = int64(sym.value - sect.addr)
-		if !s.Attr.CgoExportDynamic() {
-			s.Dynimplib = "" // satisfy dynimport
-		}
-		if outer.Type == obj.STEXT {
-			if s.Attr.External() && !s.Attr.DuplicateOK() {
-				Errorf(s, "%s: duplicate symbol definition", pn)
-			}
-			s.Attr |= AttrExternal
-		}
-
-		sym.sym = s
-	}
-
-	// Sort outer lists by address, adding to textp.
-	// This keeps textp in increasing address order.
-	for i := 0; uint32(i) < c.seg.nsect; i++ {
-		sect = &c.seg.sect[i]
-		s = sect.sym
-		if s == nil {
-			continue
-		}
-		if s.Sub != nil {
-			s.Sub = listsort(s.Sub)
-
-			// assign sizes, now that we know symbols in sorted order.
-			for s1 = s.Sub; s1 != nil; s1 = s1.Sub {
-				if s1.Sub != nil {
-					s1.Size = s1.Sub.Value - s1.Value
-				} else {
-					s1.Size = s.Value + s.Size - s1.Value
-				}
-			}
-		}
-
-		if s.Type == obj.STEXT {
-			if s.Attr.OnList() {
-				log.Fatalf("symbol %s listed multiple times", s.Name)
-			}
-			s.Attr |= AttrOnList
-			ctxt.Textp = append(ctxt.Textp, s)
-			for s1 = s.Sub; s1 != nil; s1 = s1.Sub {
-				if s1.Attr.OnList() {
-					log.Fatalf("symbol %s listed multiple times", s1.Name)
-				}
-				s1.Attr |= AttrOnList
-				ctxt.Textp = append(ctxt.Textp, s1)
-			}
-		}
-	}
-
-	// load relocations
-	for i := 0; uint32(i) < c.seg.nsect; i++ {
-		sect = &c.seg.sect[i]
-		s = sect.sym
-		if s == nil {
-			continue
-		}
-		macholoadrel(m, sect)
-		if sect.rel == nil {
-			continue
-		}
-		r = make([]Reloc, sect.nreloc)
-		rpi = 0
-	Reloc:
-		for j = 0; uint32(j) < sect.nreloc; j++ {
-			rp = &r[rpi]
-			rel = &sect.rel[j]
-			if rel.scattered != 0 {
-				if SysArch.Family != sys.I386 {
-					// mach-o only uses scattered relocation on 32-bit platforms
-					Errorf(s, "unexpected scattered relocation")
-					continue
-				}
-
-				// on 386, rewrite scattered 4/1 relocation and some
-				// scattered 2/1 relocation into the pseudo-pc-relative
-				// reference that it is.
-				// assume that the second in the pair is in this section
-				// and use that as the pc-relative base.
-				if uint32(j+1) >= sect.nreloc {
-					err = fmt.Errorf("unsupported scattered relocation %d", int(rel.type_))
-					goto bad
-				}
-
-				if sect.rel[j+1].scattered == 0 || sect.rel[j+1].type_ != 1 || (rel.type_ != 4 && rel.type_ != 2) || uint64(sect.rel[j+1].value) < sect.addr || uint64(sect.rel[j+1].value) >= sect.addr+sect.size {
-					err = fmt.Errorf("unsupported scattered relocation %d/%d", int(rel.type_), int(sect.rel[j+1].type_))
-					goto bad
-				}
-
-				rp.Siz = rel.length
-				rp.Off = int32(rel.addr)
-
-				// NOTE(rsc): I haven't worked out why (really when)
-				// we should ignore the addend on a
-				// scattered relocation, but it seems that the
-				// common case is we ignore it.
-				// It's likely that this is not strictly correct
-				// and that the math should look something
-				// like the non-scattered case below.
-				rp.Add = 0
-
-				// want to make it pc-relative aka relative to rp->off+4
-				// but the scatter asks for relative to off = sect->rel[j+1].value - sect->addr.
-				// adjust rp->add accordingly.
-				rp.Type = obj.R_PCREL
-
-				rp.Add += int64(uint64(int64(rp.Off)+4) - (uint64(sect.rel[j+1].value) - sect.addr))
-
-				// now consider the desired symbol.
-				// find the section where it lives.
-				var ks *ldMachoSect
-				for k := 0; uint32(k) < c.seg.nsect; k++ {
-					ks = &c.seg.sect[k]
-					if ks.addr <= uint64(rel.value) && uint64(rel.value) < ks.addr+ks.size {
-						if ks.sym != nil {
-							rp.Sym = ks.sym
-							rp.Add += int64(uint64(rel.value) - ks.addr)
-						} else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
-							// handle reference to __IMPORT/__pointers.
-							// how much worse can this get?
-							// why are we supporting 386 on the mac anyway?
-							rp.Type = 512 + MACHO_FAKE_GOTPCREL
-
-							// figure out which pointer this is a reference to.
-							k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
-
-							// load indirect table for __pointers
-							// fetch symbol number
-							if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
-								err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
-								goto bad
-							}
-
-							k = int(dsymtab.indir[k])
-							if k < 0 || uint32(k) >= symtab.nsym {
-								err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
-								goto bad
-							}
-
-							rp.Sym = symtab.sym[k].sym
-						} else {
-							err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
-							goto bad
-						}
-
-						rpi++
-
-						// skip #1 of 2 rel; continue skips #2 of 2.
-						j++
-
-						continue Reloc
-					}
-				}
-
-				err = fmt.Errorf("unsupported scattered relocation: invalid address %#x", rel.addr)
-				goto bad
-
-			}
-
-			rp.Siz = rel.length
-			rp.Type = 512 + (obj.RelocType(rel.type_) << 1) + obj.RelocType(rel.pcrel)
-			rp.Off = int32(rel.addr)
-
-			// Handle X86_64_RELOC_SIGNED referencing a section (rel->extrn == 0).
-			if SysArch.Family == sys.AMD64 && rel.extrn == 0 && rel.type_ == 1 {
-				// Calculate the addend as the offset into the section.
-				//
-				// The rip-relative offset stored in the object file is encoded
-				// as follows:
-				//
-				//    movsd	0x00000360(%rip),%xmm0
-				//
-				// To get the absolute address of the value this rip-relative address is pointing
-				// to, we must add the address of the next instruction to it. This is done by
-				// taking the address of the relocation and adding 4 to it (since the rip-relative
-				// offset can at most be 32 bits long).  To calculate the offset into the section the
-				// relocation is referencing, we subtract the vaddr of the start of the referenced
-				// section found in the original object file.
-				//
-				// [For future reference, see Darwin's /usr/include/mach-o/x86_64/reloc.h]
-				secaddr = c.seg.sect[rel.symnum-1].addr
-
-				rp.Add = int64(uint64(int64(int32(e.Uint32(s.P[rp.Off:])))+int64(rp.Off)+4) - secaddr)
-			} else {
-				rp.Add = int64(int32(e.Uint32(s.P[rp.Off:])))
-			}
-
-			// For i386 Mach-O PC-relative, the addend is written such that
-			// it *is* the PC being subtracted. Use that to make
-			// it match our version of PC-relative.
-			if rel.pcrel != 0 && SysArch.Family == sys.I386 {
-				rp.Add += int64(rp.Off) + int64(rp.Siz)
-			}
-			if rel.extrn == 0 {
-				if rel.symnum < 1 || rel.symnum > c.seg.nsect {
-					err = fmt.Errorf("invalid relocation: section reference out of range %d vs %d", rel.symnum, c.seg.nsect)
-					goto bad
-				}
-
-				rp.Sym = c.seg.sect[rel.symnum-1].sym
-				if rp.Sym == nil {
-					err = fmt.Errorf("invalid relocation: %s", c.seg.sect[rel.symnum-1].name)
-					goto bad
-				}
-
-				// References to symbols in other sections
-				// include that information in the addend.
-				// We only care about the delta from the
-				// section base.
-				if SysArch.Family == sys.I386 {
-					rp.Add -= int64(c.seg.sect[rel.symnum-1].addr)
-				}
-			} else {
-				if rel.symnum >= symtab.nsym {
-					err = fmt.Errorf("invalid relocation: symbol reference out of range")
-					goto bad
-				}
-
-				rp.Sym = symtab.sym[rel.symnum].sym
-			}
-
-			rpi++
-		}
-
-		sort.Sort(rbyoff(r[:rpi]))
-		s.R = r
-		s.R = s.R[:rpi]
-	}
-
-	return
-
-bad:
-	Errorf(nil, "%s: malformed mach-o file: %v", pn, err)
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldpe.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldpe.go
deleted file mode 100644
index 75abcf9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/ldpe.go
+++ /dev/null
@@ -1,445 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ldpe.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/ldpe.go:1
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/debug/pe"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"sort"
-	"strings"
-)
-
-const (
-	IMAGE_SYM_UNDEFINED              = 0
-	IMAGE_SYM_ABSOLUTE               = -1
-	IMAGE_SYM_DEBUG                  = -2
-	IMAGE_SYM_TYPE_NULL              = 0
-	IMAGE_SYM_TYPE_VOID              = 1
-	IMAGE_SYM_TYPE_CHAR              = 2
-	IMAGE_SYM_TYPE_SHORT             = 3
-	IMAGE_SYM_TYPE_INT               = 4
-	IMAGE_SYM_TYPE_LONG              = 5
-	IMAGE_SYM_TYPE_FLOAT             = 6
-	IMAGE_SYM_TYPE_DOUBLE            = 7
-	IMAGE_SYM_TYPE_STRUCT            = 8
-	IMAGE_SYM_TYPE_UNION             = 9
-	IMAGE_SYM_TYPE_ENUM              = 10
-	IMAGE_SYM_TYPE_MOE               = 11
-	IMAGE_SYM_TYPE_BYTE              = 12
-	IMAGE_SYM_TYPE_WORD              = 13
-	IMAGE_SYM_TYPE_UINT              = 14
-	IMAGE_SYM_TYPE_DWORD             = 15
-	IMAGE_SYM_TYPE_PCODE             = 32768
-	IMAGE_SYM_DTYPE_NULL             = 0
-	IMAGE_SYM_DTYPE_POINTER          = 0x10
-	IMAGE_SYM_DTYPE_FUNCTION         = 0x20
-	IMAGE_SYM_DTYPE_ARRAY            = 0x30
-	IMAGE_SYM_CLASS_END_OF_FUNCTION  = -1
-	IMAGE_SYM_CLASS_NULL             = 0
-	IMAGE_SYM_CLASS_AUTOMATIC        = 1
-	IMAGE_SYM_CLASS_EXTERNAL         = 2
-	IMAGE_SYM_CLASS_STATIC           = 3
-	IMAGE_SYM_CLASS_REGISTER         = 4
-	IMAGE_SYM_CLASS_EXTERNAL_DEF     = 5
-	IMAGE_SYM_CLASS_LABEL            = 6
-	IMAGE_SYM_CLASS_UNDEFINED_LABEL  = 7
-	IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8
-	IMAGE_SYM_CLASS_ARGUMENT         = 9
-	IMAGE_SYM_CLASS_STRUCT_TAG       = 10
-	IMAGE_SYM_CLASS_MEMBER_OF_UNION  = 11
-	IMAGE_SYM_CLASS_UNION_TAG        = 12
-	IMAGE_SYM_CLASS_TYPE_DEFINITION  = 13
-	IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14
-	IMAGE_SYM_CLASS_ENUM_TAG         = 15
-	IMAGE_SYM_CLASS_MEMBER_OF_ENUM   = 16
-	IMAGE_SYM_CLASS_REGISTER_PARAM   = 17
-	IMAGE_SYM_CLASS_BIT_FIELD        = 18
-	IMAGE_SYM_CLASS_FAR_EXTERNAL     = 68 /* Not in PECOFF v8 spec */
-	IMAGE_SYM_CLASS_BLOCK            = 100
-	IMAGE_SYM_CLASS_FUNCTION         = 101
-	IMAGE_SYM_CLASS_END_OF_STRUCT    = 102
-	IMAGE_SYM_CLASS_FILE             = 103
-	IMAGE_SYM_CLASS_SECTION          = 104
-	IMAGE_SYM_CLASS_WEAK_EXTERNAL    = 105
-	IMAGE_SYM_CLASS_CLR_TOKEN        = 107
-	IMAGE_REL_I386_ABSOLUTE          = 0x0000
-	IMAGE_REL_I386_DIR16             = 0x0001
-	IMAGE_REL_I386_REL16             = 0x0002
-	IMAGE_REL_I386_DIR32             = 0x0006
-	IMAGE_REL_I386_DIR32NB           = 0x0007
-	IMAGE_REL_I386_SEG12             = 0x0009
-	IMAGE_REL_I386_SECTION           = 0x000A
-	IMAGE_REL_I386_SECREL            = 0x000B
-	IMAGE_REL_I386_TOKEN             = 0x000C
-	IMAGE_REL_I386_SECREL7           = 0x000D
-	IMAGE_REL_I386_REL32             = 0x0014
-	IMAGE_REL_AMD64_ABSOLUTE         = 0x0000
-	IMAGE_REL_AMD64_ADDR64           = 0x0001
-	IMAGE_REL_AMD64_ADDR32           = 0x0002
-	IMAGE_REL_AMD64_ADDR32NB         = 0x0003
-	IMAGE_REL_AMD64_REL32            = 0x0004
-	IMAGE_REL_AMD64_REL32_1          = 0x0005
-	IMAGE_REL_AMD64_REL32_2          = 0x0006
-	IMAGE_REL_AMD64_REL32_3          = 0x0007
-	IMAGE_REL_AMD64_REL32_4          = 0x0008
-	IMAGE_REL_AMD64_REL32_5          = 0x0009
-	IMAGE_REL_AMD64_SECTION          = 0x000A
-	IMAGE_REL_AMD64_SECREL           = 0x000B
-	IMAGE_REL_AMD64_SECREL7          = 0x000C
-	IMAGE_REL_AMD64_TOKEN            = 0x000D
-	IMAGE_REL_AMD64_SREL32           = 0x000E
-	IMAGE_REL_AMD64_PAIR             = 0x000F
-	IMAGE_REL_AMD64_SSPAN32          = 0x0010
-)
-
-// TODO(brainman): maybe just add ReadAt method to bio.Reader instead of creating peBiobuf
-
-// peBiobuf makes bio.Reader look like io.ReaderAt.
-type peBiobuf bio.Reader
-
-func (f *peBiobuf) ReadAt(p []byte, off int64) (int, error) {
-	ret := ((*bio.Reader)(f)).Seek(off, 0)
-	if ret < 0 {
-		return 0, errors.New("fail to seek")
-	}
-	n, err := f.Read(p)
-	if err != nil {
-		return 0, err
-	}
-	return n, nil
-}
-
-func ldpe(ctxt *Link, input *bio.Reader, pkg string, length int64, pn string) {
-	err := ldpeError(ctxt, input, pkg, length, pn)
-	if err != nil {
-		Errorf(nil, "%s: malformed pe file: %v", pn, err)
-	}
-}
-
-func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn string) error {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f ldpe %s\n", obj.Cputime(), pn)
-	}
-
-	localSymVersion := ctxt.Syms.IncVersion()
-
-	sectsyms := make(map[*pe.Section]*Symbol)
-	sectdata := make(map[*pe.Section][]byte)
-
-	// Some input files are archives containing multiple
-	// object files, and pe.NewFile seeks to the start of the
-	// input file and gets confused. Create a section reader
-	// to stop pe.NewFile from looking before the current position.
-	sr := io.NewSectionReader((*peBiobuf)(input), input.Offset(), 1<<63-1)
-
-	// TODO: replace pe.NewFile with pe.Load (grep for "add Load function" in debug/pe for details)
-	f, err := pe.NewFile(sr)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	// TODO return error if found .cormeta
-
-	// create symbols for mapped sections
-	for _, sect := range f.Sections {
-		if sect.Characteristics&IMAGE_SCN_MEM_DISCARDABLE != 0 {
-			continue
-		}
-
-		if sect.Characteristics&(IMAGE_SCN_CNT_CODE|IMAGE_SCN_CNT_INITIALIZED_DATA|IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0 {
-			// This has been seen for .idata sections, which we
-			// want to ignore. See issues 5106 and 5273.
-			continue
-		}
-
-		data, err := sect.Data()
-		if err != nil {
-			return err
-		}
-		sectdata[sect] = data
-
-		name := fmt.Sprintf("%s(%s)", pkg, sect.Name)
-		s := ctxt.Syms.Lookup(name, localSymVersion)
-
-		switch sect.Characteristics & (IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE) {
-		case IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ: //.rdata
-			s.Type = obj.SRODATA
-
-		case IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE: //.bss
-			s.Type = obj.SNOPTRBSS
-
-		case IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE: //.data
-			s.Type = obj.SNOPTRDATA
-
-		case IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ: //.text
-			s.Type = obj.STEXT
-
-		default:
-			return fmt.Errorf("unexpected flags %#06x for PE section %s", sect.Characteristics, sect.Name)
-		}
-
-		s.P = data
-		s.Size = int64(len(data))
-		sectsyms[sect] = s
-		if sect.Name == ".rsrc" {
-			setpersrc(ctxt, s)
-		}
-	}
-
-	// load relocations
-	for _, rsect := range f.Sections {
-		if _, found := sectsyms[rsect]; !found {
-			continue
-		}
-		if rsect.NumberOfRelocations == 0 {
-			continue
-		}
-		if rsect.Characteristics&IMAGE_SCN_MEM_DISCARDABLE != 0 {
-			continue
-		}
-		if rsect.Characteristics&(IMAGE_SCN_CNT_CODE|IMAGE_SCN_CNT_INITIALIZED_DATA|IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0 {
-			// This has been seen for .idata sections, which we
-			// want to ignore. See issues 5106 and 5273.
-			continue
-		}
-
-		rs := make([]Reloc, rsect.NumberOfRelocations)
-		for j, r := range rsect.Relocs {
-			rp := &rs[j]
-			if int(r.SymbolTableIndex) >= len(f.COFFSymbols) {
-				return fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols))
-			}
-			pesym := &f.COFFSymbols[r.SymbolTableIndex]
-			gosym, err := readpesym(ctxt, f, pesym, sectsyms, localSymVersion)
-			if err != nil {
-				return err
-			}
-			if gosym == nil {
-				name, err := pesym.FullName(f.StringTable)
-				if err != nil {
-					name = string(pesym.Name[:])
-				}
-				return fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type)
-			}
-
-			rp.Sym = gosym
-			rp.Siz = 4
-			rp.Off = int32(r.VirtualAddress)
-			switch r.Type {
-			default:
-				Errorf(sectsyms[rsect], "%s: unknown relocation type %d;", pn, r.Type)
-				fallthrough
-
-			case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32,
-				IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32
-				IMAGE_REL_AMD64_ADDR32NB:
-				rp.Type = obj.R_PCREL
-
-				rp.Add = int64(int32(Le32(sectdata[rsect][rp.Off:])))
-
-			case IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32:
-				rp.Type = obj.R_ADDR
-
-				// load addend from image
-				rp.Add = int64(int32(Le32(sectdata[rsect][rp.Off:])))
-
-			case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64
-				rp.Siz = 8
-
-				rp.Type = obj.R_ADDR
-
-				// load addend from image
-				rp.Add = int64(Le64(sectdata[rsect][rp.Off:]))
-			}
-
-			// ld -r could generate multiple section symbols for the
-			// same section but with different values, we have to take
-			// that into account
-			if issect(pesym) {
-				rp.Add += int64(pesym.Value)
-			}
-		}
-
-		sort.Sort(rbyoff(rs[:rsect.NumberOfRelocations]))
-
-		s := sectsyms[rsect]
-		s.R = rs
-		s.R = s.R[:rsect.NumberOfRelocations]
-	}
-
-	// enter sub-symbols into symbol table.
-	for i, numaux := 0, 0; i < len(f.COFFSymbols); i += numaux + 1 {
-		pesym := &f.COFFSymbols[i]
-
-		numaux = int(pesym.NumberOfAuxSymbols)
-
-		name, err := pesym.FullName(f.StringTable)
-		if err != nil {
-			return err
-		}
-		if name == "" {
-			continue
-		}
-		if issect(pesym) {
-			continue
-		}
-		if int(pesym.SectionNumber) > len(f.Sections) {
-			continue
-		}
-		if pesym.SectionNumber == IMAGE_SYM_DEBUG {
-			continue
-		}
-		var sect *pe.Section
-		if pesym.SectionNumber > 0 {
-			sect = f.Sections[pesym.SectionNumber-1]
-			if _, found := sectsyms[sect]; !found {
-				continue
-			}
-		}
-
-		s, err := readpesym(ctxt, f, pesym, sectsyms, localSymVersion)
-		if err != nil {
-			return err
-		}
-
-		if pesym.SectionNumber == 0 { // extern
-			if s.Type == obj.SDYNIMPORT {
-				s.Plt = -2 // flag for dynimport in PE object files.
-			}
-			if s.Type == obj.SXREF && pesym.Value > 0 { // global data
-				s.Type = obj.SNOPTRDATA
-				s.Size = int64(pesym.Value)
-			}
-
-			continue
-		} else if pesym.SectionNumber > 0 && int(pesym.SectionNumber) <= len(f.Sections) {
-			sect = f.Sections[pesym.SectionNumber-1]
-			if _, found := sectsyms[sect]; !found {
-				Errorf(s, "%s: missing sect.sym", pn)
-			}
-		} else {
-			Errorf(s, "%s: sectnum < 0!", pn)
-		}
-
-		if sect == nil {
-			return nil
-		}
-
-		if s.Outer != nil {
-			if s.Attr.DuplicateOK() {
-				continue
-			}
-			Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sectsyms[sect].Name)
-		}
-
-		sectsym := sectsyms[sect]
-		s.Sub = sectsym.Sub
-		sectsym.Sub = s
-		s.Type = sectsym.Type | obj.SSUB
-		s.Value = int64(pesym.Value)
-		s.Size = 4
-		s.Outer = sectsym
-		if sectsym.Type == obj.STEXT {
-			if s.Attr.External() && !s.Attr.DuplicateOK() {
-				Errorf(s, "%s: duplicate symbol definition", pn)
-			}
-			s.Attr |= AttrExternal
-		}
-	}
-
-	// Sort outer lists by address, adding to textp.
-	// This keeps textp in increasing address order.
-	for _, sect := range f.Sections {
-		s := sectsyms[sect]
-		if s == nil {
-			continue
-		}
-		if s.Sub != nil {
-			s.Sub = listsort(s.Sub)
-		}
-		if s.Type == obj.STEXT {
-			if s.Attr.OnList() {
-				log.Fatalf("symbol %s listed multiple times", s.Name)
-			}
-			s.Attr |= AttrOnList
-			ctxt.Textp = append(ctxt.Textp, s)
-			for s = s.Sub; s != nil; s = s.Sub {
-				if s.Attr.OnList() {
-					log.Fatalf("symbol %s listed multiple times", s.Name)
-				}
-				s.Attr |= AttrOnList
-				ctxt.Textp = append(ctxt.Textp, s)
-			}
-		}
-	}
-
-	return nil
-}
-
-func issect(s *pe.COFFSymbol) bool {
-	return s.StorageClass == IMAGE_SYM_CLASS_STATIC && s.Type == 0 && s.Name[0] == '.'
-}
-
-func readpesym(ctxt *Link, f *pe.File, sym *pe.COFFSymbol, sectsyms map[*pe.Section]*Symbol, localSymVersion int) (*Symbol, error) {
-	symname, err := sym.FullName(f.StringTable)
-	if err != nil {
-		return nil, err
-	}
-	var name string
-	if issect(sym) {
-		name = sectsyms[f.Sections[sym.SectionNumber-1]].Name
-	} else {
-		name = symname
-		if strings.HasPrefix(name, "__imp_") {
-			name = name[6:] // __imp_Name => Name
-		}
-		if SysArch.Family == sys.I386 && name[0] == '_' {
-			name = name[1:] // _Name => Name
-		}
-	}
-
-	// remove last @XXX
-	if i := strings.LastIndex(name, "@"); i >= 0 {
-		name = name[:i]
-	}
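-
-	// Illustrative note, not part of the original source: taken together, the
-	// prefix/suffix stripping above means that a hypothetical 386 stdcall
-	// import symbol "__imp__CreateFileA@28" is reduced first to
-	// "_CreateFileA@28" (drop "__imp_"), then to "CreateFileA@28" (drop the
-	// 386-only leading underscore), and finally to "CreateFileA" (drop the
-	// "@28" argument-size suffix).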
-
-	var s *Symbol
-	switch sym.Type {
-	default:
-		return nil, fmt.Errorf("%s: invalid symbol type %d", symname, sym.Type)
-
-	case IMAGE_SYM_DTYPE_FUNCTION, IMAGE_SYM_DTYPE_NULL:
-		switch sym.StorageClass {
-		case IMAGE_SYM_CLASS_EXTERNAL: //global
-			s = ctxt.Syms.Lookup(name, 0)
-
-		case IMAGE_SYM_CLASS_NULL, IMAGE_SYM_CLASS_STATIC, IMAGE_SYM_CLASS_LABEL:
-			s = ctxt.Syms.Lookup(name, localSymVersion)
-			s.Attr |= AttrDuplicateOK
-
-		default:
-			return nil, fmt.Errorf("%s: invalid symbol binding %d", symname, sym.StorageClass)
-		}
-	}
-
-	if s != nil && s.Type == 0 && (sym.StorageClass != IMAGE_SYM_CLASS_STATIC || sym.Value != 0) {
-		s.Type = obj.SXREF
-	}
-	if strings.HasPrefix(symname, "__imp_") {
-		s.Got = -2 // flag for __imp_
-	}
-
-	return s, nil
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/lib.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/lib.go
deleted file mode 100644
index 40eeec5..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/lib.go
+++ /dev/null
@@ -1,2155 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/lib.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/lib.go:1
-// Inferno utils/8l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/8l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bufio"
-	"bytes"
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"crypto/sha1"
-	"debug/elf"
-	"encoding/binary"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"sync"
-)
-
-// Data layout and relocation.
-
-// Derived from Inferno utils/6l/l.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-type Arch struct {
-	Funcalign        int
-	Maxalign         int
-	Minalign         int
-	Dwarfregsp       int
-	Dwarfreglr       int
-	Linuxdynld       string
-	Freebsddynld     string
-	Netbsddynld      string
-	Openbsddynld     string
-	Dragonflydynld   string
-	Solarisdynld     string
-	Adddynrel        func(*Link, *Symbol, *Reloc) bool
-	Archinit         func(*Link)
-	Archreloc        func(*Link, *Reloc, *Symbol, *int64) int
-	Archrelocvariant func(*Link, *Reloc, *Symbol, int64) int64
-	Trampoline       func(*Link, *Reloc, *Symbol)
-	Asmb             func(*Link)
-	Elfreloc1        func(*Link, *Reloc, int64) int
-	Elfsetupplt      func(*Link)
-	Gentext          func(*Link)
-	Machoreloc1      func(*Symbol, *Reloc, int64) int
-	PEreloc1         func(*Symbol, *Reloc, int64) bool
-	Wput             func(uint16)
-	Lput             func(uint32)
-	Vput             func(uint64)
-	Append16         func(b []byte, v uint16) []byte
-	Append32         func(b []byte, v uint32) []byte
-	Append64         func(b []byte, v uint64) []byte
-
-	// TLSIEtoLE converts a TLS Initial Executable relocation to
-	// a TLS Local Executable relocation.
-	//
-	// This is possible when a TLS IE relocation refers to a local
-	// symbol in an executable, which is typical when internally
-	// linking PIE binaries.
-	TLSIEtoLE func(s *Symbol, off, size int)
-}
-
-var (
-	Thearch Arch
-	Lcsize  int32
-	rpath   Rpath
-	Spsize  int32
-	Symsize int32
-)
-
-// Terrible but standard terminology.
-// A segment describes a block of file to load into memory.
-// A section further describes the pieces of that block for
-// use in debuggers and such.
-
-const (
-	MINFUNC = 16 // minimum size for a function
-)
-
-type Segment struct {
-	Rwx     uint8  // permission as usual unix bits (5 = r-x etc)
-	Vaddr   uint64 // virtual address
-	Length  uint64 // length in memory
-	Fileoff uint64 // file offset
-	Filelen uint64 // length on disk
-	Sect    *Section
-}
-
-type Section struct {
-	Rwx     uint8
-	Extnum  int16
-	Align   int32
-	Name    string
-	Vaddr   uint64
-	Length  uint64
-	Next    *Section
-	Seg     *Segment
-	Elfsect *ElfShdr
-	Reloff  uint64
-	Rellen  uint64
-}
-
-// DynlinkingGo returns whether we are producing Go code that can live
-// in separate shared libraries linked together at runtime.
-func (ctxt *Link) DynlinkingGo() bool {
-	if !ctxt.Loaded {
-		panic("DynlinkingGo called before all symbols loaded")
-	}
-	canUsePlugins := ctxt.Syms.ROLookup("plugin.Open", 0) != nil
-	return Buildmode == BuildmodeShared || *FlagLinkshared || Buildmode == BuildmodePlugin || canUsePlugins
-}
-
-// UseRelro returns whether to make use of "read only relocations" aka
-// relro.
-func UseRelro() bool {
-	switch Buildmode {
-	case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared, BuildmodePIE, BuildmodePlugin:
-		return Iself
-	default:
-		return *FlagLinkshared
-	}
-}
-
-var (
-	SysArch         *sys.Arch
-	dynexp          []*Symbol
-	dynlib          []string
-	ldflag          []string
-	havedynamic     int
-	Funcalign       int
-	iscgo           bool
-	elfglobalsymndx int
-	interpreter     string
-
-	debug_s  bool // backup old value of debug['s']
-	HEADR    int32
-	Headtype obj.HeadType
-
-	nerrors  int
-	liveness int64
-)
-
-var (
-	Segtext      Segment
-	Segrodata    Segment
-	Segrelrodata Segment
-	Segdata      Segment
-	Segdwarf     Segment
-)
-
-/* whence for ldpkg */
-const (
-	FileObj = 0 + iota
-	ArchiveObj
-	Pkgdef
-)
-
-// TODO(dfc) outBuf duplicates bio.Writer
-type outBuf struct {
-	w   *bufio.Writer
-	f   *os.File
-	off int64
-}
-
-func (w *outBuf) Write(p []byte) (n int, err error) {
-	n, err = w.w.Write(p)
-	w.off += int64(n)
-	return n, err
-}
-
-func (w *outBuf) WriteString(s string) (n int, err error) {
-	n, err = coutbuf.w.WriteString(s)
-	w.off += int64(n)
-	return n, err
-}
-
-func (w *outBuf) Offset() int64 {
-	return w.off
-}
-
-var coutbuf outBuf
-
-const pkgname = "__.PKGDEF"
-
-var (
-	// Set if we see an object compiled by the host compiler that is not
-	// from a package that is known to support internal linking mode.
-	externalobj = false
-	theline     string
-)
-
-func Lflag(ctxt *Link, arg string) {
-	ctxt.Libdir = append(ctxt.Libdir, arg)
-}
-
-/*
- * Unix doesn't like it when we write to a running (or, sometimes,
- * recently run) binary, so remove the output file before writing it.
- * On Windows 7, remove() can force a subsequent create() to fail.
- * S_ISREG() does not exist on Plan 9.
- */
-func mayberemoveoutfile() {
-	if fi, err := os.Lstat(*flagOutfile); err == nil && !fi.Mode().IsRegular() {
-		return
-	}
-	os.Remove(*flagOutfile)
-}
-
-func libinit(ctxt *Link) {
-	Funcalign = Thearch.Funcalign
-
-	// add goroot to the end of the libdir list.
-	suffix := ""
-
-	suffixsep := ""
-	if *flagInstallSuffix != "" {
-		suffixsep = "_"
-		suffix = *flagInstallSuffix
-	} else if *flagRace {
-		suffixsep = "_"
-		suffix = "race"
-	} else if *flagMsan {
-		suffixsep = "_"
-		suffix = "msan"
-	}
-
-	Lflag(ctxt, filepath.Join(obj.GOROOT, "pkg", fmt.Sprintf("%s_%s%s%s", obj.GOOS, obj.GOARCH, suffixsep, suffix)))
-
-	mayberemoveoutfile()
-	f, err := os.OpenFile(*flagOutfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
-	if err != nil {
-		Exitf("cannot create %s: %v", *flagOutfile, err)
-	}
-
-	coutbuf.w = bufio.NewWriter(f)
-	coutbuf.f = f
-
-	if *flagEntrySymbol == "" {
-		switch Buildmode {
-		case BuildmodeCShared, BuildmodeCArchive:
-			*flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s_lib", obj.GOARCH, obj.GOOS)
-		case BuildmodeExe, BuildmodePIE:
-			*flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s", obj.GOARCH, obj.GOOS)
-		case BuildmodeShared, BuildmodePlugin:
-			// No *flagEntrySymbol for -buildmode=shared and plugin
-		default:
-			Errorf(nil, "unknown *flagEntrySymbol for buildmode %v", Buildmode)
-		}
-	}
-}
-
-func errorexit() {
-	if coutbuf.f != nil {
-		if nerrors != 0 {
-			Cflush()
-		}
-		// For rmtemp run at atexit time on Windows.
-		if err := coutbuf.f.Close(); err != nil {
-			Exitf("close: %v", err)
-		}
-	}
-
-	if nerrors != 0 {
-		if coutbuf.f != nil {
-			mayberemoveoutfile()
-		}
-		Exit(2)
-	}
-
-	Exit(0)
-}
-
-func loadinternal(ctxt *Link, name string) *Library {
-	for i := 0; i < len(ctxt.Libdir); i++ {
-		if *FlagLinkshared {
-			shlibname := filepath.Join(ctxt.Libdir[i], name+".shlibname")
-			if ctxt.Debugvlog != 0 {
-				ctxt.Logf("searching for %s.a in %s\n", name, shlibname)
-			}
-			if _, err := os.Stat(shlibname); err == nil {
-				return addlibpath(ctxt, "internal", "internal", "", name, shlibname)
-			}
-		}
-		pname := filepath.Join(ctxt.Libdir[i], name+".a")
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("searching for %s.a in %s\n", name, pname)
-		}
-		if _, err := os.Stat(pname); err == nil {
-			return addlibpath(ctxt, "internal", "internal", pname, name, "")
-		}
-	}
-
-	ctxt.Logf("warning: unable to find %s.a\n", name)
-	return nil
-}
-
-// findLibPathCmd uses the cmd command to find the gcc library libname.
-// It returns the library's full path if found, or "none" if not found.
-func (ctxt *Link) findLibPathCmd(cmd, libname string) string {
-	if *flagExtld == "" {
-		*flagExtld = "gcc"
-	}
-	args := hostlinkArchArgs()
-	args = append(args, cmd)
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%s %v\n", *flagExtld, args)
-	}
-	out, err := exec.Command(*flagExtld, args...).Output()
-	if err != nil {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("not using a %s file because compiler failed\n%v\n%s\n", libname, err, out)
-		}
-		return "none"
-	}
-	return strings.TrimSpace(string(out))
-}
-
-// findLibPath searches for library libname.
-// It returns the library's full path if found, or "none" if not found.
-func (ctxt *Link) findLibPath(libname string) string {
-	return ctxt.findLibPathCmd("--print-file-name="+libname, libname)
-}
-
-func (ctxt *Link) loadlib() {
-	switch Buildmode {
-	case BuildmodeCShared, BuildmodePlugin:
-		s := ctxt.Syms.Lookup("runtime.islibrary", 0)
-		s.Attr |= AttrDuplicateOK
-		Adduint8(ctxt, s, 1)
-	case BuildmodeCArchive:
-		s := ctxt.Syms.Lookup("runtime.isarchive", 0)
-		s.Attr |= AttrDuplicateOK
-		Adduint8(ctxt, s, 1)
-	}
-
-	loadinternal(ctxt, "runtime")
-	if SysArch.Family == sys.ARM {
-		loadinternal(ctxt, "math")
-	}
-	if *flagRace {
-		loadinternal(ctxt, "runtime/race")
-	}
-	if *flagMsan {
-		loadinternal(ctxt, "runtime/msan")
-	}
-
-	var i int
-	for i = 0; i < len(ctxt.Library); i++ {
-		iscgo = iscgo || ctxt.Library[i].Pkg == "runtime/cgo"
-		if ctxt.Library[i].Shlib == "" {
-			if ctxt.Debugvlog > 1 {
-				ctxt.Logf("%5.2f autolib: %s (from %s)\n", obj.Cputime(), ctxt.Library[i].File, ctxt.Library[i].Objref)
-			}
-			objfile(ctxt, ctxt.Library[i])
-		}
-	}
-
-	for i = 0; i < len(ctxt.Library); i++ {
-		if ctxt.Library[i].Shlib != "" {
-			if ctxt.Debugvlog > 1 {
-				ctxt.Logf("%5.2f autolib: %s (from %s)\n", obj.Cputime(), ctxt.Library[i].Shlib, ctxt.Library[i].Objref)
-			}
-			ldshlibsyms(ctxt, ctxt.Library[i].Shlib)
-		}
-	}
-
-	// We now have enough information to determine the link mode.
-	determineLinkMode(ctxt)
-
-	if Linkmode == LinkExternal && SysArch.Family == sys.PPC64 {
-		toc := ctxt.Syms.Lookup(".TOC.", 0)
-		toc.Type = obj.SDYNIMPORT
-	}
-
-	if Linkmode == LinkExternal && !iscgo {
-		// This indicates a user requested -linkmode=external.
-		// The startup code uses an import of runtime/cgo to decide
-		// whether to initialize the TLS.  So give it one. This could
-		// be handled differently but it's an unusual case.
-		loadinternal(ctxt, "runtime/cgo")
-
-		if i < len(ctxt.Library) {
-			if ctxt.Library[i].Shlib != "" {
-				ldshlibsyms(ctxt, ctxt.Library[i].Shlib)
-			} else {
-				if Buildmode == BuildmodeShared || *FlagLinkshared {
-					Exitf("cannot implicitly include runtime/cgo in a shared library")
-				}
-				objfile(ctxt, ctxt.Library[i])
-			}
-		}
-	}
-
-	if Linkmode == LinkInternal {
-		// Drop all the cgo_import_static declarations.
-		// Turns out we won't be needing them.
-		for _, s := range ctxt.Syms.Allsym {
-			if s.Type == obj.SHOSTOBJ {
-				// If a symbol was marked both
-				// cgo_import_static and cgo_import_dynamic,
-				// then we want to make it cgo_import_dynamic
-				// now.
-				if s.Extname != "" && s.Dynimplib != "" && !s.Attr.CgoExport() {
-					s.Type = obj.SDYNIMPORT
-				} else {
-					s.Type = 0
-				}
-			}
-		}
-	}
-
-	tlsg := ctxt.Syms.Lookup("runtime.tlsg", 0)
-
-	// runtime.tlsg is used for external linking on platforms that do not define
-	// a variable to hold g in assembly (currently only intel).
-	if tlsg.Type == 0 {
-		tlsg.Type = obj.STLSBSS
-		tlsg.Size = int64(SysArch.PtrSize)
-	} else if tlsg.Type != obj.SDYNIMPORT {
-		Errorf(nil, "runtime declared tlsg variable %v", tlsg.Type)
-	}
-	tlsg.Attr |= AttrReachable
-	ctxt.Tlsg = tlsg
-
-	var moduledata *Symbol
-	if Buildmode == BuildmodePlugin {
-		moduledata = ctxt.Syms.Lookup("local.pluginmoduledata", 0)
-		moduledata.Attr |= AttrLocal
-	} else {
-		moduledata = ctxt.Syms.Lookup("runtime.firstmoduledata", 0)
-	}
-	if moduledata.Type != 0 && moduledata.Type != obj.SDYNIMPORT {
-		// If the module (toolchain-speak for "executable or shared
-		// library") we are linking contains the runtime package, it
-		// will define the runtime.firstmoduledata symbol and we
-		// truncate it back to 0 bytes so we can define its entire
-		// contents in symtab.go:symtab().
-		moduledata.Size = 0
-
-		// In addition, on ARM, the runtime depends on the linker
-		// recording the value of GOARM.
-		if SysArch.Family == sys.ARM {
-			s := ctxt.Syms.Lookup("runtime.goarm", 0)
-			s.Type = obj.SRODATA
-			s.Size = 0
-			Adduint8(ctxt, s, uint8(obj.GOARM))
-		}
-
-		if obj.Framepointer_enabled(obj.GOOS, obj.GOARCH) {
-			s := ctxt.Syms.Lookup("runtime.framepointer_enabled", 0)
-			s.Type = obj.SRODATA
-			s.Size = 0
-			Adduint8(ctxt, s, 1)
-		}
-	} else {
-		// If OTOH the module does not contain the runtime package,
-		// create a local symbol for the moduledata.
-		moduledata = ctxt.Syms.Lookup("local.moduledata", 0)
-		moduledata.Attr |= AttrLocal
-	}
-	// In all cases we mark the moduledata as noptrdata to hide it from
-	// the GC.
-	moduledata.Type = obj.SNOPTRDATA
-	moduledata.Attr |= AttrReachable
-	ctxt.Moduledata = moduledata
-
-	// Now that we know the link mode, trim the dynexp list.
-	x := AttrCgoExportDynamic
-
-	if Linkmode == LinkExternal {
-		x = AttrCgoExportStatic
-	}
-	w := 0
-	for i := 0; i < len(dynexp); i++ {
-		if dynexp[i].Attr&x != 0 {
-			dynexp[w] = dynexp[i]
-			w++
-		}
-	}
-	dynexp = dynexp[:w]
-
-	// In internal link mode, read the host object files.
-	if Linkmode == LinkInternal {
-		hostobjs(ctxt)
-
-		// If we have any undefined symbols in external
-		// objects, try to read them from the libgcc file.
-		any := false
-		for _, s := range ctxt.Syms.Allsym {
-			for _, r := range s.R {
-				if r.Sym != nil && r.Sym.Type&obj.SMASK == obj.SXREF && r.Sym.Name != ".got" {
-					any = true
-					break
-				}
-			}
-		}
-		if any {
-			if *flagLibGCC == "" {
-				*flagLibGCC = ctxt.findLibPathCmd("--print-libgcc-file-name", "libgcc")
-			}
-			if *flagLibGCC != "none" {
-				hostArchive(ctxt, *flagLibGCC)
-			}
-			if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-				if p := ctxt.findLibPath("libmingwex.a"); p != "none" {
-					hostArchive(ctxt, p)
-				}
-				if p := ctxt.findLibPath("libmingw32.a"); p != "none" {
-					hostArchive(ctxt, p)
-				}
-				// TODO: maybe do something similar to peimporteddlls to collect all lib names
-				// and try link them all to final exe just like libmingwex.a and libmingw32.a:
-				/*
-					for:
-					#cgo windows LDFLAGS: -lmsvcrt -lm
-					import:
-					libmsvcrt.a libm.a
-				*/
-			}
-		}
-	} else {
-		hostlinksetup()
-	}
-
-	// We've loaded all the code now.
-	ctxt.Loaded = true
-
-	// If there are no dynamic libraries needed, gcc disables dynamic linking.
-	// Because of this, glibc's dynamic ELF loader occasionally (like in version 2.13)
-	// assumes that a dynamic binary always refers to at least one dynamic library.
-	// Rather than be a source of test cases for glibc, disable dynamic linking
-	// the same way that gcc would.
-	//
-	// Exception: on OS X, programs such as Shark only work with dynamic
-	// binaries, so leave it enabled on OS X (Mach-O) binaries.
-	// Also leave it enabled on Solaris which doesn't support
-	// statically linked binaries.
-	if Buildmode == BuildmodeExe {
-		if havedynamic == 0 && Headtype != obj.Hdarwin && Headtype != obj.Hsolaris {
-			*FlagD = true
-		}
-	}
-
-	// If package versioning is required, generate a hash of the
-	// packages used in the link.
-	if Buildmode == BuildmodeShared || Buildmode == BuildmodePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil {
-		for i = 0; i < len(ctxt.Library); i++ {
-			if ctxt.Library[i].Shlib == "" {
-				genhash(ctxt, ctxt.Library[i])
-			}
-		}
-	}
-
-	if SysArch == sys.Arch386 {
-		if (Buildmode == BuildmodeCArchive && Iself) || Buildmode == BuildmodeCShared || Buildmode == BuildmodePIE || ctxt.DynlinkingGo() {
-			got := ctxt.Syms.Lookup("_GLOBAL_OFFSET_TABLE_", 0)
-			got.Type = obj.SDYNIMPORT
-			got.Attr |= AttrReachable
-		}
-	}
-
-	importcycles()
-
-	// put symbols into Textp
-	// do it in postorder so that packages are laid down in dependency order
-	// internal first, then everything else
-	ctxt.Library = postorder(ctxt.Library)
-	for _, doInternal := range [2]bool{true, false} {
-		for _, lib := range ctxt.Library {
-			if isRuntimeDepPkg(lib.Pkg) != doInternal {
-				continue
-			}
-			ctxt.Textp = append(ctxt.Textp, lib.textp...)
-			for _, s := range lib.dupTextSyms {
-				if !s.Attr.OnList() {
-					ctxt.Textp = append(ctxt.Textp, s)
-					s.Attr |= AttrOnList
-				}
-			}
-		}
-	}
-
-	if len(ctxt.Shlibs) > 0 {
-		// We might have overwritten some functions above (this tends to happen for the
-		// autogenerated type equality/hashing functions) and we don't want to generate
-		// pcln table entries for these anymore, so remove them from Textp.
-		textp := make([]*Symbol, 0, len(ctxt.Textp))
-		for _, s := range ctxt.Textp {
-			if s.Type != obj.SDYNIMPORT {
-				textp = append(textp, s)
-			}
-		}
-		ctxt.Textp = textp
-	}
-}
-
-/*
- * look for the next file in an archive.
- * adapted from libmach.
- */
-func nextar(bp *bio.Reader, off int64, a *ArHdr) int64 {
-	if off&1 != 0 {
-		off++
-	}
-	bp.Seek(off, 0)
-	var buf [SAR_HDR]byte
-	if n, err := io.ReadFull(bp, buf[:]); err != nil {
-		if n == 0 && err != io.EOF {
-			return -1
-		}
-		return 0
-	}
-
-	a.name = artrim(buf[0:16])
-	a.date = artrim(buf[16:28])
-	a.uid = artrim(buf[28:34])
-	a.gid = artrim(buf[34:40])
-	a.mode = artrim(buf[40:48])
-	a.size = artrim(buf[48:58])
-	a.fmag = artrim(buf[58:60])
-
-	arsize := atolwhex(a.size)
-	if arsize&1 != 0 {
-		arsize++
-	}
-	return arsize + SAR_HDR
-}
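-
-// Illustrative note, not part of the original file: nextar parses the classic
-// 60-byte Unix ar member header, whose fixed-width ASCII fields are
-// name[16] date[12] uid[6] gid[6] mode[8] size[10] fmag[2]; that layout is
-// what the buf slicing above follows. The off&1 and arsize&1 adjustments
-// implement the ar rule that members start on even byte offsets.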
-
-func genhash(ctxt *Link, lib *Library) {
-	f, err := bio.Open(lib.File)
-	if err != nil {
-		Errorf(nil, "cannot open file %s for hash generation: %v", lib.File, err)
-		return
-	}
-	defer f.Close()
-
-	var arhdr ArHdr
-	l := nextar(f, int64(len(ARMAG)), &arhdr)
-	if l <= 0 {
-		Errorf(nil, "%s: short read on archive file symbol header", lib.File)
-		return
-	}
-
-	h := sha1.New()
-	if _, err := io.CopyN(h, f, atolwhex(arhdr.size)); err != nil {
-		Errorf(nil, "bad read of %s for hash generation: %v", lib.File, err)
-		return
-	}
-	lib.hash = hex.EncodeToString(h.Sum(nil))
-}
-
-func objfile(ctxt *Link, lib *Library) {
-	pkg := pathtoprefix(lib.Pkg)
-
-	if ctxt.Debugvlog > 1 {
-		ctxt.Logf("%5.2f ldobj: %s (%s)\n", obj.Cputime(), lib.File, pkg)
-	}
-	f, err := bio.Open(lib.File)
-	if err != nil {
-		Exitf("cannot open file %s: %v", lib.File, err)
-	}
-
-	for i := 0; i < len(ARMAG); i++ {
-		if c, err := f.ReadByte(); err == nil && c == ARMAG[i] {
-			continue
-		}
-
-		/* load it as a regular file */
-		l := f.Seek(0, 2)
-
-		f.Seek(0, 0)
-		ldobj(ctxt, f, lib, l, lib.File, lib.File, FileObj)
-		f.Close()
-
-		return
-	}
-
-	/* process __.PKGDEF */
-	off := f.Offset()
-
-	var arhdr ArHdr
-	l := nextar(f, off, &arhdr)
-	var pname string
-	if l <= 0 {
-		Errorf(nil, "%s: short read on archive file symbol header", lib.File)
-		goto out
-	}
-
-	if !strings.HasPrefix(arhdr.name, pkgname) {
-		Errorf(nil, "%s: cannot find package header", lib.File)
-		goto out
-	}
-
-	off += l
-
-	ldpkg(ctxt, f, pkg, atolwhex(arhdr.size), lib.File, Pkgdef)
-
-	/*
-	 * load all the object files from the archive now.
-	 * this gives us sequential file access and keeps us
-	 * from needing to come back later to pick up more
-	 * objects.  it breaks the usual C archive model, but
-	 * this is Go, not C.  the common case in Go is that
-	 * we need to load all the objects, and then we throw away
-	 * the individual symbols that are unused.
-	 *
-	 * loading every object will also make it possible to
-	 * load foreign objects not referenced by __.PKGDEF.
-	 */
-	for {
-		l = nextar(f, off, &arhdr)
-		if l == 0 {
-			break
-		}
-		if l < 0 {
-			Exitf("%s: malformed archive", lib.File)
-		}
-
-		off += l
-
-		pname = fmt.Sprintf("%s(%s)", lib.File, arhdr.name)
-		l = atolwhex(arhdr.size)
-		ldobj(ctxt, f, lib, l, pname, lib.File, ArchiveObj)
-	}
-
-out:
-	f.Close()
-}
-
-type Hostobj struct {
-	ld     func(*Link, *bio.Reader, string, int64, string)
-	pkg    string
-	pn     string
-	file   string
-	off    int64
-	length int64
-}
-
-var hostobj []Hostobj
-
-// These packages can use internal linking mode.
-// Others trigger external mode.
-var internalpkg = []string{
-	"crypto/x509",
-	"net",
-	"os/user",
-	"runtime/cgo",
-	"runtime/race",
-	"runtime/msan",
-}
-
-func ldhostobj(ld func(*Link, *bio.Reader, string, int64, string), f *bio.Reader, pkg string, length int64, pn string, file string) *Hostobj {
-	isinternal := false
-	for i := 0; i < len(internalpkg); i++ {
-		if pkg == internalpkg[i] {
-			isinternal = true
-			break
-		}
-	}
-
-	// DragonFly declares errno with __thread, which results in a symbol
-	// type of R_386_TLS_GD or R_X86_64_TLSGD. The Go linker does not
-	// currently know how to handle TLS relocations, hence we have to
-	// force external linking for any libraries that link in code that
-	// uses errno. This can be removed if the Go linker ever supports
-	// these relocation types.
-	if Headtype == obj.Hdragonfly {
-		if pkg == "net" || pkg == "os/user" {
-			isinternal = false
-		}
-	}
-
-	if !isinternal {
-		externalobj = true
-	}
-
-	hostobj = append(hostobj, Hostobj{})
-	h := &hostobj[len(hostobj)-1]
-	h.ld = ld
-	h.pkg = pkg
-	h.pn = pn
-	h.file = file
-	h.off = f.Offset()
-	h.length = length
-	return h
-}
-
-func hostobjs(ctxt *Link) {
-	var h *Hostobj
-
-	for i := 0; i < len(hostobj); i++ {
-		h = &hostobj[i]
-		f, err := bio.Open(h.file)
-		if err != nil {
-			Exitf("cannot reopen %s: %v", h.pn, err)
-		}
-
-		f.Seek(h.off, 0)
-		h.ld(ctxt, f, h.pkg, h.length, h.pn)
-		f.Close()
-	}
-}
-
-// provided by lib9
-
-func rmtemp() {
-	os.RemoveAll(*flagTmpdir)
-}
-
-func hostlinksetup() {
-	if Linkmode != LinkExternal {
-		return
-	}
-
-	// For external link, record that we need to tell the external linker -s,
-	// and turn off -s internally: the external linker needs the symbol
-	// information for its final link.
-	debug_s = *FlagS
-	*FlagS = false
-
-	// create temporary directory and arrange cleanup
-	if *flagTmpdir == "" {
-		dir, err := ioutil.TempDir("", "go-link-")
-		if err != nil {
-			log.Fatal(err)
-		}
-		*flagTmpdir = dir
-		AtExit(rmtemp)
-	}
-
-	// change our output to temporary object file
-	coutbuf.f.Close()
-	mayberemoveoutfile()
-
-	p := filepath.Join(*flagTmpdir, "go.o")
-	var err error
-	f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
-	if err != nil {
-		Exitf("cannot create %s: %v", p, err)
-	}
-
-	coutbuf.w = bufio.NewWriter(f)
-	coutbuf.f = f
-}
-
-// hostobjCopy creates a copy of the object files in hostobj in a
-// temporary directory.
-func hostobjCopy() (paths []string) {
-	var wg sync.WaitGroup
-	sema := make(chan struct{}, runtime.NumCPU()) // limit open file descriptors
-	for i, h := range hostobj {
-		h := h
-		dst := filepath.Join(*flagTmpdir, fmt.Sprintf("%06d.o", i))
-		paths = append(paths, dst)
-
-		wg.Add(1)
-		go func() {
-			sema <- struct{}{}
-			defer func() {
-				<-sema
-				wg.Done()
-			}()
-			f, err := os.Open(h.file)
-			if err != nil {
-				Exitf("cannot reopen %s: %v", h.pn, err)
-			}
-			if _, err := f.Seek(h.off, 0); err != nil {
-				Exitf("cannot seek %s: %v", h.pn, err)
-			}
-
-			w, err := os.Create(dst)
-			if err != nil {
-				Exitf("cannot create %s: %v", dst, err)
-			}
-			if _, err := io.CopyN(w, f, h.length); err != nil {
-				Exitf("cannot write %s: %v", dst, err)
-			}
-			if err := w.Close(); err != nil {
-				Exitf("cannot close %s: %v", dst, err)
-			}
-		}()
-	}
-	wg.Wait()
-	return paths
-}
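-
-// Illustrative sketch, not from the original source: the buffered channel in
-// hostobjCopy acts as a counting semaphore that caps how many files are open
-// at once. Reduced to its essentials (limit, jobs, and process are
-// placeholders), the pattern is:
-//
-//	sema := make(chan struct{}, limit) // limit = max concurrent workers
-//	var wg sync.WaitGroup
-//	for _, job := range jobs {
-//		job := job
-//		wg.Add(1)
-//		go func() {
-//			sema <- struct{}{} // acquire a slot
-//			defer func() { <-sema; wg.Done() }()
-//			process(job) // placeholder for the real work
-//		}()
-//	}
-//	wg.Wait()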
-
-// archive builds a .a archive from the hostobj object files.
-func (ctxt *Link) archive() {
-	if Buildmode != BuildmodeCArchive {
-		return
-	}
-
-	if *flagExtar == "" {
-		*flagExtar = "ar"
-	}
-
-	mayberemoveoutfile()
-
-	// Force the buffer to flush here so that external
-	// tools will see a complete file.
-	Cflush()
-	if err := coutbuf.f.Close(); err != nil {
-		Exitf("close: %v", err)
-	}
-	coutbuf.f = nil
-
-	argv := []string{*flagExtar, "-q", "-c", "-s", *flagOutfile}
-	argv = append(argv, filepath.Join(*flagTmpdir, "go.o"))
-	argv = append(argv, hostobjCopy()...)
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("archive: %s\n", strings.Join(argv, " "))
-	}
-
-	if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
-		Exitf("running %s failed: %v\n%s", argv[0], err, out)
-	}
-}
-
-func (l *Link) hostlink() {
-	if Linkmode != LinkExternal || nerrors > 0 {
-		return
-	}
-	if Buildmode == BuildmodeCArchive {
-		return
-	}
-
-	if *flagExtld == "" {
-		*flagExtld = "gcc"
-	}
-
-	var argv []string
-	argv = append(argv, *flagExtld)
-	argv = append(argv, hostlinkArchArgs()...)
-
-	if !*FlagS && !debug_s {
-		argv = append(argv, "-gdwarf-2")
-	} else {
-		argv = append(argv, "-s")
-	}
-
-	switch Headtype {
-	case obj.Hdarwin:
-		argv = append(argv, "-Wl,-headerpad,1144")
-		if l.DynlinkingGo() {
-			argv = append(argv, "-Wl,-flat_namespace")
-		} else {
-			argv = append(argv, "-Wl,-no_pie")
-		}
-	case obj.Hopenbsd:
-		argv = append(argv, "-Wl,-nopie")
-	case obj.Hwindows:
-		argv = append(argv, "-mconsole")
-	case obj.Hwindowsgui:
-		argv = append(argv, "-mwindows")
-	}
-
-	switch Buildmode {
-	case BuildmodeExe:
-		if Headtype == obj.Hdarwin {
-			argv = append(argv, "-Wl,-pagezero_size,4000000")
-		}
-	case BuildmodePIE:
-		if UseRelro() {
-			argv = append(argv, "-Wl,-z,relro")
-		}
-		argv = append(argv, "-pie")
-	case BuildmodeCShared:
-		if Headtype == obj.Hdarwin {
-			argv = append(argv, "-dynamiclib", "-Wl,-read_only_relocs,suppress")
-		} else {
-			// ELF.
-			argv = append(argv, "-Wl,-Bsymbolic")
-			if UseRelro() {
-				argv = append(argv, "-Wl,-z,relro")
-			}
-			// Pass -z nodelete to mark the shared library as
-			// non-closeable: a dlclose will do nothing.
-			argv = append(argv, "-shared", "-Wl,-z,nodelete")
-		}
-	case BuildmodeShared:
-		if UseRelro() {
-			argv = append(argv, "-Wl,-z,relro")
-		}
-		argv = append(argv, "-shared")
-	case BuildmodePlugin:
-		if Headtype == obj.Hdarwin {
-			argv = append(argv, "-dynamiclib")
-		} else {
-			if UseRelro() {
-				argv = append(argv, "-Wl,-z,relro")
-			}
-			argv = append(argv, "-shared")
-		}
-	}
-
-	if Iself && l.DynlinkingGo() {
-		// We force all symbol resolution to be done at program startup
-		// because lazy PLT resolution can use large amounts of stack at
-		// times when we cannot allow it to do so.
-		argv = append(argv, "-Wl,-znow")
-
-		// Do not let the host linker generate COPY relocations. These
-		// can move symbols out of sections that rely on stable offsets
-		// from the beginning of the section (like STYPE).
-		argv = append(argv, "-Wl,-znocopyreloc")
-
-		if SysArch.InFamily(sys.ARM, sys.ARM64) {
-			// On ARM, the GNU linker will generate COPY relocations
-			// even with -znocopyreloc set.
-			// https://sourceware.org/bugzilla/show_bug.cgi?id=19962
-			//
-			// On ARM64, the GNU linker will fail instead of
-			// generating COPY relocations.
-			//
-			// In both cases, switch to gold.
-			argv = append(argv, "-fuse-ld=gold")
-
-			// If gold is not installed, gcc will silently switch
-			// back to ld.bfd. So we parse the version information
-			// and provide a useful error if gold is missing.
-			cmd := exec.Command(*flagExtld, "-fuse-ld=gold", "-Wl,--version")
-			if out, err := cmd.CombinedOutput(); err == nil {
-				if !bytes.Contains(out, []byte("GNU gold")) {
-					log.Fatalf("ARM external linker must be gold (issue #15696), but is not: %s", out)
-				}
-			}
-		}
-	}
-
-	if Iself && len(buildinfo) > 0 {
-		argv = append(argv, fmt.Sprintf("-Wl,--build-id=0x%x", buildinfo))
-	}
-
-	// On Windows, given -o foo, GCC will append ".exe" to produce
-	// "foo.exe".  We have decided that we want to honor the -o
-	// option. To make this work, we append a '.' so that GCC
-	// will decide that the file already has an extension. We
-	// only want to do this when producing a Windows output file
-	// on a Windows host.
-	outopt := *flagOutfile
-	if obj.GOOS == "windows" && runtime.GOOS == "windows" && filepath.Ext(outopt) == "" {
-		outopt += "."
-	}
-	argv = append(argv, "-o")
-	argv = append(argv, outopt)
-
-	if rpath.val != "" {
-		argv = append(argv, fmt.Sprintf("-Wl,-rpath,%s", rpath.val))
-	}
-
-	// Force global symbols to be exported for dlopen, etc.
-	if Iself {
-		argv = append(argv, "-rdynamic")
-	}
-
-	if strings.Contains(argv[0], "clang") {
-		argv = append(argv, "-Qunused-arguments")
-	}
-
-	argv = append(argv, filepath.Join(*flagTmpdir, "go.o"))
-	argv = append(argv, hostobjCopy()...)
-
-	if *FlagLinkshared {
-		seenDirs := make(map[string]bool)
-		seenLibs := make(map[string]bool)
-		addshlib := func(path string) {
-			dir, base := filepath.Split(path)
-			if !seenDirs[dir] {
-				argv = append(argv, "-L"+dir)
-				if !rpath.set {
-					argv = append(argv, "-Wl,-rpath="+dir)
-				}
-				seenDirs[dir] = true
-			}
-			base = strings.TrimSuffix(base, ".so")
-			base = strings.TrimPrefix(base, "lib")
-			if !seenLibs[base] {
-				argv = append(argv, "-l"+base)
-				seenLibs[base] = true
-			}
-		}
-		for _, shlib := range l.Shlibs {
-			addshlib(shlib.Path)
-			for _, dep := range shlib.Deps {
-				if dep == "" {
-					continue
-				}
-				libpath := findshlib(l, dep)
-				if libpath != "" {
-					addshlib(libpath)
-				}
-			}
-		}
-	}
-
-	argv = append(argv, ldflag...)
-
-	// When building a program with the default -buildmode=exe the
-	// gc compiler generates code that requires DT_TEXTREL in a
-	// position independent executable (PIE). On systems where the
-	// toolchain creates PIEs by default, and where DT_TEXTREL
-	// does not work, the resulting programs will not run. See
-	// issue #17847. To avoid this problem pass -no-pie to the
-	// toolchain if it is supported.
-	if Buildmode == BuildmodeExe {
-		src := filepath.Join(*flagTmpdir, "trivial.c")
-		if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
-			Errorf(nil, "WriteFile trivial.c failed: %v", err)
-		}
-		cmd := exec.Command(argv[0], "-c", "-no-pie", "trivial.c")
-		cmd.Dir = *flagTmpdir
-		cmd.Env = append([]string{"LC_ALL=C"}, os.Environ()...)
-		out, err := cmd.CombinedOutput()
-		supported := err == nil && !bytes.Contains(out, []byte("unrecognized"))
-		if supported {
-			argv = append(argv, "-no-pie")
-		}
-	}
-
-	for _, p := range strings.Fields(*flagExtldflags) {
-		argv = append(argv, p)
-
-		// clang, unlike GCC, passes -rdynamic to the linker
-		// even when linking with -static, causing a linker
-		// error when using GNU ld. So take out -rdynamic if
-		// we added it. We do it in this order, rather than
-		// only adding -rdynamic later, so that -*extldflags
-		// can override -rdynamic without using -static.
-		if Iself && p == "-static" {
-			for i := range argv {
-				if argv[i] == "-rdynamic" {
-					argv[i] = "-static"
-				}
-			}
-		}
-	}
-	if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-		// libmingw32 and libmingwex have some inter-dependencies,
-		// so must use linker groups.
-		argv = append(argv, "-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group")
-		argv = append(argv, peimporteddlls()...)
-	}
-
-	if l.Debugvlog != 0 {
-		l.Logf("%5.2f host link:", obj.Cputime())
-		for _, v := range argv {
-			l.Logf(" %q", v)
-		}
-		l.Logf("\n")
-	}
-
-	if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
-		Exitf("running %s failed: %v\n%s", argv[0], err, out)
-	} else if l.Debugvlog != 0 && len(out) > 0 {
-		l.Logf("%s", out)
-	}
-
-	if !*FlagS && !debug_s && Headtype == obj.Hdarwin {
-		// Skip combining dwarf on arm.
-		if !SysArch.InFamily(sys.ARM, sys.ARM64) {
-			dsym := filepath.Join(*flagTmpdir, "go.dwarf")
-			if out, err := exec.Command("dsymutil", "-f", *flagOutfile, "-o", dsym).CombinedOutput(); err != nil {
-				Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
-			}
-			// Skip combining if `dsymutil` didn't generate a file. See #11994.
-			if _, err := os.Stat(dsym); os.IsNotExist(err) {
-				return
-			}
-			// For os.Rename to work reliably, must be in same directory as outfile.
-			combinedOutput := *flagOutfile + "~"
-			if err := machoCombineDwarf(*flagOutfile, dsym, combinedOutput); err != nil {
-				Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
-			}
-			os.Remove(*flagOutfile)
-			if err := os.Rename(combinedOutput, *flagOutfile); err != nil {
-				Exitf("%s: %v", os.Args[0], err)
-			}
-		}
-	}
-}
-
-// hostlinkArchArgs returns arguments to pass to the external linker
-// based on the architecture.
-func hostlinkArchArgs() []string {
-	switch SysArch.Family {
-	case sys.I386:
-		return []string{"-m32"}
-	case sys.AMD64, sys.PPC64, sys.S390X:
-		return []string{"-m64"}
-	case sys.ARM:
-		return []string{"-marm"}
-	case sys.ARM64:
-		// nothing needed
-	case sys.MIPS64:
-		return []string{"-mabi=64"}
-	case sys.MIPS:
-		return []string{"-mabi=32"}
-	}
-	return nil
-}
-
-// ldobj loads an input object. If it is a host object (an object
-// compiled by a non-Go compiler) it returns the Hostobj pointer. If
-// it is a Go object, it returns nil.
-func ldobj(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string, file string, whence int) *Hostobj {
-	pkg := pathtoprefix(lib.Pkg)
-
-	eof := f.Offset() + length
-	start := f.Offset()
-	c1 := bgetc(f)
-	c2 := bgetc(f)
-	c3 := bgetc(f)
-	c4 := bgetc(f)
-	f.Seek(start, 0)
-
-	magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
-	if magic == 0x7f454c46 { // \x7F E L F
-		return ldhostobj(ldelf, f, pkg, length, pn, file)
-	}
-
-	if magic&^1 == 0xfeedface || magic&^0x01000000 == 0xcefaedfe {
-		return ldhostobj(ldmacho, f, pkg, length, pn, file)
-	}
-
-	if c1 == 0x4c && c2 == 0x01 || c1 == 0x64 && c2 == 0x86 {
-		return ldhostobj(ldpe, f, pkg, length, pn, file)
-	}
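-
-	// Added commentary, not in the original source: the three checks above
-	// recognize host object formats by their leading bytes. ELF objects start
-	// with 0x7f 'E' 'L' 'F'; Mach-O objects start with 0xfeedface/0xfeedfacf
-	// (or the byte-swapped forms 0xcefaedfe/0xcffaedfe); PE/COFF objects start
-	// with the machine type 0x014c (386) or 0x8664 (amd64). Anything else is
-	// expected to be a Go object file carrying the "go object " header checked
-	// next.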
-
-	/* check the header */
-	line, err := f.ReadString('\n')
-	if err != nil {
-		Errorf(nil, "truncated object file: %s: %v", pn, err)
-		return nil
-	}
-
-	if !strings.HasPrefix(line, "go object ") {
-		if strings.HasSuffix(pn, ".go") {
-			Exitf("%s: uncompiled .go source file", pn)
-			return nil
-		}
-
-		if line == SysArch.Name {
-			// old header format: just $GOOS
-			Errorf(nil, "%s: stale object file", pn)
-			return nil
-		}
-
-		Errorf(nil, "%s: not an object file", pn)
-		return nil
-	}
-
-	// First, check that the basic GOOS, GOARCH, and Version match.
-	t := fmt.Sprintf("%s %s %s ", obj.GOOS, obj.GOARCH, obj.Version)
-
-	line = strings.TrimRight(line, "\n")
-	if !strings.HasPrefix(line[10:]+" ", t) && !*flagF {
-		Errorf(nil, "%s: object is [%s] expected [%s]", pn, line[10:], t)
-		return nil
-	}
-
-	// Second, check that longer lines match each other exactly,
-	// so that the Go compiler can write additional information
-	// that must be the same from run to run.
-	if len(line) >= len(t)+10 {
-		if theline == "" {
-			theline = line[10:]
-		} else if theline != line[10:] {
-			Errorf(nil, "%s: object is [%s] expected [%s]", pn, line[10:], theline)
-			return nil
-		}
-	}
-
-	/* skip over exports and other info -- ends with \n!\n */
-	import0 := f.Offset()
-
-	c1 = '\n' // the last line ended in \n
-	c2 = bgetc(f)
-	c3 = bgetc(f)
-	for c1 != '\n' || c2 != '!' || c3 != '\n' {
-		c1 = c2
-		c2 = c3
-		c3 = bgetc(f)
-		if c3 == -1 {
-			Errorf(nil, "truncated object file: %s", pn)
-			return nil
-		}
-	}
-
-	import1 := f.Offset()
-
-	f.Seek(import0, 0)
-	ldpkg(ctxt, f, pkg, import1-import0-2, pn, whence) // -2 for !\n
-	f.Seek(import1, 0)
-
-	LoadObjFile(ctxt, f, lib, eof-f.Offset(), pn)
-	return nil
-}
-
-func readelfsymboldata(ctxt *Link, f *elf.File, sym *elf.Symbol) []byte {
-	data := make([]byte, sym.Size)
-	sect := f.Sections[sym.Section]
-	if sect.Type != elf.SHT_PROGBITS && sect.Type != elf.SHT_NOTE {
-		Errorf(nil, "reading %s from non-data section", sym.Name)
-	}
-	n, err := sect.ReadAt(data, int64(sym.Value-sect.Addr))
-	if uint64(n) != sym.Size {
-		Errorf(nil, "reading contents of %s: %v", sym.Name, err)
-	}
-	return data
-}
-
-func readwithpad(r io.Reader, sz int32) ([]byte, error) {
-	data := make([]byte, Rnd(int64(sz), 4))
-	_, err := io.ReadFull(r, data)
-	if err != nil {
-		return nil, err
-	}
-	data = data[:sz]
-	return data, nil
-}
-
-func readnote(f *elf.File, name []byte, typ int32) ([]byte, error) {
-	for _, sect := range f.Sections {
-		if sect.Type != elf.SHT_NOTE {
-			continue
-		}
-		r := sect.Open()
-		for {
-			var namesize, descsize, noteType int32
-			err := binary.Read(r, f.ByteOrder, &namesize)
-			if err != nil {
-				if err == io.EOF {
-					break
-				}
-				return nil, fmt.Errorf("read namesize failed: %v", err)
-			}
-			err = binary.Read(r, f.ByteOrder, &descsize)
-			if err != nil {
-				return nil, fmt.Errorf("read descsize failed: %v", err)
-			}
-			err = binary.Read(r, f.ByteOrder, &noteType)
-			if err != nil {
-				return nil, fmt.Errorf("read type failed: %v", err)
-			}
-			noteName, err := readwithpad(r, namesize)
-			if err != nil {
-				return nil, fmt.Errorf("read name failed: %v", err)
-			}
-			desc, err := readwithpad(r, descsize)
-			if err != nil {
-				return nil, fmt.Errorf("read desc failed: %v", err)
-			}
-			if string(name) == string(noteName) && typ == noteType {
-				return desc, nil
-			}
-		}
-	}
-	return nil, nil
-}
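-
-// Illustrative note, not part of the original file: readnote walks the
-// standard ELF note record layout. Each record in an SHT_NOTE section is
-// three 4-byte integers (namesz, descsz, type) followed by the name and then
-// the descriptor, each padded to a 4-byte boundary, which is what readwithpad
-// accounts for. The Go toolchain stores the ABI hash and the dependency list
-// in such notes; ldshlibsyms below reads both.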
-
-func findshlib(ctxt *Link, shlib string) string {
-	for _, libdir := range ctxt.Libdir {
-		libpath := filepath.Join(libdir, shlib)
-		if _, err := os.Stat(libpath); err == nil {
-			return libpath
-		}
-	}
-	Errorf(nil, "cannot find shared library: %s", shlib)
-	return ""
-}
-
-func ldshlibsyms(ctxt *Link, shlib string) {
-	libpath := findshlib(ctxt, shlib)
-	if libpath == "" {
-		return
-	}
-	for _, processedlib := range ctxt.Shlibs {
-		if processedlib.Path == libpath {
-			return
-		}
-	}
-	if ctxt.Debugvlog > 1 {
-		ctxt.Logf("%5.2f ldshlibsyms: found library with name %s at %s\n", obj.Cputime(), shlib, libpath)
-	}
-
-	f, err := elf.Open(libpath)
-	if err != nil {
-		Errorf(nil, "cannot open shared library: %s", libpath)
-		return
-	}
-
-	hash, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GOABIHASH_TAG)
-	if err != nil {
-		Errorf(nil, "cannot read ABI hash from shared library %s: %v", libpath, err)
-		return
-	}
-
-	depsbytes, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GODEPS_TAG)
-	if err != nil {
-		Errorf(nil, "cannot read dep list from shared library %s: %v", libpath, err)
-		return
-	}
-	deps := strings.Split(string(depsbytes), "\n")
-
-	syms, err := f.DynamicSymbols()
-	if err != nil {
-		Errorf(nil, "cannot read symbols from shared library: %s", libpath)
-		return
-	}
-	gcdataLocations := make(map[uint64]*Symbol)
-	for _, elfsym := range syms {
-		if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION {
-			continue
-		}
-		lsym := ctxt.Syms.Lookup(elfsym.Name, 0)
-		// Because loadlib above loads all .a files before loading any shared
-		// libraries, any non-dynimport symbols we find that duplicate symbols
-		// already loaded should be ignored (the symbols from the .a files
-		// "win").
-		if lsym.Type != 0 && lsym.Type != obj.SDYNIMPORT {
-			continue
-		}
-		lsym.Type = obj.SDYNIMPORT
-		lsym.ElfType = elf.ST_TYPE(elfsym.Info)
-		lsym.Size = int64(elfsym.Size)
-		if elfsym.Section != elf.SHN_UNDEF {
-			// Set .File for the library that actually defines the symbol.
-			lsym.File = libpath
-			// The decodetype_* functions in decodetype.go need access to
-			// the type data.
-			if strings.HasPrefix(lsym.Name, "type.") && !strings.HasPrefix(lsym.Name, "type..") {
-				lsym.P = readelfsymboldata(ctxt, f, &elfsym)
-				gcdataLocations[elfsym.Value+2*uint64(SysArch.PtrSize)+8+1*uint64(SysArch.PtrSize)] = lsym
-			}
-		}
-	}
-	gcdataAddresses := make(map[*Symbol]uint64)
-	if SysArch.Family == sys.ARM64 {
-		for _, sect := range f.Sections {
-			if sect.Type == elf.SHT_RELA {
-				var rela elf.Rela64
-				rdr := sect.Open()
-				for {
-					err := binary.Read(rdr, f.ByteOrder, &rela)
-					if err == io.EOF {
-						break
-					} else if err != nil {
-						Errorf(nil, "reading relocation failed %v", err)
-						return
-					}
-					t := elf.R_AARCH64(rela.Info & 0xffff)
-					if t != elf.R_AARCH64_RELATIVE {
-						continue
-					}
-					if lsym, ok := gcdataLocations[rela.Off]; ok {
-						gcdataAddresses[lsym] = uint64(rela.Addend)
-					}
-				}
-			}
-		}
-	}
-
-	ctxt.Shlibs = append(ctxt.Shlibs, Shlib{Path: libpath, Hash: hash, Deps: deps, File: f, gcdataAddresses: gcdataAddresses})
-}
-
-// Copied from ../gc/subr.c:/^pathtoprefix; must stay in sync.
-/*
- * Convert raw string to the prefix that will be used in the symbol table.
- * Invalid bytes turn into %xx.	 Right now the only bytes that need
- * escaping are %, ., and ", but we escape all control characters too.
- *
- * If you edit this, edit ../gc/subr.c:/^pathtoprefix too.
- * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
- */
-func pathtoprefix(s string) string {
-	slash := strings.LastIndex(s, "/")
-	for i := 0; i < len(s); i++ {
-		c := s[i]
-		if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
-			var buf bytes.Buffer
-			for i := 0; i < len(s); i++ {
-				c := s[i]
-				if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
-					fmt.Fprintf(&buf, "%%%02x", c)
-					continue
-				}
-				buf.WriteByte(c)
-			}
-			return buf.String()
-		}
-	}
-	return s
-}
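-
-// Illustration, not in the original source: with the escaping rules above,
-// pathtoprefix("example.com/repo/v2.0") returns "example.com/repo/v2%2e0".
-// The dot in "example.com" precedes the final slash and is left alone, while
-// the dot in "v2.0" falls at or after it and is escaped as %2e.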
-
-func addsection(seg *Segment, name string, rwx int) *Section {
-	var l **Section
-
-	for l = &seg.Sect; *l != nil; l = &(*l).Next {
-	}
-	sect := new(Section)
-	sect.Rwx = uint8(rwx)
-	sect.Name = name
-	sect.Seg = seg
-	sect.Align = int32(SysArch.PtrSize) // everything is at least pointer-aligned
-	*l = sect
-	return sect
-}
-
-func Le16(b []byte) uint16 {
-	return uint16(b[0]) | uint16(b[1])<<8
-}
-
-func Le32(b []byte) uint32 {
-	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func Le64(b []byte) uint64 {
-	return uint64(Le32(b)) | uint64(Le32(b[4:]))<<32
-}
-
-func Be16(b []byte) uint16 {
-	return uint16(b[0])<<8 | uint16(b[1])
-}
-
-func Be32(b []byte) uint32 {
-	return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
-}
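-
-// Illustration, not in the original source: these helpers decode fixed-width
-// integers directly from byte slices, so for example
-// Le32([]byte{0x78, 0x56, 0x34, 0x12}) == 0x12345678 and
-// Be16([]byte{0x12, 0x34}) == 0x1234.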
-
-type chain struct {
-	sym   *Symbol
-	up    *chain
-	limit int // limit on entry to sym
-}
-
-var morestack *Symbol
-
-// TODO: Record enough information in new object files to
-// allow stack checks here.
-
-func haslinkregister(ctxt *Link) bool {
-	return ctxt.FixedFrameSize() != 0
-}
-
-func callsize(ctxt *Link) int {
-	if haslinkregister(ctxt) {
-		return 0
-	}
-	return SysArch.RegSize
-}
-
-func (ctxt *Link) dostkcheck() {
-	var ch chain
-
-	morestack = ctxt.Syms.Lookup("runtime.morestack", 0)
-
-	// Every splitting function ensures that there are at least StackLimit
-	// bytes available below SP when the splitting prologue finishes.
-	// If the splitting function calls F, then F begins execution with
-	// at least StackLimit - callsize() bytes available.
-	// Check that every function behaves correctly with this amount
-	// of stack, following direct calls in order to piece together chains
-	// of non-splitting functions.
-	ch.up = nil
-
-	ch.limit = obj.StackLimit - callsize(ctxt)
-
-	// Check every function, but do the nosplit functions in a first pass,
-	// to make the printed failure chains as short as possible.
-	for _, s := range ctxt.Textp {
-		// runtime.racesymbolizethunk is called from gcc-compiled C
-		// code running on the operating system thread stack.
-		// It uses more than the usual amount of stack but that's okay.
-		if s.Name == "runtime.racesymbolizethunk" {
-			continue
-		}
-
-		if s.Attr.NoSplit() {
-			ch.sym = s
-			stkcheck(ctxt, &ch, 0)
-		}
-	}
-
-	for _, s := range ctxt.Textp {
-		if !s.Attr.NoSplit() {
-			ch.sym = s
-			stkcheck(ctxt, &ch, 0)
-		}
-	}
-}
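-
-// Added commentary, not in the original source: the budget threaded through
-// the chain works out as follows. On an architecture without a link register,
-// callsize() is SysArch.RegSize, so a function is first checked with
-// obj.StackLimit minus RegSize bytes available; each callee in a chain then
-// inherits that budget minus the caller's stack-pointer adjustment at the
-// call site and another callsize() for the call itself, and stkbroke reports
-// the whole chain once the running budget goes negative.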
-
-func stkcheck(ctxt *Link, up *chain, depth int) int {
-	limit := up.limit
-	s := up.sym
-
-	// Don't duplicate work: only need to consider each
-	// function at top of safe zone once.
-	top := limit == obj.StackLimit-callsize(ctxt)
-	if top {
-		if s.Attr.StackCheck() {
-			return 0
-		}
-		s.Attr |= AttrStackCheck
-	}
-
-	if depth > 100 {
-		Errorf(s, "nosplit stack check too deep")
-		stkbroke(ctxt, up, 0)
-		return -1
-	}
-
-	if s.Attr.External() || s.FuncInfo == nil {
-		// external function.
-		// should never be called directly.
-		// only diagnose the direct caller.
-		// TODO(mwhudson): actually think about this.
-		if depth == 1 && s.Type != obj.SXREF && !ctxt.DynlinkingGo() &&
-			Buildmode != BuildmodeCArchive && Buildmode != BuildmodePIE && Buildmode != BuildmodeCShared && Buildmode != BuildmodePlugin {
-
-			Errorf(s, "call to external function")
-		}
-		return -1
-	}
-
-	if limit < 0 {
-		stkbroke(ctxt, up, limit)
-		return -1
-	}
-
-	// morestack looks like it calls functions,
-	// but it switches the stack pointer first.
-	if s == morestack {
-		return 0
-	}
-
-	var ch chain
-	ch.up = up
-
-	if !s.Attr.NoSplit() {
-		// Ensure we have enough stack to call morestack.
-		ch.limit = limit - callsize(ctxt)
-		ch.sym = morestack
-		if stkcheck(ctxt, &ch, depth+1) < 0 {
-			return -1
-		}
-		if !top {
-			return 0
-		}
-		// Raise limit to allow frame.
-		locals := int32(0)
-		if s.FuncInfo != nil {
-			locals = s.FuncInfo.Locals
-		}
-		limit = int(obj.StackLimit+locals) + int(ctxt.FixedFrameSize())
-	}
-
-	// Walk through sp adjustments in function, consuming relocs.
-	ri := 0
-
-	endr := len(s.R)
-	var ch1 chain
-	var pcsp Pciter
-	var r *Reloc
-	for pciterinit(ctxt, &pcsp, &s.FuncInfo.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
-		// pcsp.value is in effect for [pcsp.pc, pcsp.nextpc).
-
-		// Check stack size in effect for this span.
-		if int32(limit)-pcsp.value < 0 {
-			stkbroke(ctxt, up, int(int32(limit)-pcsp.value))
-			return -1
-		}
-
-		// Process calls in this span.
-		for ; ri < endr && uint32(s.R[ri].Off) < pcsp.nextpc; ri++ {
-			r = &s.R[ri]
-			switch r.Type {
-			// Direct call.
-			case obj.R_CALL, obj.R_CALLARM, obj.R_CALLARM64, obj.R_CALLPOWER, obj.R_CALLMIPS:
-				ch.limit = int(int32(limit) - pcsp.value - int32(callsize(ctxt)))
-				ch.sym = r.Sym
-				if stkcheck(ctxt, &ch, depth+1) < 0 {
-					return -1
-				}
-
-			// Indirect call. Assume it is a call to a splitting function,
-			// so we have to make sure it can call morestack.
-			// Arrange the data structures to report both calls, so that
-			// if there is an error, stkprint shows all the steps involved.
-			case obj.R_CALLIND:
-				ch.limit = int(int32(limit) - pcsp.value - int32(callsize(ctxt)))
-
-				ch.sym = nil
-				ch1.limit = ch.limit - callsize(ctxt) // for morestack in called prologue
-				ch1.up = &ch
-				ch1.sym = morestack
-				if stkcheck(ctxt, &ch1, depth+2) < 0 {
-					return -1
-				}
-			}
-		}
-	}
-
-	return 0
-}
-
-func stkbroke(ctxt *Link, ch *chain, limit int) {
-	Errorf(ch.sym, "nosplit stack overflow")
-	stkprint(ctxt, ch, limit)
-}
-
-func stkprint(ctxt *Link, ch *chain, limit int) {
-	var name string
-
-	if ch.sym != nil {
-		name = ch.sym.Name
-		if ch.sym.Attr.NoSplit() {
-			name += " (nosplit)"
-		}
-	} else {
-		name = "function pointer"
-	}
-
-	if ch.up == nil {
-		// top of chain.  ch->sym != nil.
-		if ch.sym.Attr.NoSplit() {
-			fmt.Printf("\t%d\tassumed on entry to %s\n", ch.limit, name)
-		} else {
-			fmt.Printf("\t%d\tguaranteed after split check in %s\n", ch.limit, name)
-		}
-	} else {
-		stkprint(ctxt, ch.up, ch.limit+callsize(ctxt))
-		if !haslinkregister(ctxt) {
-			fmt.Printf("\t%d\ton entry to %s\n", ch.limit, name)
-		}
-	}
-
-	if ch.limit != limit {
-		fmt.Printf("\t%d\tafter %s uses %d\n", limit, name, ch.limit-limit)
-	}
-}
-
-func Cflush() {
-	if err := coutbuf.w.Flush(); err != nil {
-		Exitf("flushing %s: %v", coutbuf.f.Name(), err)
-	}
-}
-
-func Cseek(p int64) {
-	if p == coutbuf.off {
-		return
-	}
-	Cflush()
-	if _, err := coutbuf.f.Seek(p, 0); err != nil {
-		Exitf("seeking in output [0, 1]: %v", err)
-	}
-	coutbuf.off = p
-}
-
-func Cwritestring(s string) {
-	coutbuf.WriteString(s)
-}
-
-func Cwrite(p []byte) {
-	coutbuf.Write(p)
-}
-
-func Cput(c uint8) {
-	coutbuf.w.WriteByte(c)
-	coutbuf.off++
-}
-
-func usage() {
-	fmt.Fprintf(os.Stderr, "usage: link [options] main.o\n")
-	obj.Flagprint(2)
-	Exit(2)
-}
-
-func doversion() {
-	Exitf("version %s", obj.Version)
-}
-
-type SymbolType int8
-
-const (
-	TextSym      SymbolType = 'T'
-	DataSym                 = 'D'
-	BSSSym                  = 'B'
-	UndefinedSym            = 'U'
-	TLSSym                  = 't'
-	FileSym                 = 'f'
-	FrameSym                = 'm'
-	ParamSym                = 'p'
-	AutoSym                 = 'a'
-)
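-
-// Added note, not in the original source: these single-letter codes follow the
-// traditional nm-style classification (T text, D data, B bss, U undefined, and
-// so on) and are the values genasmsym passes to its put callback.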
-
-func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, *Symbol)) {
-	// These symbols won't show up in the first loop below because we
-	// skip STEXT symbols. Normal STEXT symbols are emitted by walking textp.
-	s := ctxt.Syms.Lookup("runtime.text", 0)
-	if s.Type == obj.STEXT {
-		put(ctxt, s, s.Name, TextSym, s.Value, nil)
-	}
-
-	n := 0
-
-	// Generate base addresses for all text sections if there are multiple
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		if n == 0 {
-			n++
-			continue
-		}
-		if sect.Name != ".text" {
-			break
-		}
-		s = ctxt.Syms.ROLookup(fmt.Sprintf("runtime.text.%d", n), 0)
-		if s == nil {
-			break
-		}
-		if s.Type == obj.STEXT {
-			put(ctxt, s, s.Name, TextSym, s.Value, nil)
-		}
-		n++
-	}
-
-	s = ctxt.Syms.Lookup("runtime.etext", 0)
-	if s.Type == obj.STEXT {
-		put(ctxt, s, s.Name, TextSym, s.Value, nil)
-	}
-
-	for _, s := range ctxt.Syms.Allsym {
-		if s.Attr.Hidden() {
-			continue
-		}
-		if (s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC." {
-			continue
-		}
-		switch s.Type & obj.SMASK {
-		case obj.SCONST,
-			obj.SRODATA,
-			obj.SSYMTAB,
-			obj.SPCLNTAB,
-			obj.SINITARR,
-			obj.SDATA,
-			obj.SNOPTRDATA,
-			obj.SELFROSECT,
-			obj.SMACHOGOT,
-			obj.STYPE,
-			obj.SSTRING,
-			obj.SGOSTRING,
-			obj.SGOFUNC,
-			obj.SGCBITS,
-			obj.STYPERELRO,
-			obj.SSTRINGRELRO,
-			obj.SGOSTRINGRELRO,
-			obj.SGOFUNCRELRO,
-			obj.SGCBITSRELRO,
-			obj.SRODATARELRO,
-			obj.STYPELINK,
-			obj.SITABLINK,
-			obj.SWINDOWS:
-			if !s.Attr.Reachable() {
-				continue
-			}
-			put(ctxt, s, s.Name, DataSym, Symaddr(s), s.Gotype)
-
-		case obj.SBSS, obj.SNOPTRBSS:
-			if !s.Attr.Reachable() {
-				continue
-			}
-			if len(s.P) > 0 {
-				Errorf(s, "should not be bss (size=%d type=%d special=%v)", len(s.P), s.Type, s.Attr.Special())
-			}
-			put(ctxt, s, s.Name, BSSSym, Symaddr(s), s.Gotype)
-
-		case obj.SFILE:
-			put(ctxt, nil, s.Name, FileSym, s.Value, nil)
-
-		case obj.SHOSTOBJ:
-			if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui || Iself {
-				put(ctxt, s, s.Name, UndefinedSym, s.Value, nil)
-			}
-
-		case obj.SDYNIMPORT:
-			if !s.Attr.Reachable() {
-				continue
-			}
-			put(ctxt, s, s.Extname, UndefinedSym, 0, nil)
-
-		case obj.STLSBSS:
-			if Linkmode == LinkExternal && Headtype != obj.Hopenbsd {
-				put(ctxt, s, s.Name, TLSSym, Symaddr(s), s.Gotype)
-			}
-		}
-	}
-
-	var off int32
-	for _, s := range ctxt.Textp {
-		put(ctxt, s, s.Name, TextSym, s.Value, s.Gotype)
-
-		locals := int32(0)
-		if s.FuncInfo != nil {
-			locals = s.FuncInfo.Locals
-		}
-		// NOTE(ality): acid can't produce a stack trace without .frame symbols
-		put(ctxt, nil, ".frame", FrameSym, int64(locals)+int64(SysArch.PtrSize), nil)
-
-		if s.FuncInfo == nil {
-			continue
-		}
-		for _, a := range s.FuncInfo.Autom {
-			// Emit a or p according to actual offset, even if label is wrong.
-			// This avoids negative offsets, which cannot be encoded.
-			if a.Name != obj.A_AUTO && a.Name != obj.A_PARAM {
-				continue
-			}
-
-			// compute offset relative to FP
-			if a.Name == obj.A_PARAM {
-				off = a.Aoffset
-			} else {
-				off = a.Aoffset - int32(SysArch.PtrSize)
-			}
-
-			// FP
-			if off >= 0 {
-				put(ctxt, nil, a.Asym.Name, ParamSym, int64(off), a.Gotype)
-				continue
-			}
-
-			// SP
-			if off <= int32(-SysArch.PtrSize) {
-				put(ctxt, nil, a.Asym.Name, AutoSym, -(int64(off) + int64(SysArch.PtrSize)), a.Gotype)
-				continue
-			}
-			// Otherwise, off is addressing the saved program counter.
-			// Something underhanded is going on. Say nothing.
-		}
-	}
-
-	if ctxt.Debugvlog != 0 || *flagN {
-		ctxt.Logf("%5.2f symsize = %d\n", obj.Cputime(), uint32(Symsize))
-	}
-}
-
-func Symaddr(s *Symbol) int64 {
-	if !s.Attr.Reachable() {
-		Errorf(s, "unreachable symbol in symaddr")
-	}
-	return s.Value
-}
-
-func (ctxt *Link) xdefine(p string, t obj.SymKind, v int64) {
-	s := ctxt.Syms.Lookup(p, 0)
-	s.Type = t
-	s.Value = v
-	s.Attr |= AttrReachable
-	s.Attr |= AttrSpecial
-	s.Attr |= AttrLocal
-}
-
-func datoff(s *Symbol, addr int64) int64 {
-	if uint64(addr) >= Segdata.Vaddr {
-		return int64(uint64(addr) - Segdata.Vaddr + Segdata.Fileoff)
-	}
-	if uint64(addr) >= Segtext.Vaddr {
-		return int64(uint64(addr) - Segtext.Vaddr + Segtext.Fileoff)
-	}
-	Errorf(s, "invalid datoff %#x", addr)
-	return 0
-}
-
-func Entryvalue(ctxt *Link) int64 {
-	a := *flagEntrySymbol
-	if a[0] >= '0' && a[0] <= '9' {
-		return atolwhex(a)
-	}
-	s := ctxt.Syms.Lookup(a, 0)
-	if s.Type == 0 {
-		return *FlagTextAddr
-	}
-	if s.Type != obj.STEXT {
-		Errorf(s, "entry not text")
-	}
-	return s.Value
-}
-
-func undefsym(ctxt *Link, s *Symbol) {
-	var r *Reloc
-
-	for i := 0; i < len(s.R); i++ {
-		r = &s.R[i]
-		if r.Sym == nil { // happens for some external ARM relocs
-			continue
-		}
-		if r.Sym.Type == obj.Sxxx || r.Sym.Type == obj.SXREF {
-			Errorf(s, "undefined: %q", r.Sym.Name)
-		}
-		if !r.Sym.Attr.Reachable() && r.Type != obj.R_WEAKADDROFF {
-			Errorf(s, "relocation target %q", r.Sym.Name)
-		}
-	}
-}
-
-func (ctxt *Link) undef() {
-	for _, s := range ctxt.Textp {
-		undefsym(ctxt, s)
-	}
-	for _, s := range datap {
-		undefsym(ctxt, s)
-	}
-	if nerrors > 0 {
-		errorexit()
-	}
-}
-
-func (ctxt *Link) callgraph() {
-	if !*FlagC {
-		return
-	}
-
-	var i int
-	var r *Reloc
-	for _, s := range ctxt.Textp {
-		for i = 0; i < len(s.R); i++ {
-			r = &s.R[i]
-			if r.Sym == nil {
-				continue
-			}
-			if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM || r.Type == obj.R_CALLPOWER || r.Type == obj.R_CALLMIPS) && r.Sym.Type == obj.STEXT {
-				ctxt.Logf("%s calls %s\n", s.Name, r.Sym.Name)
-			}
-		}
-	}
-}
-
-func Rnd(v int64, r int64) int64 {
-	if r <= 0 {
-		return v
-	}
-	v += r - 1
-	c := v % r
-	if c < 0 {
-		c += r
-	}
-	v -= c
-	return v
-}
-
-func bgetc(r *bio.Reader) int {
-	c, err := r.ReadByte()
-	if err != nil {
-		if err != io.EOF {
-			log.Fatalf("reading input: %v", err)
-		}
-		return -1
-	}
-	return int(c)
-}
-
-type markKind uint8 // for postorder traversal
-const (
-	unvisited markKind = iota
-	visiting
-	visited
-)
-
-func postorder(libs []*Library) []*Library {
-	order := make([]*Library, 0, len(libs)) // hold the result
-	mark := make(map[*Library]markKind, len(libs))
-	for _, lib := range libs {
-		dfs(lib, mark, &order)
-	}
-	return order
-}
-
-func dfs(lib *Library, mark map[*Library]markKind, order *[]*Library) {
-	if mark[lib] == visited {
-		return
-	}
-	if mark[lib] == visiting {
-		panic("found import cycle while visiting " + lib.Pkg)
-	}
-	mark[lib] = visiting
-	for _, i := range lib.imports {
-		dfs(i, mark, order)
-	}
-	mark[lib] = visited
-	*order = append(*order, lib)
-}
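
The postorder/dfs pair deleted above orders libraries so that every library comes after everything it imports, panicking on an import cycle. Below is a minimal standalone sketch of the same traversal, illustrative only; the lib type and package names are hypothetical and not part of the patch.

package main

import "fmt"

type lib struct {
	pkg     string
	imports []*lib
}

type markKind uint8

const (
	unvisited markKind = iota // zero value: not yet seen
	visiting                  // on the current DFS path
	visited                   // fully processed
)

// postorder returns libs ordered so each library follows everything it imports.
func postorder(libs []*lib) []*lib {
	order := make([]*lib, 0, len(libs))
	mark := make(map[*lib]markKind, len(libs))
	var dfs func(l *lib)
	dfs = func(l *lib) {
		if mark[l] == visited {
			return
		}
		if mark[l] == visiting {
			panic("import cycle at " + l.pkg)
		}
		mark[l] = visiting
		for _, dep := range l.imports {
			dfs(dep)
		}
		mark[l] = visited
		order = append(order, l)
	}
	for _, l := range libs {
		dfs(l)
	}
	return order
}

func main() {
	runtime := &lib{pkg: "runtime"}
	fmtlib := &lib{pkg: "fmt", imports: []*lib{runtime}}
	mainlib := &lib{pkg: "main", imports: []*lib{fmtlib, runtime}}
	for _, l := range postorder([]*lib{mainlib}) {
		fmt.Println(l.pkg) // prints: runtime, fmt, main
	}
}
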
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/link.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/link.go
deleted file mode 100644
index c6a6080..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/link.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/link.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/link.go:1
-// Derived from Inferno utils/6l/l.h and related files.
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bufio"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"debug/elf"
-	"fmt"
-)
-
-// Symbol is an entry in the symbol table.
-type Symbol struct {
-	Name        string
-	Extname     string
-	Type        obj.SymKind
-	Version     int16
-	Attr        Attribute
-	Localentry  uint8
-	Dynid       int32
-	Plt         int32
-	Got         int32
-	Align       int32
-	Elfsym      int32
-	LocalElfsym int32
-	Value       int64
-	Size        int64
-	// ElfType is set for symbols read from shared libraries by ldshlibsyms. It
-	// is not set for symbols defined by the packages being linked or by symbols
-	// read by ldelf (and so is left as elf.STT_NOTYPE).
-	ElfType     elf.SymType
-	Sub         *Symbol
-	Outer       *Symbol
-	Gotype      *Symbol
-	Reachparent *Symbol
-	File        string
-	Dynimplib   string
-	Dynimpvers  string
-	Sect        *Section
-	FuncInfo    *FuncInfo
-	// P contains the raw symbol data.
-	P []byte
-	R []Reloc
-}
-
-func (s *Symbol) String() string {
-	if s.Version == 0 {
-		return s.Name
-	}
-	return fmt.Sprintf("%s<%d>", s.Name, s.Version)
-}
-
-func (s *Symbol) ElfsymForReloc() int32 {
-	// If putelfsym created a local version of this symbol, use that in all
-	// relocations.
-	if s.LocalElfsym != 0 {
-		return s.LocalElfsym
-	} else {
-		return s.Elfsym
-	}
-}
-
-// Attribute is a set of common symbol attributes.
-type Attribute int16
-
-const (
-	AttrDuplicateOK Attribute = 1 << iota
-	AttrExternal
-	AttrNoSplit
-	AttrReachable
-	AttrCgoExportDynamic
-	AttrCgoExportStatic
-	AttrSpecial
-	AttrStackCheck
-	AttrHidden
-	AttrOnList
-	AttrLocal
-	AttrReflectMethod
-	AttrMakeTypelink
-)
-
-func (a Attribute) DuplicateOK() bool      { return a&AttrDuplicateOK != 0 }
-func (a Attribute) External() bool         { return a&AttrExternal != 0 }
-func (a Attribute) NoSplit() bool          { return a&AttrNoSplit != 0 }
-func (a Attribute) Reachable() bool        { return a&AttrReachable != 0 }
-func (a Attribute) CgoExportDynamic() bool { return a&AttrCgoExportDynamic != 0 }
-func (a Attribute) CgoExportStatic() bool  { return a&AttrCgoExportStatic != 0 }
-func (a Attribute) Special() bool          { return a&AttrSpecial != 0 }
-func (a Attribute) StackCheck() bool       { return a&AttrStackCheck != 0 }
-func (a Attribute) Hidden() bool           { return a&AttrHidden != 0 }
-func (a Attribute) OnList() bool           { return a&AttrOnList != 0 }
-func (a Attribute) Local() bool            { return a&AttrLocal != 0 }
-func (a Attribute) ReflectMethod() bool    { return a&AttrReflectMethod != 0 }
-func (a Attribute) MakeTypelink() bool     { return a&AttrMakeTypelink != 0 }
-
-func (a Attribute) CgoExport() bool {
-	return a.CgoExportDynamic() || a.CgoExportStatic()
-}
-
-func (a *Attribute) Set(flag Attribute, value bool) {
-	if value {
-		*a |= flag
-	} else {
-		*a &^= flag
-	}
-}
-
-// Reloc is a relocation.
-//
-// The typical Reloc rewrites part of a symbol at offset Off to address Sym.
-// A Reloc is stored in a slice on the Symbol it rewrites.
-//
-// Relocations are generated by the compiler as the type
-// cmd/internal/obj.Reloc, which is encoded into the object file wire
-// format and decoded by the linker into this type. A separate type is
-// used to hold linker-specific state about the relocation.
-//
-// Some relocations are created by cmd/link.
-type Reloc struct {
-	Off     int32         // offset to rewrite
-	Siz     uint8         // number of bytes to rewrite, 1, 2, or 4
-	Done    uint8         // set to 1 when relocation is complete
-	Variant RelocVariant  // variation on Type
-	Type    obj.RelocType // the relocation type
-	Add     int64         // addend
-	Xadd    int64         // addend passed to external linker
-	Sym     *Symbol       // symbol the relocation addresses
-	Xsym    *Symbol       // symbol passed to external linker
-}
-
-type Auto struct {
-	Asym    *Symbol
-	Gotype  *Symbol
-	Aoffset int32
-	Name    int16
-}
-
-type Shlib struct {
-	Path            string
-	Hash            []byte
-	Deps            []string
-	File            *elf.File
-	gcdataAddresses map[*Symbol]uint64
-}
-
-// Link holds the context for writing object code from a compiler
-// or for reading that input into the linker.
-type Link struct {
-	Syms *Symbols
-
-	Arch      *sys.Arch
-	Debugvlog int
-	Bso       *bufio.Writer
-
-	Loaded bool // set after all inputs have been loaded as symbols
-
-	Tlsg       *Symbol
-	Libdir     []string
-	Library    []*Library
-	Shlibs     []Shlib
-	Tlsoffset  int
-	Textp      []*Symbol
-	Filesyms   []*Symbol
-	Moduledata *Symbol
-
-	tramps []*Symbol // trampolines
-}
-
-// The smallest possible offset from the hardware stack pointer to a local
-// variable on the stack. Architectures that use a link register save its value
-// on the stack in the function prologue and so always have a pointer between
-// the hardware stack pointer and the local variable area.
-func (ctxt *Link) FixedFrameSize() int64 {
-	switch ctxt.Arch.Family {
-	case sys.AMD64, sys.I386:
-		return 0
-	case sys.PPC64:
-		// PIC code on ppc64le requires 32 bytes of stack, and it's easier to
-		// just use that much stack always on ppc64x.
-		return int64(4 * ctxt.Arch.PtrSize)
-	default:
-		return int64(ctxt.Arch.PtrSize)
-	}
-}
-
-func (l *Link) Logf(format string, args ...interface{}) {
-	fmt.Fprintf(l.Bso, format, args...)
-	l.Bso.Flush()
-}
-
-type Library struct {
-	Objref      string
-	Srcref      string
-	File        string
-	Pkg         string
-	Shlib       string
-	hash        string
-	imports     []*Library
-	textp       []*Symbol // text symbols defined in this library
-	dupTextSyms []*Symbol // dupok text symbols defined in this library
-}
-
-func (l Library) String() string {
-	return l.Pkg
-}
-
-type FuncInfo struct {
-	Args        int32
-	Locals      int32
-	Autom       []Auto
-	Pcsp        Pcdata
-	Pcfile      Pcdata
-	Pcline      Pcdata
-	Pcdata      []Pcdata
-	Funcdata    []*Symbol
-	Funcdataoff []int64
-	File        []*Symbol
-}
-
-type Pcdata struct {
-	P []byte
-}
-
-type Pciter struct {
-	d       Pcdata
-	p       []byte
-	pc      uint32
-	nextpc  uint32
-	pcscale uint32
-	value   int32
-	start   int
-	done    int
-}
-
-// RelocVariant is a linker-internal variation on a relocation.
-type RelocVariant uint8
-
-const (
-	RV_NONE RelocVariant = iota
-	RV_POWER_LO
-	RV_POWER_HI
-	RV_POWER_HA
-	RV_POWER_DS
-
-	// RV_390_DBL is a s390x-specific relocation variant that indicates that
-	// the value to be placed into the relocatable field should first be
-	// divided by 2.
-	RV_390_DBL
-
-	RV_CHECK_OVERFLOW RelocVariant = 1 << 7
-	RV_TYPE_MASK      RelocVariant = RV_CHECK_OVERFLOW - 1
-)
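
The Attribute type deleted above packs boolean symbol properties into a 16-bit flag set, with one accessor per bit and a Set helper that clears with &^=. A small sketch of the same bit-set pattern, illustrative only; the flag names here are made up rather than the linker's full set.

package main

import "fmt"

// attr is a small bit set, mirroring the flag-per-bit layout used above.
type attr int16

const (
	attrReachable attr = 1 << iota
	attrNoSplit
	attrHidden
)

func (a attr) Reachable() bool { return a&attrReachable != 0 }
func (a attr) NoSplit() bool   { return a&attrNoSplit != 0 }

// set turns flag on or off without disturbing the other bits.
func (a *attr) set(flag attr, value bool) {
	if value {
		*a |= flag
	} else {
		*a &^= flag
	}
}

func main() {
	var a attr
	a.set(attrReachable, true)
	a.set(attrNoSplit, true)
	a.set(attrNoSplit, false)
	fmt.Println(a.Reachable(), a.NoSplit()) // true false
}
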
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/macho.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/macho.go
deleted file mode 100644
index 2bb6453..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/macho.go
+++ /dev/null
@@ -1,908 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/macho.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/macho.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"sort"
-	"strings"
-)
-
-type MachoHdr struct {
-	cpu    uint32
-	subcpu uint32
-}
-
-type MachoSect struct {
-	name    string
-	segname string
-	addr    uint64
-	size    uint64
-	off     uint32
-	align   uint32
-	reloc   uint32
-	nreloc  uint32
-	flag    uint32
-	res1    uint32
-	res2    uint32
-}
-
-type MachoSeg struct {
-	name       string
-	vsize      uint64
-	vaddr      uint64
-	fileoffset uint64
-	filesize   uint64
-	prot1      uint32
-	prot2      uint32
-	nsect      uint32
-	msect      uint32
-	sect       []MachoSect
-	flag       uint32
-}
-
-type MachoLoad struct {
-	type_ uint32
-	data  []uint32
-}
-
-/*
- * Total amount of space to reserve at the start of the file
- * for Header, PHeaders, and SHeaders.
- * May waste some.
- */
-const (
-	INITIAL_MACHO_HEADR = 4 * 1024
-)
-
-const (
-	MACHO_CPU_AMD64               = 1<<24 | 7
-	MACHO_CPU_386                 = 7
-	MACHO_SUBCPU_X86              = 3
-	MACHO_CPU_ARM                 = 12
-	MACHO_SUBCPU_ARM              = 0
-	MACHO_SUBCPU_ARMV7            = 9
-	MACHO_CPU_ARM64               = 1<<24 | 12
-	MACHO_SUBCPU_ARM64_ALL        = 0
-	MACHO32SYMSIZE                = 12
-	MACHO64SYMSIZE                = 16
-	MACHO_X86_64_RELOC_UNSIGNED   = 0
-	MACHO_X86_64_RELOC_SIGNED     = 1
-	MACHO_X86_64_RELOC_BRANCH     = 2
-	MACHO_X86_64_RELOC_GOT_LOAD   = 3
-	MACHO_X86_64_RELOC_GOT        = 4
-	MACHO_X86_64_RELOC_SUBTRACTOR = 5
-	MACHO_X86_64_RELOC_SIGNED_1   = 6
-	MACHO_X86_64_RELOC_SIGNED_2   = 7
-	MACHO_X86_64_RELOC_SIGNED_4   = 8
-	MACHO_ARM_RELOC_VANILLA       = 0
-	MACHO_ARM_RELOC_PAIR          = 1
-	MACHO_ARM_RELOC_SECTDIFF      = 2
-	MACHO_ARM_RELOC_BR24          = 5
-	MACHO_ARM64_RELOC_UNSIGNED    = 0
-	MACHO_ARM64_RELOC_BRANCH26    = 2
-	MACHO_ARM64_RELOC_PAGE21      = 3
-	MACHO_ARM64_RELOC_PAGEOFF12   = 4
-	MACHO_ARM64_RELOC_ADDEND      = 10
-	MACHO_GENERIC_RELOC_VANILLA   = 0
-	MACHO_FAKE_GOTPCREL           = 100
-)
-
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Mach-O file writing
-// http://developer.apple.com/mac/library/DOCUMENTATION/DeveloperTools/Conceptual/MachORuntime/Reference/reference.html
-
-var macho64 bool
-
-var machohdr MachoHdr
-
-var load []MachoLoad
-
-var seg [16]MachoSeg
-
-var nseg int
-
-var ndebug int
-
-var nsect int
-
-const (
-	SymKindLocal = 0 + iota
-	SymKindExtdef
-	SymKindUndef
-	NumSymKind
-)
-
-var nkind [NumSymKind]int
-
-var sortsym []*Symbol
-
-var nsortsym int
-
-// Amount of space left for adding load commands
-// that refer to dynamic libraries. Because these have
-// to go in the Mach-O header, we can't just pick a
-// "big enough" header size. The initial header is
-// one page, the non-dynamic library stuff takes
-// up about 1300 bytes; we overestimate that as 2k.
-var loadBudget int = INITIAL_MACHO_HEADR - 2*1024
-
-func Machoinit() {
-	macho64 = SysArch.RegSize == 8
-}
-
-func getMachoHdr() *MachoHdr {
-	return &machohdr
-}
-
-func newMachoLoad(type_ uint32, ndata uint32) *MachoLoad {
-	if macho64 && (ndata&1 != 0) {
-		ndata++
-	}
-
-	load = append(load, MachoLoad{})
-	l := &load[len(load)-1]
-	l.type_ = type_
-	l.data = make([]uint32, ndata)
-	return l
-}
-
-func newMachoSeg(name string, msect int) *MachoSeg {
-	if nseg >= len(seg) {
-		Exitf("too many segs")
-	}
-
-	s := &seg[nseg]
-	nseg++
-	s.name = name
-	s.msect = uint32(msect)
-	s.sect = make([]MachoSect, msect)
-	return s
-}
-
-func newMachoSect(seg *MachoSeg, name string, segname string) *MachoSect {
-	if seg.nsect >= seg.msect {
-		Exitf("too many sects in segment %s", seg.name)
-	}
-
-	s := &seg.sect[seg.nsect]
-	seg.nsect++
-	s.name = name
-	s.segname = segname
-	nsect++
-	return s
-}
-
-// Generic linking code.
-
-var dylib []string
-
-var linkoff int64
-
-func machowrite() int {
-	o1 := coutbuf.Offset()
-
-	loadsize := 4 * 4 * ndebug
-	for i := 0; i < len(load); i++ {
-		loadsize += 4 * (len(load[i].data) + 2)
-	}
-	if macho64 {
-		loadsize += 18 * 4 * nseg
-		loadsize += 20 * 4 * nsect
-	} else {
-		loadsize += 14 * 4 * nseg
-		loadsize += 17 * 4 * nsect
-	}
-
-	if macho64 {
-		Thearch.Lput(0xfeedfacf)
-	} else {
-		Thearch.Lput(0xfeedface)
-	}
-	Thearch.Lput(machohdr.cpu)
-	Thearch.Lput(machohdr.subcpu)
-	if Linkmode == LinkExternal {
-		Thearch.Lput(1) /* file type - mach object */
-	} else {
-		Thearch.Lput(2) /* file type - mach executable */
-	}
-	Thearch.Lput(uint32(len(load)) + uint32(nseg) + uint32(ndebug))
-	Thearch.Lput(uint32(loadsize))
-	Thearch.Lput(1) /* flags - no undefines */
-	if macho64 {
-		Thearch.Lput(0) /* reserved */
-	}
-
-	var j int
-	var s *MachoSeg
-	var t *MachoSect
-	for i := 0; i < nseg; i++ {
-		s = &seg[i]
-		if macho64 {
-			Thearch.Lput(25) /* segment 64 */
-			Thearch.Lput(72 + 80*s.nsect)
-			strnput(s.name, 16)
-			Thearch.Vput(s.vaddr)
-			Thearch.Vput(s.vsize)
-			Thearch.Vput(s.fileoffset)
-			Thearch.Vput(s.filesize)
-			Thearch.Lput(s.prot1)
-			Thearch.Lput(s.prot2)
-			Thearch.Lput(s.nsect)
-			Thearch.Lput(s.flag)
-		} else {
-			Thearch.Lput(1) /* segment 32 */
-			Thearch.Lput(56 + 68*s.nsect)
-			strnput(s.name, 16)
-			Thearch.Lput(uint32(s.vaddr))
-			Thearch.Lput(uint32(s.vsize))
-			Thearch.Lput(uint32(s.fileoffset))
-			Thearch.Lput(uint32(s.filesize))
-			Thearch.Lput(s.prot1)
-			Thearch.Lput(s.prot2)
-			Thearch.Lput(s.nsect)
-			Thearch.Lput(s.flag)
-		}
-
-		for j = 0; uint32(j) < s.nsect; j++ {
-			t = &s.sect[j]
-			if macho64 {
-				strnput(t.name, 16)
-				strnput(t.segname, 16)
-				Thearch.Vput(t.addr)
-				Thearch.Vput(t.size)
-				Thearch.Lput(t.off)
-				Thearch.Lput(t.align)
-				Thearch.Lput(t.reloc)
-				Thearch.Lput(t.nreloc)
-				Thearch.Lput(t.flag)
-				Thearch.Lput(t.res1) /* reserved */
-				Thearch.Lput(t.res2) /* reserved */
-				Thearch.Lput(0)      /* reserved */
-			} else {
-				strnput(t.name, 16)
-				strnput(t.segname, 16)
-				Thearch.Lput(uint32(t.addr))
-				Thearch.Lput(uint32(t.size))
-				Thearch.Lput(t.off)
-				Thearch.Lput(t.align)
-				Thearch.Lput(t.reloc)
-				Thearch.Lput(t.nreloc)
-				Thearch.Lput(t.flag)
-				Thearch.Lput(t.res1) /* reserved */
-				Thearch.Lput(t.res2) /* reserved */
-			}
-		}
-	}
-
-	var l *MachoLoad
-	for i := 0; i < len(load); i++ {
-		l = &load[i]
-		Thearch.Lput(l.type_)
-		Thearch.Lput(4 * (uint32(len(l.data)) + 2))
-		for j = 0; j < len(l.data); j++ {
-			Thearch.Lput(l.data[j])
-		}
-	}
-
-	return int(coutbuf.Offset() - o1)
-}
-
-func (ctxt *Link) domacho() {
-	if *FlagD {
-		return
-	}
-
-	// empirically, string table must begin with " \x00".
-	s := ctxt.Syms.Lookup(".machosymstr", 0)
-
-	s.Type = obj.SMACHOSYMSTR
-	s.Attr |= AttrReachable
-	Adduint8(ctxt, s, ' ')
-	Adduint8(ctxt, s, '\x00')
-
-	s = ctxt.Syms.Lookup(".machosymtab", 0)
-	s.Type = obj.SMACHOSYMTAB
-	s.Attr |= AttrReachable
-
-	if Linkmode != LinkExternal {
-		s := ctxt.Syms.Lookup(".plt", 0) // will be __symbol_stub
-		s.Type = obj.SMACHOPLT
-		s.Attr |= AttrReachable
-
-		s = ctxt.Syms.Lookup(".got", 0) // will be __nl_symbol_ptr
-		s.Type = obj.SMACHOGOT
-		s.Attr |= AttrReachable
-		s.Align = 4
-
-		s = ctxt.Syms.Lookup(".linkedit.plt", 0) // indirect table for .plt
-		s.Type = obj.SMACHOINDIRECTPLT
-		s.Attr |= AttrReachable
-
-		s = ctxt.Syms.Lookup(".linkedit.got", 0) // indirect table for .got
-		s.Type = obj.SMACHOINDIRECTGOT
-		s.Attr |= AttrReachable
-	}
-}
-
-func Machoadddynlib(lib string) {
-	// Will need to store the library name rounded up
-	// and 24 bytes of header metadata. If not enough
-	// space, grab another page of initial space at the
-	// beginning of the output file.
-	loadBudget -= (len(lib)+7)/8*8 + 24
-
-	if loadBudget < 0 {
-		HEADR += 4096
-		*FlagTextAddr += 4096
-		loadBudget += 4096
-	}
-
-	dylib = append(dylib, lib)
-}
-
-func machoshbits(ctxt *Link, mseg *MachoSeg, sect *Section, segname string) {
-	buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
-
-	var msect *MachoSect
-	if sect.Rwx&1 == 0 && segname != "__DWARF" && (SysArch.Family == sys.ARM64 ||
-		(SysArch.Family == sys.AMD64 && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive || Buildmode == BuildmodePlugin)) ||
-		(SysArch.Family == sys.ARM && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive || Buildmode == BuildmodePlugin))) {
-		// Darwin external linker on arm64 and on amd64 and arm in c-shared/c-archive buildmode
-		// complains about absolute relocs in __TEXT, so if the section is not
-		// executable, put it in __DATA segment.
-		msect = newMachoSect(mseg, buf, "__DATA")
-	} else {
-		msect = newMachoSect(mseg, buf, segname)
-	}
-
-	if sect.Rellen > 0 {
-		msect.reloc = uint32(sect.Reloff)
-		msect.nreloc = uint32(sect.Rellen / 8)
-	}
-
-	for 1<<msect.align < sect.Align {
-		msect.align++
-	}
-	msect.addr = sect.Vaddr
-	msect.size = sect.Length
-
-	if sect.Vaddr < sect.Seg.Vaddr+sect.Seg.Filelen {
-		// data in file
-		if sect.Length > sect.Seg.Vaddr+sect.Seg.Filelen-sect.Vaddr {
-			Errorf(nil, "macho cannot represent section %s crossing data and bss", sect.Name)
-		}
-		msect.off = uint32(sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr)
-	} else {
-		// zero fill
-		msect.off = 0
-
-		msect.flag |= 1
-	}
-
-	if sect.Rwx&1 != 0 {
-		msect.flag |= 0x400 /* has instructions */
-	}
-
-	if sect.Name == ".plt" {
-		msect.name = "__symbol_stub1"
-		msect.flag = 0x80000408 /* only instructions, code, symbol stubs */
-		msect.res1 = 0          //nkind[SymKindLocal];
-		msect.res2 = 6
-	}
-
-	if sect.Name == ".got" {
-		msect.name = "__nl_symbol_ptr"
-		msect.flag = 6                                                     /* section with nonlazy symbol pointers */
-		msect.res1 = uint32(ctxt.Syms.Lookup(".linkedit.plt", 0).Size / 4) /* offset into indirect symbol table */
-	}
-
-	if sect.Name == ".init_array" {
-		msect.name = "__mod_init_func"
-		msect.flag = 9 // S_MOD_INIT_FUNC_POINTERS
-	}
-
-	if segname == "__DWARF" {
-		msect.flag |= 0x02000000
-	}
-}
-
-func Asmbmacho(ctxt *Link) {
-	/* apple MACH */
-	va := *FlagTextAddr - int64(HEADR)
-
-	mh := getMachoHdr()
-	switch SysArch.Family {
-	default:
-		Exitf("unknown macho architecture: %v", SysArch.Family)
-
-	case sys.ARM:
-		mh.cpu = MACHO_CPU_ARM
-		mh.subcpu = MACHO_SUBCPU_ARMV7
-
-	case sys.AMD64:
-		mh.cpu = MACHO_CPU_AMD64
-		mh.subcpu = MACHO_SUBCPU_X86
-
-	case sys.ARM64:
-		mh.cpu = MACHO_CPU_ARM64
-		mh.subcpu = MACHO_SUBCPU_ARM64_ALL
-
-	case sys.I386:
-		mh.cpu = MACHO_CPU_386
-		mh.subcpu = MACHO_SUBCPU_X86
-	}
-
-	var ms *MachoSeg
-	if Linkmode == LinkExternal {
-		/* segment for entire file */
-		ms = newMachoSeg("", 40)
-
-		ms.fileoffset = Segtext.Fileoff
-		if SysArch.Family == sys.ARM || Buildmode == BuildmodeCArchive {
-			ms.filesize = Segdata.Fileoff + Segdata.Filelen - Segtext.Fileoff
-		} else {
-			ms.filesize = Segdwarf.Fileoff + Segdwarf.Filelen - Segtext.Fileoff
-			ms.vsize = ms.filesize
-		}
-	}
-
-	/* segment for zero page */
-	if Linkmode != LinkExternal {
-		ms = newMachoSeg("__PAGEZERO", 0)
-		ms.vsize = uint64(va)
-	}
-
-	/* text */
-	v := Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound))
-
-	if Linkmode != LinkExternal {
-		ms = newMachoSeg("__TEXT", 20)
-		ms.vaddr = uint64(va)
-		ms.vsize = uint64(v)
-		ms.fileoffset = 0
-		ms.filesize = uint64(v)
-		ms.prot1 = 7
-		ms.prot2 = 5
-	}
-
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		machoshbits(ctxt, ms, sect, "__TEXT")
-	}
-
-	/* data */
-	if Linkmode != LinkExternal {
-		w := int64(Segdata.Length)
-		ms = newMachoSeg("__DATA", 20)
-		ms.vaddr = uint64(va) + uint64(v)
-		ms.vsize = uint64(w)
-		ms.fileoffset = uint64(v)
-		ms.filesize = Segdata.Filelen
-		ms.prot1 = 3
-		ms.prot2 = 3
-	}
-
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		machoshbits(ctxt, ms, sect, "__DATA")
-	}
-
-	/* dwarf */
-	if !*FlagW {
-		if Linkmode != LinkExternal {
-			ms = newMachoSeg("__DWARF", 20)
-			ms.vaddr = Segdwarf.Vaddr
-			ms.vsize = 0
-			ms.fileoffset = Segdwarf.Fileoff
-			ms.filesize = Segdwarf.Filelen
-		}
-		for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-			machoshbits(ctxt, ms, sect, "__DWARF")
-		}
-	}
-
-	if Linkmode != LinkExternal {
-		switch SysArch.Family {
-		default:
-			Exitf("unknown macho architecture: %v", SysArch.Family)
-
-		case sys.ARM:
-			ml := newMachoLoad(5, 17+2)              /* unix thread */
-			ml.data[0] = 1                           /* thread type */
-			ml.data[1] = 17                          /* word count */
-			ml.data[2+15] = uint32(Entryvalue(ctxt)) /* start pc */
-
-		case sys.AMD64:
-			ml := newMachoLoad(5, 42+2)              /* unix thread */
-			ml.data[0] = 4                           /* thread type */
-			ml.data[1] = 42                          /* word count */
-			ml.data[2+32] = uint32(Entryvalue(ctxt)) /* start pc */
-			ml.data[2+32+1] = uint32(Entryvalue(ctxt) >> 32)
-
-		case sys.ARM64:
-			ml := newMachoLoad(5, 68+2)              /* unix thread */
-			ml.data[0] = 6                           /* thread type */
-			ml.data[1] = 68                          /* word count */
-			ml.data[2+64] = uint32(Entryvalue(ctxt)) /* start pc */
-			ml.data[2+64+1] = uint32(Entryvalue(ctxt) >> 32)
-
-		case sys.I386:
-			ml := newMachoLoad(5, 16+2)              /* unix thread */
-			ml.data[0] = 1                           /* thread type */
-			ml.data[1] = 16                          /* word count */
-			ml.data[2+10] = uint32(Entryvalue(ctxt)) /* start pc */
-		}
-	}
-
-	if !*FlagD {
-		// must match domacholink below
-		s1 := ctxt.Syms.Lookup(".machosymtab", 0)
-		s2 := ctxt.Syms.Lookup(".linkedit.plt", 0)
-		s3 := ctxt.Syms.Lookup(".linkedit.got", 0)
-		s4 := ctxt.Syms.Lookup(".machosymstr", 0)
-
-		if Linkmode != LinkExternal {
-			ms := newMachoSeg("__LINKEDIT", 0)
-			ms.vaddr = uint64(va) + uint64(v) + uint64(Rnd(int64(Segdata.Length), int64(*FlagRound)))
-			ms.vsize = uint64(s1.Size) + uint64(s2.Size) + uint64(s3.Size) + uint64(s4.Size)
-			ms.fileoffset = uint64(linkoff)
-			ms.filesize = ms.vsize
-			ms.prot1 = 7
-			ms.prot2 = 3
-		}
-
-		ml := newMachoLoad(2, 4)                                   /* LC_SYMTAB */
-		ml.data[0] = uint32(linkoff)                               /* symoff */
-		ml.data[1] = uint32(nsortsym)                              /* nsyms */
-		ml.data[2] = uint32(linkoff + s1.Size + s2.Size + s3.Size) /* stroff */
-		ml.data[3] = uint32(s4.Size)                               /* strsize */
-
-		machodysymtab(ctxt)
-
-		if Linkmode != LinkExternal {
-			ml := newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
-			ml.data[0] = 12           /* offset to string */
-			stringtouint32(ml.data[1:], "/usr/lib/dyld")
-
-			for i := 0; i < len(dylib); i++ {
-				ml = newMachoLoad(12, 4+(uint32(len(dylib[i]))+1+7)/8*2) /* LC_LOAD_DYLIB */
-				ml.data[0] = 24                                          /* offset of string from beginning of load */
-				ml.data[1] = 0                                           /* time stamp */
-				ml.data[2] = 0                                           /* version */
-				ml.data[3] = 0                                           /* compatibility version */
-				stringtouint32(ml.data[4:], dylib[i])
-			}
-		}
-	}
-
-	if Linkmode == LinkInternal {
-		// For lldb, must say LC_VERSION_MIN_MACOSX or else
-		// it won't know that this Mach-O binary is from OS X
-		// (could be iOS or WatchOS instead).
-		// Go on iOS uses linkmode=external, and linkmode=external
-		// adds this itself. So we only need this code for linkmode=internal
-		// and we can assume OS X.
-		//
-		// See golang.org/issues/12941.
-		const LC_VERSION_MIN_MACOSX = 0x24
-
-		ml := newMachoLoad(LC_VERSION_MIN_MACOSX, 2)
-		ml.data[0] = 10<<16 | 7<<8 | 0<<0 // OS X version 10.7.0
-		ml.data[1] = 10<<16 | 7<<8 | 0<<0 // SDK 10.7.0
-	}
-
-	a := machowrite()
-	if int32(a) > HEADR {
-		Exitf("HEADR too small: %d > %d", a, HEADR)
-	}
-}
-
-func symkind(s *Symbol) int {
-	if s.Type == obj.SDYNIMPORT {
-		return SymKindUndef
-	}
-	if s.Attr.CgoExport() {
-		return SymKindExtdef
-	}
-	return SymKindLocal
-}
-
-func addsym(ctxt *Link, s *Symbol, name string, type_ SymbolType, addr int64, gotype *Symbol) {
-	if s == nil {
-		return
-	}
-
-	switch type_ {
-	default:
-		return
-
-	case DataSym, BSSSym, TextSym:
-		break
-	}
-
-	if sortsym != nil {
-		sortsym[nsortsym] = s
-		nkind[symkind(s)]++
-	}
-
-	nsortsym++
-}
-
-type machoscmp []*Symbol
-
-func (x machoscmp) Len() int {
-	return len(x)
-}
-
-func (x machoscmp) Swap(i, j int) {
-	x[i], x[j] = x[j], x[i]
-}
-
-func (x machoscmp) Less(i, j int) bool {
-	s1 := x[i]
-	s2 := x[j]
-
-	k1 := symkind(s1)
-	k2 := symkind(s2)
-	if k1 != k2 {
-		return k1 < k2
-	}
-
-	return s1.Extname < s2.Extname
-}
-
-func machogenasmsym(ctxt *Link) {
-	genasmsym(ctxt, addsym)
-	for _, s := range ctxt.Syms.Allsym {
-		if s.Type == obj.SDYNIMPORT || s.Type == obj.SHOSTOBJ {
-			if s.Attr.Reachable() {
-				addsym(ctxt, s, "", DataSym, 0, nil)
-			}
-		}
-	}
-}
-
-func machosymorder(ctxt *Link) {
-	// On Mac OS X Mountain Lion, we must sort exported symbols
-	// So we sort them here and pre-allocate dynid for them
-	// See https://golang.org/issue/4029
-	for i := 0; i < len(dynexp); i++ {
-		dynexp[i].Attr |= AttrReachable
-	}
-	machogenasmsym(ctxt)
-	sortsym = make([]*Symbol, nsortsym)
-	nsortsym = 0
-	machogenasmsym(ctxt)
-	sort.Sort(machoscmp(sortsym[:nsortsym]))
-	for i := 0; i < nsortsym; i++ {
-		sortsym[i].Dynid = int32(i)
-	}
-}
-
-// machoShouldExport reports whether a symbol needs to be exported.
-//
-// When dynamically linking, all non-local variables and plugin-exported
-// symbols need to be exported.
-func machoShouldExport(ctxt *Link, s *Symbol) bool {
-	if !ctxt.DynlinkingGo() || s.Attr.Local() {
-		return false
-	}
-	if Buildmode == BuildmodePlugin && strings.HasPrefix(s.Extname, *flagPluginPath) {
-		return true
-	}
-	if strings.HasPrefix(s.Name, "type.") && !strings.HasPrefix(s.Name, "type..") {
-		// reduce runtime typemap pressure, but do not
-		// export alg functions (type..*), as these
-		// appear in pclntable.
-		return true
-	}
-	if strings.HasPrefix(s.Name, "go.link.pkghash") {
-		return true
-	}
-	return s.Type >= obj.SELFSECT // only writable sections
-}
-
-func machosymtab(ctxt *Link) {
-	symtab := ctxt.Syms.Lookup(".machosymtab", 0)
-	symstr := ctxt.Syms.Lookup(".machosymstr", 0)
-
-	for i := 0; i < nsortsym; i++ {
-		s := sortsym[i]
-		Adduint32(ctxt, symtab, uint32(symstr.Size))
-
-		export := machoShouldExport(ctxt, s)
-
-		// In normal buildmodes, only add _ to C symbols, as
-		// Go symbols have dot in the name.
-		//
-		// Do not export C symbols in plugins, as runtime C
-		// symbols like crosscall2 are in pclntab and end up
-		// pointing at the host binary, breaking unwinding.
-		// See Issue #18190.
-		cexport := !strings.Contains(s.Extname, ".") && (Buildmode != BuildmodePlugin || onlycsymbol(s))
-		if cexport || export {
-			Adduint8(ctxt, symstr, '_')
-		}
-
-		// Replace "·" with ".", because DTrace cannot handle it.
-		Addstring(symstr, strings.Replace(s.Extname, "·", ".", -1))
-
-		if s.Type == obj.SDYNIMPORT || s.Type == obj.SHOSTOBJ {
-			Adduint8(ctxt, symtab, 0x01)                // type N_EXT, external symbol
-			Adduint8(ctxt, symtab, 0)                   // no section
-			Adduint16(ctxt, symtab, 0)                  // desc
-			adduintxx(ctxt, symtab, 0, SysArch.PtrSize) // no value
-		} else {
-			if s.Attr.CgoExport() || export {
-				Adduint8(ctxt, symtab, 0x0f)
-			} else {
-				Adduint8(ctxt, symtab, 0x0e)
-			}
-			o := s
-			for o.Outer != nil {
-				o = o.Outer
-			}
-			if o.Sect == nil {
-				Errorf(s, "missing section for symbol")
-				Adduint8(ctxt, symtab, 0)
-			} else {
-				Adduint8(ctxt, symtab, uint8(o.Sect.Extnum))
-			}
-			Adduint16(ctxt, symtab, 0) // desc
-			adduintxx(ctxt, symtab, uint64(Symaddr(s)), SysArch.PtrSize)
-		}
-	}
-}
-
-func machodysymtab(ctxt *Link) {
-	ml := newMachoLoad(11, 18) /* LC_DYSYMTAB */
-
-	n := 0
-	ml.data[0] = uint32(n)                   /* ilocalsym */
-	ml.data[1] = uint32(nkind[SymKindLocal]) /* nlocalsym */
-	n += nkind[SymKindLocal]
-
-	ml.data[2] = uint32(n)                    /* iextdefsym */
-	ml.data[3] = uint32(nkind[SymKindExtdef]) /* nextdefsym */
-	n += nkind[SymKindExtdef]
-
-	ml.data[4] = uint32(n)                   /* iundefsym */
-	ml.data[5] = uint32(nkind[SymKindUndef]) /* nundefsym */
-
-	ml.data[6] = 0  /* tocoffset */
-	ml.data[7] = 0  /* ntoc */
-	ml.data[8] = 0  /* modtaboff */
-	ml.data[9] = 0  /* nmodtab */
-	ml.data[10] = 0 /* extrefsymoff */
-	ml.data[11] = 0 /* nextrefsyms */
-
-	// must match domacholink below
-	s1 := ctxt.Syms.Lookup(".machosymtab", 0)
-
-	s2 := ctxt.Syms.Lookup(".linkedit.plt", 0)
-	s3 := ctxt.Syms.Lookup(".linkedit.got", 0)
-	ml.data[12] = uint32(linkoff + s1.Size)       /* indirectsymoff */
-	ml.data[13] = uint32((s2.Size + s3.Size) / 4) /* nindirectsyms */
-
-	ml.data[14] = 0 /* extreloff */
-	ml.data[15] = 0 /* nextrel */
-	ml.data[16] = 0 /* locreloff */
-	ml.data[17] = 0 /* nlocrel */
-}
-
-func Domacholink(ctxt *Link) int64 {
-	machosymtab(ctxt)
-
-	// write data that will be linkedit section
-	s1 := ctxt.Syms.Lookup(".machosymtab", 0)
-
-	s2 := ctxt.Syms.Lookup(".linkedit.plt", 0)
-	s3 := ctxt.Syms.Lookup(".linkedit.got", 0)
-	s4 := ctxt.Syms.Lookup(".machosymstr", 0)
-
-	// Force the linkedit section to end on a 16-byte
-	// boundary. This allows pure (non-cgo) Go binaries
-	// to be code signed correctly.
-	//
-	// Apple's codesign_allocate (a helper utility for
-	// the codesign utility) can do this fine itself if
-	// it is run on a dynamic Mach-O binary. However,
-	// when it is run on a pure (non-cgo) Go binary, where
-	// the linkedit section is mostly empty, it fails to
-	// account for the extra padding that it itself adds
-	// when adding the LC_CODE_SIGNATURE load command
-	// (which must be aligned on a 16-byte boundary).
-	//
-	// By forcing the linkedit section to end on a 16-byte
-	// boundary, codesign_allocate will not need to apply
-	// any alignment padding itself, working around the
-	// issue.
-	for s4.Size%16 != 0 {
-		Adduint8(ctxt, s4, 0)
-	}
-
-	size := int(s1.Size + s2.Size + s3.Size + s4.Size)
-
-	if size > 0 {
-		linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound)) + Rnd(int64(Segdata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdwarf.Filelen), int64(*FlagRound))
-		Cseek(linkoff)
-
-		Cwrite(s1.P[:s1.Size])
-		Cwrite(s2.P[:s2.Size])
-		Cwrite(s3.P[:s3.Size])
-		Cwrite(s4.P[:s4.Size])
-	}
-
-	return Rnd(int64(size), int64(*FlagRound))
-}
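
Domacholink above appends zero bytes to the string table until the linkedit payload ends on a 16-byte boundary, so codesign_allocate does not have to add padding of its own. The padding amount can also be computed directly; a small illustrative sketch, not part of the patch:

package main

import "fmt"

// pad16 returns how many zero bytes must be appended so size becomes a
// multiple of 16, mirroring the "while size%16 != 0 append a byte" loop above.
func pad16(size int64) int64 {
	return (16 - size%16) % 16
}

func main() {
	for _, size := range []int64{0, 1, 15, 16, 100} {
		fmt.Printf("size %3d -> pad %2d -> %3d\n", size, pad16(size), size+pad16(size))
	}
}
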
-
-func machorelocsect(ctxt *Link, sect *Section, syms []*Symbol) {
-	// If main section has no bits, nothing to relocate.
-	if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
-		return
-	}
-
-	sect.Reloff = uint64(coutbuf.Offset())
-	for i, s := range syms {
-		if !s.Attr.Reachable() {
-			continue
-		}
-		if uint64(s.Value) >= sect.Vaddr {
-			syms = syms[i:]
-			break
-		}
-	}
-
-	eaddr := int32(sect.Vaddr + sect.Length)
-	for _, sym := range syms {
-		if !sym.Attr.Reachable() {
-			continue
-		}
-		if sym.Value >= int64(eaddr) {
-			break
-		}
-		for ri := 0; ri < len(sym.R); ri++ {
-			r := &sym.R[ri]
-			if r.Done != 0 {
-				continue
-			}
-			if r.Xsym == nil {
-				Errorf(sym, "missing xsym in relocation")
-				continue
-			}
-			if !r.Xsym.Attr.Reachable() {
-				Errorf(sym, "unreachable reloc %v target %v", r.Type, r.Xsym.Name)
-			}
-			if Thearch.Machoreloc1(sym, r, int64(uint64(sym.Value+int64(r.Off))-sect.Vaddr)) < 0 {
-				Errorf(sym, "unsupported obj reloc %v/%d to %s", r.Type, r.Siz, r.Sym.Name)
-			}
-		}
-	}
-
-	sect.Rellen = uint64(coutbuf.Offset()) - sect.Reloff
-}
-
-func Machoemitreloc(ctxt *Link) {
-	for coutbuf.Offset()&7 != 0 {
-		Cput(0)
-	}
-
-	machorelocsect(ctxt, Segtext.Sect, ctxt.Textp)
-	for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
-		machorelocsect(ctxt, sect, datap)
-	}
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		machorelocsect(ctxt, sect, datap)
-	}
-	for sect := Segdwarf.Sect; sect != nil; sect = sect.Next {
-		machorelocsect(ctxt, sect, dwarfp)
-	}
-}
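
The LC_VERSION_MIN_MACOSX payload written above packs an OS or SDK version triple into a single 32-bit word as major<<16 | minor<<8 | patch. A tiny sketch of that encoding, illustrative only:

package main

import "fmt"

// encodeVersion packs an x.y.z version the way the load command above does:
// major in the high 16 bits, minor and patch in one byte each.
func encodeVersion(major, minor, patch uint32) uint32 {
	return major<<16 | minor<<8 | patch
}

func decodeVersion(v uint32) (major, minor, patch uint32) {
	return v >> 16, (v >> 8) & 0xff, v & 0xff
}

func main() {
	v := encodeVersion(10, 7, 0)
	fmt.Printf("0x%06x\n", v)     // 0x0a0700
	fmt.Println(decodeVersion(v)) // 10 7 0
}
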
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/macho_combine_dwarf.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/macho_combine_dwarf.go
deleted file mode 100644
index 3c4c724..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/macho_combine_dwarf.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/macho_combine_dwarf.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/macho_combine_dwarf.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bytes"
-	"debug/macho"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"unsafe"
-)
-
-var realdwarf, linkseg *macho.Segment
-var dwarfstart, linkstart int64
-var linkoffset uint32
-
-const (
-	LC_ID_DYLIB             = 0xd
-	LC_LOAD_DYLINKER        = 0xe
-	LC_PREBOUND_DYLIB       = 0x10
-	LC_LOAD_WEAK_DYLIB      = 0x18
-	LC_UUID                 = 0x1b
-	LC_RPATH                = 0x8000001c
-	LC_CODE_SIGNATURE       = 0x1d
-	LC_SEGMENT_SPLIT_INFO   = 0x1e
-	LC_REEXPORT_DYLIB       = 0x8000001f
-	LC_ENCRYPTION_INFO      = 0x21
-	LC_DYLD_INFO            = 0x22
-	LC_DYLD_INFO_ONLY       = 0x80000022
-	LC_VERSION_MIN_MACOSX   = 0x24
-	LC_VERSION_MIN_IPHONEOS = 0x25
-	LC_FUNCTION_STARTS      = 0x26
-	LC_MAIN                 = 0x80000028
-	LC_DATA_IN_CODE         = 0x29
-	LC_SOURCE_VERSION       = 0x2A
-	LC_DYLIB_CODE_SIGN_DRS  = 0x2B
-	LC_ENCRYPTION_INFO_64   = 0x2C
-
-	dwarfMinAlign = 6  // 64 = 1 << 6
-	pageAlign     = 12 // 4096 = 1 << 12
-)
-
-type loadCmd struct {
-	Cmd macho.LoadCmd
-	Len uint32
-}
-
-type dyldInfoCmd struct {
-	Cmd                      macho.LoadCmd
-	Len                      uint32
-	RebaseOff, RebaseLen     uint32
-	BindOff, BindLen         uint32
-	WeakBindOff, WeakBindLen uint32
-	LazyBindOff, LazyBindLen uint32
-	ExportOff, ExportLen     uint32
-}
-
-type linkEditDataCmd struct {
-	Cmd              macho.LoadCmd
-	Len              uint32
-	DataOff, DataLen uint32
-}
-
-type encryptionInfoCmd struct {
-	Cmd                macho.LoadCmd
-	Len                uint32
-	CryptOff, CryptLen uint32
-	CryptId            uint32
-}
-
-type loadCmdReader struct {
-	offset, next int64
-	f            *os.File
-	order        binary.ByteOrder
-}
-
-func (r *loadCmdReader) Next() (cmd loadCmd, err error) {
-	r.offset = r.next
-	if _, err = r.f.Seek(r.offset, 0); err != nil {
-		return
-	}
-	if err = binary.Read(r.f, r.order, &cmd); err != nil {
-		return
-	}
-	r.next = r.offset + int64(cmd.Len)
-	return
-}
-
-func (r loadCmdReader) ReadAt(offset int64, data interface{}) error {
-	if _, err := r.f.Seek(r.offset+offset, 0); err != nil {
-		return err
-	}
-	return binary.Read(r.f, r.order, data)
-}
-
-func (r loadCmdReader) WriteAt(offset int64, data interface{}) error {
-	if _, err := r.f.Seek(r.offset+offset, 0); err != nil {
-		return err
-	}
-	return binary.Write(r.f, r.order, data)
-}
-
-// machoCombineDwarf merges dwarf info generated by dsymutil into a macho executable.
-// With internal linking, DWARF is embedded into the executable; this lets us do the
-// same for external linking.
-// inexe is the path to the executable with no DWARF. It must have enough room in the macho
-// header to add the DWARF sections. (Use ld's -headerpad option)
-// dsym is the path to the macho file containing DWARF from dsymutil.
-// outexe is the path where the combined executable should be saved.
-func machoCombineDwarf(inexe, dsym, outexe string) error {
-	exef, err := os.Open(inexe)
-	if err != nil {
-		return err
-	}
-	dwarff, err := os.Open(dsym)
-	if err != nil {
-		return err
-	}
-	outf, err := os.Create(outexe)
-	if err != nil {
-		return err
-	}
-	outf.Chmod(0755)
-
-	exem, err := macho.NewFile(exef)
-	if err != nil {
-		return err
-	}
-	dwarfm, err := macho.NewFile(dwarff)
-	if err != nil {
-		return err
-	}
-
-	// The string table needs to be the last thing in the file
-	// for code signing to work. So we'll need to move the
-	// linkedit section, but all the others can be copied directly.
-	linkseg = exem.Segment("__LINKEDIT")
-	if linkseg == nil {
-		return fmt.Errorf("missing __LINKEDIT segment")
-	}
-
-	if _, err = exef.Seek(0, 0); err != nil {
-		return err
-	}
-	if _, err := io.CopyN(outf, exef, int64(linkseg.Offset)); err != nil {
-		return err
-	}
-
-	realdwarf = dwarfm.Segment("__DWARF")
-	if realdwarf == nil {
-		return fmt.Errorf("missing __DWARF segment")
-	}
-
-	// Now copy the dwarf data into the output.
-	maxalign := uint32(dwarfMinAlign)
-	for _, sect := range dwarfm.Sections {
-		if sect.Align > maxalign {
-			maxalign = sect.Align
-		}
-	}
-	dwarfstart = machoCalcStart(realdwarf.Offset, linkseg.Offset, maxalign)
-	if _, err = outf.Seek(dwarfstart, 0); err != nil {
-		return err
-	}
-
-	if _, err = dwarff.Seek(int64(realdwarf.Offset), 0); err != nil {
-		return err
-	}
-	if _, err := io.CopyN(outf, dwarff, int64(realdwarf.Filesz)); err != nil {
-		return err
-	}
-
-	// And finally the linkedit section.
-	if _, err = exef.Seek(int64(linkseg.Offset), 0); err != nil {
-		return err
-	}
-	linkstart = machoCalcStart(linkseg.Offset, uint64(dwarfstart)+realdwarf.Filesz, pageAlign)
-	linkoffset = uint32(linkstart - int64(linkseg.Offset))
-	if _, err = outf.Seek(linkstart, 0); err != nil {
-		return err
-	}
-	if _, err := io.Copy(outf, exef); err != nil {
-		return err
-	}
-
-	// Now we need to update the headers.
-	cmdOffset := unsafe.Sizeof(exem.FileHeader)
-	is64bit := exem.Magic == macho.Magic64
-	if is64bit {
-		// mach_header_64 has one extra uint32.
-		cmdOffset += unsafe.Sizeof(exem.Magic)
-	}
-
-	textsect := exem.Section("__text")
-	if textsect == nil {
-		return fmt.Errorf("missing __text section")
-	}
-
-	dwarfCmdOffset := int64(cmdOffset) + int64(exem.FileHeader.Cmdsz)
-	availablePadding := int64(textsect.Offset) - dwarfCmdOffset
-	if availablePadding < int64(realdwarf.Len) {
-		return fmt.Errorf("No room to add dwarf info. Need at least %d padding bytes, found %d", realdwarf.Len, availablePadding)
-	}
-	// First, copy the dwarf load command into the header
-	if _, err = outf.Seek(dwarfCmdOffset, 0); err != nil {
-		return err
-	}
-	if _, err := io.CopyN(outf, bytes.NewReader(realdwarf.Raw()), int64(realdwarf.Len)); err != nil {
-		return err
-	}
-
-	if _, err = outf.Seek(int64(unsafe.Offsetof(exem.FileHeader.Ncmd)), 0); err != nil {
-		return err
-	}
-	if err = binary.Write(outf, exem.ByteOrder, exem.Ncmd+1); err != nil {
-		return err
-	}
-	if err = binary.Write(outf, exem.ByteOrder, exem.Cmdsz+realdwarf.Len); err != nil {
-		return err
-	}
-
-	reader := loadCmdReader{next: int64(cmdOffset), f: outf, order: exem.ByteOrder}
-	for i := uint32(0); i < exem.Ncmd; i++ {
-		cmd, err := reader.Next()
-		if err != nil {
-			return err
-		}
-		switch cmd.Cmd {
-		case macho.LoadCmdSegment64:
-			err = machoUpdateSegment(reader, &macho.Segment64{}, &macho.Section64{})
-		case macho.LoadCmdSegment:
-			err = machoUpdateSegment(reader, &macho.Segment32{}, &macho.Section32{})
-		case LC_DYLD_INFO, LC_DYLD_INFO_ONLY:
-			err = machoUpdateLoadCommand(reader, &dyldInfoCmd{}, "RebaseOff", "BindOff", "WeakBindOff", "LazyBindOff", "ExportOff")
-		case macho.LoadCmdSymtab:
-			err = machoUpdateLoadCommand(reader, &macho.SymtabCmd{}, "Symoff", "Stroff")
-		case macho.LoadCmdDysymtab:
-			err = machoUpdateLoadCommand(reader, &macho.DysymtabCmd{}, "Tocoffset", "Modtaboff", "Extrefsymoff", "Indirectsymoff", "Extreloff", "Locreloff")
-		case LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS:
-			err = machoUpdateLoadCommand(reader, &linkEditDataCmd{}, "DataOff")
-		case LC_ENCRYPTION_INFO, LC_ENCRYPTION_INFO_64:
-			err = machoUpdateLoadCommand(reader, &encryptionInfoCmd{}, "CryptOff")
-		case macho.LoadCmdDylib, macho.LoadCmdThread, macho.LoadCmdUnixThread, LC_PREBOUND_DYLIB, LC_UUID, LC_VERSION_MIN_MACOSX, LC_VERSION_MIN_IPHONEOS, LC_SOURCE_VERSION, LC_MAIN, LC_LOAD_DYLINKER, LC_LOAD_WEAK_DYLIB, LC_REEXPORT_DYLIB, LC_RPATH, LC_ID_DYLIB:
-			// Nothing to update
-		default:
-			err = fmt.Errorf("Unknown load command 0x%x (%s)\n", int(cmd.Cmd), cmd.Cmd)
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return machoUpdateDwarfHeader(&reader)
-}
-
-// machoUpdateSegment updates the load command for a moved segment.
-// Only the linkedit segment should move, and it should have 0 sections.
-// seg should be a macho.Segment32 or macho.Segment64 as appropriate.
-// sect should be a macho.Section32 or macho.Section64 as appropriate.
-func machoUpdateSegment(r loadCmdReader, seg, sect interface{}) error {
-	if err := r.ReadAt(0, seg); err != nil {
-		return err
-	}
-	segValue := reflect.ValueOf(seg)
-	offset := reflect.Indirect(segValue).FieldByName("Offset")
-
-	// Only the linkedit segment moves; anything before it is fine.
-	if offset.Uint() < linkseg.Offset {
-		return nil
-	}
-	offset.SetUint(offset.Uint() + uint64(linkoffset))
-	if err := r.WriteAt(0, seg); err != nil {
-		return err
-	}
-	// There shouldn't be any sections, but just to make sure...
-	return machoUpdateSections(r, segValue, reflect.ValueOf(sect), uint64(linkoffset))
-}
-
-func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, delta uint64) error {
-	iseg := reflect.Indirect(seg)
-	nsect := iseg.FieldByName("Nsect").Uint()
-	if nsect == 0 {
-		return nil
-	}
-	sectOffset := int64(iseg.Type().Size())
-
-	isect := reflect.Indirect(sect)
-	offsetField := isect.FieldByName("Offset")
-	reloffField := isect.FieldByName("Reloff")
-	sectSize := int64(isect.Type().Size())
-	for i := uint64(0); i < nsect; i++ {
-		if err := r.ReadAt(sectOffset, sect.Interface()); err != nil {
-			return err
-		}
-		if offsetField.Uint() != 0 {
-			offsetField.SetUint(offsetField.Uint() + delta)
-		}
-		if reloffField.Uint() != 0 {
-			reloffField.SetUint(reloffField.Uint() + delta)
-		}
-		if err := r.WriteAt(sectOffset, sect.Interface()); err != nil {
-			return err
-		}
-		sectOffset += sectSize
-	}
-	return nil
-}
-
-// machoUpdateDwarfHeader updates the DWARF segment load command.
-func machoUpdateDwarfHeader(r *loadCmdReader) error {
-	var seg, sect interface{}
-	cmd, err := r.Next()
-	if err != nil {
-		return err
-	}
-	if cmd.Cmd == macho.LoadCmdSegment64 {
-		seg = new(macho.Segment64)
-		sect = new(macho.Section64)
-	} else {
-		seg = new(macho.Segment32)
-		sect = new(macho.Section32)
-	}
-	if err := r.ReadAt(0, seg); err != nil {
-		return err
-	}
-	segValue := reflect.ValueOf(seg)
-	offset := reflect.Indirect(segValue).FieldByName("Offset")
-
-	delta := uint64(dwarfstart) - realdwarf.Offset
-	offset.SetUint(offset.Uint() + delta)
-	if err := r.WriteAt(0, seg); err != nil {
-		return err
-	}
-	return machoUpdateSections(*r, segValue, reflect.ValueOf(sect), delta)
-}
-
-func machoUpdateLoadCommand(r loadCmdReader, cmd interface{}, fields ...string) error {
-	if err := r.ReadAt(0, cmd); err != nil {
-		return err
-	}
-	value := reflect.Indirect(reflect.ValueOf(cmd))
-
-	for _, name := range fields {
-		field := value.FieldByName(name)
-		fieldval := field.Uint()
-		if fieldval >= linkseg.Offset {
-			field.SetUint(fieldval + uint64(linkoffset))
-		}
-	}
-	if err := r.WriteAt(0, cmd); err != nil {
-		return err
-	}
-	return nil
-}
-
-func machoCalcStart(origAddr, newAddr uint64, alignExp uint32) int64 {
-	align := uint64(1 << alignExp)
-	if (origAddr % align) == (newAddr % align) {
-		return int64(newAddr)
-	}
-	padding := (align - (newAddr % align))
-	padding += origAddr % align
-	return int64(padding + newAddr)
-}
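
machoCalcStart above relocates a segment to a new file offset while keeping that offset congruent to the original modulo 1<<alignExp, so alignment-relative addresses remain valid. A small standalone check of that property on made-up offsets, illustrative only:

package main

import "fmt"

// calcStart mirrors the computation above: the result is >= newAddr and
// congruent to origAddr modulo 1<<alignExp.
func calcStart(origAddr, newAddr uint64, alignExp uint32) int64 {
	align := uint64(1) << alignExp
	if origAddr%align == newAddr%align {
		return int64(newAddr)
	}
	padding := align - newAddr%align
	padding += origAddr % align
	return int64(padding + newAddr)
}

func main() {
	const pageAlign = 12 // 4096-byte pages
	for _, c := range [][2]uint64{{0x1234, 0x9000}, {0x2000, 0x9005}} {
		got := calcStart(c[0], c[1], pageAlign)
		fmt.Printf("orig %#x new %#x -> %#x (mod 4096: %#x vs %#x)\n",
			c[0], c[1], got, uint64(got)%4096, c[0]%4096)
	}
}
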
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/main.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/main.go
deleted file mode 100644
index 73d544c..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/main.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/main.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/main.go:1
-// Inferno utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bufio"
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"flag"
-	"log"
-	"os"
-	"runtime"
-	"runtime/pprof"
-	"strings"
-)
-
-var (
-	pkglistfornote []byte
-)
-
-func init() {
-	flag.Var(&Linkmode, "linkmode", "set link `mode`")
-	flag.Var(&Buildmode, "buildmode", "set build `mode`")
-	flag.Var(&Headtype, "H", "set header `type`")
-	flag.Var(&rpath, "r", "set the ELF dynamic linker search `path` to dir1:dir2:...")
-}
-
-// Flags used by the linker. The exported flags are used by the architecture-specific packages.
-var (
-	flagBuildid = flag.String("buildid", "", "record `id` as Go toolchain build id")
-
-	flagOutfile    = flag.String("o", "", "write output to `file`")
-	flagPluginPath = flag.String("pluginpath", "", "full path name for plugin")
-	FlagLinkshared = flag.Bool("linkshared", false, "link against installed Go shared libraries")
-
-	flagInstallSuffix = flag.String("installsuffix", "", "set package directory `suffix`")
-	flagDumpDep       = flag.Bool("dumpdep", false, "dump symbol dependency graph")
-	flagRace          = flag.Bool("race", false, "enable race detector")
-	flagMsan          = flag.Bool("msan", false, "enable MSan interface")
-
-	flagFieldTrack = flag.String("k", "", "set field tracking `symbol`")
-	flagLibGCC     = flag.String("libgcc", "", "compiler support lib for internal linking; use \"none\" to disable")
-	flagTmpdir     = flag.String("tmpdir", "", "use `directory` for temporary files")
-
-	flagExtld      = flag.String("extld", "", "use `linker` when linking in external mode")
-	flagExtldflags = flag.String("extldflags", "", "pass `flags` to external linker")
-	flagExtar      = flag.String("extar", "", "archive program for buildmode=c-archive")
-
-	flagA           = flag.Bool("a", false, "disassemble output")
-	FlagC           = flag.Bool("c", false, "dump call graph")
-	FlagD           = flag.Bool("d", false, "disable dynamic executable")
-	flagF           = flag.Bool("f", false, "ignore version mismatch")
-	flagG           = flag.Bool("g", false, "disable go package data checks")
-	flagH           = flag.Bool("h", false, "halt on error")
-	flagN           = flag.Bool("n", false, "dump symbol table")
-	FlagS           = flag.Bool("s", false, "disable symbol table")
-	flagU           = flag.Bool("u", false, "reject unsafe packages")
-	FlagW           = flag.Bool("w", false, "disable DWARF generation")
-	Flag8           bool // use 64-bit addresses in symbol table
-	flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker")
-	FlagDebugTramp  = flag.Int("debugtramp", 0, "debug trampolines")
-
-	FlagRound       = flag.Int("R", -1, "set address rounding `quantum`")
-	FlagTextAddr    = flag.Int64("T", -1, "set text segment `address`")
-	FlagDataAddr    = flag.Int64("D", -1, "set data segment `address`")
-	flagEntrySymbol = flag.String("E", "", "set `entry` symbol name")
-
-	cpuprofile     = flag.String("cpuprofile", "", "write cpu profile to `file`")
-	memprofile     = flag.String("memprofile", "", "write memory profile to `file`")
-	memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`")
-)
-
-// Main is the main entry point for the linker code.
-func Main() {
-	ctxt := linknew(SysArch)
-	ctxt.Bso = bufio.NewWriter(os.Stdout)
-
-	// For testing behavior of go command when tools crash silently.
-	// Undocumented, not in standard flag parser to avoid
-	// exposing in usage message.
-	for _, arg := range os.Args {
-		if arg == "-crash_for_testing" {
-			os.Exit(2)
-		}
-	}
-
-	// TODO(matloob): define these above and then check flag values here
-	if SysArch.Family == sys.AMD64 && obj.GOOS == "plan9" {
-		flag.BoolVar(&Flag8, "8", false, "use 64-bit addresses in symbol table")
-	}
-	obj.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF", addbuildinfo)
-	obj.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) })
-	obj.Flagfn0("V", "print version and exit", doversion)
-	obj.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
-	obj.Flagcount("v", "print link trace", &ctxt.Debugvlog)
-
-	obj.Flagparse(usage)
-
-	startProfile()
-	if Buildmode == BuildmodeUnset {
-		Buildmode = BuildmodeExe
-	}
-
-	if Buildmode != BuildmodeShared && flag.NArg() != 1 {
-		usage()
-	}
-
-	if *flagOutfile == "" {
-		*flagOutfile = "a.out"
-		if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-			*flagOutfile += ".exe"
-		}
-	}
-
-	interpreter = *flagInterpreter
-
-	libinit(ctxt) // creates outfile
-
-	if Headtype == obj.Hunknown {
-		Headtype.Set(obj.GOOS)
-	}
-
-	ctxt.computeTLSOffset()
-	Thearch.Archinit(ctxt)
-
-	if *FlagLinkshared && !Iself {
-		Exitf("-linkshared can only be used on elf systems")
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("HEADER = -H%d -T0x%x -D0x%x -R0x%x\n", Headtype, uint64(*FlagTextAddr), uint64(*FlagDataAddr), uint32(*FlagRound))
-	}
-
-	switch Buildmode {
-	case BuildmodeShared:
-		for i := 0; i < flag.NArg(); i++ {
-			arg := flag.Arg(i)
-			parts := strings.SplitN(arg, "=", 2)
-			var pkgpath, file string
-			if len(parts) == 1 {
-				pkgpath, file = "main", arg
-			} else {
-				pkgpath, file = parts[0], parts[1]
-			}
-			pkglistfornote = append(pkglistfornote, pkgpath...)
-			pkglistfornote = append(pkglistfornote, '\n')
-			addlibpath(ctxt, "command line", "command line", file, pkgpath, "")
-		}
-	case BuildmodePlugin:
-		addlibpath(ctxt, "command line", "command line", flag.Arg(0), *flagPluginPath, "")
-	default:
-		addlibpath(ctxt, "command line", "command line", flag.Arg(0), "main", "")
-	}
-	ctxt.loadlib()
-
-	ctxt.checkstrdata()
-	deadcode(ctxt)
-	fieldtrack(ctxt)
-	ctxt.callgraph()
-
-	ctxt.doelf()
-	if Headtype == obj.Hdarwin {
-		ctxt.domacho()
-	}
-	ctxt.dostkcheck()
-	if Headtype == obj.Hwindows || Headtype == obj.Hwindowsgui {
-		ctxt.dope()
-	}
-	ctxt.addexport()
-	Thearch.Gentext(ctxt) // trampolines, call stubs, etc.
-	ctxt.textbuildid()
-	ctxt.textaddress()
-	ctxt.pclntab()
-	ctxt.findfunctab()
-	ctxt.typelink()
-	ctxt.symtab()
-	ctxt.dodata()
-	ctxt.address()
-	ctxt.reloc()
-	Thearch.Asmb(ctxt)
-	ctxt.undef()
-	ctxt.hostlink()
-	ctxt.archive()
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f cpu time\n", obj.Cputime())
-		ctxt.Logf("%d symbols\n", len(ctxt.Syms.Allsym))
-		ctxt.Logf("%d liveness data\n", liveness)
-	}
-
-	ctxt.Bso.Flush()
-
-	errorexit()
-}
-
-type Rpath struct {
-	set bool
-	val string
-}
-
-func (r *Rpath) Set(val string) error {
-	r.set = true
-	r.val = val
-	return nil
-}
-
-func (r *Rpath) String() string {
-	return r.val
-}
-
-func startProfile() {
-	if *cpuprofile != "" {
-		f, err := os.Create(*cpuprofile)
-		if err != nil {
-			log.Fatalf("%v", err)
-		}
-		if err := pprof.StartCPUProfile(f); err != nil {
-			log.Fatalf("%v", err)
-		}
-		AtExit(pprof.StopCPUProfile)
-	}
-	if *memprofile != "" {
-		if *memprofilerate != 0 {
-			runtime.MemProfileRate = int(*memprofilerate)
-		}
-		f, err := os.Create(*memprofile)
-		if err != nil {
-			log.Fatalf("%v", err)
-		}
-		AtExit(func() {
-			runtime.GC() // profile all outstanding allocations
-			if err := pprof.WriteHeapProfile(f); err != nil {
-				log.Fatalf("%v", err)
-			}
-		})
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/objfile.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/objfile.go
deleted file mode 100644
index 90389a6..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/objfile.go
+++ /dev/null
@@ -1,645 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/objfile.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/objfile.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-// Reading of Go object files.
-//
-// Originally, Go object files were Plan 9 object files, but no longer.
-// Now they are more like standard object files, in that each symbol is defined
-// by an associated memory image (bytes) and a list of relocations to apply
-// during linking. We do not (yet?) use a standard file format, however.
-// For now, the format is chosen to be as simple as possible to read and write.
-// It may change for reasons of efficiency, or we may even switch to a
-// standard file format if there are compelling benefits to doing so.
-// See golang.org/s/go13linker for more background.
-//
-// The file format is:
-//
-//	- magic header: "\x00\x00go17ld"
-//	- byte 1 - version number
-//	- sequence of strings giving dependencies (imported packages)
-//	- empty string (marks end of sequence)
-//	- sequence of symbol references used by the defined symbols
-//	- byte 0xff (marks end of sequence)
-//	- sequence of integer lengths:
-//		- total data length
-//		- total number of relocations
-//		- total number of pcdata
-//		- total number of automatics
-//		- total number of funcdata
-//		- total number of files
-//	- data, the content of the defined symbols
-//	- sequence of defined symbols
-//	- byte 0xff (marks end of sequence)
-//	- magic footer: "\xff\xffgo17ld"
-//
-// All integers are stored in a zigzag varint format.
-// See golang.org/s/go12symtab for a definition.
-//
-// Data blocks and strings are both stored as an integer
-// followed by that many bytes.
-//
-// A symbol reference is a string name followed by a version.
-//
-// A symbol points to other symbols using an index into the symbol
-// reference sequence. Index 0 corresponds to a nil Object* pointer.
-// In the symbol layout described below "symref index" stands for this
-// index.
-//
-// Each symbol is laid out as the following fields (taken from Object*):
-//
-//	- byte 0xfe (sanity check for synchronization)
-//	- type [int]
-//	- name & version [symref index]
-//	- flags [int]
-//		1<<0 dupok
-//		1<<1 local
-//		1<<2 add to typelink table
-//	- size [int]
-//	- gotype [symref index]
-//	- p [data block]
-//	- nr [int]
-//	- r [nr relocations, sorted by off]
-//
-// If type == STEXT, there are a few more fields:
-//
-//	- args [int]
-//	- locals [int]
-//	- nosplit [int]
-//	- flags [int]
-//		1<<0 leaf
-//		1<<1 C function
-//		1<<2 function may call reflect.Type.Method
-//	- nlocal [int]
-//	- local [nlocal automatics]
-//	- pcln [pcln table]
-//
-// Each relocation has the encoding:
-//
-//	- off [int]
-//	- siz [int]
-//	- type [int]
-//	- add [int]
-//	- sym [symref index]
-//
-// Each local has the encoding:
-//
-//	- asym [symref index]
-//	- offset [int]
-//	- type [int]
-//	- gotype [symref index]
-//
-// The pcln table has the encoding:
-//
-//	- pcsp [data block]
-//	- pcfile [data block]
-//	- pcline [data block]
-//	- npcdata [int]
-//	- pcdata [npcdata data blocks]
-//	- nfuncdata [int]
-//	- funcdata [nfuncdata symref index]
-//	- funcdatasym [nfuncdata ints]
-//	- nfile [int]
-//	- file [nfile symref index]
-//
-// The file layout and meaning of type integers are architecture-independent.
-//
-// TODO(rsc): The file format is good for a first pass but needs work.
-//	- There are SymID in the object file that should really just be strings.
-
-import (
-	"bufio"
-	"bytes"
-	"bootstrap/cmd/internal/bio"
-	"bootstrap/cmd/internal/dwarf"
-	"bootstrap/cmd/internal/obj"
-	"crypto/sha1"
-	"encoding/base64"
-	"io"
-	"log"
-	"strconv"
-	"strings"
-)
-
-const (
-	startmagic = "\x00\x00go17ld"
-	endmagic   = "\xff\xffgo17ld"
-)
-
-var emptyPkg = []byte(`"".`)
-
-// objReader reads Go object files.
-type objReader struct {
-	rd              *bufio.Reader
-	ctxt            *Link
-	lib             *Library
-	pn              string
-	dupSym          *Symbol
-	localSymVersion int
-
-	// rdBuf is used by readString and readSymName as scratch for reading strings.
-	rdBuf []byte
-
-	// List of symbol references for the file being read.
-	refs        []*Symbol
-	data        []byte
-	reloc       []Reloc
-	pcdata      []Pcdata
-	autom       []Auto
-	funcdata    []*Symbol
-	funcdataoff []int64
-	file        []*Symbol
-}
-
-func LoadObjFile(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string) {
-
-	start := f.Offset()
-	r := &objReader{
-		rd:              f.Reader,
-		lib:             lib,
-		ctxt:            ctxt,
-		pn:              pn,
-		dupSym:          &Symbol{Name: ".dup"},
-		localSymVersion: ctxt.Syms.IncVersion(),
-	}
-	r.loadObjFile()
-	if f.Offset() != start+length {
-		log.Fatalf("%s: unexpected end at %d, want %d", pn, f.Offset(), start+length)
-	}
-}
-
-func (r *objReader) loadObjFile() {
-	pkg := pathtoprefix(r.lib.Pkg)
-
-	// Magic header
-	var buf [8]uint8
-	r.readFull(buf[:])
-	if string(buf[:]) != startmagic {
-		log.Fatalf("%s: invalid file start %x %x %x %x %x %x %x %x", r.pn, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7])
-	}
-
-	// Version
-	c, err := r.rd.ReadByte()
-	if err != nil || c != 1 {
-		log.Fatalf("%s: invalid file version number %d", r.pn, c)
-	}
-
-	// Autolib
-	for {
-		lib := r.readString()
-		if lib == "" {
-			break
-		}
-		l := addlib(r.ctxt, pkg, r.pn, lib)
-		if l != nil {
-			r.lib.imports = append(r.lib.imports, l)
-		}
-	}
-
-	// Symbol references
-	r.refs = []*Symbol{nil} // zeroth ref is nil
-	for {
-		c, err := r.rd.Peek(1)
-		if err != nil {
-			log.Fatalf("%s: peeking: %v", r.pn, err)
-		}
-		if c[0] == 0xff {
-			r.rd.ReadByte()
-			break
-		}
-		r.readRef()
-	}
-
-	// Lengths
-	r.readSlices()
-
-	// Data section
-	r.readFull(r.data)
-
-	// Defined symbols
-	for {
-		c, err := r.rd.Peek(1)
-		if err != nil {
-			log.Fatalf("%s: peeking: %v", r.pn, err)
-		}
-		if c[0] == 0xff {
-			break
-		}
-		r.readSym()
-	}
-
-	// Magic footer
-	buf = [8]uint8{}
-	r.readFull(buf[:])
-	if string(buf[:]) != endmagic {
-		log.Fatalf("%s: invalid file end", r.pn)
-	}
-}
-
-func (r *objReader) readSlices() {
-	n := r.readInt()
-	r.data = make([]byte, n)
-	n = r.readInt()
-	r.reloc = make([]Reloc, n)
-	n = r.readInt()
-	r.pcdata = make([]Pcdata, n)
-	n = r.readInt()
-	r.autom = make([]Auto, n)
-	n = r.readInt()
-	r.funcdata = make([]*Symbol, n)
-	r.funcdataoff = make([]int64, n)
-	n = r.readInt()
-	r.file = make([]*Symbol, n)
-}
-
-// Symbols are prefixed so their content doesn't get confused with the magic footer.
-const symPrefix = 0xfe
-
-func (r *objReader) readSym() {
-	if c, err := r.rd.ReadByte(); c != symPrefix || err != nil {
-		log.Fatalln("readSym out of sync")
-	}
-	t := obj.SymKind(r.readInt())
-	s := r.readSymIndex()
-	flags := r.readInt()
-	dupok := flags&1 != 0
-	local := flags&2 != 0
-	makeTypelink := flags&4 != 0
-	size := r.readInt()
-	typ := r.readSymIndex()
-	data := r.readData()
-	nreloc := r.readInt()
-	pkg := pathtoprefix(r.lib.Pkg)
-	isdup := false
-
-	var dup *Symbol
-	if s.Type != 0 && s.Type != obj.SXREF {
-		if (t == obj.SDATA || t == obj.SBSS || t == obj.SNOPTRBSS) && len(data) == 0 && nreloc == 0 {
-			if s.Size < int64(size) {
-				s.Size = int64(size)
-			}
-			if typ != nil && s.Gotype == nil {
-				s.Gotype = typ
-			}
-			return
-		}
-
-		if (s.Type == obj.SDATA || s.Type == obj.SBSS || s.Type == obj.SNOPTRBSS) && len(s.P) == 0 && len(s.R) == 0 {
-			goto overwrite
-		}
-		if s.Type != obj.SBSS && s.Type != obj.SNOPTRBSS && !dupok && !s.Attr.DuplicateOK() {
-			log.Fatalf("duplicate symbol %s (types %d and %d) in %s and %s", s.Name, s.Type, t, s.File, r.pn)
-		}
-		if len(s.P) > 0 {
-			dup = s
-			s = r.dupSym
-			isdup = true
-		}
-	}
-
-overwrite:
-	s.File = pkg
-	if dupok {
-		s.Attr |= AttrDuplicateOK
-	}
-	if t == obj.SXREF {
-		log.Fatalf("bad sxref")
-	}
-	if t == 0 {
-		log.Fatalf("missing type for %s in %s", s.Name, r.pn)
-	}
-	if t == obj.SBSS && (s.Type == obj.SRODATA || s.Type == obj.SNOPTRBSS) {
-		t = s.Type
-	}
-	s.Type = t
-	if s.Size < int64(size) {
-		s.Size = int64(size)
-	}
-	s.Attr.Set(AttrLocal, local)
-	s.Attr.Set(AttrMakeTypelink, makeTypelink)
-	if typ != nil {
-		s.Gotype = typ
-	}
-	if isdup && typ != nil { // if bss sym defined multiple times, take type from any one def
-		dup.Gotype = typ
-	}
-	s.P = data
-	if nreloc > 0 {
-		s.R = r.reloc[:nreloc:nreloc]
-		if !isdup {
-			r.reloc = r.reloc[nreloc:]
-		}
-
-		for i := 0; i < nreloc; i++ {
-			s.R[i] = Reloc{
-				Off:  r.readInt32(),
-				Siz:  r.readUint8(),
-				Type: obj.RelocType(r.readInt32()),
-				Add:  r.readInt64(),
-				Sym:  r.readSymIndex(),
-			}
-		}
-	}
-
-	if s.Type == obj.STEXT {
-		s.FuncInfo = new(FuncInfo)
-		pc := s.FuncInfo
-
-		pc.Args = r.readInt32()
-		pc.Locals = r.readInt32()
-		if r.readUint8() != 0 {
-			s.Attr |= AttrNoSplit
-		}
-		flags := r.readInt()
-		if flags&(1<<2) != 0 {
-			s.Attr |= AttrReflectMethod
-		}
-		n := r.readInt()
-		pc.Autom = r.autom[:n:n]
-		if !isdup {
-			r.autom = r.autom[n:]
-		}
-
-		for i := 0; i < n; i++ {
-			pc.Autom[i] = Auto{
-				Asym:    r.readSymIndex(),
-				Aoffset: r.readInt32(),
-				Name:    r.readInt16(),
-				Gotype:  r.readSymIndex(),
-			}
-		}
-
-		pc.Pcsp.P = r.readData()
-		pc.Pcfile.P = r.readData()
-		pc.Pcline.P = r.readData()
-		n = r.readInt()
-		pc.Pcdata = r.pcdata[:n:n]
-		if !isdup {
-			r.pcdata = r.pcdata[n:]
-		}
-		for i := 0; i < n; i++ {
-			pc.Pcdata[i].P = r.readData()
-		}
-		n = r.readInt()
-		pc.Funcdata = r.funcdata[:n:n]
-		pc.Funcdataoff = r.funcdataoff[:n:n]
-		if !isdup {
-			r.funcdata = r.funcdata[n:]
-			r.funcdataoff = r.funcdataoff[n:]
-		}
-		for i := 0; i < n; i++ {
-			pc.Funcdata[i] = r.readSymIndex()
-		}
-		for i := 0; i < n; i++ {
-			pc.Funcdataoff[i] = r.readInt64()
-		}
-		n = r.readInt()
-		pc.File = r.file[:n:n]
-		if !isdup {
-			r.file = r.file[n:]
-		}
-		for i := 0; i < n; i++ {
-			pc.File[i] = r.readSymIndex()
-		}
-
-		if !dupok {
-			if s.Attr.OnList() {
-				log.Fatalf("symbol %s listed multiple times", s.Name)
-			}
-			s.Attr |= AttrOnList
-			r.lib.textp = append(r.lib.textp, s)
-		} else {
-			// there may be a dup in another package
-			// put into a temp list and add to text later
-			if !isdup {
-				r.lib.dupTextSyms = append(r.lib.dupTextSyms, s)
-			} else {
-				r.lib.dupTextSyms = append(r.lib.dupTextSyms, dup)
-			}
-		}
-	}
-	if s.Type == obj.SDWARFINFO {
-		r.patchDWARFName(s)
-	}
-}
-
-func (r *objReader) patchDWARFName(s *Symbol) {
-	// This is kind of ugly. Really the package name should not
-	// even be included here.
-	if s.Size < 1 || s.P[0] != dwarf.DW_ABRV_FUNCTION {
-		return
-	}
-	e := bytes.IndexByte(s.P, 0)
-	if e == -1 {
-		return
-	}
-	p := bytes.Index(s.P[:e], emptyPkg)
-	if p == -1 {
-		return
-	}
-	pkgprefix := []byte(pathtoprefix(r.lib.Pkg) + ".")
-	patched := bytes.Replace(s.P[:e], emptyPkg, pkgprefix, -1)
-
-	s.P = append(patched, s.P[e:]...)
-	delta := int64(len(s.P)) - s.Size
-	s.Size = int64(len(s.P))
-	for i := range s.R {
-		r := &s.R[i]
-		if r.Off > int32(e) {
-			r.Off += int32(delta)
-		}
-	}
-}
-
-func (r *objReader) readFull(b []byte) {
-	_, err := io.ReadFull(r.rd, b)
-	if err != nil {
-		log.Fatalf("%s: error reading %s", r.pn, err)
-	}
-}
-
-func (r *objReader) readRef() {
-	if c, err := r.rd.ReadByte(); c != symPrefix || err != nil {
-		log.Fatalf("readSym out of sync")
-	}
-	name := r.readSymName()
-	v := r.readInt()
-	if v != 0 && v != 1 {
-		log.Fatalf("invalid symbol version %d", v)
-	}
-	if v == 1 {
-		v = r.localSymVersion
-	}
-	s := r.ctxt.Syms.Lookup(name, v)
-	r.refs = append(r.refs, s)
-
-	if s == nil || v != 0 {
-		return
-	}
-	if s.Name[0] == '$' && len(s.Name) > 5 && s.Type == 0 && len(s.P) == 0 {
-		x, err := strconv.ParseUint(s.Name[5:], 16, 64)
-		if err != nil {
-			log.Panicf("failed to parse $-symbol %s: %v", s.Name, err)
-		}
-		s.Type = obj.SRODATA
-		s.Attr |= AttrLocal
-		switch s.Name[:5] {
-		case "$f32.":
-			if uint64(uint32(x)) != x {
-				log.Panicf("$-symbol %s too large: %d", s.Name, x)
-			}
-			Adduint32(r.ctxt, s, uint32(x))
-		case "$f64.", "$i64.":
-			Adduint64(r.ctxt, s, x)
-		default:
-			log.Panicf("unrecognized $-symbol: %s", s.Name)
-		}
-		s.Attr.Set(AttrReachable, false)
-	}
-	if strings.HasPrefix(s.Name, "runtime.gcbits.") {
-		s.Attr |= AttrLocal
-	}
-}
-
-func (r *objReader) readInt64() int64 {
-	uv := uint64(0)
-	for shift := uint(0); ; shift += 7 {
-		if shift >= 64 {
-			log.Fatalf("corrupt input")
-		}
-		c, err := r.rd.ReadByte()
-		if err != nil {
-			log.Fatalln("error reading input: ", err)
-		}
-		uv |= uint64(c&0x7F) << shift
-		if c&0x80 == 0 {
-			break
-		}
-	}
-
-	return int64(uv>>1) ^ (int64(uv<<63) >> 63)
-}
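-
-// Hypothetical sketch, not part of the original objfile.go: the writer-side
-// counterpart of readInt64, shown only to illustrate the zigzag varint
-// encoding that the format comment above refers to. The name zigzagPutInt64
-// is invented for this example. For instance, -3 zigzag-encodes to the
-// unsigned value 5 and is emitted as the single byte 0x05.
-func zigzagPutInt64(buf []byte, v int64) []byte {
-	uv := uint64(v<<1) ^ uint64(v>>63) // zigzag: move the sign into bit 0
-	for uv >= 0x80 {
-		buf = append(buf, byte(uv&0x7F)|0x80) // low 7 bits with continuation bit set
-		uv >>= 7
-	}
-	return append(buf, byte(uv)) // final byte has the continuation bit clear
-}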
-
-func (r *objReader) readInt() int {
-	n := r.readInt64()
-	if int64(int(n)) != n {
-		log.Panicf("%v out of range for int", n)
-	}
-	return int(n)
-}
-
-func (r *objReader) readInt32() int32 {
-	n := r.readInt64()
-	if int64(int32(n)) != n {
-		log.Panicf("%v out of range for int32", n)
-	}
-	return int32(n)
-}
-
-func (r *objReader) readInt16() int16 {
-	n := r.readInt64()
-	if int64(int16(n)) != n {
-		log.Panicf("%v out of range for int16", n)
-	}
-	return int16(n)
-}
-
-func (r *objReader) readUint8() uint8 {
-	n := r.readInt64()
-	if int64(uint8(n)) != n {
-		log.Panicf("%v out of range for uint8", n)
-	}
-	return uint8(n)
-}
-
-func (r *objReader) readString() string {
-	n := r.readInt()
-	if cap(r.rdBuf) < n {
-		r.rdBuf = make([]byte, 2*n)
-	}
-	r.readFull(r.rdBuf[:n])
-	return string(r.rdBuf[:n])
-}
-
-func (r *objReader) readData() []byte {
-	n := r.readInt()
-	p := r.data[:n:n]
-	r.data = r.data[n:]
-	return p
-}
-
-// readSymName reads a symbol name, replacing all "". with pkg.
-func (r *objReader) readSymName() string {
-	pkg := pathtoprefix(r.lib.Pkg)
-	n := r.readInt()
-	if n == 0 {
-		r.readInt64()
-		return ""
-	}
-	if cap(r.rdBuf) < n {
-		r.rdBuf = make([]byte, 2*n)
-	}
-	origName, err := r.rd.Peek(n)
-	if err == bufio.ErrBufferFull {
-		// Long symbol names are rare but exist. One source is type
-		// symbols for types with long string forms. See #15104.
-		origName = make([]byte, n)
-		r.readFull(origName)
-	} else if err != nil {
-		log.Fatalf("%s: error reading symbol: %v", r.pn, err)
-	}
-	adjName := r.rdBuf[:0]
-	for {
-		i := bytes.Index(origName, emptyPkg)
-		if i == -1 {
-			s := string(append(adjName, origName...))
-			// Read past the peeked origName, now that we're done with it,
-			// using the rdBuf (also no longer used) as the scratch space.
-			// TODO: use bufio.Reader.Discard if available instead?
-			if err == nil {
-				r.readFull(r.rdBuf[:n])
-			}
-			r.rdBuf = adjName[:0] // in case 2*n wasn't enough
-
-			if Buildmode == BuildmodeShared || *FlagLinkshared {
-				// These types are included in the symbol
-				// table when dynamically linking. To keep
-				// binary size down, we replace the names
-				// with SHA-1 prefixes.
-				//
-				// Keep the type.. prefix, which parts of the
-				// linker (like the DWARF generator) know to mean
-				// that the symbol is not decodable.
-				//
-				// Leave type.runtime. symbols alone, because
-				// other parts of the linker manipulate them.
-				// Also leave alone symbols whose names would not
-				// be shortened by this process.
-				if len(s) > 14 && strings.HasPrefix(s, "type.") && !strings.HasPrefix(s, "type.runtime.") {
-					hash := sha1.Sum([]byte(s))
-					prefix := "type."
-					if s[5] == '.' {
-						prefix = "type.."
-					}
-					s = prefix + base64.StdEncoding.EncodeToString(hash[:6])
-				}
-			}
-			return s
-		}
-		adjName = append(adjName, origName[:i]...)
-		adjName = append(adjName, pkg...)
-		adjName = append(adjName, '.')
-		origName = origName[i+len(emptyPkg):]
-	}
-}
-
-// readSymIndex reads the index of a symbol reference and resolves it to a Symbol.
-func (r *objReader) readSymIndex() *Symbol {
-	i := r.readInt()
-	return r.refs[i]
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/pcln.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/pcln.go
deleted file mode 100644
index 6328dbf..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/pcln.go
+++ /dev/null
@@ -1,466 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/pcln.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/pcln.go:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"log"
-	"os"
-	"path/filepath"
-)
-
-// iteration over encoded pcdata tables.
-
-func getvarint(pp *[]byte) uint32 {
-	v := uint32(0)
-	p := *pp
-	for shift := 0; ; shift += 7 {
-		v |= uint32(p[0]&0x7F) << uint(shift)
-		tmp4 := p
-		p = p[1:]
-		if tmp4[0]&0x80 == 0 {
-			break
-		}
-	}
-
-	*pp = p
-	return v
-}
-
-func pciternext(it *Pciter) {
-	it.pc = it.nextpc
-	if it.done != 0 {
-		return
-	}
-	if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
-		it.done = 1
-		return
-	}
-
-	// value delta
-	v := getvarint(&it.p)
-
-	if v == 0 && it.start == 0 {
-		it.done = 1
-		return
-	}
-
-	it.start = 0
-	dv := int32(v>>1) ^ (int32(v<<31) >> 31)
-	it.value += dv
-
-	// pc delta
-	v = getvarint(&it.p)
-
-	it.nextpc = it.pc + v*it.pcscale
-}
-
-func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
-	it.d = *d
-	it.p = it.d.P
-	it.pc = 0
-	it.nextpc = 0
-	it.value = -1
-	it.start = 1
-	it.done = 0
-	it.pcscale = uint32(ctxt.Arch.MinLC)
-	pciternext(it)
-}
-
-func addvarint(d *Pcdata, val uint32) {
-	n := int32(0)
-	for v := val; v >= 0x80; v >>= 7 {
-		n++
-	}
-	n++
-
-	old := len(d.P)
-	for cap(d.P) < len(d.P)+int(n) {
-		d.P = append(d.P[:cap(d.P)], 0)
-	}
-	d.P = d.P[:old+int(n)]
-
-	p := d.P[old:]
-	var v uint32
-	for v = val; v >= 0x80; v >>= 7 {
-		p[0] = byte(v | 0x80)
-		p = p[1:]
-	}
-	p[0] = byte(v)
-}
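-
-// Hypothetical worked example, not in the original pcln.go: addvarint splits
-// the value into 7-bit groups, least significant first, and sets the top bit
-// on every byte except the last. Encoding 300 (binary 1_0010_1100) therefore
-// produces the two bytes 0xAC and 0x02.
-func addvarintExample() Pcdata {
-	var d Pcdata
-	addvarint(&d, 300) // low 7 bits 0x2C plus continuation bit -> 0xAC, then 0x02
-	return d           // d.P == []byte{0xac, 0x02}
-}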
-
-func addpctab(ctxt *Link, ftab *Symbol, off int32, d *Pcdata) int32 {
-	var start int32
-	if len(d.P) > 0 {
-		start = int32(len(ftab.P))
-		Addbytes(ftab, d.P)
-	}
-	return int32(setuint32(ctxt, ftab, int64(off), uint32(start)))
-}
-
-func ftabaddstring(ctxt *Link, ftab *Symbol, s string) int32 {
-	n := int32(len(s)) + 1
-	start := int32(len(ftab.P))
-	Symgrow(ftab, int64(start)+int64(n)+1)
-	copy(ftab.P[start:], s)
-	return start
-}
-
-func renumberfiles(ctxt *Link, files []*Symbol, d *Pcdata) {
-	var f *Symbol
-
-	// Give files numbers.
-	for i := 0; i < len(files); i++ {
-		f = files[i]
-		if f.Type != obj.SFILEPATH {
-			ctxt.Filesyms = append(ctxt.Filesyms, f)
-			f.Value = int64(len(ctxt.Filesyms))
-			f.Type = obj.SFILEPATH
-			f.Name = expandGoroot(f.Name)
-		}
-	}
-
-	newval := int32(-1)
-	var out Pcdata
-	var it Pciter
-	for pciterinit(ctxt, &it, d); it.done == 0; pciternext(&it) {
-		// value delta
-		oldval := it.value
-
-		var val int32
-		if oldval == -1 {
-			val = -1
-		} else {
-			if oldval < 0 || oldval >= int32(len(files)) {
-				log.Fatalf("bad pcdata %d", oldval)
-			}
-			val = int32(files[oldval].Value)
-		}
-
-		dv := val - newval
-		newval = val
-		v := (uint32(dv) << 1) ^ uint32(dv>>31)
-		addvarint(&out, v)
-
-		// pc delta
-		addvarint(&out, (it.nextpc-it.pc)/it.pcscale)
-	}
-
-	// terminating value delta
-	addvarint(&out, 0)
-
-	*d = out
-}
-
-// onlycsymbol reports whether this is a cgo symbol provided by the
-// runtime and only used from C code.
-func onlycsymbol(s *Symbol) bool {
-	switch s.Name {
-	case "_cgo_topofstack", "_cgo_panic", "crosscall2":
-		return true
-	}
-	return false
-}
-
-func container(s *Symbol) int {
-	if s == nil {
-		return 0
-	}
-	if Buildmode == BuildmodePlugin && Headtype == obj.Hdarwin && onlycsymbol(s) {
-		return 1
-	}
-	// We want to generate func table entries only for the "lowest level" symbols,
-	// not containers of subsymbols.
-	if s.Type&obj.SCONTAINER != 0 {
-		return 1
-	}
-	return 0
-}
-
-// pclntab initializes the pclntab symbol with
-// runtime function and file name information.
-
-var pclntabZpcln FuncInfo
-
-// These variables are used to initialize runtime.firstmoduledata, see symtab.go:symtab.
-var pclntabNfunc int32
-var pclntabFiletabOffset int32
-var pclntabPclntabOffset int32
-var pclntabFirstFunc *Symbol
-var pclntabLastFunc *Symbol
-
-func (ctxt *Link) pclntab() {
-	funcdataBytes := int64(0)
-	ftab := ctxt.Syms.Lookup("runtime.pclntab", 0)
-	ftab.Type = obj.SPCLNTAB
-	ftab.Attr |= AttrReachable
-
-	// See golang.org/s/go12symtab for the format. Briefly:
-	//	8-byte header
-	//	nfunc [thearch.ptrsize bytes]
-	//	function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes]
-	//	end PC [thearch.ptrsize bytes]
-	//	offset to file table [4 bytes]
-	nfunc := int32(0)
-
-	// Find container symbols, mark them with SCONTAINER
-	for _, s := range ctxt.Textp {
-		if s.Outer != nil {
-			s.Outer.Type |= obj.SCONTAINER
-		}
-	}
-
-	for _, s := range ctxt.Textp {
-		if container(s) == 0 {
-			nfunc++
-		}
-	}
-
-	pclntabNfunc = nfunc
-	Symgrow(ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize)+4)
-	setuint32(ctxt, ftab, 0, 0xfffffffb)
-	setuint8(ctxt, ftab, 6, uint8(SysArch.MinLC))
-	setuint8(ctxt, ftab, 7, uint8(SysArch.PtrSize))
-	setuintxx(ctxt, ftab, 8, uint64(nfunc), int64(SysArch.PtrSize))
-	pclntabPclntabOffset = int32(8 + SysArch.PtrSize)
-
-	nfunc = 0
-	var last *Symbol
-	for _, s := range ctxt.Textp {
-		last = s
-		if container(s) != 0 {
-			continue
-		}
-		pcln := s.FuncInfo
-		if pcln == nil {
-			pcln = &pclntabZpcln
-		}
-
-		if pclntabFirstFunc == nil {
-			pclntabFirstFunc = s
-		}
-
-		funcstart := int32(len(ftab.P))
-		funcstart += int32(-len(ftab.P)) & (int32(SysArch.PtrSize) - 1)
-
-		setaddr(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize), s)
-		setuintxx(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize), uint64(funcstart), int64(SysArch.PtrSize))
-
-		// Write runtime._func. Keep in sync with ../../../../runtime/runtime2.go:/_func
-		// and package debug/gosym.
-
-		// fixed size of struct, checked below
-		off := funcstart
-
-		end := funcstart + int32(SysArch.PtrSize) + 3*4 + 5*4 + int32(len(pcln.Pcdata))*4 + int32(len(pcln.Funcdata))*int32(SysArch.PtrSize)
-		if len(pcln.Funcdata) > 0 && (end&int32(SysArch.PtrSize-1) != 0) {
-			end += 4
-		}
-		Symgrow(ftab, int64(end))
-
-		// entry uintptr
-		off = int32(setaddr(ctxt, ftab, int64(off), s))
-
-		// name int32
-		off = int32(setuint32(ctxt, ftab, int64(off), uint32(ftabaddstring(ctxt, ftab, s.Name))))
-
-		// args int32
-		// TODO: Move into funcinfo.
-		args := uint32(0)
-		if s.FuncInfo != nil {
-			args = uint32(s.FuncInfo.Args)
-		}
-		off = int32(setuint32(ctxt, ftab, int64(off), args))
-
-		// frame int32
-		// This has been removed (it was never set quite correctly anyway).
-		// Nothing should use it.
-		// Leave an obviously incorrect value.
-		// TODO: Remove entirely.
-		off = int32(setuint32(ctxt, ftab, int64(off), 0x1234567))
-
-		if pcln != &pclntabZpcln {
-			renumberfiles(ctxt, pcln.File, &pcln.Pcfile)
-			if false {
-				// Sanity check the new numbering
-				var it Pciter
-				for pciterinit(ctxt, &it, &pcln.Pcfile); it.done == 0; pciternext(&it) {
-					if it.value < 1 || it.value > int32(len(ctxt.Filesyms)) {
-						Errorf(s, "bad file number in pcfile: %d not in range [1, %d]\n", it.value, len(ctxt.Filesyms))
-						errorexit()
-					}
-				}
-			}
-		}
-
-		// pcdata
-		off = addpctab(ctxt, ftab, off, &pcln.Pcsp)
-
-		off = addpctab(ctxt, ftab, off, &pcln.Pcfile)
-		off = addpctab(ctxt, ftab, off, &pcln.Pcline)
-		off = int32(setuint32(ctxt, ftab, int64(off), uint32(len(pcln.Pcdata))))
-		off = int32(setuint32(ctxt, ftab, int64(off), uint32(len(pcln.Funcdata))))
-		for i := 0; i < len(pcln.Pcdata); i++ {
-			off = addpctab(ctxt, ftab, off, &pcln.Pcdata[i])
-		}
-
-		// funcdata, must be pointer-aligned and we're only int32-aligned.
-		// Missing funcdata will be 0 (nil pointer).
-		if len(pcln.Funcdata) > 0 {
-			if off&int32(SysArch.PtrSize-1) != 0 {
-				off += 4
-			}
-			for i := 0; i < len(pcln.Funcdata); i++ {
-				if pcln.Funcdata[i] == nil {
-					setuintxx(ctxt, ftab, int64(off)+int64(SysArch.PtrSize)*int64(i), uint64(pcln.Funcdataoff[i]), int64(SysArch.PtrSize))
-				} else {
-					// TODO: Dedup.
-					funcdataBytes += pcln.Funcdata[i].Size
-
-					setaddrplus(ctxt, ftab, int64(off)+int64(SysArch.PtrSize)*int64(i), pcln.Funcdata[i], pcln.Funcdataoff[i])
-				}
-			}
-
-			off += int32(len(pcln.Funcdata)) * int32(SysArch.PtrSize)
-		}
-
-		if off != end {
-			Errorf(s, "bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, len(pcln.Pcdata), len(pcln.Funcdata), SysArch.PtrSize)
-			errorexit()
-		}
-
-		nfunc++
-	}
-
-	pclntabLastFunc = last
-	// Final entry of table is just end pc.
-	setaddrplus(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize), last, last.Size)
-
-	// Start file table.
-	start := int32(len(ftab.P))
-
-	start += int32(-len(ftab.P)) & (int32(SysArch.PtrSize) - 1)
-	pclntabFiletabOffset = start
-	setuint32(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize), uint32(start))
-
-	Symgrow(ftab, int64(start)+(int64(len(ctxt.Filesyms))+1)*4)
-	setuint32(ctxt, ftab, int64(start), uint32(len(ctxt.Filesyms)+1))
-	for i := len(ctxt.Filesyms) - 1; i >= 0; i-- {
-		s := ctxt.Filesyms[i]
-		setuint32(ctxt, ftab, int64(start)+s.Value*4, uint32(ftabaddstring(ctxt, ftab, s.Name)))
-	}
-
-	ftab.Size = int64(len(ftab.P))
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f pclntab=%d bytes, funcdata total %d bytes\n", obj.Cputime(), ftab.Size, funcdataBytes)
-	}
-}
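-
-// Hypothetical sketch, not part of the original pcln.go: the fixed 8-byte
-// pclntab header written above, spelled out for an architecture with
-// MinLC = 1 and PtrSize = 8 (for example amd64, little-endian). The header is
-// followed by nfunc, the function table, the end PC and the file table offset,
-// as described in the go12symtab layout referenced in pclntab.
-func pclntabHeaderSketch() []byte {
-	hdr := make([]byte, 8)
-	hdr[0], hdr[1], hdr[2], hdr[3] = 0xfb, 0xff, 0xff, 0xff // magic 0xfffffffb, little-endian
-	// hdr[4] and hdr[5] stay zero (padding)
-	hdr[6] = 1 // minimum instruction size (MinLC)
-	hdr[7] = 8 // pointer size in bytes
-	return hdr
-}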
-
-func expandGoroot(s string) string {
-	const n = len("$GOROOT")
-	if len(s) >= n+1 && s[:n] == "$GOROOT" && (s[n] == '/' || s[n] == '\\') {
-		root := obj.GOROOT
-		if final := os.Getenv("GOROOT_FINAL"); final != "" {
-			root = final
-		}
-		return filepath.ToSlash(filepath.Join(root, s[n:]))
-	}
-	return s
-}
-
-const (
-	BUCKETSIZE    = 256 * MINFUNC
-	SUBBUCKETS    = 16
-	SUBBUCKETSIZE = BUCKETSIZE / SUBBUCKETS
-	NOIDX         = 0x7fffffff
-)
-
-// findfunctab generates a lookup table to quickly find the containing
-// function for a pc. See src/runtime/symtab.go:findfunc for details.
-func (ctxt *Link) findfunctab() {
-	t := ctxt.Syms.Lookup("runtime.findfunctab", 0)
-	t.Type = obj.SRODATA
-	t.Attr |= AttrReachable
-	t.Attr |= AttrLocal
-
-	// find min and max address
-	min := ctxt.Textp[0].Value
-	max := int64(0)
-	for _, s := range ctxt.Textp {
-		max = s.Value + s.Size
-	}
-
-	// for each subbucket, compute the minimum of all symbol indexes
-	// that map to that subbucket.
-	n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
-
-	indexes := make([]int32, n)
-	for i := int32(0); i < n; i++ {
-		indexes[i] = NOIDX
-	}
-	idx := int32(0)
-	for i, s := range ctxt.Textp {
-		if container(s) != 0 {
-			continue
-		}
-		p := s.Value
-		var e *Symbol
-		i++
-		if i < len(ctxt.Textp) {
-			e = ctxt.Textp[i]
-		}
-		for container(e) != 0 && i < len(ctxt.Textp) {
-			e = ctxt.Textp[i]
-			i++
-		}
-		q := max
-		if e != nil {
-			q = e.Value
-		}
-
-		//print("%d: [%lld %lld] %s\n", idx, p, q, s->name);
-		for ; p < q; p += SUBBUCKETSIZE {
-			i = int((p - min) / SUBBUCKETSIZE)
-			if indexes[i] > idx {
-				indexes[i] = idx
-			}
-		}
-
-		i = int((q - 1 - min) / SUBBUCKETSIZE)
-		if indexes[i] > idx {
-			indexes[i] = idx
-		}
-		idx++
-	}
-
-	// allocate table
-	nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
-
-	Symgrow(t, 4*int64(nbuckets)+int64(n))
-
-	// fill in table
-	for i := int32(0); i < nbuckets; i++ {
-		base := indexes[i*SUBBUCKETS]
-		if base == NOIDX {
-			Errorf(nil, "hole in findfunctab")
-		}
-		setuint32(ctxt, t, int64(i)*(4+SUBBUCKETS), uint32(base))
-		for j := int32(0); j < SUBBUCKETS && i*SUBBUCKETS+j < n; j++ {
-			idx = indexes[i*SUBBUCKETS+j]
-			if idx == NOIDX {
-				Errorf(nil, "hole in findfunctab")
-			}
-			if idx-base >= 256 {
-				Errorf(nil, "too many functions in a findfunc bucket! %d/%d %d %d", i, nbuckets, j, idx-base)
-			}
-
-			setuint8(ctxt, t, int64(i)*(4+SUBBUCKETS)+4+int64(j), uint8(idx-base))
-		}
-	}
-}
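-
-// Hypothetical reader-side sketch, not part of the original pcln.go: how a pc
-// in [min, max) is mapped back to a function index using the table built by
-// findfunctab above (the real consumer is runtime.findfunc). The parameter
-// names funcTab and textMin are invented here, and a little-endian target is
-// assumed for the 4-byte bucket base.
-func findfuncIndexSketch(funcTab []byte, textMin, pc uint64) uint32 {
-	x := pc - textMin
-	b := x / BUCKETSIZE                 // bucket number
-	i := x % BUCKETSIZE / SUBBUCKETSIZE // subbucket within the bucket
-	rec := funcTab[b*(4+SUBBUCKETS):]
-	base := uint32(rec[0]) | uint32(rec[1])<<8 | uint32(rec[2])<<16 | uint32(rec[3])<<24
-	return base + uint32(rec[4+i]) // index of the containing function
-}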
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/pe.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/pe.go
deleted file mode 100644
index 6c4d34a..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/pe.go
+++ /dev/null
@@ -1,1299 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/pe.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/pe.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"encoding/binary"
-	"fmt"
-	"os"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-type IMAGE_FILE_HEADER struct {
-	Machine              uint16
-	NumberOfSections     uint16
-	TimeDateStamp        uint32
-	PointerToSymbolTable uint32
-	NumberOfSymbols      uint32
-	SizeOfOptionalHeader uint16
-	Characteristics      uint16
-}
-
-type IMAGE_DATA_DIRECTORY struct {
-	VirtualAddress uint32
-	Size           uint32
-}
-
-type IMAGE_OPTIONAL_HEADER struct {
-	Magic                       uint16
-	MajorLinkerVersion          uint8
-	MinorLinkerVersion          uint8
-	SizeOfCode                  uint32
-	SizeOfInitializedData       uint32
-	SizeOfUninitializedData     uint32
-	AddressOfEntryPoint         uint32
-	BaseOfCode                  uint32
-	BaseOfData                  uint32
-	ImageBase                   uint32
-	SectionAlignment            uint32
-	FileAlignment               uint32
-	MajorOperatingSystemVersion uint16
-	MinorOperatingSystemVersion uint16
-	MajorImageVersion           uint16
-	MinorImageVersion           uint16
-	MajorSubsystemVersion       uint16
-	MinorSubsystemVersion       uint16
-	Win32VersionValue           uint32
-	SizeOfImage                 uint32
-	SizeOfHeaders               uint32
-	CheckSum                    uint32
-	Subsystem                   uint16
-	DllCharacteristics          uint16
-	SizeOfStackReserve          uint32
-	SizeOfStackCommit           uint32
-	SizeOfHeapReserve           uint32
-	SizeOfHeapCommit            uint32
-	LoaderFlags                 uint32
-	NumberOfRvaAndSizes         uint32
-	DataDirectory               [16]IMAGE_DATA_DIRECTORY
-}
-
-type IMAGE_SECTION_HEADER struct {
-	Name                 [8]uint8
-	VirtualSize          uint32
-	VirtualAddress       uint32
-	SizeOfRawData        uint32
-	PointerToRawData     uint32
-	PointerToRelocations uint32
-	PointerToLineNumbers uint32
-	NumberOfRelocations  uint16
-	NumberOfLineNumbers  uint16
-	Characteristics      uint32
-}
-
-type IMAGE_IMPORT_DESCRIPTOR struct {
-	OriginalFirstThunk uint32
-	TimeDateStamp      uint32
-	ForwarderChain     uint32
-	Name               uint32
-	FirstThunk         uint32
-}
-
-type IMAGE_EXPORT_DIRECTORY struct {
-	Characteristics       uint32
-	TimeDateStamp         uint32
-	MajorVersion          uint16
-	MinorVersion          uint16
-	Name                  uint32
-	Base                  uint32
-	NumberOfFunctions     uint32
-	NumberOfNames         uint32
-	AddressOfFunctions    uint32
-	AddressOfNames        uint32
-	AddressOfNameOrdinals uint32
-}
-
-const (
-	PEBASE = 0x00400000
-
-	// SectionAlignment must be greater than or equal to FileAlignment.
-	// The default is the page size for the architecture.
-	PESECTALIGN = 0x1000
-
-	// FileAlignment should be a power of 2 between 512 and 64 K, inclusive.
-	// The default is 512. If the SectionAlignment is less than
-	// the architecture's page size, then FileAlignment must match SectionAlignment.
-	PEFILEALIGN = 2 << 8
-)
-
-const (
-	IMAGE_FILE_MACHINE_I386              = 0x14c
-	IMAGE_FILE_MACHINE_AMD64             = 0x8664
-	IMAGE_FILE_RELOCS_STRIPPED           = 0x0001
-	IMAGE_FILE_EXECUTABLE_IMAGE          = 0x0002
-	IMAGE_FILE_LINE_NUMS_STRIPPED        = 0x0004
-	IMAGE_FILE_LARGE_ADDRESS_AWARE       = 0x0020
-	IMAGE_FILE_32BIT_MACHINE             = 0x0100
-	IMAGE_FILE_DEBUG_STRIPPED            = 0x0200
-	IMAGE_SCN_CNT_CODE                   = 0x00000020
-	IMAGE_SCN_CNT_INITIALIZED_DATA       = 0x00000040
-	IMAGE_SCN_CNT_UNINITIALIZED_DATA     = 0x00000080
-	IMAGE_SCN_MEM_EXECUTE                = 0x20000000
-	IMAGE_SCN_MEM_READ                   = 0x40000000
-	IMAGE_SCN_MEM_WRITE                  = 0x80000000
-	IMAGE_SCN_MEM_DISCARDABLE            = 0x2000000
-	IMAGE_SCN_LNK_NRELOC_OVFL            = 0x1000000
-	IMAGE_SCN_ALIGN_32BYTES              = 0x600000
-	IMAGE_DIRECTORY_ENTRY_EXPORT         = 0
-	IMAGE_DIRECTORY_ENTRY_IMPORT         = 1
-	IMAGE_DIRECTORY_ENTRY_RESOURCE       = 2
-	IMAGE_DIRECTORY_ENTRY_EXCEPTION      = 3
-	IMAGE_DIRECTORY_ENTRY_SECURITY       = 4
-	IMAGE_DIRECTORY_ENTRY_BASERELOC      = 5
-	IMAGE_DIRECTORY_ENTRY_DEBUG          = 6
-	IMAGE_DIRECTORY_ENTRY_COPYRIGHT      = 7
-	IMAGE_DIRECTORY_ENTRY_ARCHITECTURE   = 7
-	IMAGE_DIRECTORY_ENTRY_GLOBALPTR      = 8
-	IMAGE_DIRECTORY_ENTRY_TLS            = 9
-	IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG    = 10
-	IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT   = 11
-	IMAGE_DIRECTORY_ENTRY_IAT            = 12
-	IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT   = 13
-	IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14
-	IMAGE_SUBSYSTEM_WINDOWS_GUI          = 2
-	IMAGE_SUBSYSTEM_WINDOWS_CUI          = 3
-)
-
-// X64
-type PE64_IMAGE_OPTIONAL_HEADER struct {
-	Magic                       uint16
-	MajorLinkerVersion          uint8
-	MinorLinkerVersion          uint8
-	SizeOfCode                  uint32
-	SizeOfInitializedData       uint32
-	SizeOfUninitializedData     uint32
-	AddressOfEntryPoint         uint32
-	BaseOfCode                  uint32
-	ImageBase                   uint64
-	SectionAlignment            uint32
-	FileAlignment               uint32
-	MajorOperatingSystemVersion uint16
-	MinorOperatingSystemVersion uint16
-	MajorImageVersion           uint16
-	MinorImageVersion           uint16
-	MajorSubsystemVersion       uint16
-	MinorSubsystemVersion       uint16
-	Win32VersionValue           uint32
-	SizeOfImage                 uint32
-	SizeOfHeaders               uint32
-	CheckSum                    uint32
-	Subsystem                   uint16
-	DllCharacteristics          uint16
-	SizeOfStackReserve          uint64
-	SizeOfStackCommit           uint64
-	SizeOfHeapReserve           uint64
-	SizeOfHeapCommit            uint64
-	LoaderFlags                 uint32
-	NumberOfRvaAndSizes         uint32
-	DataDirectory               [16]IMAGE_DATA_DIRECTORY
-}
-
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// PE (Portable Executable) file writing
-// http://www.microsoft.com/whdc/system/platform/firmware/PECOFF.mspx
-
-// DOS stub that prints out
-// "This program cannot be run in DOS mode."
-var dosstub = []uint8{
-	0x4d,
-	0x5a,
-	0x90,
-	0x00,
-	0x03,
-	0x00,
-	0x04,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0xff,
-	0xff,
-	0x00,
-	0x00,
-	0x8b,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x40,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x80,
-	0x00,
-	0x00,
-	0x00,
-	0x0e,
-	0x1f,
-	0xba,
-	0x0e,
-	0x00,
-	0xb4,
-	0x09,
-	0xcd,
-	0x21,
-	0xb8,
-	0x01,
-	0x4c,
-	0xcd,
-	0x21,
-	0x54,
-	0x68,
-	0x69,
-	0x73,
-	0x20,
-	0x70,
-	0x72,
-	0x6f,
-	0x67,
-	0x72,
-	0x61,
-	0x6d,
-	0x20,
-	0x63,
-	0x61,
-	0x6e,
-	0x6e,
-	0x6f,
-	0x74,
-	0x20,
-	0x62,
-	0x65,
-	0x20,
-	0x72,
-	0x75,
-	0x6e,
-	0x20,
-	0x69,
-	0x6e,
-	0x20,
-	0x44,
-	0x4f,
-	0x53,
-	0x20,
-	0x6d,
-	0x6f,
-	0x64,
-	0x65,
-	0x2e,
-	0x0d,
-	0x0d,
-	0x0a,
-	0x24,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-	0x00,
-}
-
-var rsrcsym *Symbol
-
-var strtbl []byte
-
-var PESECTHEADR int32
-
-var PEFILEHEADR int32
-
-var pe64 int
-
-var pensect int
-
-var nextsectoff int
-
-var nextfileoff int
-
-var textsect int
-
-var datasect int
-
-var bsssect int
-
-var fh IMAGE_FILE_HEADER
-
-var oh IMAGE_OPTIONAL_HEADER
-
-var oh64 PE64_IMAGE_OPTIONAL_HEADER
-
-var sh [16]IMAGE_SECTION_HEADER
-
-var dd []IMAGE_DATA_DIRECTORY
-
-type Imp struct {
-	s       *Symbol
-	off     uint64
-	next    *Imp
-	argsize int
-}
-
-type Dll struct {
-	name     string
-	nameoff  uint64
-	thunkoff uint64
-	ms       *Imp
-	next     *Dll
-}
-
-var dr *Dll
-
-var dexport [1024]*Symbol
-
-var nexport int
-
-func addpesection(ctxt *Link, name string, sectsize int, filesize int) *IMAGE_SECTION_HEADER {
-	if pensect == 16 {
-		Errorf(nil, "too many sections")
-		errorexit()
-	}
-
-	h := &sh[pensect]
-	pensect++
-	copy(h.Name[:], name)
-	h.VirtualSize = uint32(sectsize)
-	h.VirtualAddress = uint32(nextsectoff)
-	nextsectoff = int(Rnd(int64(nextsectoff)+int64(sectsize), PESECTALIGN))
-	h.PointerToRawData = uint32(nextfileoff)
-	if filesize > 0 {
-		h.SizeOfRawData = uint32(Rnd(int64(filesize), PEFILEALIGN))
-		nextfileoff += int(h.SizeOfRawData)
-	}
-
-	return h
-}
-
-func chksectoff(ctxt *Link, h *IMAGE_SECTION_HEADER, off int64) {
-	if off != int64(h.PointerToRawData) {
-		Errorf(nil, "%s.PointerToRawData = %#x, want %#x", cstring(h.Name[:]), uint64(int64(h.PointerToRawData)), uint64(off))
-		errorexit()
-	}
-}
-
-func chksectseg(ctxt *Link, h *IMAGE_SECTION_HEADER, s *Segment) {
-	if s.Vaddr-PEBASE != uint64(h.VirtualAddress) {
-		Errorf(nil, "%s.VirtualAddress = %#x, want %#x", cstring(h.Name[:]), uint64(int64(h.VirtualAddress)), uint64(int64(s.Vaddr-PEBASE)))
-		errorexit()
-	}
-
-	if s.Fileoff != uint64(h.PointerToRawData) {
-		Errorf(nil, "%s.PointerToRawData = %#x, want %#x", cstring(h.Name[:]), uint64(int64(h.PointerToRawData)), uint64(int64(s.Fileoff)))
-		errorexit()
-	}
-}
-
-func Peinit(ctxt *Link) {
-	var l int
-
-	switch SysArch.Family {
-	// 64-bit architectures
-	case sys.AMD64:
-		pe64 = 1
-
-		l = binary.Size(&oh64)
-		dd = oh64.DataDirectory[:]
-
-	// 32-bit architectures
-	default:
-		l = binary.Size(&oh)
-
-		dd = oh.DataDirectory[:]
-	}
-
-	PEFILEHEADR = int32(Rnd(int64(len(dosstub)+binary.Size(&fh)+l+binary.Size(&sh)), PEFILEALIGN))
-	PESECTHEADR = int32(Rnd(int64(PEFILEHEADR), PESECTALIGN))
-	nextsectoff = int(PESECTHEADR)
-	nextfileoff = int(PEFILEHEADR)
-
-	// some mingw libs depend on this symbol, for example, FindPESectionByName
-	ctxt.xdefine("__image_base__", obj.SDATA, PEBASE)
-
-	ctxt.xdefine("_image_base__", obj.SDATA, PEBASE)
-}
-
-func pewrite() {
-	Cseek(0)
-	if Linkmode != LinkExternal {
-		Cwrite(dosstub)
-		strnput("PE", 4)
-	}
-
-	binary.Write(&coutbuf, binary.LittleEndian, &fh)
-
-	if pe64 != 0 {
-		binary.Write(&coutbuf, binary.LittleEndian, &oh64)
-	} else {
-		binary.Write(&coutbuf, binary.LittleEndian, &oh)
-	}
-	binary.Write(&coutbuf, binary.LittleEndian, sh[:pensect])
-}
-
-func strput(s string) {
-	coutbuf.WriteString(s)
-	Cput(0)
-	// string must be padded to even size
-	if (len(s)+1)%2 != 0 {
-		Cput(0)
-	}
-}
-
-func initdynimport(ctxt *Link) *Dll {
-	var d *Dll
-
-	dr = nil
-	var m *Imp
-	for _, s := range ctxt.Syms.Allsym {
-		if !s.Attr.Reachable() || s.Type != obj.SDYNIMPORT {
-			continue
-		}
-		for d = dr; d != nil; d = d.next {
-			if d.name == s.Dynimplib {
-				m = new(Imp)
-				break
-			}
-		}
-
-		if d == nil {
-			d = new(Dll)
-			d.name = s.Dynimplib
-			d.next = dr
-			dr = d
-			m = new(Imp)
-		}
-
-		// Because external linking requires a properly stdcall-decorated name,
-		// all external symbols in runtime use %n to denote the number of
-		// uintptrs this function consumes. Store the argsize and discard
-		// the %n suffix if any.
-		m.argsize = -1
-		if i := strings.IndexByte(s.Extname, '%'); i >= 0 {
-			var err error
-			m.argsize, err = strconv.Atoi(s.Extname[i+1:])
-			if err != nil {
-				Errorf(s, "failed to parse stdcall decoration: %v", err)
-			}
-			m.argsize *= SysArch.PtrSize
-			s.Extname = s.Extname[:i]
-		}
-
-		m.s = s
-		m.next = d.ms
-		d.ms = m
-	}
-
-	if Linkmode == LinkExternal {
-		// Add real symbol name
-		for d := dr; d != nil; d = d.next {
-			for m = d.ms; m != nil; m = m.next {
-				m.s.Type = obj.SDATA
-				Symgrow(m.s, int64(SysArch.PtrSize))
-				dynName := m.s.Extname
-				// only windows/386 requires stdcall decoration
-				if SysArch.Family == sys.I386 && m.argsize >= 0 {
-					dynName += fmt.Sprintf("@%d", m.argsize)
-				}
-				dynSym := ctxt.Syms.Lookup(dynName, 0)
-				dynSym.Attr |= AttrReachable
-				dynSym.Type = obj.SHOSTOBJ
-				r := Addrel(m.s)
-				r.Sym = dynSym
-				r.Off = 0
-				r.Siz = uint8(SysArch.PtrSize)
-				r.Type = obj.R_ADDR
-			}
-		}
-	} else {
-		dynamic := ctxt.Syms.Lookup(".windynamic", 0)
-		dynamic.Attr |= AttrReachable
-		dynamic.Type = obj.SWINDOWS
-		for d := dr; d != nil; d = d.next {
-			for m = d.ms; m != nil; m = m.next {
-				m.s.Type = obj.SWINDOWS | obj.SSUB
-				m.s.Sub = dynamic.Sub
-				dynamic.Sub = m.s
-				m.s.Value = dynamic.Size
-				dynamic.Size += int64(SysArch.PtrSize)
-			}
-
-			dynamic.Size += int64(SysArch.PtrSize)
-		}
-	}
-
-	return dr
-}
-
-// peimporteddlls returns the gcc command line arguments to link all imported
-// DLLs.
-func peimporteddlls() []string {
-	var dlls []string
-
-	for d := dr; d != nil; d = d.next {
-		dlls = append(dlls, "-l"+strings.TrimSuffix(d.name, ".dll"))
-	}
-
-	return dlls
-}
-
-func addimports(ctxt *Link, datsect *IMAGE_SECTION_HEADER) {
-	startoff := coutbuf.Offset()
-	dynamic := ctxt.Syms.Lookup(".windynamic", 0)
-
-	// skip import descriptor table (will write it later)
-	n := uint64(0)
-
-	for d := dr; d != nil; d = d.next {
-		n++
-	}
-	Cseek(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1))
-
-	// write dll names
-	for d := dr; d != nil; d = d.next {
-		d.nameoff = uint64(coutbuf.Offset()) - uint64(startoff)
-		strput(d.name)
-	}
-
-	// write function names
-	var m *Imp
-	for d := dr; d != nil; d = d.next {
-		for m = d.ms; m != nil; m = m.next {
-			m.off = uint64(nextsectoff) + uint64(coutbuf.Offset()) - uint64(startoff)
-			Wputl(0) // hint
-			strput(m.s.Extname)
-		}
-	}
-
-	// write OriginalFirstThunks
-	oftbase := uint64(coutbuf.Offset()) - uint64(startoff)
-
-	n = uint64(coutbuf.Offset())
-	for d := dr; d != nil; d = d.next {
-		d.thunkoff = uint64(coutbuf.Offset()) - n
-		for m = d.ms; m != nil; m = m.next {
-			if pe64 != 0 {
-				Vputl(m.off)
-			} else {
-				Lputl(uint32(m.off))
-			}
-		}
-
-		if pe64 != 0 {
-			Vputl(0)
-		} else {
-			Lputl(0)
-		}
-	}
-
-	// add pe section and pad it at the end
-	n = uint64(coutbuf.Offset()) - uint64(startoff)
-
-	isect := addpesection(ctxt, ".idata", int(n), int(n))
-	isect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
-	chksectoff(ctxt, isect, startoff)
-	strnput("", int(uint64(isect.SizeOfRawData)-n))
-	endoff := coutbuf.Offset()
-
-	// write FirstThunks (allocated in .data section)
-	ftbase := uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
-
-	Cseek(int64(uint64(datsect.PointerToRawData) + ftbase))
-	for d := dr; d != nil; d = d.next {
-		for m = d.ms; m != nil; m = m.next {
-			if pe64 != 0 {
-				Vputl(m.off)
-			} else {
-				Lputl(uint32(m.off))
-			}
-		}
-
-		if pe64 != 0 {
-			Vputl(0)
-		} else {
-			Lputl(0)
-		}
-	}
-
-	// finally write import descriptor table
-	Cseek(startoff)
-
-	for d := dr; d != nil; d = d.next {
-		Lputl(uint32(uint64(isect.VirtualAddress) + oftbase + d.thunkoff))
-		Lputl(0)
-		Lputl(0)
-		Lputl(uint32(uint64(isect.VirtualAddress) + d.nameoff))
-		Lputl(uint32(uint64(datsect.VirtualAddress) + ftbase + d.thunkoff))
-	}
-
-	Lputl(0) //end
-	Lputl(0)
-	Lputl(0)
-	Lputl(0)
-	Lputl(0)
-
-	// update data directory
-	dd[IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress = isect.VirtualAddress
-	dd[IMAGE_DIRECTORY_ENTRY_IMPORT].Size = isect.VirtualSize
-	dd[IMAGE_DIRECTORY_ENTRY_IAT].VirtualAddress = uint32(dynamic.Value - PEBASE)
-	dd[IMAGE_DIRECTORY_ENTRY_IAT].Size = uint32(dynamic.Size)
-
-	Cseek(endoff)
-}
-
-type byExtname []*Symbol
-
-func (s byExtname) Len() int           { return len(s) }
-func (s byExtname) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s byExtname) Less(i, j int) bool { return s[i].Extname < s[j].Extname }
-
-func initdynexport(ctxt *Link) {
-	nexport = 0
-	for _, s := range ctxt.Syms.Allsym {
-		if !s.Attr.Reachable() || !s.Attr.CgoExportDynamic() {
-			continue
-		}
-		if nexport+1 > len(dexport) {
-			Errorf(s, "pe dynexport table is full")
-			errorexit()
-		}
-
-		dexport[nexport] = s
-		nexport++
-	}
-
-	sort.Sort(byExtname(dexport[:nexport]))
-}
-
-func addexports(ctxt *Link) {
-	var e IMAGE_EXPORT_DIRECTORY
-
-	size := binary.Size(&e) + 10*nexport + len(*flagOutfile) + 1
-	for i := 0; i < nexport; i++ {
-		size += len(dexport[i].Extname) + 1
-	}
-
-	if nexport == 0 {
-		return
-	}
-
-	sect := addpesection(ctxt, ".edata", size, size)
-	sect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ
-	chksectoff(ctxt, sect, coutbuf.Offset())
-	va := int(sect.VirtualAddress)
-	dd[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va)
-	dd[IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.VirtualSize
-
-	vaName := va + binary.Size(&e) + nexport*4
-	vaAddr := va + binary.Size(&e)
-	vaNa := va + binary.Size(&e) + nexport*8
-
-	e.Characteristics = 0
-	e.MajorVersion = 0
-	e.MinorVersion = 0
-	e.NumberOfFunctions = uint32(nexport)
-	e.NumberOfNames = uint32(nexport)
-	e.Name = uint32(va+binary.Size(&e)) + uint32(nexport)*10 // Program names.
-	e.Base = 1
-	e.AddressOfFunctions = uint32(vaAddr)
-	e.AddressOfNames = uint32(vaName)
-	e.AddressOfNameOrdinals = uint32(vaNa)
-
-	// put IMAGE_EXPORT_DIRECTORY
-	binary.Write(&coutbuf, binary.LittleEndian, &e)
-
-	// put EXPORT Address Table
-	for i := 0; i < nexport; i++ {
-		Lputl(uint32(dexport[i].Value - PEBASE))
-	}
-
-	// put EXPORT Name Pointer Table
-	v := int(e.Name + uint32(len(*flagOutfile)) + 1)
-
-	for i := 0; i < nexport; i++ {
-		Lputl(uint32(v))
-		v += len(dexport[i].Extname) + 1
-	}
-
-	// put EXPORT Ordinal Table
-	for i := 0; i < nexport; i++ {
-		Wputl(uint16(i))
-	}
-
-	// put Names
-	strnput(*flagOutfile, len(*flagOutfile)+1)
-
-	for i := 0; i < nexport; i++ {
-		strnput(dexport[i].Extname, len(dexport[i].Extname)+1)
-	}
-	strnput("", int(sect.SizeOfRawData-uint32(size)))
-}
-
-// perelocsect emits relocation entries for the symbols in syms that fall
-// within section sect, and returns the total number of relocations emitted.
-func perelocsect(ctxt *Link, sect *Section, syms []*Symbol) int {
-	// If main section has no bits, nothing to relocate.
-	if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
-		return 0
-	}
-
-	relocs := 0
-
-	sect.Reloff = uint64(coutbuf.Offset())
-	for i, s := range syms {
-		if !s.Attr.Reachable() {
-			continue
-		}
-		if uint64(s.Value) >= sect.Vaddr {
-			syms = syms[i:]
-			break
-		}
-	}
-
-	eaddr := int32(sect.Vaddr + sect.Length)
-	for _, sym := range syms {
-		if !sym.Attr.Reachable() {
-			continue
-		}
-		if sym.Value >= int64(eaddr) {
-			break
-		}
-		for ri := 0; ri < len(sym.R); ri++ {
-			r := &sym.R[ri]
-			if r.Done != 0 {
-				continue
-			}
-			if r.Xsym == nil {
-				Errorf(sym, "missing xsym in relocation")
-				continue
-			}
-
-			if r.Xsym.Dynid < 0 {
-				Errorf(sym, "reloc %d to non-coff symbol %s (outer=%s) %d", r.Type, r.Sym.Name, r.Xsym.Name, r.Sym.Type)
-			}
-			if !Thearch.PEreloc1(sym, r, int64(uint64(sym.Value+int64(r.Off))-PEBASE)) {
-				Errorf(sym, "unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name)
-			}
-
-			relocs++
-		}
-	}
-
-	sect.Rellen = uint64(coutbuf.Offset()) - sect.Reloff
-
-	return relocs
-}
-
-// peemitreloc emits relocation entries for go.o in external linking.
-func peemitreloc(ctxt *Link, text, data, ctors *IMAGE_SECTION_HEADER) {
-	for coutbuf.Offset()&7 != 0 {
-		Cput(0)
-	}
-
-	text.PointerToRelocations = uint32(coutbuf.Offset())
-	// first entry: extended relocs
-	Lputl(0) // placeholder for the number of relocations + 1
-	Lputl(0)
-	Wputl(0)
-
-	n := perelocsect(ctxt, Segtext.Sect, ctxt.Textp) + 1
-	for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
-		n += perelocsect(ctxt, sect, datap)
-	}
-
-	cpos := coutbuf.Offset()
-	Cseek(int64(text.PointerToRelocations))
-	Lputl(uint32(n))
-	Cseek(cpos)
-	if n > 0x10000 {
-		n = 0x10000
-		text.Characteristics |= IMAGE_SCN_LNK_NRELOC_OVFL
-	} else {
-		text.PointerToRelocations += 10 // skip the extended reloc entry
-	}
-	text.NumberOfRelocations = uint16(n - 1)
-
-	data.PointerToRelocations = uint32(cpos)
-	// first entry: extended relocs
-	Lputl(0) // placeholder for the number of relocations + 1
-	Lputl(0)
-	Wputl(0)
-
-	n = 1
-	for sect := Segdata.Sect; sect != nil; sect = sect.Next {
-		n += perelocsect(ctxt, sect, datap)
-	}
-
-	cpos = coutbuf.Offset()
-	Cseek(int64(data.PointerToRelocations))
-	Lputl(uint32(n))
-	Cseek(cpos)
-	if n > 0x10000 {
-		n = 0x10000
-		data.Characteristics |= IMAGE_SCN_LNK_NRELOC_OVFL
-	} else {
-		data.PointerToRelocations += 10 // skip the extended reloc entry
-	}
-	data.NumberOfRelocations = uint16(n - 1)
-
-	dottext := ctxt.Syms.Lookup(".text", 0)
-	ctors.NumberOfRelocations = 1
-	ctors.PointerToRelocations = uint32(coutbuf.Offset())
-	sectoff := ctors.VirtualAddress
-	Lputl(sectoff)
-	Lputl(uint32(dottext.Dynid))
-	switch obj.GOARCH {
-	default:
-		fmt.Fprintf(os.Stderr, "link: unknown architecture for PE: %q\n", obj.GOARCH)
-		os.Exit(2)
-	case "386":
-		Wputl(IMAGE_REL_I386_DIR32)
-	case "amd64":
-		Wputl(IMAGE_REL_AMD64_ADDR64)
-	}
-}
-
-func (ctxt *Link) dope() {
-	/* relocation table */
-	rel := ctxt.Syms.Lookup(".rel", 0)
-
-	rel.Attr |= AttrReachable
-	rel.Type = obj.SELFROSECT
-
-	initdynimport(ctxt)
-	initdynexport(ctxt)
-}
-
-func strtbladd(name string) int {
-	off := len(strtbl) + 4 // offset includes 4-byte length at beginning of table
-	strtbl = append(strtbl, name...)
-	strtbl = append(strtbl, 0)
-	return off
-}
-
-/*
- * For section names longer than 8 characters, the name field contains a slash (/)
- * followed by the ASCII representation of a decimal number that is an offset into
- * the string table.
- * reference: pecoff_v8.docx Page 24.
- * <http://www.microsoft.com/whdc/system/platform/firmware/PECOFFdwn.mspx>
- */
-func newPEDWARFSection(ctxt *Link, name string, size int64) *IMAGE_SECTION_HEADER {
-	if size == 0 {
-		return nil
-	}
-
-	off := strtbladd(name)
-	s := fmt.Sprintf("/%d", off)
-	h := addpesection(ctxt, s, int(size), int(size))
-	h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
-
-	return h
-}
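-
-// Hypothetical usage sketch, not part of the original pe.go: what the header
-// name of a long DWARF section looks like. With an empty string table the
-// first name added lands at offset 4, because the offset counts the 4-byte
-// length field at the start of the table, so the section is named "/4" and
-// the real name is recovered from the COFF string table.
-func longSectionNameSketch() string {
-	off := strtbladd(".debug_info") // 4 if the string table was empty
-	return fmt.Sprintf("/%d", off)  // e.g. "/4"
-}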
-
-// writePESymTableRecords writes all COFF symbol table records.
-// It returns the number of records written.
-func writePESymTableRecords(ctxt *Link) int {
-	var symcnt int
-
-	put := func(ctxt *Link, s *Symbol, name string, type_ SymbolType, addr int64, gotype *Symbol) {
-		if s == nil {
-			return
-		}
-		if s.Sect == nil && type_ != UndefinedSym {
-			return
-		}
-		switch type_ {
-		default:
-			return
-		case DataSym, BSSSym, TextSym, UndefinedSym:
-		}
-
-		// only windows/386 requires underscore prefix on external symbols
-		if SysArch.Family == sys.I386 &&
-			Linkmode == LinkExternal &&
-			(s.Type != obj.SDYNIMPORT || s.Attr.CgoExport()) &&
-			s.Name == s.Extname &&
-			s.Name != "_main" {
-			s.Name = "_" + s.Name
-		}
-
-		var typ uint16
-		var sect int
-		var value int64
-		// Note: although the address of runtime.edata (type SDATA) is at the start of the
-		// .bss section, it still belongs to the .data section, not the .bss section.
-		if uint64(s.Value) >= Segdata.Vaddr+Segdata.Filelen && s.Type != obj.SDATA && Linkmode == LinkExternal {
-			value = int64(uint64(s.Value) - Segdata.Vaddr - Segdata.Filelen)
-			sect = bsssect
-		} else if uint64(s.Value) >= Segdata.Vaddr {
-			value = int64(uint64(s.Value) - Segdata.Vaddr)
-			sect = datasect
-		} else if uint64(s.Value) >= Segtext.Vaddr {
-			value = int64(uint64(s.Value) - Segtext.Vaddr)
-			sect = textsect
-		} else if type_ == UndefinedSym {
-			typ = IMAGE_SYM_DTYPE_FUNCTION
-		} else {
-			Errorf(s, "addpesym %#x", addr)
-		}
-
-		// write COFF symbol table record
-		if len(s.Name) > 8 {
-			Lputl(0)
-			Lputl(uint32(strtbladd(s.Name)))
-		} else {
-			strnput(s.Name, 8)
-		}
-		Lputl(uint32(value))
-		Wputl(uint16(sect))
-		if typ != 0 {
-			Wputl(typ)
-		} else if Linkmode == LinkExternal {
-			Wputl(0)
-		} else {
-			Wputl(0x0308) // "array of structs"
-		}
-		Cput(2) // storage class: external
-		Cput(0) // no aux entries
-
-		s.Dynid = int32(symcnt)
-
-		symcnt++
-	}
-
-	if Linkmode == LinkExternal {
-		for d := dr; d != nil; d = d.next {
-			for m := d.ms; m != nil; m = m.next {
-				s := m.s.R[0].Xsym
-				put(ctxt, s, s.Name, UndefinedSym, 0, nil)
-			}
-		}
-
-		s := ctxt.Syms.Lookup(".text", 0)
-		if s.Type == obj.STEXT {
-			put(ctxt, s, s.Name, TextSym, s.Value, nil)
-		}
-	}
-
-	genasmsym(ctxt, put)
-
-	return symcnt
-}
-
-func addpesymtable(ctxt *Link) {
-	symtabStartPos := coutbuf.Offset()
-
-	// write COFF symbol table
-	var symcnt int
-	if !*FlagS || Linkmode == LinkExternal {
-		symcnt = writePESymTableRecords(ctxt)
-	}
-
-	// update COFF file header and section table
-	size := len(strtbl) + 4 + 18*symcnt
-	var h *IMAGE_SECTION_HEADER
-	if Linkmode != LinkExternal {
-		// We do not really need .symtab for go.o, and if we have one, ld
-		// will also include it in the exe, and that will confuse Windows.
-		h = addpesection(ctxt, ".symtab", size, size)
-		h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
-		chksectoff(ctxt, h, symtabStartPos)
-	}
-	fh.PointerToSymbolTable = uint32(symtabStartPos)
-	fh.NumberOfSymbols = uint32(symcnt)
-
-	// write COFF string table
-	Lputl(uint32(len(strtbl)) + 4)
-	for i := 0; i < len(strtbl); i++ {
-		Cput(strtbl[i])
-	}
-	if Linkmode != LinkExternal {
-		strnput("", int(h.SizeOfRawData-uint32(size)))
-	}
-}
-
-func setpersrc(ctxt *Link, sym *Symbol) {
-	if rsrcsym != nil {
-		Errorf(sym, "too many .rsrc sections")
-	}
-
-	rsrcsym = sym
-}
-
-func addpersrc(ctxt *Link) {
-	if rsrcsym == nil {
-		return
-	}
-
-	h := addpesection(ctxt, ".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
-	h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_INITIALIZED_DATA
-	chksectoff(ctxt, h, coutbuf.Offset())
-
-	// relocation
-	var p []byte
-	var r *Reloc
-	var val uint32
-	for ri := 0; ri < len(rsrcsym.R); ri++ {
-		r = &rsrcsym.R[ri]
-		p = rsrcsym.P[r.Off:]
-		val = uint32(int64(h.VirtualAddress) + r.Add)
-
-		// 32-bit little-endian
-		p[0] = byte(val)
-
-		p[1] = byte(val >> 8)
-		p[2] = byte(val >> 16)
-		p[3] = byte(val >> 24)
-	}
-
-	Cwrite(rsrcsym.P)
-	strnput("", int(int64(h.SizeOfRawData)-rsrcsym.Size))
-
-	// update data directory
-	dd[IMAGE_DIRECTORY_ENTRY_RESOURCE].VirtualAddress = h.VirtualAddress
-
-	dd[IMAGE_DIRECTORY_ENTRY_RESOURCE].Size = h.VirtualSize
-}
-
-func addinitarray(ctxt *Link) (c *IMAGE_SECTION_HEADER) {
-	// The size below was determined by the specification for array relocations,
-	// and by observing what GCC writes here. If the initarray section grows to
-	// contain more than one constructor entry, the size will need to be 8 * constructor_count.
-	// However, the entire Go runtime is initialized from just one function, so it is unlikely
-	// that this will need to grow in the future.
-	var size int
-	switch obj.GOARCH {
-	default:
-		fmt.Fprintf(os.Stderr, "link: unknown architecture for PE: %q\n", obj.GOARCH)
-		os.Exit(2)
-	case "386":
-		size = 4
-	case "amd64":
-		size = 8
-	}
-
-	c = addpesection(ctxt, ".ctors", size, size)
-	c.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ
-	c.SizeOfRawData = uint32(size)
-
-	Cseek(int64(c.PointerToRawData))
-	chksectoff(ctxt, c, coutbuf.Offset())
-	init_entry := ctxt.Syms.Lookup(*flagEntrySymbol, 0)
-	addr := uint64(init_entry.Value) - init_entry.Sect.Vaddr
-
-	switch obj.GOARCH {
-	case "386":
-		Lputl(uint32(addr))
-	case "amd64":
-		Vputl(addr)
-	}
-
-	return c
-}
-
-func Asmbpe(ctxt *Link) {
-	switch SysArch.Family {
-	default:
-		Exitf("unknown PE architecture: %v", SysArch.Family)
-	case sys.AMD64:
-		fh.Machine = IMAGE_FILE_MACHINE_AMD64
-	case sys.I386:
-		fh.Machine = IMAGE_FILE_MACHINE_I386
-	}
-
-	t := addpesection(ctxt, ".text", int(Segtext.Length), int(Segtext.Length))
-	t.Characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ
-	if Linkmode == LinkExternal {
-		// Some data symbols (e.g. masks) end up in the .text section, and they normally
-		// require larger alignment than the default text section alignment.
-		t.Characteristics |= IMAGE_SCN_ALIGN_32BYTES
-	}
-	chksectseg(ctxt, t, &Segtext)
-	textsect = pensect
-
-	var d *IMAGE_SECTION_HEADER
-	var c *IMAGE_SECTION_HEADER
-	if Linkmode != LinkExternal {
-		d = addpesection(ctxt, ".data", int(Segdata.Length), int(Segdata.Filelen))
-		d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
-		chksectseg(ctxt, d, &Segdata)
-		datasect = pensect
-	} else {
-		d = addpesection(ctxt, ".data", int(Segdata.Filelen), int(Segdata.Filelen))
-		d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES
-		chksectseg(ctxt, d, &Segdata)
-		datasect = pensect
-
-		b := addpesection(ctxt, ".bss", int(Segdata.Length-Segdata.Filelen), 0)
-		b.Characteristics = IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES
-		b.PointerToRawData = 0
-		bsssect = pensect
-
-		c = addinitarray(ctxt)
-	}
-
-	if !*FlagS {
-		dwarfaddpeheaders(ctxt)
-	}
-
-	Cseek(int64(nextfileoff))
-	if Linkmode != LinkExternal {
-		addimports(ctxt, d)
-		addexports(ctxt)
-	}
-	addpesymtable(ctxt)
-	addpersrc(ctxt)
-	if Linkmode == LinkExternal {
-		peemitreloc(ctxt, t, d, c)
-	}
-
-	fh.NumberOfSections = uint16(pensect)
-
-	// Being able to produce identical output for identical input is
-	// much more beneficial than having a build timestamp in the header.
-	fh.TimeDateStamp = 0
-
-	if Linkmode == LinkExternal {
-		fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED
-	} else {
-		fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED
-	}
-	if pe64 != 0 {
-		fh.SizeOfOptionalHeader = uint16(binary.Size(&oh64))
-		fh.Characteristics |= IMAGE_FILE_LARGE_ADDRESS_AWARE
-		oh64.Magic = 0x20b // PE32+
-	} else {
-		fh.SizeOfOptionalHeader = uint16(binary.Size(&oh))
-		fh.Characteristics |= IMAGE_FILE_32BIT_MACHINE
-		oh.Magic = 0x10b // PE32
-		oh.BaseOfData = d.VirtualAddress
-	}
-
-	// Fill out both oh64 and oh. We only use one. Oh well.
-	oh64.MajorLinkerVersion = 3
-
-	oh.MajorLinkerVersion = 3
-	oh64.MinorLinkerVersion = 0
-	oh.MinorLinkerVersion = 0
-	oh64.SizeOfCode = t.SizeOfRawData
-	oh.SizeOfCode = t.SizeOfRawData
-	oh64.SizeOfInitializedData = d.SizeOfRawData
-	oh.SizeOfInitializedData = d.SizeOfRawData
-	oh64.SizeOfUninitializedData = 0
-	oh.SizeOfUninitializedData = 0
-	if Linkmode != LinkExternal {
-		oh64.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE)
-		oh.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE)
-	}
-	oh64.BaseOfCode = t.VirtualAddress
-	oh.BaseOfCode = t.VirtualAddress
-	oh64.ImageBase = PEBASE
-	oh.ImageBase = PEBASE
-	oh64.SectionAlignment = PESECTALIGN
-	oh.SectionAlignment = PESECTALIGN
-	oh64.FileAlignment = PEFILEALIGN
-	oh.FileAlignment = PEFILEALIGN
-	oh64.MajorOperatingSystemVersion = 4
-	oh.MajorOperatingSystemVersion = 4
-	oh64.MinorOperatingSystemVersion = 0
-	oh.MinorOperatingSystemVersion = 0
-	oh64.MajorImageVersion = 1
-	oh.MajorImageVersion = 1
-	oh64.MinorImageVersion = 0
-	oh.MinorImageVersion = 0
-	oh64.MajorSubsystemVersion = 4
-	oh.MajorSubsystemVersion = 4
-	oh64.MinorSubsystemVersion = 0
-	oh.MinorSubsystemVersion = 0
-	oh64.SizeOfImage = uint32(nextsectoff)
-	oh.SizeOfImage = uint32(nextsectoff)
-	oh64.SizeOfHeaders = uint32(PEFILEHEADR)
-	oh.SizeOfHeaders = uint32(PEFILEHEADR)
-	if Headtype == obj.Hwindowsgui {
-		oh64.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_GUI
-		oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_GUI
-	} else {
-		oh64.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI
-		oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI
-	}
-
-	// Disable stack growth as we don't want Windows to
-	// fiddle with the thread stack limits, which we set
-	// ourselves to circumvent the stack checks in the
-	// Windows exception dispatcher.
-	// Commit size must be strictly less than reserve
-	// size; otherwise reserve will be rounded up to a
-	// larger size, as verified with VMMap.
-
-	// Go code would be OK with 64k stacks, but we need larger stacks for cgo.
-	//
-	// The default stack reserve size affects only the main
-	// thread, ctrlhandler thread, and profileloop thread. For
-	// these, it must be greater than the stack size assumed by
-	// externalthreadhandler.
-	//
-	// For other threads we specify stack size in runtime explicitly
-	// (runtime knows whether cgo is enabled or not).
-	// For these, the reserve must match STACKSIZE in
-	// runtime/cgo/gcc_windows_{386,amd64}.c and the corresponding
-	// CreateThread parameter in runtime.newosproc.
-	if !iscgo {
-		oh64.SizeOfStackReserve = 0x00020000
-		oh.SizeOfStackReserve = 0x00020000
-		oh64.SizeOfStackCommit = 0x00001000
-		oh.SizeOfStackCommit = 0x00001000
-	} else {
-		oh64.SizeOfStackReserve = 0x00200000
-		oh.SizeOfStackReserve = 0x00100000
-
-		// account for 2 guard pages
-		oh64.SizeOfStackCommit = 0x00200000 - 0x2000
-
-		oh.SizeOfStackCommit = 0x00100000 - 0x2000
-	}
-
-	oh64.SizeOfHeapReserve = 0x00100000
-	oh.SizeOfHeapReserve = 0x00100000
-	oh64.SizeOfHeapCommit = 0x00001000
-	oh.SizeOfHeapCommit = 0x00001000
-	oh64.NumberOfRvaAndSizes = 16
-	oh.NumberOfRvaAndSizes = 16
-
-	pewrite()
-}
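// A small self-contained sketch of the COFF long-name convention implemented
// by strtbladd and newPEDWARFSection above: a section name longer than 8
// bytes is stored in the string table, and the header's name field holds
// "/<offset>", where the offset includes the 4-byte length word that
// precedes the table in the file. The types here are illustrative stand-ins.
package main

import "fmt"

type stringTable []byte

// add appends a NUL-terminated name and returns its offset, counting the
// 4-byte length prefix at the start of the table (as strtbladd does).
func (t *stringTable) add(name string) int {
	off := len(*t) + 4
	*t = append(*t, name...)
	*t = append(*t, 0)
	return off
}

// sectionName returns what would go into the 8-byte COFF name field.
func sectionName(t *stringTable, name string) string {
	if len(name) <= 8 {
		return name
	}
	return fmt.Sprintf("/%d", t.add(name))
}

func main() {
	var tbl stringTable
	fmt.Println(sectionName(&tbl, ".text"))           // short: ".text"
	fmt.Println(sectionName(&tbl, ".debug_pubnames")) // long: "/4"
	fmt.Println(sectionName(&tbl, ".debug_info"))     // long: "/20"
}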
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/sym.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/sym.go
deleted file mode 100644
index 9f14176..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/sym.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/sym.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/sym.go:1
-// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"log"
-)
-
-func linknew(arch *sys.Arch) *Link {
-	ctxt := &Link{
-		Syms: &Symbols{
-			hash: []map[string]*Symbol{
-				// preallocate about 2MB for the hash of
-				// non-static symbols
-				make(map[string]*Symbol, 100000),
-			},
-			Allsym: make([]*Symbol, 0, 100000),
-		},
-		Arch: arch,
-	}
-
-	if obj.GOARCH != arch.Name {
-		log.Fatalf("invalid obj.GOARCH %s (want %s)", obj.GOARCH, arch.Name)
-	}
-
-	return ctxt
-}
-
-// computeTLSOffset records the thread-local storage offset.
-func (ctxt *Link) computeTLSOffset() {
-	switch Headtype {
-	default:
-		log.Fatalf("unknown thread-local storage offset for %v", Headtype)
-
-	case obj.Hplan9, obj.Hwindows, obj.Hwindowsgui:
-		break
-
-		/*
-		 * ELF uses TLS offset negative from FS.
-		 * Translate 0(FS) and 8(FS) into -16(FS) and -8(FS).
-		 * Known to low-level assembly in package runtime and runtime/cgo.
-		 */
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hdragonfly,
-		obj.Hsolaris:
-		if obj.GOOS == "android" {
-			switch ctxt.Arch.Family {
-			case sys.AMD64:
-				// Android/amd64 constant - offset from 0(FS) to our TLS slot.
-				// Explained in src/runtime/cgo/gcc_android_*.c
-				ctxt.Tlsoffset = 0x1d0
-			case sys.I386:
-				// Android/386 constant - offset from 0(GS) to our TLS slot.
-				ctxt.Tlsoffset = 0xf8
-			default:
-				ctxt.Tlsoffset = -1 * ctxt.Arch.PtrSize
-			}
-		} else {
-			ctxt.Tlsoffset = -1 * ctxt.Arch.PtrSize
-		}
-
-	case obj.Hnacl:
-		switch ctxt.Arch.Family {
-		default:
-			log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
-
-		case sys.ARM:
-			ctxt.Tlsoffset = 0
-
-		case sys.AMD64:
-			ctxt.Tlsoffset = 0
-
-		case sys.I386:
-			ctxt.Tlsoffset = -8
-		}
-
-		/*
-		 * OS X system constants - offset from 0(GS) to our TLS.
-		 * Explained in src/runtime/cgo/gcc_darwin_*.c.
-		 */
-	case obj.Hdarwin:
-		switch ctxt.Arch.Family {
-		default:
-			log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name)
-
-		case sys.ARM:
-			ctxt.Tlsoffset = 0 // dummy value, not needed
-
-		case sys.AMD64:
-			ctxt.Tlsoffset = 0x8a0
-
-		case sys.ARM64:
-			ctxt.Tlsoffset = 0 // dummy value, not needed
-
-		case sys.I386:
-			ctxt.Tlsoffset = 0x468
-		}
-	}
-
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/symbols.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/symbols.go
deleted file mode 100644
index f534938..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/symbols.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/symbols.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/symbols.go:1
-// Derived from Inferno utils/6l/l.h and related files.
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-type Symbols struct {
-	symbolBatch []Symbol
-
-	// Symbol lookup based on name and indexed by version.
-	hash []map[string]*Symbol
-
-	Allsym []*Symbol
-}
-
-func (syms *Symbols) newsym(name string, v int) *Symbol {
-	batch := syms.symbolBatch
-	if len(batch) == 0 {
-		batch = make([]Symbol, 1000)
-	}
-	s := &batch[0]
-	syms.symbolBatch = batch[1:]
-
-	s.Dynid = -1
-	s.Plt = -1
-	s.Got = -1
-	s.Name = name
-	s.Version = int16(v)
-	syms.Allsym = append(syms.Allsym, s)
-
-	return s
-}
-
-// Look up the symbol with the given name and version, creating the
-// symbol if it is not found.
-func (syms *Symbols) Lookup(name string, v int) *Symbol {
-	m := syms.hash[v]
-	s := m[name]
-	if s != nil {
-		return s
-	}
-	s = syms.newsym(name, v)
-	s.Extname = s.Name
-	m[name] = s
-	return s
-}
-
-// Look up the symbol with the given name and version, returning nil
-// if it is not found.
-func (syms *Symbols) ROLookup(name string, v int) *Symbol {
-	return syms.hash[v][name]
-}
-
-// Allocate a new version (i.e. symbol namespace).
-func (syms *Symbols) IncVersion() int {
-	syms.hash = append(syms.hash, make(map[string]*Symbol))
-	return len(syms.hash) - 1
-}
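// newsym above amortizes allocation by carving each Symbol out of a
// 1000-element batch: only the slice header advances, so pointers handed out
// earlier stay valid. A standalone sketch of the same pattern, with a
// stand-in type instead of the linker's Symbol:
package main

import "fmt"

type node struct {
	name string
	id   int
}

type allocator struct {
	batch []node
}

// newNode hands out a pointer into the current batch, refilling the batch
// with one allocation per 1000 nodes.
func (a *allocator) newNode(name string, id int) *node {
	if len(a.batch) == 0 {
		a.batch = make([]node, 1000)
	}
	n := &a.batch[0]
	a.batch = a.batch[1:]
	n.name = name
	n.id = id
	return n
}

func main() {
	var a allocator
	x := a.newNode("runtime.text", 1)
	y := a.newNode("runtime.etext", 2)
	fmt.Println(x.name, y.name) // both point into the same 1000-element batch
}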
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/symtab.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/symtab.go
deleted file mode 100644
index ffab5ca..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/symtab.go
+++ /dev/null
@@ -1,696 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/symtab.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/symtab.go:1
-// Inferno utils/6l/span.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/span.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ld
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"fmt"
-	"path/filepath"
-	"strings"
-)
-
-// Symbol table.
-
-func putelfstr(s string) int {
-	if len(Elfstrdat) == 0 && s != "" {
-		// first entry must be empty string
-		putelfstr("")
-	}
-
-	off := len(Elfstrdat)
-	Elfstrdat = append(Elfstrdat, s...)
-	Elfstrdat = append(Elfstrdat, 0)
-	return off
-}
-
-func putelfsyment(off int, addr int64, size int64, info int, shndx int, other int) {
-	if elf64 {
-		Thearch.Lput(uint32(off))
-		Cput(uint8(info))
-		Cput(uint8(other))
-		Thearch.Wput(uint16(shndx))
-		Thearch.Vput(uint64(addr))
-		Thearch.Vput(uint64(size))
-		Symsize += ELF64SYMSIZE
-	} else {
-		Thearch.Lput(uint32(off))
-		Thearch.Lput(uint32(addr))
-		Thearch.Lput(uint32(size))
-		Cput(uint8(info))
-		Cput(uint8(other))
-		Thearch.Wput(uint16(shndx))
-		Symsize += ELF32SYMSIZE
-	}
-}
-
-var numelfsym int = 1 // 0 is reserved
-
-var elfbind int
-
-func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *Symbol) {
-	var typ int
-
-	switch t {
-	default:
-		return
-
-	case TextSym:
-		typ = STT_FUNC
-
-	case DataSym, BSSSym:
-		typ = STT_OBJECT
-
-	case UndefinedSym:
-		// ElfType is only set for symbols read from Go shared libraries, but
-		// for other symbols it is left as STT_NOTYPE which is fine.
-		typ = int(x.ElfType)
-
-	case TLSSym:
-		typ = STT_TLS
-	}
-
-	size := x.Size
-	if t == UndefinedSym {
-		size = 0
-	}
-
-	xo := x
-	for xo.Outer != nil {
-		xo = xo.Outer
-	}
-
-	var elfshnum int
-	if xo.Type == obj.SDYNIMPORT || xo.Type == obj.SHOSTOBJ {
-		elfshnum = SHN_UNDEF
-	} else {
-		if xo.Sect == nil {
-			Errorf(x, "missing section in putelfsym")
-			return
-		}
-		if xo.Sect.Elfsect == nil {
-			Errorf(x, "missing ELF section in putelfsym")
-			return
-		}
-		elfshnum = xo.Sect.Elfsect.shnum
-	}
-
-	// One pass for each binding: STB_LOCAL, STB_GLOBAL,
-	// maybe one day STB_WEAK.
-	bind := STB_GLOBAL
-
-	if x.Version != 0 || (x.Type&obj.SHIDDEN != 0) || x.Attr.Local() {
-		bind = STB_LOCAL
-	}
-
-	// In external linking mode, we have to invoke gcc with -rdynamic
-	// to get the exported symbols put into the dynamic symbol table.
-	// To avoid filling the dynamic table with lots of unnecessary symbols,
-	// mark all Go symbols local (not global) in the final executable.
-	// But when we're dynamically linking, we need all those global symbols.
-	if !ctxt.DynlinkingGo() && Linkmode == LinkExternal && !x.Attr.CgoExportStatic() && elfshnum != SHN_UNDEF {
-		bind = STB_LOCAL
-	}
-
-	if Linkmode == LinkExternal && elfshnum != SHN_UNDEF {
-		addr -= int64(xo.Sect.Vaddr)
-	}
-	other := STV_DEFAULT
-	if x.Type&obj.SHIDDEN != 0 {
-		other = STV_HIDDEN
-	}
-	if (Buildmode == BuildmodeCArchive || Buildmode == BuildmodePIE || ctxt.DynlinkingGo()) && SysArch.Family == sys.PPC64 && typ == STT_FUNC && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" {
-		// On ppc64 the top three bits of the st_other field indicate how
-		// many instructions separate the global and local entry points. In
-		// our case it is two instructions, indicated by the value 3.
-		other |= 3 << 5
-	}
-
-	// When dynamically linking, we create Symbols by reading the names from
-	// the symbol tables of the shared libraries and so the names need to
-	// match exactly. Tools like DTrace will have to wait for now.
-	if !ctxt.DynlinkingGo() {
-		// Rewrite · to . for ASCII-only tools like DTrace (sigh)
-		s = strings.Replace(s, "·", ".", -1)
-	}
-
-	if ctxt.DynlinkingGo() && bind == STB_GLOBAL && elfbind == STB_LOCAL && x.Type == obj.STEXT {
-		// When dynamically linking, we want references to functions defined
-		// in this module to always be to the function object, not to the
-		// PLT. We force this by writing an additional local symbol for every
-		// global function symbol and making all relocations against the
-		// global symbol refer to this local symbol instead (see
-		// (*Symbol).ElfsymForReloc). This is approximately equivalent to the
-		// ELF linker -Bsymbolic-functions option, but that is buggy on
-		// several platforms.
-		putelfsyment(putelfstr("local."+s), addr, size, STB_LOCAL<<4|typ&0xf, elfshnum, other)
-		x.LocalElfsym = int32(numelfsym)
-		numelfsym++
-		return
-	} else if bind != elfbind {
-		return
-	}
-
-	putelfsyment(putelfstr(s), addr, size, bind<<4|typ&0xf, elfshnum, other)
-	x.Elfsym = int32(numelfsym)
-	numelfsym++
-}
-
-func putelfsectionsym(s *Symbol, shndx int) {
-	putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_SECTION, shndx, 0)
-	s.Elfsym = int32(numelfsym)
-	numelfsym++
-}
-
-func Asmelfsym(ctxt *Link) {
-	// the first symbol entry is reserved
-	putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)
-
-	dwarfaddelfsectionsyms(ctxt)
-
-	// Some linkers will add a FILE sym if one is not present.
-	// Avoid having the working directory inserted into the symbol table.
-	// It is added with a name to avoid problems with external linking
-	// encountered on some versions of Solaris. See issue #14957.
-	putelfsyment(putelfstr("go.go"), 0, 0, STB_LOCAL<<4|STT_FILE, SHN_ABS, 0)
-	numelfsym++
-
-	elfbind = STB_LOCAL
-	genasmsym(ctxt, putelfsym)
-
-	elfbind = STB_GLOBAL
-	elfglobalsymndx = numelfsym
-	genasmsym(ctxt, putelfsym)
-}
-
-func putplan9sym(ctxt *Link, x *Symbol, s string, typ SymbolType, addr int64, go_ *Symbol) {
-	t := int(typ)
-	switch typ {
-	case TextSym, DataSym, BSSSym:
-		if x.Version != 0 {
-			t += 'a' - 'A'
-		}
-		fallthrough
-
-	case AutoSym, ParamSym, FileSym, FrameSym:
-		l := 4
-		if Headtype == obj.Hplan9 && SysArch.Family == sys.AMD64 && !Flag8 {
-			Lputb(uint32(addr >> 32))
-			l = 8
-		}
-
-		Lputb(uint32(addr))
-		Cput(uint8(t + 0x80)) /* 0x80 is variable length */
-
-		var i int
-
-		/* skip the '<' in filenames */
-		if t == FileSym {
-			s = s[1:]
-		}
-		for i = 0; i < len(s); i++ {
-			Cput(s[i])
-		}
-		Cput(0)
-
-		Symsize += int32(l) + 1 + int32(i) + 1
-
-	default:
-		return
-	}
-}
-
-func Asmplan9sym(ctxt *Link) {
-	genasmsym(ctxt, putplan9sym)
-}
-
-var symt *Symbol
-
-var encbuf [10]byte
-
-func Wputb(w uint16) { Cwrite(Append16b(encbuf[:0], w)) }
-func Lputb(l uint32) { Cwrite(Append32b(encbuf[:0], l)) }
-func Vputb(v uint64) { Cwrite(Append64b(encbuf[:0], v)) }
-
-func Wputl(w uint16) { Cwrite(Append16l(encbuf[:0], w)) }
-func Lputl(l uint32) { Cwrite(Append32l(encbuf[:0], l)) }
-func Vputl(v uint64) { Cwrite(Append64l(encbuf[:0], v)) }
-
-func Append16b(b []byte, v uint16) []byte {
-	return append(b, uint8(v>>8), uint8(v))
-}
-func Append16l(b []byte, v uint16) []byte {
-	return append(b, uint8(v), uint8(v>>8))
-}
-
-func Append32b(b []byte, v uint32) []byte {
-	return append(b, uint8(v>>24), uint8(v>>16), uint8(v>>8), uint8(v))
-}
-func Append32l(b []byte, v uint32) []byte {
-	return append(b, uint8(v), uint8(v>>8), uint8(v>>16), uint8(v>>24))
-}
-
-func Append64b(b []byte, v uint64) []byte {
-	return append(b, uint8(v>>56), uint8(v>>48), uint8(v>>40), uint8(v>>32),
-		uint8(v>>24), uint8(v>>16), uint8(v>>8), uint8(v))
-}
-
-func Append64l(b []byte, v uint64) []byte {
-	return append(b, uint8(v), uint8(v>>8), uint8(v>>16), uint8(v>>24),
-		uint8(v>>32), uint8(v>>40), uint8(v>>48), uint8(v>>56))
-}
-
-type byPkg []*Library
-
-func (libs byPkg) Len() int {
-	return len(libs)
-}
-
-func (libs byPkg) Less(a, b int) bool {
-	return libs[a].Pkg < libs[b].Pkg
-}
-
-func (libs byPkg) Swap(a, b int) {
-	libs[a], libs[b] = libs[b], libs[a]
-}
-
-// Create a table with information on the text sections.
-
-func textsectionmap(ctxt *Link) uint32 {
-
-	t := ctxt.Syms.Lookup("runtime.textsectionmap", 0)
-	t.Type = obj.SRODATA
-	t.Attr |= AttrReachable
-	nsections := int64(0)
-
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		if sect.Name == ".text" {
-			nsections++
-		} else {
-			break
-		}
-	}
-	Symgrow(t, nsections*(2*int64(SysArch.IntSize)+int64(SysArch.PtrSize)))
-
-	off := int64(0)
-	n := 0
-
-	// The vaddr for each text section is the difference between the section's
-	// Vaddr and the Vaddr for the first text section as determined at compile
-	// time.
-
-	// The symbol for the first text section is named runtime.text as before.
-	// Additional text sections are named runtime.text.n where n is the
-	// order of creation starting with 1. These symbols provide the section's
-	// address after relocation by the linker.
-
-	textbase := Segtext.Sect.Vaddr
-	for sect := Segtext.Sect; sect != nil; sect = sect.Next {
-		if sect.Name != ".text" {
-			break
-		}
-		off = setuintxx(ctxt, t, off, sect.Vaddr-textbase, int64(SysArch.IntSize))
-		off = setuintxx(ctxt, t, off, sect.Length, int64(SysArch.IntSize))
-		if n == 0 {
-			s := ctxt.Syms.ROLookup("runtime.text", 0)
-			if s == nil {
-				Errorf(nil, "Unable to find symbol runtime.text\n")
-			}
-			off = setaddr(ctxt, t, off, s)
-
-		} else {
-			s := ctxt.Syms.Lookup(fmt.Sprintf("runtime.text.%d", n), 0)
-			if s == nil {
-				Errorf(nil, "Unable to find symbol runtime.text.%d\n", n)
-			}
-			off = setaddr(ctxt, t, off, s)
-		}
-		n++
-	}
-	return uint32(n)
-}
-
-func (ctxt *Link) symtab() {
-	dosymtype(ctxt)
-
-	// Define these so that they'll get put into the symbol table.
-	// data.c:/^address will provide the actual values.
-	ctxt.xdefine("runtime.text", obj.STEXT, 0)
-
-	ctxt.xdefine("runtime.etext", obj.STEXT, 0)
-	ctxt.xdefine("runtime.itablink", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.eitablink", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.rodata", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.erodata", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.types", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.etypes", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.noptrdata", obj.SNOPTRDATA, 0)
-	ctxt.xdefine("runtime.enoptrdata", obj.SNOPTRDATA, 0)
-	ctxt.xdefine("runtime.data", obj.SDATA, 0)
-	ctxt.xdefine("runtime.edata", obj.SDATA, 0)
-	ctxt.xdefine("runtime.bss", obj.SBSS, 0)
-	ctxt.xdefine("runtime.ebss", obj.SBSS, 0)
-	ctxt.xdefine("runtime.noptrbss", obj.SNOPTRBSS, 0)
-	ctxt.xdefine("runtime.enoptrbss", obj.SNOPTRBSS, 0)
-	ctxt.xdefine("runtime.end", obj.SBSS, 0)
-	ctxt.xdefine("runtime.epclntab", obj.SRODATA, 0)
-	ctxt.xdefine("runtime.esymtab", obj.SRODATA, 0)
-
-	// garbage collection symbols
-	s := ctxt.Syms.Lookup("runtime.gcdata", 0)
-
-	s.Type = obj.SRODATA
-	s.Size = 0
-	s.Attr |= AttrReachable
-	ctxt.xdefine("runtime.egcdata", obj.SRODATA, 0)
-
-	s = ctxt.Syms.Lookup("runtime.gcbss", 0)
-	s.Type = obj.SRODATA
-	s.Size = 0
-	s.Attr |= AttrReachable
-	ctxt.xdefine("runtime.egcbss", obj.SRODATA, 0)
-
-	// pseudo-symbols to mark locations of type, string, and go string data.
-	var symtype *Symbol
-	var symtyperel *Symbol
-	if UseRelro() && (Buildmode == BuildmodeCArchive || Buildmode == BuildmodeCShared || Buildmode == BuildmodePIE) {
-		s = ctxt.Syms.Lookup("type.*", 0)
-
-		s.Type = obj.STYPE
-		s.Size = 0
-		s.Attr |= AttrReachable
-		symtype = s
-
-		s = ctxt.Syms.Lookup("typerel.*", 0)
-
-		s.Type = obj.STYPERELRO
-		s.Size = 0
-		s.Attr |= AttrReachable
-		symtyperel = s
-	} else if !ctxt.DynlinkingGo() {
-		s = ctxt.Syms.Lookup("type.*", 0)
-
-		s.Type = obj.STYPE
-		s.Size = 0
-		s.Attr |= AttrReachable
-		symtype = s
-		symtyperel = s
-	}
-
-	groupSym := func(name string, t obj.SymKind) *Symbol {
-		s := ctxt.Syms.Lookup(name, 0)
-		s.Type = t
-		s.Size = 0
-		s.Attr |= AttrLocal | AttrReachable
-		return s
-	}
-	var (
-		symgostring = groupSym("go.string.*", obj.SGOSTRING)
-		symgofunc   = groupSym("go.func.*", obj.SGOFUNC)
-		symgcbits   = groupSym("runtime.gcbits.*", obj.SGCBITS)
-	)
-
-	var symgofuncrel *Symbol
-	if !ctxt.DynlinkingGo() {
-		if UseRelro() {
-			symgofuncrel = groupSym("go.funcrel.*", obj.SGOFUNCRELRO)
-		} else {
-			symgofuncrel = symgofunc
-		}
-	}
-
-	symitablink := ctxt.Syms.Lookup("runtime.itablink", 0)
-	symitablink.Type = obj.SITABLINK
-
-	symt = ctxt.Syms.Lookup("runtime.symtab", 0)
-	symt.Attr |= AttrLocal
-	symt.Type = obj.SSYMTAB
-	symt.Size = 0
-	symt.Attr |= AttrReachable
-
-	nitablinks := 0
-
-	// assign specific types so that they sort together.
-	// within a type they sort by size, so the .* symbols
-	// just defined above will be first.
-	// hide the specific symbols.
-	for _, s := range ctxt.Syms.Allsym {
-		if !s.Attr.Reachable() || s.Attr.Special() || s.Type != obj.SRODATA {
-			continue
-		}
-
-		switch {
-		case strings.HasPrefix(s.Name, "type."):
-			if !ctxt.DynlinkingGo() {
-				s.Attr |= AttrHidden
-			}
-			if UseRelro() {
-				s.Type = obj.STYPERELRO
-				s.Outer = symtyperel
-			} else {
-				s.Type = obj.STYPE
-				s.Outer = symtype
-			}
-
-		case strings.HasPrefix(s.Name, "go.importpath.") && UseRelro():
-			// Keep go.importpath symbols in the same section as types and
-			// names, as they can be referred to by a section offset.
-			s.Type = obj.STYPERELRO
-
-		case strings.HasPrefix(s.Name, "go.itablink."):
-			nitablinks++
-			s.Type = obj.SITABLINK
-			s.Attr |= AttrHidden
-			s.Outer = symitablink
-
-		case strings.HasPrefix(s.Name, "go.string."):
-			s.Type = obj.SGOSTRING
-			s.Attr |= AttrHidden
-			s.Outer = symgostring
-
-		case strings.HasPrefix(s.Name, "runtime.gcbits."):
-			s.Type = obj.SGCBITS
-			s.Attr |= AttrHidden
-			s.Outer = symgcbits
-
-		case strings.HasSuffix(s.Name, "·f"):
-			if !ctxt.DynlinkingGo() {
-				s.Attr |= AttrHidden
-			}
-			if UseRelro() {
-				s.Type = obj.SGOFUNCRELRO
-				s.Outer = symgofuncrel
-			} else {
-				s.Type = obj.SGOFUNC
-				s.Outer = symgofunc
-			}
-
-		case strings.HasPrefix(s.Name, "gcargs."), strings.HasPrefix(s.Name, "gclocals."), strings.HasPrefix(s.Name, "gclocals·"):
-			s.Type = obj.SGOFUNC
-			s.Attr |= AttrHidden
-			s.Outer = symgofunc
-			s.Align = 4
-			liveness += (s.Size + int64(s.Align) - 1) &^ (int64(s.Align) - 1)
-		}
-	}
-
-	if Buildmode == BuildmodeShared {
-		abihashgostr := ctxt.Syms.Lookup("go.link.abihash."+filepath.Base(*flagOutfile), 0)
-		abihashgostr.Attr |= AttrReachable
-		abihashgostr.Type = obj.SRODATA
-		hashsym := ctxt.Syms.Lookup("go.link.abihashbytes", 0)
-		Addaddr(ctxt, abihashgostr, hashsym)
-		adduint(ctxt, abihashgostr, uint64(hashsym.Size))
-	}
-	if Buildmode == BuildmodePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil {
-		for _, l := range ctxt.Library {
-			s := ctxt.Syms.Lookup("go.link.pkghashbytes."+l.Pkg, 0)
-			s.Attr |= AttrReachable
-			s.Type = obj.SRODATA
-			s.Size = int64(len(l.hash))
-			s.P = []byte(l.hash)
-			str := ctxt.Syms.Lookup("go.link.pkghash."+l.Pkg, 0)
-			str.Attr |= AttrReachable
-			str.Type = obj.SRODATA
-			Addaddr(ctxt, str, s)
-			adduint(ctxt, str, uint64(len(l.hash)))
-		}
-	}
-
-	nsections := textsectionmap(ctxt)
-
-	// Information about the layout of the executable image for the
-	// runtime to use. Any changes here must be matched by changes to
-	// the definition of moduledata in runtime/symtab.go.
-	// This code uses several global variables that are set by pcln.go:pclntab.
-	moduledata := ctxt.Moduledata
-	// The pclntab slice
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.pclntab", 0))
-	adduint(ctxt, moduledata, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size))
-	adduint(ctxt, moduledata, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size))
-	// The ftab slice
-	Addaddrplus(ctxt, moduledata, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabPclntabOffset))
-	adduint(ctxt, moduledata, uint64(pclntabNfunc+1))
-	adduint(ctxt, moduledata, uint64(pclntabNfunc+1))
-	// The filetab slice
-	Addaddrplus(ctxt, moduledata, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabFiletabOffset))
-	adduint(ctxt, moduledata, uint64(len(ctxt.Filesyms))+1)
-	adduint(ctxt, moduledata, uint64(len(ctxt.Filesyms))+1)
-	// findfunctab
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.findfunctab", 0))
-	// minpc, maxpc
-	Addaddr(ctxt, moduledata, pclntabFirstFunc)
-	Addaddrplus(ctxt, moduledata, pclntabLastFunc, pclntabLastFunc.Size)
-	// pointers to specific parts of the module
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.text", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.etext", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.noptrdata", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.enoptrdata", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.data", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.edata", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.bss", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.ebss", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.noptrbss", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.enoptrbss", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.end", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.gcdata", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.gcbss", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.types", 0))
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.etypes", 0))
-
-	// text section information
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.textsectionmap", 0))
-	adduint(ctxt, moduledata, uint64(nsections))
-	adduint(ctxt, moduledata, uint64(nsections))
-
-	// The typelinks slice
-	typelinkSym := ctxt.Syms.Lookup("runtime.typelink", 0)
-	ntypelinks := uint64(typelinkSym.Size) / 4
-	Addaddr(ctxt, moduledata, typelinkSym)
-	adduint(ctxt, moduledata, ntypelinks)
-	adduint(ctxt, moduledata, ntypelinks)
-	// The itablinks slice
-	Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.itablink", 0))
-	adduint(ctxt, moduledata, uint64(nitablinks))
-	adduint(ctxt, moduledata, uint64(nitablinks))
-	// The ptab slice
-	if ptab := ctxt.Syms.ROLookup("go.plugin.tabs", 0); ptab != nil && ptab.Attr.Reachable() {
-		ptab.Attr |= AttrLocal
-		ptab.Type = obj.SRODATA
-
-		nentries := uint64(len(ptab.P) / 8) // sizeof(nameOff) + sizeof(typeOff)
-		Addaddr(ctxt, moduledata, ptab)
-		adduint(ctxt, moduledata, nentries)
-		adduint(ctxt, moduledata, nentries)
-	} else {
-		adduint(ctxt, moduledata, 0)
-		adduint(ctxt, moduledata, 0)
-		adduint(ctxt, moduledata, 0)
-	}
-	if Buildmode == BuildmodePlugin {
-		addgostring(ctxt, moduledata, "go.link.thispluginpath", *flagPluginPath)
-
-		pkghashes := ctxt.Syms.Lookup("go.link.pkghashes", 0)
-		pkghashes.Attr |= AttrReachable
-		pkghashes.Attr |= AttrLocal
-		pkghashes.Type = obj.SRODATA
-
-		for i, l := range ctxt.Library {
-			// pkghashes[i].name
-			addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkgname.%d", i), l.Pkg)
-			// pkghashes[i].linktimehash
-			addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkglinkhash.%d", i), string(l.hash))
-			// pkghashes[i].runtimehash
-			hash := ctxt.Syms.ROLookup("go.link.pkghash."+l.Pkg, 0)
-			Addaddr(ctxt, pkghashes, hash)
-		}
-		Addaddr(ctxt, moduledata, pkghashes)
-		adduint(ctxt, moduledata, uint64(len(ctxt.Library)))
-		adduint(ctxt, moduledata, uint64(len(ctxt.Library)))
-	} else {
-		adduint(ctxt, moduledata, 0) // pluginpath
-		adduint(ctxt, moduledata, 0)
-		adduint(ctxt, moduledata, 0) // pkghashes slice
-		adduint(ctxt, moduledata, 0)
-		adduint(ctxt, moduledata, 0)
-	}
-	if len(ctxt.Shlibs) > 0 {
-		thismodulename := filepath.Base(*flagOutfile)
-		switch Buildmode {
-		case BuildmodeExe, BuildmodePIE:
-			// When linking an executable, outfile is just "a.out". Make
-			// it something slightly more comprehensible.
-			thismodulename = "the executable"
-		}
-		addgostring(ctxt, moduledata, "go.link.thismodulename", thismodulename)
-
-		modulehashes := ctxt.Syms.Lookup("go.link.abihashes", 0)
-		modulehashes.Attr |= AttrReachable
-		modulehashes.Attr |= AttrLocal
-		modulehashes.Type = obj.SRODATA
-
-		for i, shlib := range ctxt.Shlibs {
-			// modulehashes[i].modulename
-			modulename := filepath.Base(shlib.Path)
-			addgostring(ctxt, modulehashes, fmt.Sprintf("go.link.libname.%d", i), modulename)
-
-			// modulehashes[i].linktimehash
-			addgostring(ctxt, modulehashes, fmt.Sprintf("go.link.linkhash.%d", i), string(shlib.Hash))
-
-			// modulehashes[i].runtimehash
-			abihash := ctxt.Syms.Lookup("go.link.abihash."+modulename, 0)
-			abihash.Attr |= AttrReachable
-			Addaddr(ctxt, modulehashes, abihash)
-		}
-
-		Addaddr(ctxt, moduledata, modulehashes)
-		adduint(ctxt, moduledata, uint64(len(ctxt.Shlibs)))
-		adduint(ctxt, moduledata, uint64(len(ctxt.Shlibs)))
-	}
-
-	// The rest of moduledata is zero initialized.
-	// When linking an object that does not contain the runtime we are
-	// creating the moduledata from scratch and it does not have a
-	// compiler-provided size, so read it from the type data.
-	moduledatatype := ctxt.Syms.ROLookup("type.runtime.moduledata", 0)
-	moduledata.Size = decodetypeSize(ctxt.Arch, moduledatatype)
-	Symgrow(moduledata, moduledata.Size)
-
-	lastmoduledatap := ctxt.Syms.Lookup("runtime.lastmoduledatap", 0)
-	if lastmoduledatap.Type != obj.SDYNIMPORT {
-		lastmoduledatap.Type = obj.SNOPTRDATA
-		lastmoduledatap.Size = 0 // overwrite existing value
-		Addaddr(ctxt, lastmoduledatap, moduledata)
-	}
-}
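// The Append{16,32,64}{b,l} helpers above hand-roll big- and little-endian
// encoding. A quick standalone check (the 32-bit pair is re-declared here so
// this compiles on its own) that they agree with encoding/binary:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func Append32l(b []byte, v uint32) []byte {
	return append(b, uint8(v), uint8(v>>8), uint8(v>>16), uint8(v>>24))
}

func Append32b(b []byte, v uint32) []byte {
	return append(b, uint8(v>>24), uint8(v>>16), uint8(v>>8), uint8(v))
}

func main() {
	const v = 0x11223344
	var le, be [4]byte
	binary.LittleEndian.PutUint32(le[:], v)
	binary.BigEndian.PutUint32(be[:], v)
	fmt.Println(bytes.Equal(Append32l(nil, v), le[:])) // true
	fmt.Println(bytes.Equal(Append32b(nil, v), be[:])) // true
}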
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/typelink.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/typelink.go
deleted file mode 100644
index e3f18b0..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/typelink.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/typelink.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/typelink.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"sort"
-
-	"bootstrap/cmd/internal/obj"
-)
-
-type byTypeStr []typelinkSortKey
-
-type typelinkSortKey struct {
-	TypeStr string
-	Type    *Symbol
-}
-
-func (s byTypeStr) Less(i, j int) bool { return s[i].TypeStr < s[j].TypeStr }
-func (s byTypeStr) Len() int           { return len(s) }
-func (s byTypeStr) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-
-// typelink generates the typelink table which is used by reflect.typelinks().
-// Types that should be added to the typelinks table are marked with the
-// MakeTypelink attribute by the compiler.
-func (ctxt *Link) typelink() {
-	typelinks := byTypeStr{}
-	for _, s := range ctxt.Syms.Allsym {
-		if s.Attr.Reachable() && s.Attr.MakeTypelink() {
-			typelinks = append(typelinks, typelinkSortKey{decodetypeStr(s), s})
-		}
-	}
-	sort.Sort(typelinks)
-
-	tl := ctxt.Syms.Lookup("runtime.typelink", 0)
-	tl.Type = obj.STYPELINK
-	tl.Attr |= AttrReachable | AttrLocal
-	tl.Size = int64(4 * len(typelinks))
-	tl.P = make([]byte, tl.Size)
-	tl.R = make([]Reloc, len(typelinks))
-	for i, s := range typelinks {
-		r := &tl.R[i]
-		r.Sym = s.Type
-		r.Off = int32(i * 4)
-		r.Siz = 4
-		r.Type = obj.R_ADDROFF
-	}
-}
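// A simplified model (stand-in types, not the linker's Symbol/Reloc) of how
// the typelink table above is laid out: one 4-byte slot per type, sorted by
// the type's string, with a relocation record (R_ADDROFF in the code above)
// pointing each slot at its type symbol.
package main

import (
	"fmt"
	"sort"
)

type reloc struct {
	off  int32  // offset of the 4-byte slot within the table
	name string // stand-in for the *Symbol the slot refers to
}

func buildTypelink(types map[string]string) ([]byte, []reloc) {
	keys := make([]string, 0, len(types))
	for k := range types {
		keys = append(keys, k)
	}
	sort.Strings(keys) // same effect as sorting typelinkSortKey by TypeStr

	table := make([]byte, 4*len(keys)) // contents are filled in by relocation
	relocs := make([]reloc, len(keys))
	for i, k := range keys {
		relocs[i] = reloc{off: int32(i * 4), name: types[k]}
	}
	return table, relocs
}

func main() {
	_, relocs := buildTypelink(map[string]string{
		"*main.T":  "type.*main.T",
		"main.T":   "type.main.T",
		"[]string": "type.[]string",
	})
	for _, r := range relocs {
		fmt.Printf("slot at %2d -> %s\n", r.off, r.name)
	}
}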
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/util.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/util.go
deleted file mode 100644
index 1dbf5f8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ld/util.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/util.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ld/util.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ld
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-)
-
-func cstring(x []byte) string {
-	i := bytes.IndexByte(x, '\x00')
-	if i >= 0 {
-		x = x[:i]
-	}
-	return string(x)
-}
-
-func tokenize(s string) []string {
-	var f []string
-	for {
-		s = strings.TrimLeft(s, " \t\r\n")
-		if s == "" {
-			break
-		}
-		quote := false
-		i := 0
-		for ; i < len(s); i++ {
-			if s[i] == '\'' {
-				if quote && i+1 < len(s) && s[i+1] == '\'' {
-					i++
-					continue
-				}
-				quote = !quote
-			}
-			if !quote && (s[i] == ' ' || s[i] == '\t' || s[i] == '\r' || s[i] == '\n') {
-				break
-			}
-		}
-		next := s[:i]
-		s = s[i:]
-		if strings.Contains(next, "'") {
-			var buf []byte
-			quote := false
-			for i := 0; i < len(next); i++ {
-				if next[i] == '\'' {
-					if quote && i+1 < len(next) && next[i+1] == '\'' {
-						i++
-						buf = append(buf, '\'')
-					}
-					quote = !quote
-					continue
-				}
-				buf = append(buf, next[i])
-			}
-			next = string(buf)
-		}
-		f = append(f, next)
-	}
-	return f
-}
-
-var atExitFuncs []func()
-
-func AtExit(f func()) {
-	atExitFuncs = append(atExitFuncs, f)
-}
-
-// Exit exits with code after executing all atExitFuncs.
-func Exit(code int) {
-	for i := len(atExitFuncs) - 1; i >= 0; i-- {
-		atExitFuncs[i]()
-	}
-	os.Exit(code)
-}
-
-// Exitf logs an error message then calls Exit(2).
-func Exitf(format string, a ...interface{}) {
-	fmt.Fprintf(os.Stderr, os.Args[0]+": "+format+"\n", a...)
-	if coutbuf.f != nil {
-		coutbuf.f.Close()
-		mayberemoveoutfile()
-	}
-	Exit(2)
-}
-
-// Errorf logs an error message.
-//
-// If more than 20 errors have been printed, exit with an error.
-//
-// Logging an error means that on exit cmd/link will delete any
-// output file and return a non-zero error code.
-func Errorf(s *Symbol, format string, args ...interface{}) {
-	if s != nil {
-		format = s.Name + ": " + format
-	}
-	format += "\n"
-	fmt.Fprintf(os.Stderr, format, args...)
-	nerrors++
-	if *flagH {
-		panic("error")
-	}
-	if nerrors > 20 {
-		Exitf("too many errors")
-	}
-}
-
-func artrim(x []byte) string {
-	i := 0
-	j := len(x)
-	for i < len(x) && x[i] == ' ' {
-		i++
-	}
-	for j > i && x[j-1] == ' ' {
-		j--
-	}
-	return string(x[i:j])
-}
-
-func stringtouint32(x []uint32, s string) {
-	for i := 0; len(s) > 0; i++ {
-		var buf [4]byte
-		s = s[copy(buf[:], s):]
-		x[i] = binary.LittleEndian.Uint32(buf[:])
-	}
-}
-
-var start = time.Now()
-
-func elapsed() float64 {
-	return time.Since(start).Seconds()
-}
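// A standalone sketch of the AtExit/Exit pattern above: registered cleanups
// run in reverse (LIFO) order of registration before the process exits, so
// the most recently registered cleanup runs first. Names here are
// illustrative, not the linker's.
package main

import (
	"fmt"
	"os"
)

var atExitFuncs []func()

func atExit(f func()) { atExitFuncs = append(atExitFuncs, f) }

// exit runs the cleanups newest-first, then terminates.
func exit(code int) {
	for i := len(atExitFuncs) - 1; i >= 0; i-- {
		atExitFuncs[i]()
	}
	os.Exit(code)
}

func main() {
	atExit(func() { fmt.Println("close output file") })
	atExit(func() { fmt.Println("remove temp dir") })
	exit(0) // prints "remove temp dir", then "close output file"
}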
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/asm.go
deleted file mode 100644
index 8aaf190..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/asm.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips/asm.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2016 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-	"log"
-)
-
-func gentext(ctxt *ld.Link) {
-	return
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	log.Fatalf("adddynrel not implemented")
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Lput(uint32(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		if r.Siz != 4 {
-			return -1
-		}
-		ld.Thearch.Lput(ld.R_MIPS_32 | uint32(elfsym)<<8)
-
-	case obj.R_ADDRMIPS:
-		ld.Thearch.Lput(ld.R_MIPS_LO16 | uint32(elfsym)<<8)
-
-	case obj.R_ADDRMIPSU:
-		ld.Thearch.Lput(ld.R_MIPS_HI16 | uint32(elfsym)<<8)
-
-	case obj.R_ADDRMIPSTLS:
-		ld.Thearch.Lput(ld.R_MIPS_TLS_TPREL_LO16 | uint32(elfsym)<<8)
-
-	case obj.R_CALLMIPS, obj.R_JMPMIPS:
-		ld.Thearch.Lput(ld.R_MIPS_26 | uint32(elfsym)<<8)
-	}
-
-	return 0
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	return
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	return -1
-}
-
-func applyrel(r *ld.Reloc, s *ld.Symbol, val *int64, t int64) {
-	o := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:])
-	switch r.Type {
-	case obj.R_ADDRMIPS, obj.R_ADDRMIPSTLS:
-		*val = int64(o&0xffff0000 | uint32(t)&0xffff)
-	case obj.R_ADDRMIPSU:
-		*val = int64(o&0xffff0000 | uint32((t+(1<<15))>>16)&0xffff)
-	case obj.R_CALLMIPS, obj.R_JMPMIPS:
-		*val = int64(o&0xfc000000 | uint32(t>>2)&^0xfc000000)
-	}
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		switch r.Type {
-		default:
-			return -1
-
-		case obj.R_ADDRMIPS, obj.R_ADDRMIPSU:
-
-			r.Done = 0
-
-			// set up addend for eventual relocation via outer symbol.
-			rs := r.Sym
-			r.Xadd = r.Add
-			for rs.Outer != nil {
-				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
-				rs = rs.Outer
-			}
-
-			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-				ld.Errorf(s, "missing section for %s", rs.Name)
-			}
-			r.Xsym = rs
-			applyrel(r, s, val, r.Xadd)
-			return 0
-
-		case obj.R_ADDRMIPSTLS, obj.R_CALLMIPS, obj.R_JMPMIPS:
-			r.Done = 0
-			r.Xsym = r.Sym
-			r.Xadd = r.Add
-			applyrel(r, s, val, r.Add)
-			return 0
-		}
-	}
-
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-
-	case obj.R_ADDRMIPS, obj.R_ADDRMIPSU:
-		t := ld.Symaddr(r.Sym) + r.Add
-		applyrel(r, s, val, t)
-		return 0
-
-	case obj.R_CALLMIPS, obj.R_JMPMIPS:
-		t := ld.Symaddr(r.Sym) + r.Add
-
-		if t&3 != 0 {
-			ld.Errorf(s, "direct call is not aligned: %s %x", r.Sym.Name, t)
-		}
-
-		// check if target address is in the same 256 MB region as the next instruction
-		if (s.Value+int64(r.Off)+4)&0xf0000000 != (t & 0xf0000000) {
-			ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t)
-		}
-
-		applyrel(r, s, val, t)
-		return 0
-
-	case obj.R_ADDRMIPSTLS:
-		// thread pointer is at 0x7000 offset from the start of TLS data area
-		t := ld.Symaddr(r.Sym) + r.Add - 0x7000
-		if t < -32768 || t >= 32768 {
-			ld.Errorf(s, "TLS offset out of range %d", t)
-		}
-		applyrel(r, s, val, t)
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	return -1
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	/* output symbol table */
-	ld.Symsize = 0
-
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		if !ld.Iself {
-			ld.Errorf(nil, "unsupported executable format")
-		}
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-		symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-
-		ld.Cseek(int64(symo))
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-		}
-		ld.Asmelfsym(ctxt)
-		ld.Cflush()
-		ld.Cwrite(ld.Elfstrdat)
-
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
-		}
-
-		if ld.Linkmode == ld.LinkExternal {
-			ld.Elfemitreloc(ctxt)
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f header\n", obj.Cputime())
-	}
-
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-		ld.Errorf(nil, "unsupported operating system")
-	case obj.Hlinux:
-		ld.Asmbelf(ctxt, int64(symo))
-	}
-
-	ld.Cflush()
-	if *ld.FlagC {
-		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
-		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
-		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
-		fmt.Printf("symsize=%d\n", ld.Symsize)
-		fmt.Printf("lcsize=%d\n", ld.Lcsize)
-		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
-	}
-}
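// A minimal standalone sketch (not taken from the sources above) of the
// %hi/%lo split used by the R_ADDRMIPSU and R_ADDRMIPS cases in applyrel:
// the high half is computed as (t + 0x8000) >> 16 because the paired low
// half is sign-extended by the hardware, so a low half >= 0x8000 must carry
// one into the high half. The linker does this in int64; uint32 is enough
// for the illustration here.
package main

import "fmt"

func split(addr uint32) (hi, lo uint16) {
	hi = uint16((addr + 0x8000) >> 16) // rounded high half, as in applyrel
	lo = uint16(addr)                  // low half, sign-extended at use
	return
}

// join models lui (load the high half) followed by a sign-extending addiu.
func join(hi, lo uint16) uint32 {
	return uint32(hi)<<16 + uint32(int32(int16(lo)))
}

func main() {
	for _, a := range []uint32{0x12340000, 0x12347fff, 0x12348000, 0x1234ffff} {
		hi, lo := split(a)
		fmt.Printf("addr %#08x -> hi %#04x lo %#04x roundtrips: %v\n",
			a, hi, lo, join(hi, lo) == a)
	}
}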
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/l.go
deleted file mode 100644
index 5678bca..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/l.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips/l.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2016 The Go Authors.  All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-// Writing object files.
-
-// cmd/9l/l.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2016 The Go Authors.  All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-const (
-	MaxAlign  = 32 // max data alignment
-	MinAlign  = 1  // min data alignment
-	FuncAlign = 4
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	DWARFREGSP = 29
-	DWARFREGLR = 31
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/obj.go
deleted file mode 100644
index 52cb376..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips/obj.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips/obj.go:1
-// Inferno utils/5l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2016 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-// Reading object files.
-
-func Init() {
-	if obj.GOARCH == "mipsle" {
-		ld.SysArch = sys.ArchMIPSLE
-	} else {
-		ld.SysArch = sys.ArchMIPS
-	}
-
-	ld.Thearch.Funcalign = FuncAlign
-	ld.Thearch.Maxalign = MaxAlign
-	ld.Thearch.Minalign = MinAlign
-	ld.Thearch.Dwarfregsp = DWARFREGSP
-	ld.Thearch.Dwarfreglr = DWARFREGLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	if ld.SysArch == sys.ArchMIPSLE {
-		ld.Thearch.Lput = ld.Lputl
-		ld.Thearch.Wput = ld.Wputl
-		ld.Thearch.Vput = ld.Vputl
-		ld.Thearch.Append16 = ld.Append16l
-		ld.Thearch.Append32 = ld.Append32l
-		ld.Thearch.Append64 = ld.Append64l
-	} else {
-		ld.Thearch.Lput = ld.Lputb
-		ld.Thearch.Wput = ld.Wputb
-		ld.Thearch.Vput = ld.Vputb
-		ld.Thearch.Append16 = ld.Append16b
-		ld.Thearch.Append32 = ld.Append32b
-		ld.Thearch.Append64 = ld.Append64b
-	}
-
-	ld.Thearch.Linuxdynld = "/lib/ld.so.1"
-
-	ld.Thearch.Freebsddynld = "XXX"
-	ld.Thearch.Openbsddynld = "XXX"
-	ld.Thearch.Netbsddynld = "XXX"
-	ld.Thearch.Dragonflydynld = "XXX"
-	ld.Thearch.Solarisdynld = "XXX"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-	case obj.Hlinux: /* mips elf */
-		ld.Elfinit(ctxt)
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/asm.go
deleted file mode 100644
index e71f948..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/asm.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips64/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips64/asm.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-	"log"
-)
-
-func gentext(ctxt *ld.Link) {}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	log.Fatalf("adddynrel not implemented")
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	// mips64 ELF relocation (endian neutral)
-	//		offset	uint64
-	//		sym		uint32
-	//		ssym	uint8
-	//		type3	uint8
-	//		type2	uint8
-	//		type	uint8
-	//		addend	int64
-
-	ld.Thearch.Vput(uint64(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	ld.Thearch.Lput(uint32(elfsym))
-	ld.Cput(0)
-	ld.Cput(0)
-	ld.Cput(0)
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		switch r.Siz {
-		case 4:
-			ld.Cput(ld.R_MIPS_32)
-		case 8:
-			ld.Cput(ld.R_MIPS_64)
-		default:
-			return -1
-		}
-
-	case obj.R_ADDRMIPS:
-		ld.Cput(ld.R_MIPS_LO16)
-
-	case obj.R_ADDRMIPSU:
-		ld.Cput(ld.R_MIPS_HI16)
-
-	case obj.R_ADDRMIPSTLS:
-		ld.Cput(ld.R_MIPS_TLS_TPREL_LO16)
-
-	case obj.R_CALLMIPS,
-		obj.R_JMPMIPS:
-		ld.Cput(ld.R_MIPS_26)
-	}
-	ld.Thearch.Vput(uint64(r.Xadd))
-
-	return 0
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	return
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	return -1
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		switch r.Type {
-		default:
-			return -1
-
-		case obj.R_ADDRMIPS,
-			obj.R_ADDRMIPSU:
-			r.Done = 0
-
-			// set up addend for eventual relocation via outer symbol.
-			rs := r.Sym
-			r.Xadd = r.Add
-			for rs.Outer != nil {
-				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
-				rs = rs.Outer
-			}
-
-			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-				ld.Errorf(s, "missing section for %s", rs.Name)
-			}
-			r.Xsym = rs
-
-			return 0
-
-		case obj.R_ADDRMIPSTLS,
-			obj.R_CALLMIPS,
-			obj.R_JMPMIPS:
-			r.Done = 0
-			r.Xsym = r.Sym
-			r.Xadd = r.Add
-			return 0
-		}
-	}
-
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-
-	case obj.R_ADDRMIPS,
-		obj.R_ADDRMIPSU:
-		t := ld.Symaddr(r.Sym) + r.Add
-		o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:])
-		if r.Type == obj.R_ADDRMIPS {
-			*val = int64(o1&0xffff0000 | uint32(t)&0xffff)
-		} else {
-			*val = int64(o1&0xffff0000 | uint32((t+1<<15)>>16)&0xffff)
-		}
-		return 0
-
-	case obj.R_ADDRMIPSTLS:
-		// thread pointer is at 0x7000 offset from the start of TLS data area
-		t := ld.Symaddr(r.Sym) + r.Add - 0x7000
-		if t < -32768 || t >= 32768 {
-			ld.Errorf(s, "TLS offset out of range %d", t)
-		}
-		o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:])
-		*val = int64(o1&0xffff0000 | uint32(t)&0xffff)
-		return 0
-
-	case obj.R_CALLMIPS,
-		obj.R_JMPMIPS:
-		// Low 26 bits = (S + A) >> 2
-		t := ld.Symaddr(r.Sym) + r.Add
-		o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:])
-		*val = int64(o1&0xfc000000 | uint32(t>>2)&^0xfc000000)
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	return -1
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	/* output symbol table */
-	ld.Symsize = 0
-
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		// TODO: rationalize
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-			}
-
-		case obj.Hplan9:
-			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
-		}
-
-		ld.Cseek(int64(symo))
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-				}
-				ld.Asmelfsym(ctxt)
-				ld.Cflush()
-				ld.Cwrite(ld.Elfstrdat)
-
-				if ld.Linkmode == ld.LinkExternal {
-					ld.Elfemitreloc(ctxt)
-				}
-			}
-
-		case obj.Hplan9:
-			ld.Asmplan9sym(ctxt)
-			ld.Cflush()
-
-			sym := ctxt.Syms.Lookup("pclntab", 0)
-			if sym != nil {
-				ld.Lcsize = int32(len(sym.P))
-				for i := 0; int32(i) < ld.Lcsize; i++ {
-					ld.Cput(sym.P[i])
-				}
-
-				ld.Cflush()
-			}
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f header\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-	case obj.Hplan9: /* plan 9 */
-		magic := uint32(4*18*18 + 7)
-		if ld.SysArch == sys.ArchMIPS64LE {
-			magic = uint32(4*26*26 + 7)
-		}
-		ld.Thearch.Lput(magic)                      /* magic */
-		ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */
-		ld.Thearch.Lput(uint32(ld.Segdata.Filelen))
-		ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
-		ld.Thearch.Lput(uint32(ld.Symsize))          /* nsyms */
-		ld.Thearch.Lput(uint32(ld.Entryvalue(ctxt))) /* va of entry */
-		ld.Thearch.Lput(0)
-		ld.Thearch.Lput(uint32(ld.Lcsize))
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hnacl:
-		ld.Asmbelf(ctxt, int64(symo))
-	}
-
-	ld.Cflush()
-	if *ld.FlagC {
-		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
-		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
-		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
-		fmt.Printf("symsize=%d\n", ld.Symsize)
-		fmt.Printf("lcsize=%d\n", ld.Lcsize)
-		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
-	}
-}
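
The address arithmetic in archreloc above splits a target address into a %hi16/%lo16 pair (with the +0x8000 rounding so the sign-extended low half reconstructs the address) and packs call targets into the low 26 bits of the instruction word. A standalone sketch of that arithmetic; the helper names are illustrative, not part of the linker:

package main

import "fmt"

// split mirrors R_ADDRMIPSU/R_ADDRMIPS above: the high half is computed
// as (t+0x8000)>>16 so that hi<<16 plus the sign-extended low half
// gives back t.
func split(t uint32) (hi, lo uint16) {
	return uint16((t + 1<<15) >> 16), uint16(t)
}

// join reassembles the address the way the CPU does: lui loads hi<<16,
// then the 16-bit immediate of the following instruction is sign-extended.
func join(hi, lo uint16) uint32 {
	return uint32(hi)<<16 + uint32(int32(int16(lo)))
}

// jumpField mirrors R_CALLMIPS/R_JMPMIPS above: the low 26 bits of the
// word-aligned target go into the jump instruction.
func jumpField(t uint32) uint32 {
	return (t >> 2) &^ 0xfc000000
}

func main() {
	for _, t := range []uint32{0x00012345, 0x00018000, 0x0001ffff} {
		hi, lo := split(t)
		fmt.Printf("t=%#08x hi=%#04x lo=%#04x join=%#08x jump26=%#07x\n",
			t, hi, lo, join(hi, lo), jumpField(t))
	}
}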
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/l.go
deleted file mode 100644
index 9586988..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/l.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips64/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips64/l.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips64
-
-// Writing object files.
-
-// cmd/9l/l.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-const (
-	maxAlign  = 32 // max data alignment
-	minAlign  = 1  // min data alignment
-	funcAlign = 8
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 29
-	dwarfRegLR = 31
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/obj.go
deleted file mode 100644
index 256fa84..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/mips64/obj.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips64/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/mips64/obj.go:1
-// Inferno utils/5l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package mips64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	if obj.GOARCH == "mips64le" {
-		ld.SysArch = sys.ArchMIPS64LE
-	} else {
-		ld.SysArch = sys.ArchMIPS64
-	}
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	if ld.SysArch == sys.ArchMIPS64LE {
-		ld.Thearch.Lput = ld.Lputl
-		ld.Thearch.Wput = ld.Wputl
-		ld.Thearch.Vput = ld.Vputl
-		ld.Thearch.Append16 = ld.Append16l
-		ld.Thearch.Append32 = ld.Append32l
-		ld.Thearch.Append64 = ld.Append64l
-	} else {
-		ld.Thearch.Lput = ld.Lputb
-		ld.Thearch.Wput = ld.Wputb
-		ld.Thearch.Vput = ld.Vputb
-		ld.Thearch.Append16 = ld.Append16b
-		ld.Thearch.Append32 = ld.Append32b
-		ld.Thearch.Append64 = ld.Append64b
-	}
-
-	ld.Thearch.Linuxdynld = "/lib64/ld64.so.1"
-
-	ld.Thearch.Freebsddynld = "XXX"
-	ld.Thearch.Openbsddynld = "XXX"
-	ld.Thearch.Netbsddynld = "XXX"
-	ld.Thearch.Dragonflydynld = "XXX"
-	ld.Thearch.Solarisdynld = "XXX"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hplan9: /* plan 9 */
-		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 16*1024 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 16 * 1024
-		}
-
-	case obj.Hlinux: /* mips64 elf */
-		ld.Elfinit(ctxt)
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hnacl:
-		ld.Elfinit(ctxt)
-		ld.HEADR = 0x10000
-		ld.Funcalign = 16
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x20000
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/asm.go
deleted file mode 100644
index 4f940c9..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/asm.go
+++ /dev/null
@@ -1,1001 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/asm.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"encoding/binary"
-	"fmt"
-	"log"
-)
-
-func genplt(ctxt *ld.Link) {
-	// The ppc64 ABI PLT has similar concepts to other
-	// architectures, but is laid out quite differently. When we
-	// see an R_PPC64_REL24 relocation to a dynamic symbol
-	// (indicating that the call needs to go through the PLT), we
-	// generate up to three stubs and reserve a PLT slot.
-	//
-	// 1) The call site will be bl x; nop (where the relocation
-	//    applies to the bl).  We rewrite this to bl x_stub; ld
-	//    r2,24(r1).  The ld is necessary because x_stub will save
-	//    r2 (the TOC pointer) at 24(r1) (the "TOC save slot").
-	//
-	// 2) We reserve space for a pointer in the .plt section (once
-	//    per referenced dynamic function).  .plt is a data
-	//    section filled solely by the dynamic linker (more like
-	//    .plt.got on other architectures).  Initially, the
-	//    dynamic linker will fill each slot with a pointer to the
-	//    corresponding x@plt entry point.
-	//
-	// 3) We generate the "call stub" x_stub (once per dynamic
-	//    function/object file pair).  This saves the TOC in the
-	//    TOC save slot, reads the function pointer from x's .plt
-	//    slot and calls it like any other global entry point
-	//    (including setting r12 to the function address).
-	//
-	// 4) We generate the "symbol resolver stub" x@plt (once per
-	//    dynamic function).  This is solely a branch to the glink
-	//    resolver stub.
-	//
-	// 5) We generate the glink resolver stub (only once).  This
-	//    computes which symbol resolver stub we came through and
-	//    invokes the dynamic resolver via a pointer provided by
-	//    the dynamic linker. This will patch up the .plt slot to
-	//    point directly at the function so future calls go
-	//    straight from the call stub to the real function, and
-	//    then call the function.
-
-	// NOTE: It's possible we could make ppc64 closer to other
-	// architectures: ppc64's .plt is like .plt.got on other
-	// platforms and ppc64's .glink is like .plt on other
-	// platforms.
-
-	// Find all R_PPC64_REL24 relocations that reference dynamic
-	// imports. Reserve PLT entries for these symbols and
-	// generate call stubs. The call stubs need to live in .text,
-	// which is why we need to do this pass this early.
-	//
-	// This assumes "case 1" from the ABI, where the caller needs
-	// us to save and restore the TOC pointer.
-	for _, s := range ctxt.Textp {
-		for i := range s.R {
-			r := &s.R[i]
-			if r.Type != 256+ld.R_PPC64_REL24 || r.Sym.Type != obj.SDYNIMPORT {
-				continue
-			}
-
-			// Reserve PLT entry and generate symbol
-			// resolver
-			addpltsym(ctxt, r.Sym)
-
-			// Generate call stub
-			n := fmt.Sprintf("%s.%s", s.Name, r.Sym.Name)
-
-			stub := ctxt.Syms.Lookup(n, 0)
-			if s.Attr.Reachable() {
-				stub.Attr |= ld.AttrReachable
-			}
-			if stub.Size == 0 {
-				// Need outer to resolve .TOC.
-				stub.Outer = s
-				ctxt.Textp = append(ctxt.Textp, stub)
-				gencallstub(ctxt, 1, stub, r.Sym)
-			}
-
-			// Update the relocation to use the call stub
-			r.Sym = stub
-
-			// Restore TOC after bl. The compiler put a
-			// nop here for us to overwrite.
-			const o1 = 0xe8410018 // ld r2,24(r1)
-			ctxt.Arch.ByteOrder.PutUint32(s.P[r.Off+4:], o1)
-		}
-	}
-}
-
-func genaddmoduledata(ctxt *ld.Link) {
-	addmoduledata := ctxt.Syms.ROLookup("runtime.addmoduledata", 0)
-	if addmoduledata.Type == obj.STEXT {
-		return
-	}
-	addmoduledata.Attr |= ld.AttrReachable
-	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
-	initfunc.Type = obj.STEXT
-	initfunc.Attr |= ld.AttrLocal
-	initfunc.Attr |= ld.AttrReachable
-	o := func(op uint32) {
-		ld.Adduint32(ctxt, initfunc, op)
-	}
-	// addis r2, r12, .TOC.-func@ha
-	rel := ld.Addrel(initfunc)
-	rel.Off = int32(initfunc.Size)
-	rel.Siz = 8
-	rel.Sym = ctxt.Syms.Lookup(".TOC.", 0)
-	rel.Type = obj.R_ADDRPOWER_PCREL
-	o(0x3c4c0000)
-	// addi r2, r2, .TOC.-func@l
-	o(0x38420000)
-	// mflr r31
-	o(0x7c0802a6)
-	// stdu r31, -32(r1)
-	o(0xf801ffe1)
-	// addis r3, r2, local.moduledata@got@ha
-	rel = ld.Addrel(initfunc)
-	rel.Off = int32(initfunc.Size)
-	rel.Siz = 8
-	rel.Sym = ctxt.Syms.Lookup("local.moduledata", 0)
-	rel.Type = obj.R_ADDRPOWER_GOT
-	o(0x3c620000)
-	// ld r3, local.moduledata@got@l(r3)
-	o(0xe8630000)
-	// bl runtime.addmoduledata
-	rel = ld.Addrel(initfunc)
-	rel.Off = int32(initfunc.Size)
-	rel.Siz = 4
-	rel.Sym = addmoduledata
-	rel.Type = obj.R_CALLPOWER
-	o(0x48000001)
-	// nop
-	o(0x60000000)
-	// ld r31, 0(r1)
-	o(0xe8010000)
-	// mtlr r31
-	o(0x7c0803a6)
-	// addi r1,r1,32
-	o(0x38210020)
-	// blr
-	o(0x4e800020)
-
-	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
-	ctxt.Textp = append(ctxt.Textp, initfunc)
-	initarray_entry.Attr |= ld.AttrReachable
-	initarray_entry.Attr |= ld.AttrLocal
-	initarray_entry.Type = obj.SINITARR
-	ld.Addaddr(ctxt, initarray_entry, initfunc)
-}
-
-func gentext(ctxt *ld.Link) {
-	if ctxt.DynlinkingGo() {
-		genaddmoduledata(ctxt)
-	}
-
-	if ld.Linkmode == ld.LinkInternal {
-		genplt(ctxt)
-	}
-}
-
-// Construct a call stub in stub that calls symbol targ via its PLT
-// entry.
-func gencallstub(ctxt *ld.Link, abicase int, stub *ld.Symbol, targ *ld.Symbol) {
-	if abicase != 1 {
-		// If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC
-		// relocations, we'll need to implement cases 2 and 3.
-		log.Fatalf("gencallstub only implements case 1 calls")
-	}
-
-	plt := ctxt.Syms.Lookup(".plt", 0)
-
-	stub.Type = obj.STEXT
-
-	// Save TOC pointer in TOC save slot
-	ld.Adduint32(ctxt, stub, 0xf8410018) // std r2,24(r1)
-
-	// Load the function pointer from the PLT.
-	r := ld.Addrel(stub)
-
-	r.Off = int32(stub.Size)
-	r.Sym = plt
-	r.Add = int64(targ.Plt)
-	r.Siz = 2
-	if ctxt.Arch.ByteOrder == binary.BigEndian {
-		r.Off += int32(r.Siz)
-	}
-	r.Type = obj.R_POWER_TOC
-	r.Variant = ld.RV_POWER_HA
-	ld.Adduint32(ctxt, stub, 0x3d820000) // addis r12,r2,targ@plt@toc@ha
-	r = ld.Addrel(stub)
-	r.Off = int32(stub.Size)
-	r.Sym = plt
-	r.Add = int64(targ.Plt)
-	r.Siz = 2
-	if ctxt.Arch.ByteOrder == binary.BigEndian {
-		r.Off += int32(r.Siz)
-	}
-	r.Type = obj.R_POWER_TOC
-	r.Variant = ld.RV_POWER_LO
-	ld.Adduint32(ctxt, stub, 0xe98c0000) // ld r12,targ@plt@toc@l(r12)
-
-	// Jump to the loaded pointer
-	ld.Adduint32(ctxt, stub, 0x7d8903a6) // mtctr r12
-	ld.Adduint32(ctxt, stub, 0x4e800420) // bctr
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	targ := r.Sym
-
-	switch r.Type {
-	default:
-		if r.Type >= 256 {
-			ld.Errorf(s, "unexpected relocation type %d", r.Type)
-			return false
-		}
-
-		// Handle relocations found in ELF object files.
-	case 256 + ld.R_PPC64_REL24:
-		r.Type = obj.R_CALLPOWER
-
-		// This is a local call, so the caller isn't setting
-		// up r12 and r2 is the same for the caller and
-		// callee. Hence, we need to go to the local entry
-		// point.  (If we don't do this, the callee will try
-		// to use r12 to compute r2.)
-		r.Add += int64(r.Sym.Localentry) * 4
-
-		if targ.Type == obj.SDYNIMPORT {
-			// Should have been handled in elfsetupplt
-			ld.Errorf(s, "unexpected R_PPC64_REL24 for dyn import")
-		}
-
-		return true
-
-	case 256 + ld.R_PPC_REL32:
-		r.Type = obj.R_PCREL
-		r.Add += 4
-
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_PPC_REL32 for dyn import")
-		}
-
-		return true
-
-	case 256 + ld.R_PPC64_ADDR64:
-		r.Type = obj.R_ADDR
-		if targ.Type == obj.SDYNIMPORT {
-			// These happen in .toc sections
-			ld.Adddynsym(ctxt, targ)
-
-			rela := ctxt.Syms.Lookup(".rela", 0)
-			ld.Addaddrplus(ctxt, rela, s, int64(r.Off))
-			ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64))
-			ld.Adduint64(ctxt, rela, uint64(r.Add))
-			r.Type = 256 // ignore during relocsym
-		}
-
-		return true
-
-	case 256 + ld.R_PPC64_TOC16:
-		r.Type = obj.R_POWER_TOC
-		r.Variant = ld.RV_POWER_LO | ld.RV_CHECK_OVERFLOW
-		return true
-
-	case 256 + ld.R_PPC64_TOC16_LO:
-		r.Type = obj.R_POWER_TOC
-		r.Variant = ld.RV_POWER_LO
-		return true
-
-	case 256 + ld.R_PPC64_TOC16_HA:
-		r.Type = obj.R_POWER_TOC
-		r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW
-		return true
-
-	case 256 + ld.R_PPC64_TOC16_HI:
-		r.Type = obj.R_POWER_TOC
-		r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW
-		return true
-
-	case 256 + ld.R_PPC64_TOC16_DS:
-		r.Type = obj.R_POWER_TOC
-		r.Variant = ld.RV_POWER_DS | ld.RV_CHECK_OVERFLOW
-		return true
-
-	case 256 + ld.R_PPC64_TOC16_LO_DS:
-		r.Type = obj.R_POWER_TOC
-		r.Variant = ld.RV_POWER_DS
-		return true
-
-	case 256 + ld.R_PPC64_REL16_LO:
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_POWER_LO
-		r.Add += 2 // Compensate for relocation size of 2
-		return true
-
-	case 256 + ld.R_PPC64_REL16_HI:
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW
-		r.Add += 2
-		return true
-
-	case 256 + ld.R_PPC64_REL16_HA:
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW
-		r.Add += 2
-		return true
-	}
-
-	// Handle references to ELF symbols from our own object files.
-	if targ.Type != obj.SDYNIMPORT {
-		return true
-	}
-
-	// TODO(austin): Translate our relocations to ELF
-
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Vput(uint64(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		switch r.Siz {
-		case 4:
-			ld.Thearch.Vput(ld.R_PPC64_ADDR32 | uint64(elfsym)<<32)
-		case 8:
-			ld.Thearch.Vput(ld.R_PPC64_ADDR64 | uint64(elfsym)<<32)
-		default:
-			return -1
-		}
-
-	case obj.R_POWER_TLS:
-		ld.Thearch.Vput(ld.R_PPC64_TLS | uint64(elfsym)<<32)
-
-	case obj.R_POWER_TLS_LE:
-		ld.Thearch.Vput(ld.R_PPC64_TPREL16 | uint64(elfsym)<<32)
-
-	case obj.R_POWER_TLS_IE:
-		ld.Thearch.Vput(ld.R_PPC64_GOT_TPREL16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_GOT_TPREL16_LO_DS | uint64(elfsym)<<32)
-
-	case obj.R_ADDRPOWER:
-		ld.Thearch.Vput(ld.R_PPC64_ADDR16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_ADDR16_LO | uint64(elfsym)<<32)
-
-	case obj.R_ADDRPOWER_DS:
-		ld.Thearch.Vput(ld.R_PPC64_ADDR16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_ADDR16_LO_DS | uint64(elfsym)<<32)
-
-	case obj.R_ADDRPOWER_GOT:
-		ld.Thearch.Vput(ld.R_PPC64_GOT16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_GOT16_LO_DS | uint64(elfsym)<<32)
-
-	case obj.R_ADDRPOWER_PCREL:
-		ld.Thearch.Vput(ld.R_PPC64_REL16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_REL16_LO | uint64(elfsym)<<32)
-		r.Xadd += 4
-
-	case obj.R_ADDRPOWER_TOCREL:
-		ld.Thearch.Vput(ld.R_PPC64_TOC16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_TOC16_LO | uint64(elfsym)<<32)
-
-	case obj.R_ADDRPOWER_TOCREL_DS:
-		ld.Thearch.Vput(ld.R_PPC64_TOC16_HA | uint64(elfsym)<<32)
-		ld.Thearch.Vput(uint64(r.Xadd))
-		ld.Thearch.Vput(uint64(sectoff + 4))
-		ld.Thearch.Vput(ld.R_PPC64_TOC16_LO_DS | uint64(elfsym)<<32)
-
-	case obj.R_CALLPOWER:
-		if r.Siz != 4 {
-			return -1
-		}
-		ld.Thearch.Vput(ld.R_PPC64_REL24 | uint64(elfsym)<<32)
-
-	}
-	ld.Thearch.Vput(uint64(r.Xadd))
-
-	return 0
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	plt := ctxt.Syms.Lookup(".plt", 0)
-	if plt.Size == 0 {
-		// The dynamic linker stores the address of the
-		// dynamic resolver and the DSO identifier in the two
-		// doublewords at the beginning of the .plt section
-		// before the PLT array. Reserve space for these.
-		plt.Size = 16
-	}
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	return -1
-}
-
-// Return the value of .TOC. for symbol s
-func symtoc(ctxt *ld.Link, s *ld.Symbol) int64 {
-	var toc *ld.Symbol
-
-	if s.Outer != nil {
-		toc = ctxt.Syms.ROLookup(".TOC.", int(s.Outer.Version))
-	} else {
-		toc = ctxt.Syms.ROLookup(".TOC.", int(s.Version))
-	}
-
-	if toc == nil {
-		ld.Errorf(s, "TOC-relative relocation in object without .TOC.")
-		return 0
-	}
-
-	return toc.Value
-}
-
-func archrelocaddr(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	var o1, o2 uint32
-	if ctxt.Arch.ByteOrder == binary.BigEndian {
-		o1 = uint32(*val >> 32)
-		o2 = uint32(*val)
-	} else {
-		o1 = uint32(*val)
-		o2 = uint32(*val >> 32)
-	}
-
-	// We are spreading a 31-bit address across two instructions, putting the
-	// high (adjusted) part in the low 16 bits of the first instruction and the
-	// low part in the low 16 bits of the second instruction, or, in the DS case,
-	// bits 15-2 (inclusive) of the address into bits 15-2 of the second
-	// instruction (it is an error in this case if the low 2 bits of the address
-	// are non-zero).
-
-	t := ld.Symaddr(r.Sym) + r.Add
-	if t < 0 || t >= 1<<31 {
-		ld.Errorf(s, "relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym))
-	}
-	if t&0x8000 != 0 {
-		t += 0x10000
-	}
-
-	switch r.Type {
-	case obj.R_ADDRPOWER:
-		o1 |= (uint32(t) >> 16) & 0xffff
-		o2 |= uint32(t) & 0xffff
-
-	case obj.R_ADDRPOWER_DS:
-		o1 |= (uint32(t) >> 16) & 0xffff
-		if t&3 != 0 {
-			ld.Errorf(s, "bad DS reloc for %s: %d", s.Name, ld.Symaddr(r.Sym))
-		}
-		o2 |= uint32(t) & 0xfffc
-
-	default:
-		return -1
-	}
-
-	if ctxt.Arch.ByteOrder == binary.BigEndian {
-		*val = int64(o1)<<32 | int64(o2)
-	} else {
-		*val = int64(o2)<<32 | int64(o1)
-	}
-	return 0
-}
-
-// resolve direct jump relocation r in s, and add trampoline if necessary
-func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) {
-
-	t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
-	switch r.Type {
-	case obj.R_CALLPOWER:
-
-		// If branch offset is too far then create a trampoline.
-
-		if int64(int32(t<<6)>>6) != t || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) {
-			var tramp *ld.Symbol
-			for i := 0; ; i++ {
-
-				// Using r.Add as part of the name is significant in functions like duffzero where the call
-				// target is at some offset within the function.  Calls to duff+8 and duff+256 must appear as
-				// distinct trampolines.
-
-				name := r.Sym.Name
-				if r.Add == 0 {
-					name = name + fmt.Sprintf("-tramp%d", i)
-				} else {
-					name = name + fmt.Sprintf("%+x-tramp%d", r.Add, i)
-				}
-
-				// Look up the trampoline in case it already exists
-
-				tramp = ctxt.Syms.Lookup(name, int(r.Sym.Version))
-				if tramp.Value == 0 {
-					break
-				}
-
-				t = ld.Symaddr(tramp) + r.Add - (s.Value + int64(r.Off))
-
-				// If the offset of the trampoline that has been found is within range, use it.
-				if int64(int32(t<<6)>>6) == t {
-					break
-				}
-			}
-			if tramp.Type == 0 {
-				ctxt.AddTramp(tramp)
-				tramp.Size = 16 // 4 instructions
-				tramp.P = make([]byte, tramp.Size)
-				t = ld.Symaddr(r.Sym) + r.Add
-				f := t & 0xffff0000
-				o1 := uint32(0x3fe00000 | (f >> 16)) // lis r31,trampaddr hi (r31 is temp reg)
-				f = t & 0xffff
-				o2 := uint32(0x63ff0000 | f) // ori r31,trampaddr lo
-				o3 := uint32(0x7fe903a6)     // mtctr
-				o4 := uint32(0x4e800420)     // bctr
-				ld.SysArch.ByteOrder.PutUint32(tramp.P, o1)
-				ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2)
-				ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3)
-				ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4)
-			}
-			r.Sym = tramp
-			r.Add = 0 // This was folded into the trampoline target address
-			r.Done = 0
-		}
-	default:
-		ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type)
-	}
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		switch r.Type {
-		default:
-			return -1
-
-		case obj.R_POWER_TLS, obj.R_POWER_TLS_LE, obj.R_POWER_TLS_IE:
-			r.Done = 0
-			// check Outer is nil, Type is TLSBSS?
-			r.Xadd = r.Add
-			r.Xsym = r.Sym
-			return 0
-
-		case obj.R_ADDRPOWER,
-			obj.R_ADDRPOWER_DS,
-			obj.R_ADDRPOWER_TOCREL,
-			obj.R_ADDRPOWER_TOCREL_DS,
-			obj.R_ADDRPOWER_GOT,
-			obj.R_ADDRPOWER_PCREL:
-			r.Done = 0
-
-			// set up addend for eventual relocation via outer symbol.
-			rs := r.Sym
-			r.Xadd = r.Add
-			for rs.Outer != nil {
-				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
-				rs = rs.Outer
-			}
-
-			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
-				ld.Errorf(s, "missing section for %s", rs.Name)
-			}
-			r.Xsym = rs
-
-			return 0
-
-		case obj.R_CALLPOWER:
-			r.Done = 0
-			r.Xsym = r.Sym
-			r.Xadd = r.Add
-			return 0
-		}
-	}
-
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-
-	case obj.R_ADDRPOWER, obj.R_ADDRPOWER_DS:
-		return archrelocaddr(ctxt, r, s, val)
-
-	case obj.R_CALLPOWER:
-		// Bits 6 through 29 = (S + A - P) >> 2
-
-		t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
-
-		if t&3 != 0 {
-			ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
-		}
-		// If branch offset is too far then create a trampoline.
-
-		if int64(int32(t<<6)>>6) != t {
-			ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t)
-		}
-		*val |= int64(uint32(t) &^ 0xfc000003)
-		return 0
-
-	case obj.R_POWER_TOC: // S + A - .TOC.
-		*val = ld.Symaddr(r.Sym) + r.Add - symtoc(ctxt, s)
-
-		return 0
-
-	case obj.R_POWER_TLS_LE:
-		// The thread pointer points 0x7000 bytes after the start of the
-		// thread local storage area as documented in section "3.7.2 TLS
-		// Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI
-		// Specification".
-		v := r.Sym.Value - 0x7000
-		if int64(int16(v)) != v {
-			ld.Errorf(s, "TLS offset out of range %d", v)
-		}
-		*val = (*val &^ 0xffff) | (v & 0xffff)
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	switch r.Variant & ld.RV_TYPE_MASK {
-	default:
-		ld.Errorf(s, "unexpected relocation variant %d", r.Variant)
-		fallthrough
-
-	case ld.RV_NONE:
-		return t
-
-	case ld.RV_POWER_LO:
-		if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
-			// Whether to check for signed or unsigned
-			// overflow depends on the instruction
-			var o1 uint32
-			if ctxt.Arch.ByteOrder == binary.BigEndian {
-				o1 = ld.Be32(s.P[r.Off-2:])
-			} else {
-				o1 = ld.Le32(s.P[r.Off:])
-			}
-			switch o1 >> 26 {
-			case 24, // ori
-				26, // xori
-				28: // andi
-				if t>>16 != 0 {
-					goto overflow
-				}
-
-			default:
-				if int64(int16(t)) != t {
-					goto overflow
-				}
-			}
-		}
-
-		return int64(int16(t))
-
-	case ld.RV_POWER_HA:
-		t += 0x8000
-		fallthrough
-
-		// Fallthrough
-	case ld.RV_POWER_HI:
-		t >>= 16
-
-		if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
-			// Whether to check for signed or unsigned
-			// overflow depends on the instruction
-			var o1 uint32
-			if ctxt.Arch.ByteOrder == binary.BigEndian {
-				o1 = ld.Be32(s.P[r.Off-2:])
-			} else {
-				o1 = ld.Le32(s.P[r.Off:])
-			}
-			switch o1 >> 26 {
-			case 25, // oris
-				27, // xoris
-				29: // andis
-				if t>>16 != 0 {
-					goto overflow
-				}
-
-			default:
-				if int64(int16(t)) != t {
-					goto overflow
-				}
-			}
-		}
-
-		return int64(int16(t))
-
-	case ld.RV_POWER_DS:
-		var o1 uint32
-		if ctxt.Arch.ByteOrder == binary.BigEndian {
-			o1 = uint32(ld.Be16(s.P[r.Off:]))
-		} else {
-			o1 = uint32(ld.Le16(s.P[r.Off:]))
-		}
-		if t&3 != 0 {
-			ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
-		}
-		if (r.Variant&ld.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t {
-			goto overflow
-		}
-		return int64(o1)&0x3 | int64(int16(t))
-	}
-
-overflow:
-	ld.Errorf(s, "relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t)
-	return t
-}
-
-func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Plt >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-
-	if ld.Iself {
-		plt := ctxt.Syms.Lookup(".plt", 0)
-		rela := ctxt.Syms.Lookup(".rela.plt", 0)
-		if plt.Size == 0 {
-			elfsetupplt(ctxt)
-		}
-
-		// Create the glink resolver if necessary
-		glink := ensureglinkresolver(ctxt)
-
-		// Write symbol resolver stub (just a branch to the
-		// glink resolver stub)
-		r := ld.Addrel(glink)
-
-		r.Sym = glink
-		r.Off = int32(glink.Size)
-		r.Siz = 4
-		r.Type = obj.R_CALLPOWER
-		ld.Adduint32(ctxt, glink, 0x48000000) // b .glink
-
-		// In the ppc64 ABI, the dynamic linker is responsible
-		// for writing the entire PLT.  We just need to
-		// reserve 8 bytes for each PLT entry and generate a
-		// JMP_SLOT dynamic relocation for it.
-		//
-		// TODO(austin): ABI v1 is different
-		s.Plt = int32(plt.Size)
-
-		plt.Size += 8
-
-		ld.Addaddrplus(ctxt, rela, plt, int64(s.Plt))
-		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_PPC64_JMP_SLOT))
-		ld.Adduint64(ctxt, rela, 0)
-	} else {
-		ld.Errorf(s, "addpltsym: unsupported binary format")
-	}
-}
-
-// Generate the glink resolver stub if necessary and return the .glink section
-func ensureglinkresolver(ctxt *ld.Link) *ld.Symbol {
-	glink := ctxt.Syms.Lookup(".glink", 0)
-	if glink.Size != 0 {
-		return glink
-	}
-
-	// This is essentially the resolver from the ppc64 ELF ABI.
-	// At entry, r12 holds the address of the symbol resolver stub
-	// for the target routine and the argument registers hold the
-	// arguments for the target routine.
-	//
-	// This stub is PIC, so first get the PC of label 1 into r11.
-	// Other things will be relative to this.
-	ld.Adduint32(ctxt, glink, 0x7c0802a6) // mflr r0
-	ld.Adduint32(ctxt, glink, 0x429f0005) // bcl 20,31,1f
-	ld.Adduint32(ctxt, glink, 0x7d6802a6) // 1: mflr r11
-	ld.Adduint32(ctxt, glink, 0x7c0803a6) // mtlr r0
-
-	// Compute the .plt array index from the entry point address.
-	// Because this is PIC, everything is relative to label 1b (in
-	// r11):
-	//   r0 = ((r12 - r11) - (res_0 - r11)) / 4 = (r12 - res_0) / 4
-	ld.Adduint32(ctxt, glink, 0x3800ffd0) // li r0,-(res_0-1b)=-48
-	ld.Adduint32(ctxt, glink, 0x7c006214) // add r0,r0,r12
-	ld.Adduint32(ctxt, glink, 0x7c0b0050) // sub r0,r0,r11
-	ld.Adduint32(ctxt, glink, 0x7800f082) // srdi r0,r0,2
-
-	// r11 = address of the first byte of the PLT
-	r := ld.Addrel(glink)
-
-	r.Off = int32(glink.Size)
-	r.Sym = ctxt.Syms.Lookup(".plt", 0)
-	r.Siz = 8
-	r.Type = obj.R_ADDRPOWER
-
-	ld.Adduint32(ctxt, glink, 0x3d600000) // addis r11,0,.plt@ha
-	ld.Adduint32(ctxt, glink, 0x396b0000) // addi r11,r11,.plt@l
-
-	// Load r12 = dynamic resolver address and r11 = DSO
-	// identifier from the first two doublewords of the PLT.
-	ld.Adduint32(ctxt, glink, 0xe98b0000) // ld r12,0(r11)
-	ld.Adduint32(ctxt, glink, 0xe96b0008) // ld r11,8(r11)
-
-	// Jump to the dynamic resolver
-	ld.Adduint32(ctxt, glink, 0x7d8903a6) // mtctr r12
-	ld.Adduint32(ctxt, glink, 0x4e800420) // bctr
-
-	// The symbol resolvers must immediately follow.
-	//   res_0:
-
-	// Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes
-	// before the first symbol resolver stub.
-	s := ctxt.Syms.Lookup(".dynamic", 0)
-
-	ld.Elfwritedynentsymplus(ctxt, s, ld.DT_PPC64_GLINK, glink, glink.Size-32)
-
-	return glink
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	for sect := ld.Segtext.Sect; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		// Handle additional text sections with Codeblk
-		if sect.Name == ".text" {
-			ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-		} else {
-			ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-		}
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	/* output symbol table */
-	ld.Symsize = 0
-
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		// TODO: rationalize
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-			}
-
-		case obj.Hplan9:
-			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
-		}
-
-		ld.Cseek(int64(symo))
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-				}
-				ld.Asmelfsym(ctxt)
-				ld.Cflush()
-				ld.Cwrite(ld.Elfstrdat)
-
-				if ld.Linkmode == ld.LinkExternal {
-					ld.Elfemitreloc(ctxt)
-				}
-			}
-
-		case obj.Hplan9:
-			ld.Asmplan9sym(ctxt)
-			ld.Cflush()
-
-			sym := ctxt.Syms.Lookup("pclntab", 0)
-			if sym != nil {
-				ld.Lcsize = int32(len(sym.P))
-				for i := 0; int32(i) < ld.Lcsize; i++ {
-					ld.Cput(sym.P[i])
-				}
-
-				ld.Cflush()
-			}
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f header\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-	case obj.Hplan9: /* plan 9 */
-		ld.Thearch.Lput(0x647)                      /* magic */
-		ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */
-		ld.Thearch.Lput(uint32(ld.Segdata.Filelen))
-		ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
-		ld.Thearch.Lput(uint32(ld.Symsize))          /* nsyms */
-		ld.Thearch.Lput(uint32(ld.Entryvalue(ctxt))) /* va of entry */
-		ld.Thearch.Lput(0)
-		ld.Thearch.Lput(uint32(ld.Lcsize))
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hnacl:
-		ld.Asmbelf(ctxt, int64(symo))
-	}
-
-	ld.Cflush()
-	if *ld.FlagC {
-		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
-		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
-		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
-		fmt.Printf("symsize=%d\n", ld.Symsize)
-		fmt.Printf("lcsize=%d\n", ld.Lcsize)
-		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
-	}
-}
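
Two pieces of arithmetic recur in the ppc64 code above: the @ha adjustment in archrelocaddr (add 0x10000 when bit 15 of the address is set, so the high half compensates for the sign-extended low half) and the signed 26-bit range check used by trampoline and the R_CALLPOWER case of archreloc. A standalone sketch of both, with illustrative helper names:

package main

import "fmt"

// haLo mirrors the R_ADDRPOWER case of archrelocaddr above: @l is
// sign-extended by the addi, so @ha is bumped when bit 15 of the
// address is set.
func haLo(t int64) (ha, lo uint16) {
	if t&0x8000 != 0 {
		t += 0x10000
	}
	return uint16(t >> 16), uint16(t)
}

// fitsBl reports whether a byte displacement fits the signed 26-bit
// field of a bl instruction (bits 6 through 29 hold the displacement
// shifted right by 2), the same check trampoline applies before it
// emits a trampoline.
func fitsBl(t int64) bool {
	return int64(int32(t<<6)>>6) == t
}

func main() {
	ha, lo := haLo(0x18000)
	fmt.Printf("ha=%#x lo=%#x\n", ha, lo) // ha=0x2, lo=0x8000: 0x2<<16 - 0x8000 = 0x18000
	fmt.Println(fitsBl(1<<25 - 4))        // true: just inside the ±32MB branch range
	fmt.Println(fitsBl(1 << 25))          // false: out of range, needs a trampoline
}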
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/l.go
deleted file mode 100644
index a971dc1..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/l.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/l.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-// Writing object files.
-
-// cmd/9l/l.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-const (
-	maxAlign  = 32 // max data alignment
-	minAlign  = 1  // min data alignment
-	funcAlign = 8
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 1
-	dwarfRegLR = 65
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/obj.go
deleted file mode 100644
index 2fff147..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/ppc64/obj.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/ppc64/obj.go:1
-// Inferno utils/5l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package ppc64
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	if obj.GOARCH == "ppc64le" {
-		ld.SysArch = sys.ArchPPC64LE
-	} else {
-		ld.SysArch = sys.ArchPPC64
-	}
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Trampoline = trampoline
-	ld.Thearch.Machoreloc1 = machoreloc1
-	if ld.SysArch == sys.ArchPPC64LE {
-		ld.Thearch.Lput = ld.Lputl
-		ld.Thearch.Wput = ld.Wputl
-		ld.Thearch.Vput = ld.Vputl
-		ld.Thearch.Append16 = ld.Append16l
-		ld.Thearch.Append32 = ld.Append32l
-		ld.Thearch.Append64 = ld.Append64l
-	} else {
-		ld.Thearch.Lput = ld.Lputb
-		ld.Thearch.Wput = ld.Wputb
-		ld.Thearch.Vput = ld.Vputb
-		ld.Thearch.Append16 = ld.Append16b
-		ld.Thearch.Append32 = ld.Append32b
-		ld.Thearch.Append64 = ld.Append64b
-	}
-
-	// TODO(austin): ABI v1 uses /usr/lib/ld.so.1
-	ld.Thearch.Linuxdynld = "/lib64/ld64.so.1"
-
-	ld.Thearch.Freebsddynld = "XXX"
-	ld.Thearch.Openbsddynld = "XXX"
-	ld.Thearch.Netbsddynld = "XXX"
-	ld.Thearch.Dragonflydynld = "XXX"
-	ld.Thearch.Solarisdynld = "XXX"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hplan9: /* plan 9 */
-		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4128
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hlinux: /* ppc64 elf */
-		if ld.SysArch == sys.ArchPPC64 {
-			*ld.FlagD = true // TODO(austin): ELF ABI v1 not supported yet
-		}
-		ld.Elfinit(ctxt)
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hnacl:
-		ld.Elfinit(ctxt)
-		ld.HEADR = 0x10000
-		ld.Funcalign = 16
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x20000
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
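The archinit functions deleted above and below all end with the same flag check: an explicit data address (-D) is reported as ignored whenever segment rounding (-R) is in effect. A minimal standalone Go sketch of that check, with illustrative names only:

package main

import "fmt"

// checkDataAddr mirrors the final check in the deleted archinit functions:
// a non-zero -D value combined with a non-zero -R value is reported as ignored.
func checkDataAddr(dataAddr, round int64) {
	if dataAddr != 0 && round != 0 {
		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n",
			uint64(dataAddr), uint32(round))
	}
}

func main() {
	checkDataAddr(0x20000, 0x10000) // warns
	checkDataAddr(0, 0x10000)       // silent: -D was left at its default
}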
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/asm.go
deleted file mode 100644
index 13b8d4d..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/asm.go
+++ /dev/null
@@ -1,601 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/s390x/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/s390x/asm.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"debug/elf"
-	"fmt"
-)
-
-// gentext generates assembly to append the local moduledata to the global
-// moduledata linked list at initialization time. This is only done if the runtime
-// is in a different module.
-//
-// <go.link.addmoduledata>:
-// 	larl  %r2, <local.moduledata>
-// 	jg    <runtime.addmoduledata@plt>
-//	undef
-//
-// The job of appending the moduledata is delegated to runtime.addmoduledata.
-func gentext(ctxt *ld.Link) {
-	if !ctxt.DynlinkingGo() {
-		return
-	}
-	addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	if addmoduledata.Type == obj.STEXT {
-		// we're linking a module containing the runtime -> no need for
-		// an init function
-		return
-	}
-	addmoduledata.Attr |= ld.AttrReachable
-	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
-	initfunc.Type = obj.STEXT
-	initfunc.Attr |= ld.AttrLocal
-	initfunc.Attr |= ld.AttrReachable
-
-	// larl %r2, <local.moduledata>
-	ld.Adduint8(ctxt, initfunc, 0xc0)
-	ld.Adduint8(ctxt, initfunc, 0x20)
-	lmd := ld.Addrel(initfunc)
-	lmd.Off = int32(initfunc.Size)
-	lmd.Siz = 4
-	lmd.Sym = ctxt.Moduledata
-	lmd.Type = obj.R_PCREL
-	lmd.Variant = ld.RV_390_DBL
-	lmd.Add = 2 + int64(lmd.Siz)
-	ld.Adduint32(ctxt, initfunc, 0)
-
-	// jg <runtime.addmoduledata[@plt]>
-	ld.Adduint8(ctxt, initfunc, 0xc0)
-	ld.Adduint8(ctxt, initfunc, 0xf4)
-	rel := ld.Addrel(initfunc)
-	rel.Off = int32(initfunc.Size)
-	rel.Siz = 4
-	rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	rel.Type = obj.R_CALL
-	rel.Variant = ld.RV_390_DBL
-	rel.Add = 2 + int64(rel.Siz)
-	ld.Adduint32(ctxt, initfunc, 0)
-
-	// undef (for debugging)
-	ld.Adduint32(ctxt, initfunc, 0)
-
-	ctxt.Textp = append(ctxt.Textp, initfunc)
-	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
-	initarray_entry.Attr |= ld.AttrLocal
-	initarray_entry.Attr |= ld.AttrReachable
-	initarray_entry.Type = obj.SINITARR
-	ld.Addaddr(ctxt, initarray_entry, initfunc)
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	targ := r.Sym
-
-	switch r.Type {
-	default:
-		if r.Type >= 256 {
-			ld.Errorf(s, "unexpected relocation type %d", r.Type)
-			return false
-		}
-
-		// Handle relocations found in ELF object files.
-	case 256 + ld.R_390_12,
-		256 + ld.R_390_GOT12:
-		ld.Errorf(s, "s390x 12-bit relocations have not been implemented (relocation type %d)", r.Type-256)
-		return false
-
-	case 256 + ld.R_390_8,
-		256 + ld.R_390_16,
-		256 + ld.R_390_32,
-		256 + ld.R_390_64:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_390_nn relocation for dynamic symbol %s", targ.Name)
-		}
-		r.Type = obj.R_ADDR
-		return true
-
-	case 256 + ld.R_390_PC16,
-		256 + ld.R_390_PC32,
-		256 + ld.R_390_PC64:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_390_PCnn relocation for dynamic symbol %s", targ.Name)
-		}
-		if targ.Type == 0 || targ.Type == obj.SXREF {
-			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
-		}
-		r.Type = obj.R_PCREL
-		r.Add += int64(r.Siz)
-		return true
-
-	case 256 + ld.R_390_GOT16,
-		256 + ld.R_390_GOT32,
-		256 + ld.R_390_GOT64:
-		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
-		return true
-
-	case 256 + ld.R_390_PLT16DBL,
-		256 + ld.R_390_PLT32DBL:
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_390_DBL
-		r.Add += int64(r.Siz)
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add += int64(targ.Plt)
-		}
-		return true
-
-	case 256 + ld.R_390_PLT32,
-		256 + ld.R_390_PLT64:
-		r.Type = obj.R_PCREL
-		r.Add += int64(r.Siz)
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add += int64(targ.Plt)
-		}
-		return true
-
-	case 256 + ld.R_390_COPY:
-		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
-		return false
-
-	case 256 + ld.R_390_GLOB_DAT:
-		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
-		return false
-
-	case 256 + ld.R_390_JMP_SLOT:
-		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
-		return false
-
-	case 256 + ld.R_390_RELATIVE:
-		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
-		return false
-
-	case 256 + ld.R_390_GOTOFF:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_390_GOTOFF relocation for dynamic symbol %s", targ.Name)
-		}
-		r.Type = obj.R_GOTOFF
-		return true
-
-	case 256 + ld.R_390_GOTPC:
-		r.Type = obj.R_PCREL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += int64(r.Siz)
-		return true
-
-	case 256 + ld.R_390_PC16DBL,
-		256 + ld.R_390_PC32DBL:
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_390_DBL
-		r.Add += int64(r.Siz)
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_390_PCnnDBL relocation for dynamic symbol %s", targ.Name)
-		}
-		return true
-
-	case 256 + ld.R_390_GOTPCDBL:
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_390_DBL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += int64(r.Siz)
-		return true
-
-	case 256 + ld.R_390_GOTENT:
-		addgotsym(ctxt, targ)
-
-		r.Type = obj.R_PCREL
-		r.Variant = ld.RV_390_DBL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += int64(targ.Got)
-		r.Add += int64(r.Siz)
-		return true
-	}
-	// Handle references to ELF symbols from our own object files.
-	if targ.Type != obj.SDYNIMPORT {
-		return true
-	}
-
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Vput(uint64(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_TLS_LE:
-		switch r.Siz {
-		default:
-			return -1
-		case 4:
-			// WARNING - silently ignored by linker in ELF64
-			ld.Thearch.Vput(ld.R_390_TLS_LE32 | uint64(elfsym)<<32)
-		case 8:
-			// WARNING - silently ignored by linker in ELF32
-			ld.Thearch.Vput(ld.R_390_TLS_LE64 | uint64(elfsym)<<32)
-		}
-
-	case obj.R_TLS_IE:
-		switch r.Siz {
-		default:
-			return -1
-		case 4:
-			ld.Thearch.Vput(ld.R_390_TLS_IEENT | uint64(elfsym)<<32)
-		}
-
-	case obj.R_ADDR:
-		switch r.Siz {
-		default:
-			return -1
-		case 4:
-			ld.Thearch.Vput(ld.R_390_32 | uint64(elfsym)<<32)
-		case 8:
-			ld.Thearch.Vput(ld.R_390_64 | uint64(elfsym)<<32)
-		}
-
-	case obj.R_GOTPCREL:
-		if r.Siz == 4 {
-			ld.Thearch.Vput(ld.R_390_GOTENT | uint64(elfsym)<<32)
-		} else {
-			return -1
-		}
-
-	case obj.R_PCREL, obj.R_PCRELDBL, obj.R_CALL:
-		elfrel := ld.R_390_NONE
-		isdbl := r.Variant&ld.RV_TYPE_MASK == ld.RV_390_DBL
-		// TODO(mundaym): all DBL style relocations should be
-		// signalled using the variant - see issue 14218.
-		switch r.Type {
-		case obj.R_PCRELDBL, obj.R_CALL:
-			isdbl = true
-		}
-		if r.Xsym.Type == obj.SDYNIMPORT && (r.Xsym.ElfType == elf.STT_FUNC || r.Type == obj.R_CALL) {
-			if isdbl {
-				switch r.Siz {
-				case 2:
-					elfrel = ld.R_390_PLT16DBL
-				case 4:
-					elfrel = ld.R_390_PLT32DBL
-				}
-			} else {
-				switch r.Siz {
-				case 4:
-					elfrel = ld.R_390_PLT32
-				case 8:
-					elfrel = ld.R_390_PLT64
-				}
-			}
-		} else {
-			if isdbl {
-				switch r.Siz {
-				case 2:
-					elfrel = ld.R_390_PC16DBL
-				case 4:
-					elfrel = ld.R_390_PC32DBL
-				}
-			} else {
-				switch r.Siz {
-				case 2:
-					elfrel = ld.R_390_PC16
-				case 4:
-					elfrel = ld.R_390_PC32
-				case 8:
-					elfrel = ld.R_390_PC64
-				}
-			}
-		}
-		if elfrel == ld.R_390_NONE {
-			return -1 // unsupported size/dbl combination
-		}
-		ld.Thearch.Vput(uint64(elfrel) | uint64(elfsym)<<32)
-	}
-
-	ld.Thearch.Vput(uint64(r.Xadd))
-	return 0
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	plt := ctxt.Syms.Lookup(".plt", 0)
-	got := ctxt.Syms.Lookup(".got", 0)
-	if plt.Size == 0 {
-		// stg     %r1,56(%r15)
-		ld.Adduint8(ctxt, plt, 0xe3)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0xf0)
-		ld.Adduint8(ctxt, plt, 0x38)
-		ld.Adduint8(ctxt, plt, 0x00)
-		ld.Adduint8(ctxt, plt, 0x24)
-		// larl    %r1,_GLOBAL_OFFSET_TABLE_
-		ld.Adduint8(ctxt, plt, 0xc0)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Addpcrelplus(ctxt, plt, got, 6)
-		// mvc     48(8,%r15),8(%r1)
-		ld.Adduint8(ctxt, plt, 0xd2)
-		ld.Adduint8(ctxt, plt, 0x07)
-		ld.Adduint8(ctxt, plt, 0xf0)
-		ld.Adduint8(ctxt, plt, 0x30)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x08)
-		// lg      %r1,16(%r1)
-		ld.Adduint8(ctxt, plt, 0xe3)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x00)
-		ld.Adduint8(ctxt, plt, 0x04)
-		// br      %r1
-		ld.Adduint8(ctxt, plt, 0x07)
-		ld.Adduint8(ctxt, plt, 0xf1)
-		// nopr    %r0
-		ld.Adduint8(ctxt, plt, 0x07)
-		ld.Adduint8(ctxt, plt, 0x00)
-		// nopr    %r0
-		ld.Adduint8(ctxt, plt, 0x07)
-		ld.Adduint8(ctxt, plt, 0x00)
-		// nopr    %r0
-		ld.Adduint8(ctxt, plt, 0x07)
-		ld.Adduint8(ctxt, plt, 0x00)
-
-		// assume got->size == 0 too
-		ld.Addaddrplus(ctxt, got, ctxt.Syms.Lookup(".dynamic", 0), 0)
-
-		ld.Adduint64(ctxt, got, 0)
-		ld.Adduint64(ctxt, got, 0)
-	}
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	return -1
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		return -1
-	}
-
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	switch r.Variant & ld.RV_TYPE_MASK {
-	default:
-		ld.Errorf(s, "unexpected relocation variant %d", r.Variant)
-		return t
-
-	case ld.RV_NONE:
-		return t
-
-	case ld.RV_390_DBL:
-		if (t & 1) != 0 {
-			ld.Errorf(s, "%s+%v is not 2-byte aligned", r.Sym.Name, r.Sym.Value)
-		}
-		return t >> 1
-	}
-}
-
-func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Plt >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-
-	if ld.Iself {
-		plt := ctxt.Syms.Lookup(".plt", 0)
-		got := ctxt.Syms.Lookup(".got", 0)
-		rela := ctxt.Syms.Lookup(".rela.plt", 0)
-		if plt.Size == 0 {
-			elfsetupplt(ctxt)
-		}
-		// larl    %r1,_GLOBAL_OFFSET_TABLE_+index
-
-		ld.Adduint8(ctxt, plt, 0xc0)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Addpcrelplus(ctxt, plt, got, got.Size+6) // need variant?
-
-		// add to got: pointer to current pos in plt
-		ld.Addaddrplus(ctxt, got, plt, plt.Size+8) // weird but correct
-		// lg      %r1,0(%r1)
-		ld.Adduint8(ctxt, plt, 0xe3)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x00)
-		ld.Adduint8(ctxt, plt, 0x00)
-		ld.Adduint8(ctxt, plt, 0x04)
-		// br      %r1
-		ld.Adduint8(ctxt, plt, 0x07)
-		ld.Adduint8(ctxt, plt, 0xf1)
-		// basr    %r1,%r0
-		ld.Adduint8(ctxt, plt, 0x0d)
-		ld.Adduint8(ctxt, plt, 0x10)
-		// lgf     %r1,12(%r1)
-		ld.Adduint8(ctxt, plt, 0xe3)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x10)
-		ld.Adduint8(ctxt, plt, 0x0c)
-		ld.Adduint8(ctxt, plt, 0x00)
-		ld.Adduint8(ctxt, plt, 0x14)
-		// jg .plt
-		ld.Adduint8(ctxt, plt, 0xc0)
-		ld.Adduint8(ctxt, plt, 0xf4)
-
-		ld.Adduint32(ctxt, plt, uint32(-((plt.Size - 2) >> 1))) // roll-your-own relocation
-		//.plt index
-		ld.Adduint32(ctxt, plt, uint32(rela.Size)) // rela size before current entry
-
-		// rela
-		ld.Addaddrplus(ctxt, rela, got, got.Size-8)
-
-		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_JMP_SLOT))
-		ld.Adduint64(ctxt, rela, 0)
-
-		s.Plt = int32(plt.Size - 32)
-
-	} else {
-		ld.Errorf(s, "addpltsym: unsupported binary format")
-	}
-}
-
-func addgotsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Got >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-	got := ctxt.Syms.Lookup(".got", 0)
-	s.Got = int32(got.Size)
-	ld.Adduint64(ctxt, got, 0)
-
-	if ld.Iself {
-		rela := ctxt.Syms.Lookup(".rela", 0)
-		ld.Addaddrplus(ctxt, rela, got, int64(s.Got))
-		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_GLOB_DAT))
-		ld.Adduint64(ctxt, rela, 0)
-	} else {
-		ld.Errorf(s, "addgotsym: unsupported binary format")
-	}
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	/* output symbol table */
-	ld.Symsize = 0
-
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		if !ld.Iself {
-			ld.Errorf(nil, "unsupported executable format")
-		}
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-		symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-
-		ld.Cseek(int64(symo))
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-		}
-		ld.Asmelfsym(ctxt)
-		ld.Cflush()
-		ld.Cwrite(ld.Elfstrdat)
-
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
-		}
-
-		if ld.Linkmode == ld.LinkExternal {
-			ld.Elfemitreloc(ctxt)
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f header\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-		ld.Errorf(nil, "unsupported operating system")
-	case obj.Hlinux:
-		ld.Asmbelf(ctxt, int64(symo))
-	}
-
-	ld.Cflush()
-	if *ld.FlagC {
-		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
-		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
-		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
-		fmt.Printf("symsize=%d\n", ld.Symsize)
-		fmt.Printf("lcsize=%d\n", ld.Lcsize)
-		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
-	}
-}
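The archrelocvariant deleted above scales RV_390_DBL relocations: s390x PC-relative "DBL" instruction fields count halfwords, so the byte offset must be even and is shifted right by one before being written out. A minimal standalone Go sketch of that scaling (function and names are illustrative, not the linker's API):

package main

import "fmt"

// applyDBL mirrors the RV_390_DBL branch of the deleted archrelocvariant:
// reject odd byte offsets, otherwise return the offset in halfwords.
func applyDBL(byteOff int64) (int64, error) {
	if byteOff&1 != 0 {
		return 0, fmt.Errorf("offset %#x is not 2-byte aligned", byteOff)
	}
	return byteOff >> 1, nil
}

func main() {
	v, err := applyDBL(0x1234)
	fmt.Printf("%#x %v\n", v, err) // 0x91a <nil>
}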
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/l.go
deleted file mode 100644
index d7af028..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/l.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/s390x/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/s390x/l.go:1
-// Inferno utils/5l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-// Writing object files.
-
-// cmd/9l/l.h from Vita Nuova.
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-const (
-	maxAlign  = 32 // max data alignment
-	minAlign  = 2  // min data alignment
-	funcAlign = 16
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 15
-	dwarfRegLR = 14
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/obj.go
deleted file mode 100644
index 7ba70f2..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/s390x/obj.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/s390x/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/s390x/obj.go:1
-// Inferno utils/5l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package s390x
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	ld.SysArch = sys.ArchS390X
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb // in asm.go
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	ld.Thearch.Lput = ld.Lputb
-	ld.Thearch.Wput = ld.Wputb
-	ld.Thearch.Vput = ld.Vputb
-	ld.Thearch.Append16 = ld.Append16b
-	ld.Thearch.Append32 = ld.Append32b
-	ld.Thearch.Append64 = ld.Append64b
-
-	ld.Thearch.Linuxdynld = "/lib64/ld64.so.1"
-
-	// not relevant for s390x
-	ld.Thearch.Freebsddynld = "XXX"
-	ld.Thearch.Openbsddynld = "XXX"
-	ld.Thearch.Netbsddynld = "XXX"
-	ld.Thearch.Dragonflydynld = "XXX"
-	ld.Thearch.Solarisdynld = "XXX"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hlinux: // s390x ELF
-		ld.Elfinit(ctxt)
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x10000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/asm.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/asm.go
deleted file mode 100644
index f8e7aea..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/asm.go
+++ /dev/null
@@ -1,782 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/x86/asm.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/x86/asm.go:1
-// Inferno utils/8l/asm.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/8l/asm.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/ld"
-	"log"
-)
-
-// Append 4 bytes to s and create a R_CALL relocation targeting t to fill them in.
-func addcall(ctxt *ld.Link, s *ld.Symbol, t *ld.Symbol) {
-	s.Attr |= ld.AttrReachable
-	i := s.Size
-	s.Size += 4
-	ld.Symgrow(s, s.Size)
-	r := ld.Addrel(s)
-	r.Sym = t
-	r.Off = int32(i)
-	r.Type = obj.R_CALL
-	r.Siz = 4
-}
-
-func gentext(ctxt *ld.Link) {
-	if ctxt.DynlinkingGo() {
-		// We need get_pc_thunk.
-	} else {
-		switch ld.Buildmode {
-		case ld.BuildmodeCArchive:
-			if !ld.Iself {
-				return
-			}
-		case ld.BuildmodePIE, ld.BuildmodeCShared, ld.BuildmodePlugin:
-			// We need get_pc_thunk.
-		default:
-			return
-		}
-	}
-
-	// Generate little thunks that load the PC of the next instruction into a register.
-	thunks := make([]*ld.Symbol, 0, 7+len(ctxt.Textp))
-	for _, r := range [...]struct {
-		name string
-		num  uint8
-	}{
-		{"ax", 0},
-		{"cx", 1},
-		{"dx", 2},
-		{"bx", 3},
-		// sp
-		{"bp", 5},
-		{"si", 6},
-		{"di", 7},
-	} {
-		thunkfunc := ctxt.Syms.Lookup("__x86.get_pc_thunk."+r.name, 0)
-		thunkfunc.Type = obj.STEXT
-		thunkfunc.Attr |= ld.AttrLocal
-		thunkfunc.Attr |= ld.AttrReachable //TODO: remove?
-		o := func(op ...uint8) {
-			for _, op1 := range op {
-				ld.Adduint8(ctxt, thunkfunc, op1)
-			}
-		}
-		// 8b 04 24	mov    (%esp),%eax
-		// Destination register is in bits 3-5 of the middle byte, so add that in.
-		o(0x8b, 0x04+r.num<<3, 0x24)
-		// c3		ret
-		o(0xc3)
-
-		thunks = append(thunks, thunkfunc)
-	}
-	ctxt.Textp = append(thunks, ctxt.Textp...) // keep Textp in dependency order
-
-	addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
-	if addmoduledata.Type == obj.STEXT && ld.Buildmode != ld.BuildmodePlugin {
-		// we're linking a module containing the runtime -> no need for
-		// an init function
-		return
-	}
-
-	addmoduledata.Attr |= ld.AttrReachable
-
-	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
-	initfunc.Type = obj.STEXT
-	initfunc.Attr |= ld.AttrLocal
-	initfunc.Attr |= ld.AttrReachable
-	o := func(op ...uint8) {
-		for _, op1 := range op {
-			ld.Adduint8(ctxt, initfunc, op1)
-		}
-	}
-
-	// go.link.addmoduledata:
-	//      53                      push %ebx
-	//      e8 00 00 00 00          call __x86.get_pc_thunk.cx + R_CALL __x86.get_pc_thunk.cx
-	//      8d 81 00 00 00 00       lea 0x0(%ecx), %eax + R_PCREL ctxt.Moduledata
-	//      8d 99 00 00 00 00       lea 0x0(%ecx), %ebx + R_GOTPC _GLOBAL_OFFSET_TABLE_
-	//      e8 00 00 00 00          call runtime.addmoduledata@plt + R_CALL runtime.addmoduledata
-	//      5b                      pop %ebx
-	//      c3                      ret
-
-	o(0x53)
-
-	o(0xe8)
-	addcall(ctxt, initfunc, ctxt.Syms.Lookup("__x86.get_pc_thunk.cx", 0))
-
-	o(0x8d, 0x81)
-	ld.Addpcrelplus(ctxt, initfunc, ctxt.Moduledata, 6)
-
-	o(0x8d, 0x99)
-	i := initfunc.Size
-	initfunc.Size += 4
-	ld.Symgrow(initfunc, initfunc.Size)
-	r := ld.Addrel(initfunc)
-	r.Sym = ctxt.Syms.Lookup("_GLOBAL_OFFSET_TABLE_", 0)
-	r.Off = int32(i)
-	r.Type = obj.R_PCREL
-	r.Add = 12
-	r.Siz = 4
-
-	o(0xe8)
-	addcall(ctxt, initfunc, addmoduledata)
-
-	o(0x5b)
-
-	o(0xc3)
-
-	if ld.Buildmode == ld.BuildmodePlugin {
-		ctxt.Textp = append(ctxt.Textp, addmoduledata)
-	}
-	ctxt.Textp = append(ctxt.Textp, initfunc)
-	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
-	initarray_entry.Attr |= ld.AttrReachable
-	initarray_entry.Attr |= ld.AttrLocal
-	initarray_entry.Type = obj.SINITARR
-	ld.Addaddr(ctxt, initarray_entry, initfunc)
-}
-
-func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
-	targ := r.Sym
-
-	switch r.Type {
-	default:
-		if r.Type >= 256 {
-			ld.Errorf(s, "unexpected relocation type %d", r.Type)
-			return false
-		}
-
-		// Handle relocations found in ELF object files.
-	case 256 + ld.R_386_PC32:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_386_PC32 relocation for dynamic symbol %s", targ.Name)
-		}
-		if targ.Type == 0 || targ.Type == obj.SXREF {
-			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
-		}
-		r.Type = obj.R_PCREL
-		r.Add += 4
-		return true
-
-	case 256 + ld.R_386_PLT32:
-		r.Type = obj.R_PCREL
-		r.Add += 4
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add += int64(targ.Plt)
-		}
-
-		return true
-
-	case 256 + ld.R_386_GOT32, 256 + ld.R_386_GOT32X:
-		if targ.Type != obj.SDYNIMPORT {
-			// have symbol
-			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
-				// turn MOVL of GOT entry into LEAL of symbol address, relative to GOT.
-				s.P[r.Off-2] = 0x8d
-
-				r.Type = obj.R_GOTOFF
-				return true
-			}
-
-			if r.Off >= 2 && s.P[r.Off-2] == 0xff && s.P[r.Off-1] == 0xb3 {
-				// turn PUSHL of GOT entry into PUSHL of symbol itself.
-				// use unnecessary SS prefix to keep instruction same length.
-				s.P[r.Off-2] = 0x36
-
-				s.P[r.Off-1] = 0x68
-				r.Type = obj.R_ADDR
-				return true
-			}
-
-			ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
-			return false
-		}
-
-		addgotsym(ctxt, targ)
-		r.Type = obj.R_CONST // write r->add during relocsym
-		r.Sym = nil
-		r.Add += int64(targ.Got)
-		return true
-
-	case 256 + ld.R_386_GOTOFF:
-		r.Type = obj.R_GOTOFF
-		return true
-
-	case 256 + ld.R_386_GOTPC:
-		r.Type = obj.R_PCREL
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += 4
-		return true
-
-	case 256 + ld.R_386_32:
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected R_386_32 relocation for dynamic symbol %s", targ.Name)
-		}
-		r.Type = obj.R_ADDR
-		return true
-
-	case 512 + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 0:
-		r.Type = obj.R_ADDR
-		if targ.Type == obj.SDYNIMPORT {
-			ld.Errorf(s, "unexpected reloc for dynamic symbol %s", targ.Name)
-		}
-		return true
-
-	case 512 + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 1:
-		if targ.Type == obj.SDYNIMPORT {
-			addpltsym(ctxt, targ)
-			r.Sym = ctxt.Syms.Lookup(".plt", 0)
-			r.Add = int64(targ.Plt)
-			r.Type = obj.R_PCREL
-			return true
-		}
-
-		r.Type = obj.R_PCREL
-		return true
-
-	case 512 + ld.MACHO_FAKE_GOTPCREL:
-		if targ.Type != obj.SDYNIMPORT {
-			// have symbol
-			// turn MOVL of GOT entry into LEAL of symbol itself
-			if r.Off < 2 || s.P[r.Off-2] != 0x8b {
-				ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
-				return false
-			}
-
-			s.P[r.Off-2] = 0x8d
-			r.Type = obj.R_PCREL
-			return true
-		}
-
-		addgotsym(ctxt, targ)
-		r.Sym = ctxt.Syms.Lookup(".got", 0)
-		r.Add += int64(targ.Got)
-		r.Type = obj.R_PCREL
-		return true
-	}
-
-	// Handle references to ELF symbols from our own object files.
-	if targ.Type != obj.SDYNIMPORT {
-		return true
-	}
-	switch r.Type {
-	case obj.R_CALL,
-		obj.R_PCREL:
-		addpltsym(ctxt, targ)
-		r.Sym = ctxt.Syms.Lookup(".plt", 0)
-		r.Add = int64(targ.Plt)
-		return true
-
-	case obj.R_ADDR:
-		if s.Type != obj.SDATA {
-			break
-		}
-		if ld.Iself {
-			ld.Adddynsym(ctxt, targ)
-			rel := ctxt.Syms.Lookup(".rel", 0)
-			ld.Addaddrplus(ctxt, rel, s, int64(r.Off))
-			ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_386_32))
-			r.Type = obj.R_CONST // write r->add during relocsym
-			r.Sym = nil
-			return true
-		}
-
-		if ld.Headtype == obj.Hdarwin && s.Size == int64(ld.SysArch.PtrSize) && r.Off == 0 {
-			// Mach-O relocations are a royal pain to lay out.
-			// They use a compact stateful bytecode representation
-			// that is too much bother to deal with.
-			// Instead, interpret the C declaration
-			//	void *_Cvar_stderr = &stderr;
-			// as making _Cvar_stderr the name of a GOT entry
-			// for stderr. This is separate from the usual GOT entry,
-			// just in case the C code assigns to the variable,
-			// and of course it only works for single pointers,
-			// but we only need to support cgo and that's all it needs.
-			ld.Adddynsym(ctxt, targ)
-
-			got := ctxt.Syms.Lookup(".got", 0)
-			s.Type = got.Type | obj.SSUB
-			s.Outer = got
-			s.Sub = got.Sub
-			got.Sub = s
-			s.Value = got.Size
-			ld.Adduint32(ctxt, got, 0)
-			ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(targ.Dynid))
-			r.Type = 256 // ignore during relocsym
-			return true
-		}
-
-		if (ld.Headtype == obj.Hwindows || ld.Headtype == obj.Hwindowsgui) && s.Size == int64(ld.SysArch.PtrSize) {
-			// nothing to do, the relocation will be laid out in pereloc1
-			return true
-		}
-	}
-
-	return false
-}
-
-func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int {
-	ld.Thearch.Lput(uint32(sectoff))
-
-	elfsym := r.Xsym.ElfsymForReloc()
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_386_32 | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-
-	case obj.R_GOTPCREL:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_386_GOTPC)
-			if r.Xsym.Name != "_GLOBAL_OFFSET_TABLE_" {
-				ld.Thearch.Lput(uint32(sectoff))
-				ld.Thearch.Lput(ld.R_386_GOT32 | uint32(elfsym)<<8)
-			}
-		} else {
-			return -1
-		}
-
-	case obj.R_CALL:
-		if r.Siz == 4 {
-			if r.Xsym.Type == obj.SDYNIMPORT {
-				ld.Thearch.Lput(ld.R_386_PLT32 | uint32(elfsym)<<8)
-			} else {
-				ld.Thearch.Lput(ld.R_386_PC32 | uint32(elfsym)<<8)
-			}
-		} else {
-			return -1
-		}
-
-	case obj.R_PCREL:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_386_PC32 | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-
-	case obj.R_TLS_LE:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_386_TLS_LE | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-
-	case obj.R_TLS_IE:
-		if r.Siz == 4 {
-			ld.Thearch.Lput(ld.R_386_GOTPC)
-			ld.Thearch.Lput(uint32(sectoff))
-			ld.Thearch.Lput(ld.R_386_TLS_GOTIE | uint32(elfsym)<<8)
-		} else {
-			return -1
-		}
-	}
-
-	return 0
-}
-
-func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
-	var v uint32
-
-	rs := r.Xsym
-
-	if rs.Type == obj.SHOSTOBJ {
-		if rs.Dynid < 0 {
-			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
-			return -1
-		}
-
-		v = uint32(rs.Dynid)
-		v |= 1 << 27 // external relocation
-	} else {
-		v = uint32(rs.Sect.Extnum)
-		if v == 0 {
-			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
-			return -1
-		}
-	}
-
-	switch r.Type {
-	default:
-		return -1
-
-	case obj.R_ADDR:
-		v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28
-
-	case obj.R_CALL,
-		obj.R_PCREL:
-		v |= 1 << 24 // pc-relative bit
-		v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28
-	}
-
-	switch r.Siz {
-	default:
-		return -1
-
-	case 1:
-		v |= 0 << 25
-
-	case 2:
-		v |= 1 << 25
-
-	case 4:
-		v |= 2 << 25
-
-	case 8:
-		v |= 3 << 25
-	}
-
-	ld.Thearch.Lput(uint32(sectoff))
-	ld.Thearch.Lput(v)
-	return 0
-}
-
-func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool {
-	var v uint32
-
-	rs := r.Xsym
-
-	if rs.Dynid < 0 {
-		ld.Errorf(s, "reloc %d to non-coff symbol %s type=%d", r.Type, rs.Name, rs.Type)
-		return false
-	}
-
-	ld.Thearch.Lput(uint32(sectoff))
-	ld.Thearch.Lput(uint32(rs.Dynid))
-
-	switch r.Type {
-	default:
-		return false
-
-	case obj.R_ADDR:
-		v = ld.IMAGE_REL_I386_DIR32
-
-	case obj.R_CALL,
-		obj.R_PCREL:
-		v = ld.IMAGE_REL_I386_REL32
-	}
-
-	ld.Thearch.Wput(uint16(v))
-
-	return true
-}
-
-func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
-	if ld.Linkmode == ld.LinkExternal {
-		return -1
-	}
-	switch r.Type {
-	case obj.R_CONST:
-		*val = r.Add
-		return 0
-
-	case obj.R_GOTOFF:
-		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
-		return 0
-	}
-
-	return -1
-}
-
-func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
-	log.Fatalf("unexpected relocation variant")
-	return t
-}
-
-func elfsetupplt(ctxt *ld.Link) {
-	plt := ctxt.Syms.Lookup(".plt", 0)
-	got := ctxt.Syms.Lookup(".got.plt", 0)
-	if plt.Size == 0 {
-		// pushl got+4
-		ld.Adduint8(ctxt, plt, 0xff)
-
-		ld.Adduint8(ctxt, plt, 0x35)
-		ld.Addaddrplus(ctxt, plt, got, 4)
-
-		// jmp *got+8
-		ld.Adduint8(ctxt, plt, 0xff)
-
-		ld.Adduint8(ctxt, plt, 0x25)
-		ld.Addaddrplus(ctxt, plt, got, 8)
-
-		// zero pad
-		ld.Adduint32(ctxt, plt, 0)
-
-		// assume got->size == 0 too
-		ld.Addaddrplus(ctxt, got, ctxt.Syms.Lookup(".dynamic", 0), 0)
-
-		ld.Adduint32(ctxt, got, 0)
-		ld.Adduint32(ctxt, got, 0)
-	}
-}
-
-func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Plt >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-
-	if ld.Iself {
-		plt := ctxt.Syms.Lookup(".plt", 0)
-		got := ctxt.Syms.Lookup(".got.plt", 0)
-		rel := ctxt.Syms.Lookup(".rel.plt", 0)
-		if plt.Size == 0 {
-			elfsetupplt(ctxt)
-		}
-
-		// jmpq *got+size
-		ld.Adduint8(ctxt, plt, 0xff)
-
-		ld.Adduint8(ctxt, plt, 0x25)
-		ld.Addaddrplus(ctxt, plt, got, got.Size)
-
-		// add to got: pointer to current pos in plt
-		ld.Addaddrplus(ctxt, got, plt, plt.Size)
-
-		// pushl $x
-		ld.Adduint8(ctxt, plt, 0x68)
-
-		ld.Adduint32(ctxt, plt, uint32(rel.Size))
-
-		// jmp .plt
-		ld.Adduint8(ctxt, plt, 0xe9)
-
-		ld.Adduint32(ctxt, plt, uint32(-(plt.Size + 4)))
-
-		// rel
-		ld.Addaddrplus(ctxt, rel, got, got.Size-4)
-
-		ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_JMP_SLOT))
-
-		s.Plt = int32(plt.Size - 16)
-	} else if ld.Headtype == obj.Hdarwin {
-		// Same laziness as in 6l.
-
-		plt := ctxt.Syms.Lookup(".plt", 0)
-
-		addgotsym(ctxt, s)
-
-		ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.plt", 0), uint32(s.Dynid))
-
-		// jmpq *got+size(IP)
-		s.Plt = int32(plt.Size)
-
-		ld.Adduint8(ctxt, plt, 0xff)
-		ld.Adduint8(ctxt, plt, 0x25)
-		ld.Addaddrplus(ctxt, plt, ctxt.Syms.Lookup(".got", 0), int64(s.Got))
-	} else {
-		ld.Errorf(s, "addpltsym: unsupported binary format")
-	}
-}
-
-func addgotsym(ctxt *ld.Link, s *ld.Symbol) {
-	if s.Got >= 0 {
-		return
-	}
-
-	ld.Adddynsym(ctxt, s)
-	got := ctxt.Syms.Lookup(".got", 0)
-	s.Got = int32(got.Size)
-	ld.Adduint32(ctxt, got, 0)
-
-	if ld.Iself {
-		rel := ctxt.Syms.Lookup(".rel", 0)
-		ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
-		ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_GLOB_DAT))
-	} else if ld.Headtype == obj.Hdarwin {
-		ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(s.Dynid))
-	} else {
-		ld.Errorf(s, "addgotsym: unsupported binary format")
-	}
-}
-
-func asmb(ctxt *ld.Link) {
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
-	}
-
-	if ld.Iself {
-		ld.Asmbelfsetup()
-	}
-
-	sect := ld.Segtext.Sect
-	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-	// 0xCC is INT $3 - breakpoint instruction
-	ld.CodeblkPad(ctxt, int64(sect.Vaddr), int64(sect.Length), []byte{0xCC})
-	for sect = sect.Next; sect != nil; sect = sect.Next {
-		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
-		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
-	}
-
-	if ld.Segrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
-		}
-
-		ld.Cseek(int64(ld.Segrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
-	}
-	if ld.Segrelrodata.Filelen > 0 {
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
-		}
-		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
-		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
-	}
-
-	ld.Cseek(int64(ld.Segdata.Fileoff))
-	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
-
-	ld.Cseek(int64(ld.Segdwarf.Fileoff))
-	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
-
-	machlink := uint32(0)
-	if ld.Headtype == obj.Hdarwin {
-		machlink = uint32(ld.Domacholink(ctxt))
-	}
-
-	ld.Symsize = 0
-	ld.Spsize = 0
-	ld.Lcsize = 0
-	symo := uint32(0)
-	if !*ld.FlagS {
-		// TODO: rationalize
-		if ctxt.Debugvlog != 0 {
-			ctxt.Logf("%5.2f sym\n", obj.Cputime())
-		}
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
-			}
-
-		case obj.Hplan9:
-			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
-
-		case obj.Hdarwin:
-			symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
-
-		case obj.Hwindows, obj.Hwindowsgui:
-			symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
-			symo = uint32(ld.Rnd(int64(symo), ld.PEFILEALIGN))
-		}
-
-		ld.Cseek(int64(symo))
-		switch ld.Headtype {
-		default:
-			if ld.Iself {
-				if ctxt.Debugvlog != 0 {
-					ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
-				}
-				ld.Asmelfsym(ctxt)
-				ld.Cflush()
-				ld.Cwrite(ld.Elfstrdat)
-
-				if ld.Linkmode == ld.LinkExternal {
-					ld.Elfemitreloc(ctxt)
-				}
-			}
-
-		case obj.Hplan9:
-			ld.Asmplan9sym(ctxt)
-			ld.Cflush()
-
-			sym := ctxt.Syms.Lookup("pclntab", 0)
-			if sym != nil {
-				ld.Lcsize = int32(len(sym.P))
-				for i := 0; int32(i) < ld.Lcsize; i++ {
-					ld.Cput(sym.P[i])
-				}
-
-				ld.Cflush()
-			}
-
-		case obj.Hwindows, obj.Hwindowsgui:
-			if ctxt.Debugvlog != 0 {
-				ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
-			}
-
-		case obj.Hdarwin:
-			if ld.Linkmode == ld.LinkExternal {
-				ld.Machoemitreloc(ctxt)
-			}
-		}
-	}
-
-	if ctxt.Debugvlog != 0 {
-		ctxt.Logf("%5.2f headr\n", obj.Cputime())
-	}
-	ld.Cseek(0)
-	switch ld.Headtype {
-	default:
-	case obj.Hplan9: /* plan9 */
-		magic := int32(4*11*11 + 7)
-
-		ld.Lputb(uint32(magic))              /* magic */
-		ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */
-		ld.Lputb(uint32(ld.Segdata.Filelen))
-		ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
-		ld.Lputb(uint32(ld.Symsize))          /* nsyms */
-		ld.Lputb(uint32(ld.Entryvalue(ctxt))) /* va of entry */
-		ld.Lputb(uint32(ld.Spsize))           /* sp offsets */
-		ld.Lputb(uint32(ld.Lcsize))           /* line offsets */
-
-	case obj.Hdarwin:
-		ld.Asmbmacho(ctxt)
-
-	case obj.Hlinux,
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd,
-		obj.Hnacl:
-		ld.Asmbelf(ctxt, int64(symo))
-
-	case obj.Hwindows, obj.Hwindowsgui:
-		ld.Asmbpe(ctxt)
-	}
-
-	ld.Cflush()
-}
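The gentext code deleted above builds __x86.get_pc_thunk.<reg> helpers because 386 has no PC-relative addressing: each thunk is "mov (%esp),%reg; ret", with the destination register folded into bits 3-5 of the ModR/M byte. A minimal standalone Go sketch of that encoding (illustrative only):

package main

import "fmt"

// thunkBytes returns the machine code of __x86.get_pc_thunk.<reg> for the
// given x86 register number (0=ax, 1=cx, 2=dx, 3=bx, 5=bp, 6=si, 7=di).
func thunkBytes(regNum uint8) []byte {
	return []byte{
		0x8b, 0x04 + regNum<<3, 0x24, // mov (%esp), %reg
		0xc3, // ret
	}
}

func main() {
	fmt.Printf("% x\n", thunkBytes(1)) // cx thunk: 8b 0c 24 c3
}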
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/l.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/l.go
deleted file mode 100644
index 61085ed..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/l.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/x86/l.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/x86/l.go:1
-// Inferno utils/8l/l.h
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/8l/l.h
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-const (
-	maxAlign  = 32 // max data alignment
-	minAlign  = 1  // min data alignment
-	funcAlign = 16
-)
-
-/* Used by ../internal/ld/dwarf.go */
-const (
-	dwarfRegSP = 4
-	dwarfRegLR = 8
-)
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/obj.go b/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/obj.go
deleted file mode 100644
index 298b177..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/internal/x86/obj.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/x86/obj.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/internal/x86/obj.go:1
-// Inferno utils/8l/obj.c
-// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/8l/obj.c
-//
-//	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
-//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
-//	Portions Copyright © 1997-1999 Vita Nuova Limited
-//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
-//	Portions Copyright © 2004,2006 Bruce Ellis
-//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
-//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
-//	Portions Copyright © 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package x86
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/internal/sys"
-	"bootstrap/cmd/link/internal/ld"
-	"fmt"
-)
-
-func Init() {
-	ld.SysArch = sys.Arch386
-
-	ld.Thearch.Funcalign = funcAlign
-	ld.Thearch.Maxalign = maxAlign
-	ld.Thearch.Minalign = minAlign
-	ld.Thearch.Dwarfregsp = dwarfRegSP
-	ld.Thearch.Dwarfreglr = dwarfRegLR
-
-	ld.Thearch.Adddynrel = adddynrel
-	ld.Thearch.Archinit = archinit
-	ld.Thearch.Archreloc = archreloc
-	ld.Thearch.Archrelocvariant = archrelocvariant
-	ld.Thearch.Asmb = asmb
-	ld.Thearch.Elfreloc1 = elfreloc1
-	ld.Thearch.Elfsetupplt = elfsetupplt
-	ld.Thearch.Gentext = gentext
-	ld.Thearch.Machoreloc1 = machoreloc1
-	ld.Thearch.PEreloc1 = pereloc1
-	ld.Thearch.Lput = ld.Lputl
-	ld.Thearch.Wput = ld.Wputl
-	ld.Thearch.Vput = ld.Vputl
-	ld.Thearch.Append16 = ld.Append16l
-	ld.Thearch.Append32 = ld.Append32l
-	ld.Thearch.Append64 = ld.Append64l
-
-	ld.Thearch.Linuxdynld = "/lib/ld-linux.so.2"
-	ld.Thearch.Freebsddynld = "/usr/libexec/ld-elf.so.1"
-	ld.Thearch.Openbsddynld = "/usr/libexec/ld.so"
-	ld.Thearch.Netbsddynld = "/usr/libexec/ld.elf_so"
-	ld.Thearch.Solarisdynld = "/lib/ld.so.1"
-}
-
-func archinit(ctxt *ld.Link) {
-	switch ld.Headtype {
-	default:
-		ld.Exitf("unknown -H option: %v", ld.Headtype)
-
-	case obj.Hplan9: /* plan 9 */
-		ld.HEADR = 32
-
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hdarwin: /* apple MACH */
-		ld.Machoinit()
-
-		ld.HEADR = ld.INITIAL_MACHO_HEADR
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 4096 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hlinux, /* elf32 executable */
-		obj.Hfreebsd,
-		obj.Hnetbsd,
-		obj.Hopenbsd:
-		ld.Elfinit(ctxt)
-
-		ld.HEADR = ld.ELFRESERVE
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x08048000 + int64(ld.HEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 4096
-		}
-
-	case obj.Hnacl:
-		ld.Elfinit(ctxt)
-		ld.HEADR = 0x10000
-		ld.Funcalign = 32
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = 0x20000
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = 0x10000
-		}
-
-	case obj.Hwindows, obj.Hwindowsgui: /* PE executable */
-		ld.Peinit(ctxt)
-
-		ld.HEADR = ld.PEFILEHEADR
-		if *ld.FlagTextAddr == -1 {
-			*ld.FlagTextAddr = ld.PEBASE + int64(ld.PESECTHEADR)
-		}
-		if *ld.FlagDataAddr == -1 {
-			*ld.FlagDataAddr = 0
-		}
-		if *ld.FlagRound == -1 {
-			*ld.FlagRound = ld.PESECTALIGN
-		}
-	}
-
-	if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 {
-		fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(*ld.FlagDataAddr), uint32(*ld.FlagRound))
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/link_test.go b/pkg/bootstrap/src/bootstrap/cmd/link/link_test.go
deleted file mode 100644
index 8963bb8..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/link_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/link_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/link_test.go:1
-package main
-
-import "testing"
-
-var AuthorPaidByTheColumnInch struct {
-	fog int `
-	London. Michaelmas term lately over, and the Lord Chancellor sitting in Lincoln’s Inn Hall. Implacable November weather. As much mud in the streets as if the waters had but newly retired from the face of the earth, and it would not be wonderful to meet a Megalosaurus, forty feet long or so, waddling like an elephantine lizard up Holborn Hill. Smoke lowering down from chimney-pots, making a soft black drizzle, with flakes of soot in it as big as full-grown snowflakes—gone into mourning, one might imagine, for the death of the sun. Dogs, undistinguishable in mire. Horses, scarcely better; splashed to their very blinkers. Foot passengers, jostling one another’s umbrellas in a general infection of ill temper, and losing their foot-hold at street-corners, where tens of thousands of other foot passengers have been slipping and sliding since the day broke (if this day ever broke), adding new deposits to the crust upon crust of mud, sticking at those points tenaciously to the pavement, and accumulating at compound interest.
-
-	Fog everywhere. Fog up the river, where it flows among green aits and meadows; fog down the river, where it rolls defiled among the tiers of shipping and the waterside pollutions of a great (and dirty) city. Fog on the Essex marshes, fog on the Kentish heights. Fog creeping into the cabooses of collier-brigs; fog lying out on the yards and hovering in the rigging of great ships; fog drooping on the gunwales of barges and small boats. Fog in the eyes and throats of ancient Greenwich pensioners, wheezing by the firesides of their wards; fog in the stem and bowl of the afternoon pipe of the wrathful skipper, down in his close cabin; fog cruelly pinching the toes and fingers of his shivering little ‘prentice boy on deck. Chance people on the bridges peeping over the parapets into a nether sky of fog, with fog all round them, as if they were up in a balloon and hanging in the misty clouds.
-
-	Gas looming through the fog in divers places in the streets, much as the sun may, from the spongey fields, be seen to loom by husbandman and ploughboy. Most of the shops lighted two hours before their time—as the gas seems to know, for it has a haggard and unwilling look.
-
-	The raw afternoon is rawest, and the dense fog is densest, and the muddy streets are muddiest near that leaden-headed old obstruction, appropriate ornament for the threshold of a leaden-headed old corporation, Temple Bar. And hard by Temple Bar, in Lincoln’s Inn Hall, at the very heart of the fog, sits the Lord High Chancellor in his High Court of Chancery.`
-
-	wind int `
-	It was grand to see how the wind awoke, and bent the trees, and drove the rain before it like a cloud of smoke; and to hear the solemn thunder, and to see the lightning; and while thinking with awe of the tremendous powers by which our little lives are encompassed, to consider how beneficent they are, and how upon the smallest flower and leaf there was already a freshness poured from all this seeming rage, which seemed to make creation new again.`
-
-	jarndyce int `
-	Jarndyce and Jarndyce drones on. This scarecrow of a suit has, over the course of time, become so complicated, that no man alive knows what it means. The parties to it understand it least; but it has been observed that no two Chancery lawyers can talk about it for five minutes, without coming to a total disagreement as to all the premises. Innumerable children have been born into the cause; innumerable young people have married into it; innumerable old people have died out of it. Scores of persons have deliriously found themselves made parties in Jarndyce and Jarndyce, without knowing how or why; whole families have inherited legendary hatreds with the suit. The little plaintiff or defendant, who was promised a new rocking-horse when Jarndyce and Jarndyce should be settled, has grown up, possessed himself of a real horse, and trotted away into the other world. Fair wards of court have faded into mothers and grandmothers; a long procession of Chancellors has come in and gone out; the legion of bills in the suit have been transformed into mere bills of mortality; there are not three Jarndyces left upon the earth perhaps, since old Tom Jarndyce in despair blew his brains out at a coffee-house in Chancery Lane; but Jarndyce and Jarndyce still drags its dreary length before the Court, perennially hopeless.`
-
-	principle int `
-	The one great principle of the English law is, to make business for itself. There is no other principle distinctly, certainly, and consistently maintained through all its narrow turnings. Viewed by this light it becomes a coherent scheme, and not the monstrous maze the laity are apt to think it. Let them but once clearly perceive that its grand principle is to make business for itself at their expense, and surely they will cease to grumble.`
-}
-
-func TestLargeSymName(t *testing.T) {
-	// The compiler generates a symbol name using the string form of the
-	// type. This tests that the linker can read symbol names larger than
-	// the bufio buffer. Issue #15104.
-	_ = AuthorPaidByTheColumnInch
-}
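
The struct tags above do the heavy lifting: the compiler builds a type's symbol name from the string form of the type, tags included, so embedding pages of Bleak House produces a symbol name far longer than bufio's default 4096-byte buffer. A minimal standalone sketch of the same effect, using reflection instead of the linker:

package main

import (
	"fmt"
	"reflect"
)

// v has an anonymous struct type, so its type string spells out every
// field, tag included, which is the property TestLargeSymName relies on.
var v struct {
	fog int `imagine several paragraphs of Bleak House here`
}

func main() {
	s := reflect.TypeOf(v).String()
	fmt.Println(len(s)) // grows with the tag; the real test pushes this past 4096
	fmt.Println(s)      // struct { fog int "imagine several paragraphs ..." }
}
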
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/linkbig_test.go b/pkg/bootstrap/src/bootstrap/cmd/link/linkbig_test.go
deleted file mode 100644
index b2b1fea..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/linkbig_test.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/linkbig_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/linkbig_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This program generates a test to verify that a program can be
-// successfully linked even when there are very large text
-// sections present.
-
-package main
-
-import (
-	"bytes"
-	"bootstrap/cmd/internal/obj"
-	"fmt"
-	"internal/testenv"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"testing"
-)
-
-func TestLargeText(t *testing.T) {
-	if testing.Short() || (obj.GOARCH != "ppc64le" && obj.GOARCH != "ppc64" && obj.GOARCH != "arm") {
-		t.Skipf("Skipping large text section test in short mode or on %s", obj.GOARCH)
-	}
-	testenv.MustHaveGoBuild(t)
-
-	var w bytes.Buffer
-	const FN = 4
-	tmpdir, err := ioutil.TempDir("", "bigtext")
-	if err != nil {
-		t.Fatalf("can't create temp directory: %v\n", err)
-	}
-	defer os.RemoveAll(tmpdir)
-
-	// Generate a scenario where the total amount of text exceeds the
-	// limit of the jmp/call instruction on RISC architectures like ppc64le,
-	// which is 2^26. When that happens, the calls require special trampolines
-	// or long branches inserted by the linker where supported.
-	// Multiple .s files are generated instead of one.
-	instOnArch := map[string]string{
-		"ppc64":   "\tMOVD\tR0,R3\n",
-		"ppc64le": "\tMOVD\tR0,R3\n",
-		"arm":     "\tMOVW\tR0,R1\n",
-	}
-	inst := instOnArch[obj.GOARCH]
-	for j := 0; j < FN; j++ {
-		testname := fmt.Sprintf("bigfn%d", j)
-		fmt.Fprintf(&w, "TEXT ·%s(SB),$0\n", testname)
-		for i := 0; i < 2200000; i++ {
-			fmt.Fprintf(&w, inst)
-		}
-		fmt.Fprintf(&w, "\tRET\n")
-		err := ioutil.WriteFile(tmpdir+"/"+testname+".s", w.Bytes(), 0666)
-		if err != nil {
-			t.Fatalf("can't write output: %v\n", err)
-		}
-		w.Reset()
-	}
-	fmt.Fprintf(&w, "package main\n")
-	fmt.Fprintf(&w, "\nimport (\n")
-	fmt.Fprintf(&w, "\t\"os\"\n")
-	fmt.Fprintf(&w, "\t\"fmt\"\n")
-	fmt.Fprintf(&w, ")\n\n")
-
-	for i := 0; i < FN; i++ {
-		fmt.Fprintf(&w, "func bigfn%d()\n", i)
-	}
-	fmt.Fprintf(&w, "\nfunc main() {\n")
-
-	// A lot of dummy code is generated in the .s files just to produce a large
-	// amount of text. Link it in, but guard the calls so the dummy code is never
-	// executed while the main part of the program still runs.
-	fmt.Fprintf(&w, "\tif os.Getenv(\"LINKTESTARG\") != \"\" {\n")
-	for i := 0; i < FN; i++ {
-		fmt.Fprintf(&w, "\t\tbigfn%d()\n", i)
-	}
-	fmt.Fprintf(&w, "\t}\n")
-	fmt.Fprintf(&w, "\tfmt.Printf(\"PASS\\n\")\n")
-	fmt.Fprintf(&w, "}")
-	err = ioutil.WriteFile(tmpdir+"/bigfn.go", w.Bytes(), 0666)
-	if err != nil {
-		t.Fatalf("can't write output: %v\n", err)
-	}
-
-	// Build and run with internal linking.
-	os.Chdir(tmpdir)
-	cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "bigtext")
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Build failed for big text program with internal linking: %v, output: %s", err, out)
-	}
-	cmd = exec.Command(tmpdir + "/bigtext")
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Program built with internal linking failed to run with err %v, output: %s", err, out)
-	}
-
-	// Build and run with external linking
-	os.Chdir(tmpdir)
-	cmd = exec.Command(testenv.GoToolPath(t), "build", "-o", "bigtext", "-ldflags", "'-linkmode=external'")
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Build failed for big text program with external linking: %v, output: %s", err, out)
-	}
-	cmd = exec.Command(tmpdir + "/bigtext")
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Program built with external linking failed to run with err %v, output: %s", err, out)
-	}
-}
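
As a rough check of the sizes involved in the test above (assuming 4-byte instructions and reading the 2^26 figure in the comment as a byte range, i.e. a reach of about ±2^25 bytes for a direct call): 4 functions of 2,200,000 moves each come to roughly 33.6 MiB of text, just past that reach.

package main

import "fmt"

func main() {
	const (
		funcs     = 4       // FN in the test above
		instrs    = 2200000 // moves emitted per generated function
		instrSize = 4       // bytes per MOVD/MOVW on ppc64/arm
		reach     = 1 << 25 // assumed ±32 MiB direct-call reach (half of 2^26)
	)
	text := funcs * instrs * instrSize
	fmt.Printf("total text: %d bytes (%.1f MiB)\n", text, float64(text)/(1<<20))
	fmt.Printf("direct-call reach: %d bytes; trampolines needed: %v\n", reach, text > reach)
}
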
diff --git a/pkg/bootstrap/src/bootstrap/cmd/link/main.go b/pkg/bootstrap/src/bootstrap/cmd/link/main.go
deleted file mode 100644
index 95bf51b..0000000
--- a/pkg/bootstrap/src/bootstrap/cmd/link/main.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/main.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/cmd/link/main.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
-	"bootstrap/cmd/internal/obj"
-	"bootstrap/cmd/link/internal/amd64"
-	"bootstrap/cmd/link/internal/arm"
-	"bootstrap/cmd/link/internal/arm64"
-	"bootstrap/cmd/link/internal/ld"
-	"bootstrap/cmd/link/internal/mips"
-	"bootstrap/cmd/link/internal/mips64"
-	"bootstrap/cmd/link/internal/ppc64"
-	"bootstrap/cmd/link/internal/s390x"
-	"bootstrap/cmd/link/internal/x86"
-	"fmt"
-	"os"
-)
-
-// The bulk of the linker implementation lives in cmd/link/internal/ld.
-// Architecture-specific code lives in cmd/link/internal/GOARCH.
-//
-// Program initialization:
-//
-// Before any argument parsing is done, the Init function of the relevant
-// architecture package is called. The only job done in Init is
-// configuration of the ld.Thearch and ld.SysArch variables.
-//
-// Then control flow passes to ld.Main, which parses flags, makes
-// some configuration decisions, and then gives the architecture
-// packages a second chance to modify the linker's configuration
-// via the ld.Thearch.Archinit function.
-
-func main() {
-	switch obj.GOARCH {
-	default:
-		fmt.Fprintf(os.Stderr, "link: unknown architecture %q\n", obj.GOARCH)
-		os.Exit(2)
-	case "386":
-		x86.Init()
-	case "amd64", "amd64p32":
-		amd64.Init()
-	case "arm":
-		arm.Init()
-	case "arm64":
-		arm64.Init()
-	case "mips", "mipsle":
-		mips.Init()
-	case "mips64", "mips64le":
-		mips64.Init()
-	case "ppc64", "ppc64le":
-		ppc64.Init()
-	case "s390x":
-		s390x.Init()
-	}
-	ld.Main()
-}
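
The comment block above describes a two-phase handshake: each architecture's Init fills in a table of constants and hooks (ld.Thearch, ld.SysArch), and ld.Main later calls back through that table via Archinit once flags are parsed. A minimal sketch of the same registration pattern, with illustrative names rather than the linker's actual types:

package main

import "fmt"

// arch mirrors the role of ld.Thearch: a table of constants and hooks that
// an architecture package fills in before the driver runs.
type arch struct {
	name      string
	funcAlign int
	archinit  func() // second-chance hook, like ld.Thearch.Archinit
}

var thearch arch // the single global the driver consults

// initAMD64 plays the role of amd64.Init: it only configures the table.
func initAMD64() {
	thearch = arch{
		name:      "amd64",
		funcAlign: 32,
		archinit:  func() { fmt.Println("amd64 archinit: choose header layout and defaults") },
	}
}

// ldMain plays the role of ld.Main: parse flags (elided here), then give
// the architecture a second chance to adjust the configuration.
func ldMain() {
	fmt.Println("linking for", thearch.name, "funcalign", thearch.funcAlign)
	thearch.archinit()
}

func main() {
	initAMD64() // phase 1: before any argument parsing
	ldMain()    // phase 2: the driver calls back through the table
}
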
diff --git a/pkg/bootstrap/src/bootstrap/debug/pe/file.go b/pkg/bootstrap/src/bootstrap/debug/pe/file.go
deleted file mode 100644
index 5280e8c..0000000
--- a/pkg/bootstrap/src/bootstrap/debug/pe/file.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/file.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/file.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pe implements access to PE (Microsoft Windows Portable Executable) files.
-package pe
-
-import (
-	"debug/dwarf"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"os"
-)
-
-// Avoid use of post-Go 1.4 io features, to make safe for toolchain bootstrap.
-const seekStart = 0
-
-// A File represents an open PE file.
-type File struct {
-	FileHeader
-	OptionalHeader interface{} // of type *OptionalHeader32 or *OptionalHeader64
-	Sections       []*Section
-	Symbols        []*Symbol    // COFF symbols with auxiliary symbol records removed
-	COFFSymbols    []COFFSymbol // all COFF symbols (including auxiliary symbol records)
-	StringTable    StringTable
-
-	closer io.Closer
-}
-
-// Open opens the named file using os.Open and prepares it for use as a PE binary.
-func Open(name string) (*File, error) {
-	f, err := os.Open(name)
-	if err != nil {
-		return nil, err
-	}
-	ff, err := NewFile(f)
-	if err != nil {
-		f.Close()
-		return nil, err
-	}
-	ff.closer = f
-	return ff, nil
-}
-
-// Close closes the File.
-// If the File was created using NewFile directly instead of Open,
-// Close has no effect.
-func (f *File) Close() error {
-	var err error
-	if f.closer != nil {
-		err = f.closer.Close()
-		f.closer = nil
-	}
-	return err
-}
-
-var (
-	sizeofOptionalHeader32 = uint16(binary.Size(OptionalHeader32{}))
-	sizeofOptionalHeader64 = uint16(binary.Size(OptionalHeader64{}))
-)
-
-// TODO(brainman): add Load function, as a replacement for NewFile, that does not call removeAuxSymbols (for performance)
-
-// NewFile creates a new File for accessing a PE binary in an underlying reader.
-func NewFile(r io.ReaderAt) (*File, error) {
-	f := new(File)
-	sr := io.NewSectionReader(r, 0, 1<<63-1)
-
-	var dosheader [96]byte
-	if _, err := r.ReadAt(dosheader[0:], 0); err != nil {
-		return nil, err
-	}
-	var base int64
-	if dosheader[0] == 'M' && dosheader[1] == 'Z' {
-		signoff := int64(binary.LittleEndian.Uint32(dosheader[0x3c:]))
-		var sign [4]byte
-		r.ReadAt(sign[:], signoff)
-		if !(sign[0] == 'P' && sign[1] == 'E' && sign[2] == 0 && sign[3] == 0) {
-			return nil, fmt.Errorf("Invalid PE COFF file signature of %v.", sign)
-		}
-		base = signoff + 4
-	} else {
-		base = int64(0)
-	}
-	sr.Seek(base, seekStart)
-	if err := binary.Read(sr, binary.LittleEndian, &f.FileHeader); err != nil {
-		return nil, err
-	}
-	switch f.FileHeader.Machine {
-	case IMAGE_FILE_MACHINE_UNKNOWN, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_I386:
-	default:
-		return nil, fmt.Errorf("Unrecognised COFF file header machine value of 0x%x.", f.FileHeader.Machine)
-	}
-
-	var err error
-
-	// Read string table.
-	f.StringTable, err = readStringTable(&f.FileHeader, sr)
-	if err != nil {
-		return nil, err
-	}
-
-	// Read symbol table.
-	f.COFFSymbols, err = readCOFFSymbols(&f.FileHeader, sr)
-	if err != nil {
-		return nil, err
-	}
-	f.Symbols, err = removeAuxSymbols(f.COFFSymbols, f.StringTable)
-	if err != nil {
-		return nil, err
-	}
-
-	// Read optional header.
-	sr.Seek(base, seekStart)
-	if err := binary.Read(sr, binary.LittleEndian, &f.FileHeader); err != nil {
-		return nil, err
-	}
-	var oh32 OptionalHeader32
-	var oh64 OptionalHeader64
-	switch f.FileHeader.SizeOfOptionalHeader {
-	case sizeofOptionalHeader32:
-		if err := binary.Read(sr, binary.LittleEndian, &oh32); err != nil {
-			return nil, err
-		}
-		if oh32.Magic != 0x10b { // PE32
-			return nil, fmt.Errorf("pe32 optional header has unexpected Magic of 0x%x", oh32.Magic)
-		}
-		f.OptionalHeader = &oh32
-	case sizeofOptionalHeader64:
-		if err := binary.Read(sr, binary.LittleEndian, &oh64); err != nil {
-			return nil, err
-		}
-		if oh64.Magic != 0x20b { // PE32+
-			return nil, fmt.Errorf("pe32+ optional header has unexpected Magic of 0x%x", oh64.Magic)
-		}
-		f.OptionalHeader = &oh64
-	}
-
-	// Process sections.
-	f.Sections = make([]*Section, f.FileHeader.NumberOfSections)
-	for i := 0; i < int(f.FileHeader.NumberOfSections); i++ {
-		sh := new(SectionHeader32)
-		if err := binary.Read(sr, binary.LittleEndian, sh); err != nil {
-			return nil, err
-		}
-		name, err := sh.fullName(f.StringTable)
-		if err != nil {
-			return nil, err
-		}
-		s := new(Section)
-		s.SectionHeader = SectionHeader{
-			Name:                 name,
-			VirtualSize:          sh.VirtualSize,
-			VirtualAddress:       sh.VirtualAddress,
-			Size:                 sh.SizeOfRawData,
-			Offset:               sh.PointerToRawData,
-			PointerToRelocations: sh.PointerToRelocations,
-			PointerToLineNumbers: sh.PointerToLineNumbers,
-			NumberOfRelocations:  sh.NumberOfRelocations,
-			NumberOfLineNumbers:  sh.NumberOfLineNumbers,
-			Characteristics:      sh.Characteristics,
-		}
-		r2 := r
-		if sh.PointerToRawData == 0 { // .bss must have all 0s
-			r2 = zeroReaderAt{}
-		}
-		s.sr = io.NewSectionReader(r2, int64(s.SectionHeader.Offset), int64(s.SectionHeader.Size))
-		s.ReaderAt = s.sr
-		f.Sections[i] = s
-	}
-	for i := range f.Sections {
-		var err error
-		f.Sections[i].Relocs, err = readRelocs(&f.Sections[i].SectionHeader, sr)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return f, nil
-}
-
-// zeroReaderAt is a ReaderAt that reads 0s.
-type zeroReaderAt struct{}
-
-// ReadAt writes len(p) 0s into p.
-func (w zeroReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
-	for i := range p {
-		p[i] = 0
-	}
-	return len(p), nil
-}
-
-// getString extracts a string from symbol string table.
-func getString(section []byte, start int) (string, bool) {
-	if start < 0 || start >= len(section) {
-		return "", false
-	}
-
-	for end := start; end < len(section); end++ {
-		if section[end] == 0 {
-			return string(section[start:end]), true
-		}
-	}
-	return "", false
-}
-
-// Section returns the first section with the given name, or nil if no such
-// section exists.
-func (f *File) Section(name string) *Section {
-	for _, s := range f.Sections {
-		if s.Name == name {
-			return s
-		}
-	}
-	return nil
-}
-
-func (f *File) DWARF() (*dwarf.Data, error) {
-	// There are many other DWARF sections, but these
-	// are the ones the debug/dwarf package uses.
-	// Don't bother loading others.
-	var names = [...]string{"abbrev", "info", "line", "ranges", "str"}
-	var dat [len(names)][]byte
-	for i, name := range names {
-		name = ".debug_" + name
-		s := f.Section(name)
-		if s == nil {
-			continue
-		}
-		b, err := s.Data()
-		if err != nil && uint32(len(b)) < s.Size {
-			return nil, err
-		}
-		if 0 < s.VirtualSize && s.VirtualSize < s.Size {
-			b = b[:s.VirtualSize]
-		}
-		dat[i] = b
-	}
-
-	abbrev, info, line, ranges, str := dat[0], dat[1], dat[2], dat[3], dat[4]
-	return dwarf.New(abbrev, nil, nil, info, line, nil, ranges, str)
-}
-
-// TODO(brainman): document ImportDirectory once we decide what to do with it.
-
-type ImportDirectory struct {
-	OriginalFirstThunk uint32
-	TimeDateStamp      uint32
-	ForwarderChain     uint32
-	Name               uint32
-	FirstThunk         uint32
-
-	dll string
-}
-
-// ImportedSymbols returns the names of all symbols
-// referred to by the binary f that are expected to be
-// satisfied by other libraries at dynamic load time.
-// It does not return weak symbols.
-func (f *File) ImportedSymbols() ([]string, error) {
-	pe64 := f.Machine == IMAGE_FILE_MACHINE_AMD64
-	ds := f.Section(".idata")
-	if ds == nil {
-		// not dynamic, so no libraries
-		return nil, nil
-	}
-	d, err := ds.Data()
-	if err != nil {
-		return nil, err
-	}
-	var ida []ImportDirectory
-	for len(d) > 0 {
-		var dt ImportDirectory
-		dt.OriginalFirstThunk = binary.LittleEndian.Uint32(d[0:4])
-		dt.Name = binary.LittleEndian.Uint32(d[12:16])
-		dt.FirstThunk = binary.LittleEndian.Uint32(d[16:20])
-		d = d[20:]
-		if dt.OriginalFirstThunk == 0 {
-			break
-		}
-		ida = append(ida, dt)
-	}
-	// TODO(brainman): this needs to be rewritten
-	//  ds.Data() returns the contents of the .idata section. Why store it in a variable called "names"?
-	//  Why are we retrieving it a second time? We already have it in "d", and it is not modified anywhere.
-	//  getString does not extract a string from the symbol string table here (contrary to what its doc comment says).
-	//  Why is ds.Data() called again and again in the loop?
-	//  Needs a test before rewriting.
-	names, _ := ds.Data()
-	var all []string
-	for _, dt := range ida {
-		dt.dll, _ = getString(names, int(dt.Name-ds.VirtualAddress))
-		d, _ = ds.Data()
-		// seek to OriginalFirstThunk
-		d = d[dt.OriginalFirstThunk-ds.VirtualAddress:]
-		for len(d) > 0 {
-			if pe64 { // 64bit
-				va := binary.LittleEndian.Uint64(d[0:8])
-				d = d[8:]
-				if va == 0 {
-					break
-				}
-				if va&0x8000000000000000 > 0 { // is Ordinal
-					// TODO add dynimport ordinal support.
-				} else {
-					fn, _ := getString(names, int(uint32(va)-ds.VirtualAddress+2))
-					all = append(all, fn+":"+dt.dll)
-				}
-			} else { // 32bit
-				va := binary.LittleEndian.Uint32(d[0:4])
-				d = d[4:]
-				if va == 0 {
-					break
-				}
-				if va&0x80000000 > 0 { // is Ordinal
-					// TODO add dynimport ordinal support.
-					//ord := va&0x0000FFFF
-				} else {
-					fn, _ := getString(names, int(va-ds.VirtualAddress+2))
-					all = append(all, fn+":"+dt.dll)
-				}
-			}
-		}
-	}
-
-	return all, nil
-}
-
-// ImportedLibraries returns the names of all libraries
-// referred to by the binary f that are expected to be
-// linked with the binary at dynamic link time.
-func (f *File) ImportedLibraries() ([]string, error) {
-	// TODO
-	// cgo -dynimport doesn't use this for windows PE, so just return.
-	return nil, nil
-}
-
-// FormatError is unused.
-// The type is retained for compatibility.
-type FormatError struct {
-}
-
-func (e *FormatError) Error() string {
-	return "unknown error"
-}
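
NewFile above walks the standard PE layout: an optional MS-DOS stub whose 4-byte value at offset 0x3c points at the "PE\0\0" signature, the COFF file header, an optional header whose Magic field distinguishes PE32 (0x10b) from PE32+ (0x20b), and finally the section table. Because this bootstrap copy mirrors the standard library's debug/pe package, the same functionality is reachable through that public API; a small sketch of a consumer, with error handling kept to the essentials:

package main

import (
	"debug/pe"
	"fmt"
	"log"
	"os"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: peinfo <file.exe>")
	}
	f, err := pe.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fmt.Printf("machine: %#x  sections: %d\n", f.FileHeader.Machine, len(f.Sections))
	for _, s := range f.Sections {
		fmt.Printf("  %-16s va=%#x size=%d\n", s.Name, s.VirtualAddress, s.Size)
	}
	// Imports resolved at load time, in "symbol:dll" form.
	if syms, err := f.ImportedSymbols(); err == nil {
		for _, s := range syms {
			fmt.Println("import:", s)
		}
	}
}
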
diff --git a/pkg/bootstrap/src/bootstrap/debug/pe/file_test.go b/pkg/bootstrap/src/bootstrap/debug/pe/file_test.go
deleted file mode 100644
index 4235f3a..0000000
--- a/pkg/bootstrap/src/bootstrap/debug/pe/file_test.go
+++ /dev/null
@@ -1,420 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/file_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/file_test.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pe
-
-import (
-	"debug/dwarf"
-	"internal/testenv"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"reflect"
-	"runtime"
-	"testing"
-)
-
-type fileTest struct {
-	file           string
-	hdr            FileHeader
-	opthdr         interface{}
-	sections       []*SectionHeader
-	symbols        []*Symbol
-	hasNoDwarfInfo bool
-}
-
-var fileTests = []fileTest{
-	{
-		file: "testdata/gcc-386-mingw-obj",
-		hdr:  FileHeader{0x014c, 0x000c, 0x0, 0x64a, 0x1e, 0x0, 0x104},
-		sections: []*SectionHeader{
-			{".text", 0, 0, 36, 500, 1440, 0, 3, 0, 0x60300020},
-			{".data", 0, 0, 0, 0, 0, 0, 0, 0, 3224371264},
-			{".bss", 0, 0, 0, 0, 0, 0, 0, 0, 3224371328},
-			{".debug_abbrev", 0, 0, 137, 536, 0, 0, 0, 0, 0x42100000},
-			{".debug_info", 0, 0, 418, 673, 1470, 0, 7, 0, 1108344832},
-			{".debug_line", 0, 0, 128, 1091, 1540, 0, 1, 0, 1108344832},
-			{".rdata", 0, 0, 16, 1219, 0, 0, 0, 0, 1076887616},
-			{".debug_frame", 0, 0, 52, 1235, 1550, 0, 2, 0, 1110441984},
-			{".debug_loc", 0, 0, 56, 1287, 0, 0, 0, 0, 1108344832},
-			{".debug_pubnames", 0, 0, 27, 1343, 1570, 0, 1, 0, 1108344832},
-			{".debug_pubtypes", 0, 0, 38, 1370, 1580, 0, 1, 0, 1108344832},
-			{".debug_aranges", 0, 0, 32, 1408, 1590, 0, 2, 0, 1108344832},
-		},
-		symbols: []*Symbol{
-			{".file", 0x0, -2, 0x0, 0x67},
-			{"_main", 0x0, 1, 0x20, 0x2},
-			{".text", 0x0, 1, 0x0, 0x3},
-			{".data", 0x0, 2, 0x0, 0x3},
-			{".bss", 0x0, 3, 0x0, 0x3},
-			{".debug_abbrev", 0x0, 4, 0x0, 0x3},
-			{".debug_info", 0x0, 5, 0x0, 0x3},
-			{".debug_line", 0x0, 6, 0x0, 0x3},
-			{".rdata", 0x0, 7, 0x0, 0x3},
-			{".debug_frame", 0x0, 8, 0x0, 0x3},
-			{".debug_loc", 0x0, 9, 0x0, 0x3},
-			{".debug_pubnames", 0x0, 10, 0x0, 0x3},
-			{".debug_pubtypes", 0x0, 11, 0x0, 0x3},
-			{".debug_aranges", 0x0, 12, 0x0, 0x3},
-			{"___main", 0x0, 0, 0x20, 0x2},
-			{"_puts", 0x0, 0, 0x20, 0x2},
-		},
-	},
-	{
-		file: "testdata/gcc-386-mingw-exec",
-		hdr:  FileHeader{0x014c, 0x000f, 0x4c6a1b60, 0x3c00, 0x282, 0xe0, 0x107},
-		opthdr: &OptionalHeader32{
-			0x10b, 0x2, 0x38, 0xe00, 0x1a00, 0x200, 0x1160, 0x1000, 0x2000, 0x400000, 0x1000, 0x200, 0x4, 0x0, 0x1, 0x0, 0x4, 0x0, 0x0, 0x10000, 0x400, 0x14abb, 0x3, 0x0, 0x200000, 0x1000, 0x100000, 0x1000, 0x0, 0x10,
-			[16]DataDirectory{
-				{0x0, 0x0},
-				{0x5000, 0x3c8},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x7000, 0x18},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-			},
-		},
-		sections: []*SectionHeader{
-			{".text", 0xcd8, 0x1000, 0xe00, 0x400, 0x0, 0x0, 0x0, 0x0, 0x60500060},
-			{".data", 0x10, 0x2000, 0x200, 0x1200, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".rdata", 0x120, 0x3000, 0x200, 0x1400, 0x0, 0x0, 0x0, 0x0, 0x40300040},
-			{".bss", 0xdc, 0x4000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0400080},
-			{".idata", 0x3c8, 0x5000, 0x400, 0x1600, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".CRT", 0x18, 0x6000, 0x200, 0x1a00, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".tls", 0x20, 0x7000, 0x200, 0x1c00, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".debug_aranges", 0x20, 0x8000, 0x200, 0x1e00, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-			{".debug_pubnames", 0x51, 0x9000, 0x200, 0x2000, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-			{".debug_pubtypes", 0x91, 0xa000, 0x200, 0x2200, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-			{".debug_info", 0xe22, 0xb000, 0x1000, 0x2400, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-			{".debug_abbrev", 0x157, 0xc000, 0x200, 0x3400, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-			{".debug_line", 0x144, 0xd000, 0x200, 0x3600, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-			{".debug_frame", 0x34, 0xe000, 0x200, 0x3800, 0x0, 0x0, 0x0, 0x0, 0x42300000},
-			{".debug_loc", 0x38, 0xf000, 0x200, 0x3a00, 0x0, 0x0, 0x0, 0x0, 0x42100000},
-		},
-	},
-	{
-		file: "testdata/gcc-386-mingw-no-symbols-exec",
-		hdr:  FileHeader{0x14c, 0x8, 0x69676572, 0x0, 0x0, 0xe0, 0x30f},
-		opthdr: &OptionalHeader32{0x10b, 0x2, 0x18, 0xe00, 0x1e00, 0x200, 0x1280, 0x1000, 0x2000, 0x400000, 0x1000, 0x200, 0x4, 0x0, 0x1, 0x0, 0x4, 0x0, 0x0, 0x9000, 0x400, 0x5306, 0x3, 0x0, 0x200000, 0x1000, 0x100000, 0x1000, 0x0, 0x10,
-			[16]DataDirectory{
-				{0x0, 0x0},
-				{0x6000, 0x378},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x8004, 0x18},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x60b8, 0x7c},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-			},
-		},
-		sections: []*SectionHeader{
-			{".text", 0xc64, 0x1000, 0xe00, 0x400, 0x0, 0x0, 0x0, 0x0, 0x60500060},
-			{".data", 0x10, 0x2000, 0x200, 0x1200, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".rdata", 0x134, 0x3000, 0x200, 0x1400, 0x0, 0x0, 0x0, 0x0, 0x40300040},
-			{".eh_fram", 0x3a0, 0x4000, 0x400, 0x1600, 0x0, 0x0, 0x0, 0x0, 0x40300040},
-			{".bss", 0x60, 0x5000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0300080},
-			{".idata", 0x378, 0x6000, 0x400, 0x1a00, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".CRT", 0x18, 0x7000, 0x200, 0x1e00, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".tls", 0x20, 0x8000, 0x200, 0x2000, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-		},
-		hasNoDwarfInfo: true,
-	},
-	{
-		file: "testdata/gcc-amd64-mingw-obj",
-		hdr:  FileHeader{0x8664, 0x6, 0x0, 0x198, 0x12, 0x0, 0x4},
-		sections: []*SectionHeader{
-			{".text", 0x0, 0x0, 0x30, 0x104, 0x15c, 0x0, 0x3, 0x0, 0x60500020},
-			{".data", 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0500040},
-			{".bss", 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0500080},
-			{".rdata", 0x0, 0x0, 0x10, 0x134, 0x0, 0x0, 0x0, 0x0, 0x40500040},
-			{".xdata", 0x0, 0x0, 0xc, 0x144, 0x0, 0x0, 0x0, 0x0, 0x40300040},
-			{".pdata", 0x0, 0x0, 0xc, 0x150, 0x17a, 0x0, 0x3, 0x0, 0x40300040},
-		},
-		symbols: []*Symbol{
-			{".file", 0x0, -2, 0x0, 0x67},
-			{"main", 0x0, 1, 0x20, 0x2},
-			{".text", 0x0, 1, 0x0, 0x3},
-			{".data", 0x0, 2, 0x0, 0x3},
-			{".bss", 0x0, 3, 0x0, 0x3},
-			{".rdata", 0x0, 4, 0x0, 0x3},
-			{".xdata", 0x0, 5, 0x0, 0x3},
-			{".pdata", 0x0, 6, 0x0, 0x3},
-			{"__main", 0x0, 0, 0x20, 0x2},
-			{"puts", 0x0, 0, 0x20, 0x2},
-		},
-		hasNoDwarfInfo: true,
-	},
-	{
-		file: "testdata/gcc-amd64-mingw-exec",
-		hdr:  FileHeader{0x8664, 0x11, 0x53e4364f, 0x39600, 0x6fc, 0xf0, 0x27},
-		opthdr: &OptionalHeader64{
-			0x20b, 0x2, 0x16, 0x6a00, 0x2400, 0x1600, 0x14e0, 0x1000, 0x400000, 0x1000, 0x200, 0x4, 0x0, 0x0, 0x0, 0x5, 0x2, 0x0, 0x45000, 0x600, 0x46f19, 0x3, 0x0, 0x200000, 0x1000, 0x100000, 0x1000, 0x0, 0x10,
-			[16]DataDirectory{
-				{0x0, 0x0},
-				{0xe000, 0x990},
-				{0x0, 0x0},
-				{0xa000, 0x498},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x10000, 0x28},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0xe254, 0x218},
-				{0x0, 0x0},
-				{0x0, 0x0},
-				{0x0, 0x0},
-			}},
-		sections: []*SectionHeader{
-			{".text", 0x6860, 0x1000, 0x6a00, 0x600, 0x0, 0x0, 0x0, 0x0, 0x60500020},
-			{".data", 0xe0, 0x8000, 0x200, 0x7000, 0x0, 0x0, 0x0, 0x0, 0xc0500040},
-			{".rdata", 0x6b0, 0x9000, 0x800, 0x7200, 0x0, 0x0, 0x0, 0x0, 0x40600040},
-			{".pdata", 0x498, 0xa000, 0x600, 0x7a00, 0x0, 0x0, 0x0, 0x0, 0x40300040},
-			{".xdata", 0x488, 0xb000, 0x600, 0x8000, 0x0, 0x0, 0x0, 0x0, 0x40300040},
-			{".bss", 0x1410, 0xc000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0600080},
-			{".idata", 0x990, 0xe000, 0xa00, 0x8600, 0x0, 0x0, 0x0, 0x0, 0xc0300040},
-			{".CRT", 0x68, 0xf000, 0x200, 0x9000, 0x0, 0x0, 0x0, 0x0, 0xc0400040},
-			{".tls", 0x48, 0x10000, 0x200, 0x9200, 0x0, 0x0, 0x0, 0x0, 0xc0600040},
-			{".debug_aranges", 0x600, 0x11000, 0x600, 0x9400, 0x0, 0x0, 0x0, 0x0, 0x42500040},
-			{".debug_info", 0x1316e, 0x12000, 0x13200, 0x9a00, 0x0, 0x0, 0x0, 0x0, 0x42100040},
-			{".debug_abbrev", 0x2ccb, 0x26000, 0x2e00, 0x1cc00, 0x0, 0x0, 0x0, 0x0, 0x42100040},
-			{".debug_line", 0x3c4d, 0x29000, 0x3e00, 0x1fa00, 0x0, 0x0, 0x0, 0x0, 0x42100040},
-			{".debug_frame", 0x18b8, 0x2d000, 0x1a00, 0x23800, 0x0, 0x0, 0x0, 0x0, 0x42400040},
-			{".debug_str", 0x396, 0x2f000, 0x400, 0x25200, 0x0, 0x0, 0x0, 0x0, 0x42100040},
-			{".debug_loc", 0x13240, 0x30000, 0x13400, 0x25600, 0x0, 0x0, 0x0, 0x0, 0x42100040},
-			{".debug_ranges", 0xa70, 0x44000, 0xc00, 0x38a00, 0x0, 0x0, 0x0, 0x0, 0x42100040},
-		},
-	},
-}
-
-func isOptHdrEq(a, b interface{}) bool {
-	switch va := a.(type) {
-	case *OptionalHeader32:
-		vb, ok := b.(*OptionalHeader32)
-		if !ok {
-			return false
-		}
-		return *vb == *va
-	case *OptionalHeader64:
-		vb, ok := b.(*OptionalHeader64)
-		if !ok {
-			return false
-		}
-		return *vb == *va
-	case nil:
-		return b == nil
-	}
-	return false
-}
-
-func TestOpen(t *testing.T) {
-	for i := range fileTests {
-		tt := &fileTests[i]
-
-		f, err := Open(tt.file)
-		if err != nil {
-			t.Error(err)
-			continue
-		}
-		if !reflect.DeepEqual(f.FileHeader, tt.hdr) {
-			t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.FileHeader, tt.hdr)
-			continue
-		}
-		if !isOptHdrEq(tt.opthdr, f.OptionalHeader) {
-			t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.OptionalHeader, tt.opthdr)
-			continue
-		}
-
-		for i, sh := range f.Sections {
-			if i >= len(tt.sections) {
-				break
-			}
-			have := &sh.SectionHeader
-			want := tt.sections[i]
-			if !reflect.DeepEqual(have, want) {
-				t.Errorf("open %s, section %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want)
-			}
-		}
-		tn := len(tt.sections)
-		fn := len(f.Sections)
-		if tn != fn {
-			t.Errorf("open %s: len(Sections) = %d, want %d", tt.file, fn, tn)
-		}
-		for i, have := range f.Symbols {
-			if i >= len(tt.symbols) {
-				break
-			}
-			want := tt.symbols[i]
-			if !reflect.DeepEqual(have, want) {
-				t.Errorf("open %s, symbol %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want)
-			}
-		}
-		if !tt.hasNoDwarfInfo {
-			_, err = f.DWARF()
-			if err != nil {
-				t.Errorf("fetching %s dwarf details failed: %v", tt.file, err)
-			}
-		}
-	}
-}
-
-func TestOpenFailure(t *testing.T) {
-	filename := "file.go"    // not a PE file
-	_, err := Open(filename) // don't crash
-	if err == nil {
-		t.Errorf("open %s: succeeded unexpectedly", filename)
-	}
-}
-
-func TestDWARF(t *testing.T) {
-	if runtime.GOOS != "windows" {
-		t.Skip("skipping windows only test")
-	}
-
-	tmpdir, err := ioutil.TempDir("", "TestDWARF")
-	if err != nil {
-		t.Fatal("TempDir failed: ", err)
-	}
-	defer os.RemoveAll(tmpdir)
-
-	prog := `
-package main
-func main() {
-}
-`
-	src := filepath.Join(tmpdir, "a.go")
-	exe := filepath.Join(tmpdir, "a.exe")
-	err = ioutil.WriteFile(src, []byte(prog), 0644)
-	if err != nil {
-		t.Fatal(err)
-	}
-	output, err := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, src).CombinedOutput()
-	if err != nil {
-		t.Fatalf("building test executable failed: %s %s", err, output)
-	}
-
-	f, err := Open(exe)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	d, err := f.DWARF()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// look for main.main
-	r := d.Reader()
-	for {
-		e, err := r.Next()
-		if err != nil {
-			t.Fatal("r.Next:", err)
-		}
-		if e == nil {
-			break
-		}
-		if e.Tag == dwarf.TagSubprogram {
-			for _, f := range e.Field {
-				if f.Attr == dwarf.AttrName && e.Val(dwarf.AttrName) == "main.main" {
-					return
-				}
-			}
-		}
-	}
-	t.Fatal("main.main not found")
-}
-
-func TestBSSHasZeros(t *testing.T) {
-	testenv.MustHaveExec(t)
-
-	if runtime.GOOS != "windows" {
-		t.Skip("skipping windows only test")
-	}
-	gccpath, err := exec.LookPath("gcc")
-	if err != nil {
-		t.Skip("skipping test: gcc is missing")
-	}
-
-	tmpdir, err := ioutil.TempDir("", "TestBSSHasZeros")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpdir)
-
-	srcpath := filepath.Join(tmpdir, "a.c")
-	src := `
-#include <stdio.h>
-
-int zero = 0;
-
-int
-main(void)
-{
-	printf("%d\n", zero);
-	return 0;
-}
-`
-	err = ioutil.WriteFile(srcpath, []byte(src), 0644)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	objpath := filepath.Join(tmpdir, "a.obj")
-	cmd := exec.Command(gccpath, "-c", srcpath, "-o", objpath)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("failed to build object file: %v - %v", err, string(out))
-	}
-
-	f, err := Open(objpath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	var bss *Section
-	for _, sect := range f.Sections {
-		if sect.Name == ".bss" {
-			bss = sect
-			break
-		}
-	}
-	if bss == nil {
-		t.Fatal("could not find .bss section")
-	}
-	data, err := bss.Data()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(data) == 0 {
-		t.Fatalf("%s file .bss section cannot be empty", objpath)
-	}
-	for _, b := range data {
-		if b != 0 {
-			t.Fatalf(".bss section has non zero bytes: %v", data)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/debug/pe/pe.go b/pkg/bootstrap/src/bootstrap/debug/pe/pe.go
deleted file mode 100644
index 4750582..0000000
--- a/pkg/bootstrap/src/bootstrap/debug/pe/pe.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/pe.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/pe.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pe
-
-type FileHeader struct {
-	Machine              uint16
-	NumberOfSections     uint16
-	TimeDateStamp        uint32
-	PointerToSymbolTable uint32
-	NumberOfSymbols      uint32
-	SizeOfOptionalHeader uint16
-	Characteristics      uint16
-}
-
-type DataDirectory struct {
-	VirtualAddress uint32
-	Size           uint32
-}
-
-type OptionalHeader32 struct {
-	Magic                       uint16
-	MajorLinkerVersion          uint8
-	MinorLinkerVersion          uint8
-	SizeOfCode                  uint32
-	SizeOfInitializedData       uint32
-	SizeOfUninitializedData     uint32
-	AddressOfEntryPoint         uint32
-	BaseOfCode                  uint32
-	BaseOfData                  uint32
-	ImageBase                   uint32
-	SectionAlignment            uint32
-	FileAlignment               uint32
-	MajorOperatingSystemVersion uint16
-	MinorOperatingSystemVersion uint16
-	MajorImageVersion           uint16
-	MinorImageVersion           uint16
-	MajorSubsystemVersion       uint16
-	MinorSubsystemVersion       uint16
-	Win32VersionValue           uint32
-	SizeOfImage                 uint32
-	SizeOfHeaders               uint32
-	CheckSum                    uint32
-	Subsystem                   uint16
-	DllCharacteristics          uint16
-	SizeOfStackReserve          uint32
-	SizeOfStackCommit           uint32
-	SizeOfHeapReserve           uint32
-	SizeOfHeapCommit            uint32
-	LoaderFlags                 uint32
-	NumberOfRvaAndSizes         uint32
-	DataDirectory               [16]DataDirectory
-}
-
-type OptionalHeader64 struct {
-	Magic                       uint16
-	MajorLinkerVersion          uint8
-	MinorLinkerVersion          uint8
-	SizeOfCode                  uint32
-	SizeOfInitializedData       uint32
-	SizeOfUninitializedData     uint32
-	AddressOfEntryPoint         uint32
-	BaseOfCode                  uint32
-	ImageBase                   uint64
-	SectionAlignment            uint32
-	FileAlignment               uint32
-	MajorOperatingSystemVersion uint16
-	MinorOperatingSystemVersion uint16
-	MajorImageVersion           uint16
-	MinorImageVersion           uint16
-	MajorSubsystemVersion       uint16
-	MinorSubsystemVersion       uint16
-	Win32VersionValue           uint32
-	SizeOfImage                 uint32
-	SizeOfHeaders               uint32
-	CheckSum                    uint32
-	Subsystem                   uint16
-	DllCharacteristics          uint16
-	SizeOfStackReserve          uint64
-	SizeOfStackCommit           uint64
-	SizeOfHeapReserve           uint64
-	SizeOfHeapCommit            uint64
-	LoaderFlags                 uint32
-	NumberOfRvaAndSizes         uint32
-	DataDirectory               [16]DataDirectory
-}
-
-const (
-	IMAGE_FILE_MACHINE_UNKNOWN   = 0x0
-	IMAGE_FILE_MACHINE_AM33      = 0x1d3
-	IMAGE_FILE_MACHINE_AMD64     = 0x8664
-	IMAGE_FILE_MACHINE_ARM       = 0x1c0
-	IMAGE_FILE_MACHINE_EBC       = 0xebc
-	IMAGE_FILE_MACHINE_I386      = 0x14c
-	IMAGE_FILE_MACHINE_IA64      = 0x200
-	IMAGE_FILE_MACHINE_M32R      = 0x9041
-	IMAGE_FILE_MACHINE_MIPS16    = 0x266
-	IMAGE_FILE_MACHINE_MIPSFPU   = 0x366
-	IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466
-	IMAGE_FILE_MACHINE_POWERPC   = 0x1f0
-	IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1
-	IMAGE_FILE_MACHINE_R4000     = 0x166
-	IMAGE_FILE_MACHINE_SH3       = 0x1a2
-	IMAGE_FILE_MACHINE_SH3DSP    = 0x1a3
-	IMAGE_FILE_MACHINE_SH4       = 0x1a6
-	IMAGE_FILE_MACHINE_SH5       = 0x1a8
-	IMAGE_FILE_MACHINE_THUMB     = 0x1c2
-	IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169
-)
diff --git a/pkg/bootstrap/src/bootstrap/debug/pe/section.go b/pkg/bootstrap/src/bootstrap/debug/pe/section.go
deleted file mode 100644
index 62230bd..0000000
--- a/pkg/bootstrap/src/bootstrap/debug/pe/section.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/section.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/section.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pe
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"strconv"
-)
-
-// SectionHeader32 represents a real PE COFF section header.
-type SectionHeader32 struct {
-	Name                 [8]uint8
-	VirtualSize          uint32
-	VirtualAddress       uint32
-	SizeOfRawData        uint32
-	PointerToRawData     uint32
-	PointerToRelocations uint32
-	PointerToLineNumbers uint32
-	NumberOfRelocations  uint16
-	NumberOfLineNumbers  uint16
-	Characteristics      uint32
-}
-
-// fullName finds the real name of section sh. Normally the name is stored
-// in sh.Name, but if it is longer than 8 characters, it is stored
-// in the COFF string table st instead.
-func (sh *SectionHeader32) fullName(st StringTable) (string, error) {
-	if sh.Name[0] != '/' {
-		return cstring(sh.Name[:]), nil
-	}
-	i, err := strconv.Atoi(cstring(sh.Name[1:]))
-	if err != nil {
-		return "", err
-	}
-	return st.String(uint32(i))
-}
-
-// TODO(brainman): copy all IMAGE_REL_* consts from ldpe.go here
-
-// Reloc represents a PE COFF relocation.
-// Each section contains its own relocation list.
-type Reloc struct {
-	VirtualAddress   uint32
-	SymbolTableIndex uint32
-	Type             uint16
-}
-
-func readRelocs(sh *SectionHeader, r io.ReadSeeker) ([]Reloc, error) {
-	if sh.NumberOfRelocations <= 0 {
-		return nil, nil
-	}
-	_, err := r.Seek(int64(sh.PointerToRelocations), seekStart)
-	if err != nil {
-		return nil, fmt.Errorf("fail to seek to %q section relocations: %v", sh.Name, err)
-	}
-	relocs := make([]Reloc, sh.NumberOfRelocations)
-	err = binary.Read(r, binary.LittleEndian, relocs)
-	if err != nil {
-		return nil, fmt.Errorf("fail to read section relocations: %v", err)
-	}
-	return relocs, nil
-}
-
-// SectionHeader is similar to SectionHeader32 with Name
-// field replaced by Go string.
-type SectionHeader struct {
-	Name                 string
-	VirtualSize          uint32
-	VirtualAddress       uint32
-	Size                 uint32
-	Offset               uint32
-	PointerToRelocations uint32
-	PointerToLineNumbers uint32
-	NumberOfRelocations  uint16
-	NumberOfLineNumbers  uint16
-	Characteristics      uint32
-}
-
-// Section provides access to PE COFF section.
-type Section struct {
-	SectionHeader
-	Relocs []Reloc
-
-	// Embed ReaderAt for ReadAt method.
-	// Do not embed SectionReader directly
-	// to avoid having Read and Seek.
-	// If a client wants Read and Seek it must use
-	// Open() to avoid fighting over the seek offset
-	// with other clients.
-	io.ReaderAt
-	sr *io.SectionReader
-}
-
-// Data reads and returns the contents of the PE section s.
-func (s *Section) Data() ([]byte, error) {
-	dat := make([]byte, s.sr.Size())
-	n, err := s.sr.ReadAt(dat, 0)
-	if n == len(dat) {
-		err = nil
-	}
-	return dat[0:n], err
-}
-
-// Open returns a new ReadSeeker reading the PE section s.
-func (s *Section) Open() io.ReadSeeker {
-	return io.NewSectionReader(s.sr, 0, 1<<63-1)
-}
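
fullName above implements the COFF long-name convention: a section name that does not fit in the 8-byte Name field is stored as "/N", where N is the decimal ASCII offset of the real name inside the string table (whose first 4 bytes hold the table length). A small self-contained illustration of that decoding, using plain byte slices rather than the package's helpers:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// decodeName resolves an 8-byte COFF section name against a string table
// whose 4-byte length field has already been stripped, mirroring the
// "/offset" convention handled by fullName above.
func decodeName(name [8]byte, strtab []byte) (string, error) {
	s := strings.TrimRight(string(name[:]), "\x00")
	if !strings.HasPrefix(s, "/") {
		return s, nil // short name, stored inline
	}
	off, err := strconv.Atoi(s[1:])
	if err != nil {
		return "", err
	}
	off -= 4 // offsets are relative to the start of the length field
	if off < 0 || off >= len(strtab) {
		return "", fmt.Errorf("offset %d outside string table", off)
	}
	end := off
	for end < len(strtab) && strtab[end] != 0 {
		end++
	}
	return string(strtab[off:end]), nil
}

func main() {
	strtab := []byte(".debug_abbrev\x00.debug_info\x00")
	var name [8]byte
	copy(name[:], "/4") // offset 4 = first string, right after the length field
	fmt.Println(decodeName(name, strtab))
}
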
diff --git a/pkg/bootstrap/src/bootstrap/debug/pe/string.go b/pkg/bootstrap/src/bootstrap/debug/pe/string.go
deleted file mode 100644
index f5f07a4..0000000
--- a/pkg/bootstrap/src/bootstrap/debug/pe/string.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/string.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/string.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pe
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-// cstring converts the ASCII byte sequence b to a string.
-// It stops at the first 0 byte or at the end of b.
-func cstring(b []byte) string {
-	var i int
-	for i = 0; i < len(b) && b[i] != 0; i++ {
-	}
-	return string(b[:i])
-}
-
-// StringTable is a COFF string table.
-type StringTable []byte
-
-func readStringTable(fh *FileHeader, r io.ReadSeeker) (StringTable, error) {
-	// COFF string table is located right after COFF symbol table.
-	if fh.PointerToSymbolTable <= 0 {
-		return nil, nil
-	}
-	offset := fh.PointerToSymbolTable + COFFSymbolSize*fh.NumberOfSymbols
-	_, err := r.Seek(int64(offset), seekStart)
-	if err != nil {
-		return nil, fmt.Errorf("fail to seek to string table: %v", err)
-	}
-	var l uint32
-	err = binary.Read(r, binary.LittleEndian, &l)
-	if err != nil {
-		return nil, fmt.Errorf("fail to read string table length: %v", err)
-	}
-	// string table length includes itself
-	if l <= 4 {
-		return nil, nil
-	}
-	l -= 4
-	buf := make([]byte, l)
-	_, err = io.ReadFull(r, buf)
-	if err != nil {
-		return nil, fmt.Errorf("fail to read string table: %v", err)
-	}
-	return StringTable(buf), nil
-}
-
-// TODO(brainman): decide if start parameter should be int instead of uint32
-
-// String extracts string from COFF string table st at offset start.
-func (st StringTable) String(start uint32) (string, error) {
-	// start includes 4 bytes of string table length
-	if start < 4 {
-		return "", fmt.Errorf("offset %d is before the start of string table", start)
-	}
-	start -= 4
-	if int(start) > len(st) {
-		return "", fmt.Errorf("offset %d is beyond the end of string table", start)
-	}
-	return cstring(st[start:]), nil
-}
diff --git a/pkg/bootstrap/src/bootstrap/debug/pe/symbol.go b/pkg/bootstrap/src/bootstrap/debug/pe/symbol.go
deleted file mode 100644
index 04e3484..0000000
--- a/pkg/bootstrap/src/bootstrap/debug/pe/symbol.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/symbol.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/debug/pe/symbol.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pe
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-const COFFSymbolSize = 18
-
-// COFFSymbol represents a single COFF symbol table record.
-type COFFSymbol struct {
-	Name               [8]uint8
-	Value              uint32
-	SectionNumber      int16
-	Type               uint16
-	StorageClass       uint8
-	NumberOfAuxSymbols uint8
-}
-
-func readCOFFSymbols(fh *FileHeader, r io.ReadSeeker) ([]COFFSymbol, error) {
-	if fh.PointerToSymbolTable == 0 {
-		return nil, nil
-	}
-	if fh.NumberOfSymbols <= 0 {
-		return nil, nil
-	}
-	_, err := r.Seek(int64(fh.PointerToSymbolTable), seekStart)
-	if err != nil {
-		return nil, fmt.Errorf("fail to seek to symbol table: %v", err)
-	}
-	syms := make([]COFFSymbol, fh.NumberOfSymbols)
-	err = binary.Read(r, binary.LittleEndian, syms)
-	if err != nil {
-		return nil, fmt.Errorf("fail to read symbol table: %v", err)
-	}
-	return syms, nil
-}
-
-// isSymNameOffset reports whether the symbol name is encoded as an offset into the string table.
-func isSymNameOffset(name [8]byte) (bool, uint32) {
-	if name[0] == 0 && name[1] == 0 && name[2] == 0 && name[3] == 0 {
-		return true, binary.LittleEndian.Uint32(name[4:])
-	}
-	return false, 0
-}
-
-// FullName finds the real name of symbol sym. Normally the name is stored
-// in sym.Name, but if it is longer than 8 characters, it is stored
-// in the COFF string table st instead.
-func (sym *COFFSymbol) FullName(st StringTable) (string, error) {
-	if ok, offset := isSymNameOffset(sym.Name); ok {
-		return st.String(offset)
-	}
-	return cstring(sym.Name[:]), nil
-}
-
-func removeAuxSymbols(allsyms []COFFSymbol, st StringTable) ([]*Symbol, error) {
-	if len(allsyms) == 0 {
-		return nil, nil
-	}
-	syms := make([]*Symbol, 0)
-	aux := uint8(0)
-	for _, sym := range allsyms {
-		if aux > 0 {
-			aux--
-			continue
-		}
-		name, err := sym.FullName(st)
-		if err != nil {
-			return nil, err
-		}
-		aux = sym.NumberOfAuxSymbols
-		s := &Symbol{
-			Name:          name,
-			Value:         sym.Value,
-			SectionNumber: sym.SectionNumber,
-			Type:          sym.Type,
-			StorageClass:  sym.StorageClass,
-		}
-		syms = append(syms, s)
-	}
-	return syms, nil
-}
-
-// Symbol is similar to COFFSymbol with Name field replaced
-// by Go string. Symbol also does not have NumberOfAuxSymbols.
-type Symbol struct {
-	Name          string
-	Value         uint32
-	SectionNumber int16
-	Type          uint16
-	StorageClass  uint8
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/accuracy_string.go b/pkg/bootstrap/src/bootstrap/math/big/accuracy_string.go
deleted file mode 100644
index 7312c83..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/accuracy_string.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/accuracy_string.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/accuracy_string.go:1
-// generated by stringer -type=Accuracy; DO NOT EDIT
-
-package big
-
-import "fmt"
-
-const _Accuracy_name = "BelowExactAbove"
-
-var _Accuracy_index = [...]uint8{0, 5, 10, 15}
-
-func (i Accuracy) String() string {
-	i -= -1
-	if i < 0 || i+1 >= Accuracy(len(_Accuracy_index)) {
-		return fmt.Sprintf("Accuracy(%d)", i+-1)
-	}
-	return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]]
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith.go b/pkg/bootstrap/src/bootstrap/math/big/arith.go
deleted file mode 100644
index 7f21550..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides Go implementations of elementary multi-precision
-// arithmetic operations on word vectors. Needed for platforms without
-// assembly implementations of these routines.
-
-package big
-
-// A Word represents a single digit of a multi-precision unsigned integer.
-type Word uintptr
-
-const (
-	// Compute the size _S of a Word in bytes.
-	_m    = ^Word(0)
-	_logS = _m>>8&1 + _m>>16&1 + _m>>32&1
-	_S    = 1 << _logS
-
-	_W = _S << 3 // word size in bits
-	_B = 1 << _W // digit base
-	_M = _B - 1  // digit mask
-
-	_W2 = _W / 2   // half word size in bits
-	_B2 = 1 << _W2 // half digit base
-	_M2 = _B2 - 1  // half digit mask
-)
-
-// ----------------------------------------------------------------------------
-// Elementary operations on words
-//
-// These operations are used by the vector operations below.
-
-// z1<<_W + z0 = x+y+c, with c == 0 or 1
-func addWW_g(x, y, c Word) (z1, z0 Word) {
-	yc := y + c
-	z0 = x + yc
-	if z0 < x || yc < y {
-		z1 = 1
-	}
-	return
-}
-
-// z1<<_W + z0 = x-y-c, with c == 0 or 1
-func subWW_g(x, y, c Word) (z1, z0 Word) {
-	yc := y + c
-	z0 = x - yc
-	if z0 > x || yc < y {
-		z1 = 1
-	}
-	return
-}
-
-// z1<<_W + z0 = x*y
-// Adapted from Warren, Hacker's Delight, p. 132.
-func mulWW_g(x, y Word) (z1, z0 Word) {
-	x0 := x & _M2
-	x1 := x >> _W2
-	y0 := y & _M2
-	y1 := y >> _W2
-	w0 := x0 * y0
-	t := x1*y0 + w0>>_W2
-	w1 := t & _M2
-	w2 := t >> _W2
-	w1 += x0 * y1
-	z1 = x1*y1 + w2 + w1>>_W2
-	z0 = x * y
-	return
-}
-
-// z1<<_W + z0 = x*y + c
-func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
-	z1, zz0 := mulWW_g(x, y)
-	if z0 = zz0 + c; z0 < zz0 {
-		z1++
-	}
-	return
-}
-
-// Length of x in bits.
-func bitLen_g(x Word) (n int) {
-	for ; x >= 0x8000; x >>= 16 {
-		n += 16
-	}
-	if x >= 0x80 {
-		x >>= 8
-		n += 8
-	}
-	if x >= 0x8 {
-		x >>= 4
-		n += 4
-	}
-	if x >= 0x2 {
-		x >>= 2
-		n += 2
-	}
-	if x >= 0x1 {
-		n++
-	}
-	return
-}
-
-// log2 computes the integer binary logarithm of x.
-// The result is the integer n for which 2^n <= x < 2^(n+1).
-// If x == 0, the result is -1.
-func log2(x Word) int {
-	return bitLen(x) - 1
-}
-
-// nlz returns the number of leading zeros in x.
-func nlz(x Word) uint {
-	return uint(_W - bitLen(x))
-}
-
-// nlz64 returns the number of leading zeros in x.
-func nlz64(x uint64) uint {
-	switch _W {
-	case 32:
-		w := x >> 32
-		if w == 0 {
-			return 32 + nlz(Word(x))
-		}
-		return nlz(Word(w))
-	case 64:
-		return nlz(Word(x))
-	}
-	panic("unreachable")
-}
-
-// q = (u1<<_W + u0 - r)/y
-// Adapted from Warren, Hacker's Delight, p. 152.
-func divWW_g(u1, u0, v Word) (q, r Word) {
-	if u1 >= v {
-		return 1<<_W - 1, 1<<_W - 1
-	}
-
-	s := nlz(v)
-	v <<= s
-
-	vn1 := v >> _W2
-	vn0 := v & _M2
-	un32 := u1<<s | u0>>(_W-s)
-	un10 := u0 << s
-	un1 := un10 >> _W2
-	un0 := un10 & _M2
-	q1 := un32 / vn1
-	rhat := un32 - q1*vn1
-
-	for q1 >= _B2 || q1*vn0 > _B2*rhat+un1 {
-		q1--
-		rhat += vn1
-		if rhat >= _B2 {
-			break
-		}
-	}
-
-	un21 := un32*_B2 + un1 - q1*v
-	q0 := un21 / vn1
-	rhat = un21 - q0*vn1
-
-	for q0 >= _B2 || q0*vn0 > _B2*rhat+un0 {
-		q0--
-		rhat += vn1
-		if rhat >= _B2 {
-			break
-		}
-	}
-
-	return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s
-}
-
-// Keep for performance debugging.
-// Using addWW_g is likely slower.
-const use_addWW_g = false
-
-// The resulting carry c is either 0 or 1.
-func addVV_g(z, x, y []Word) (c Word) {
-	if use_addWW_g {
-		for i := range z {
-			c, z[i] = addWW_g(x[i], y[i], c)
-		}
-		return
-	}
-
-	for i, xi := range x[:len(z)] {
-		yi := y[i]
-		zi := xi + yi + c
-		z[i] = zi
-		// see "Hacker's Delight", section 2-12 (overflow detection)
-		c = (xi&yi | (xi|yi)&^zi) >> (_W - 1)
-	}
-	return
-}
-
-// The resulting carry c is either 0 or 1.
-func subVV_g(z, x, y []Word) (c Word) {
-	if use_addWW_g {
-		for i := range z {
-			c, z[i] = subWW_g(x[i], y[i], c)
-		}
-		return
-	}
-
-	for i, xi := range x[:len(z)] {
-		yi := y[i]
-		zi := xi - yi - c
-		z[i] = zi
-		// see "Hacker's Delight", section 2-12 (overflow detection)
-		c = (yi&^xi | (yi|^xi)&zi) >> (_W - 1)
-	}
-	return
-}
-
-// The resulting carry c is either 0 or 1.
-func addVW_g(z, x []Word, y Word) (c Word) {
-	if use_addWW_g {
-		c = y
-		for i := range z {
-			c, z[i] = addWW_g(x[i], c, 0)
-		}
-		return
-	}
-
-	c = y
-	for i, xi := range x[:len(z)] {
-		zi := xi + c
-		z[i] = zi
-		c = xi &^ zi >> (_W - 1)
-	}
-	return
-}
-
-func subVW_g(z, x []Word, y Word) (c Word) {
-	if use_addWW_g {
-		c = y
-		for i := range z {
-			c, z[i] = subWW_g(x[i], c, 0)
-		}
-		return
-	}
-
-	c = y
-	for i, xi := range x[:len(z)] {
-		zi := xi - c
-		z[i] = zi
-		c = (zi &^ xi) >> (_W - 1)
-	}
-	return
-}
-
-func shlVU_g(z, x []Word, s uint) (c Word) {
-	if n := len(z); n > 0 {
-		ŝ := _W - s
-		w1 := x[n-1]
-		c = w1 >> ŝ
-		for i := n - 1; i > 0; i-- {
-			w := w1
-			w1 = x[i-1]
-			z[i] = w<<s | w1>>ŝ
-		}
-		z[0] = w1 << s
-	}
-	return
-}
-
-func shrVU_g(z, x []Word, s uint) (c Word) {
-	if n := len(z); n > 0 {
-		ŝ := _W - s
-		w1 := x[0]
-		c = w1 << ŝ
-		for i := 0; i < n-1; i++ {
-			w := w1
-			w1 = x[i+1]
-			z[i] = w>>s | w1<<ŝ
-		}
-		z[n-1] = w1 >> s
-	}
-	return
-}
-
-func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
-	c = r
-	for i := range z {
-		c, z[i] = mulAddWWW_g(x[i], y, c)
-	}
-	return
-}
-
-// TODO(gri) Remove use of addWW_g here and then we can remove addWW_g and subWW_g.
-func addMulVVW_g(z, x []Word, y Word) (c Word) {
-	for i := range z {
-		z1, z0 := mulAddWWW_g(x[i], y, z[i])
-		c, z[i] = addWW_g(z0, c, 0)
-		c += z1
-	}
-	return
-}
-
-func divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {
-	r = xn
-	for i := len(z) - 1; i >= 0; i-- {
-		z[i], r = divWW_g(r, x[i], y)
-	}
-	return
-}
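
The portable addVV_g and subVV_g above propagate the carry and borrow with the branch-free identities from Hacker's Delight, section 2-12, instead of comparing the sum against its operands. As a standalone illustration (not part of this tree; addCarry and subBorrow are made-up names), the same identities can be exercised on single 64-bit words:

package main

import "fmt"

// addCarry mirrors one addVV_g step: z = x + y + c, with the carry-out
// read from the sign bit of x&y | (x|y)&^z.
func addCarry(x, y, c uint64) (z, carry uint64) {
	z = x + y + c
	carry = (x&y | (x|y)&^z) >> 63
	return
}

// subBorrow mirrors one subVV_g step: z = x - y - c, with the borrow-out
// read from the sign bit of y&^x | (y|^x)&z.
func subBorrow(x, y, c uint64) (z, borrow uint64) {
	z = x - y - c
	borrow = (y&^x | (y|^x)&z) >> 63
	return
}

func main() {
	for _, t := range [][3]uint64{
		{^uint64(0), 1, 0}, // addition wraps around: carry 1, borrow 0
		{1, 2, 1},          // subtraction goes negative: carry 0, borrow 1
		{0, 0, 0},          // trivial case: carry 0, borrow 0
	} {
		_, c := addCarry(t[0], t[1], t[2])
		_, b := subBorrow(t[0], t[1], t[2])
		fmt.Println(c, b)
	}
}

For a word size other than 64 the shift count would be _W - 1, exactly as written in the deleted code.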
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_386.s b/pkg/bootstrap/src/bootstrap/math/big/arith_386.s
deleted file mode 100644
index 697c57d..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_386.s
+++ /dev/null
@@ -1,285 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_386.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_386.s:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-// func mulWW(x, y Word) (z1, z0 Word)
-TEXT ·mulWW(SB),NOSPLIT,$0
-	MOVL x+0(FP), AX
-	MULL y+4(FP)
-	MOVL DX, z1+8(FP)
-	MOVL AX, z0+12(FP)
-	RET
-
-
-// func divWW(x1, x0, y Word) (q, r Word)
-TEXT ·divWW(SB),NOSPLIT,$0
-	MOVL x1+0(FP), DX
-	MOVL x0+4(FP), AX
-	DIVL y+8(FP)
-	MOVL AX, q+12(FP)
-	MOVL DX, r+16(FP)
-	RET
-
-
-// func addVV(z, x, y []Word) (c Word)
-TEXT ·addVV(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL y+24(FP), CX
-	MOVL z_len+4(FP), BP
-	MOVL $0, BX		// i = 0
-	MOVL $0, DX		// c = 0
-	JMP E1
-
-L1:	MOVL (SI)(BX*4), AX
-	ADDL DX, DX		// restore CF
-	ADCL (CX)(BX*4), AX
-	SBBL DX, DX		// save CF
-	MOVL AX, (DI)(BX*4)
-	ADDL $1, BX		// i++
-
-E1:	CMPL BX, BP		// i < n
-	JL L1
-
-	NEGL DX
-	MOVL DX, c+36(FP)
-	RET
-
-
-// func subVV(z, x, y []Word) (c Word)
-// (same as addVV except for SBBL instead of ADCL and label names)
-TEXT ·subVV(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL y+24(FP), CX
-	MOVL z_len+4(FP), BP
-	MOVL $0, BX		// i = 0
-	MOVL $0, DX		// c = 0
-	JMP E2
-
-L2:	MOVL (SI)(BX*4), AX
-	ADDL DX, DX		// restore CF
-	SBBL (CX)(BX*4), AX
-	SBBL DX, DX		// save CF
-	MOVL AX, (DI)(BX*4)
-	ADDL $1, BX		// i++
-
-E2:	CMPL BX, BP		// i < n
-	JL L2
-
-	NEGL DX
-	MOVL DX, c+36(FP)
-	RET
-
-
-// func addVW(z, x []Word, y Word) (c Word)
-TEXT ·addVW(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL y+24(FP), AX	// c = y
-	MOVL z_len+4(FP), BP
-	MOVL $0, BX		// i = 0
-	JMP E3
-
-L3:	ADDL (SI)(BX*4), AX
-	MOVL AX, (DI)(BX*4)
-	SBBL AX, AX		// save CF
-	NEGL AX
-	ADDL $1, BX		// i++
-
-E3:	CMPL BX, BP		// i < n
-	JL L3
-
-	MOVL AX, c+28(FP)
-	RET
-
-
-// func subVW(z, x []Word, y Word) (c Word)
-TEXT ·subVW(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL y+24(FP), AX	// c = y
-	MOVL z_len+4(FP), BP
-	MOVL $0, BX		// i = 0
-	JMP E4
-
-L4:	MOVL (SI)(BX*4), DX
-	SUBL AX, DX
-	MOVL DX, (DI)(BX*4)
-	SBBL AX, AX		// save CF
-	NEGL AX
-	ADDL $1, BX		// i++
-
-E4:	CMPL BX, BP		// i < n
-	JL L4
-
-	MOVL AX, c+28(FP)
-	RET
-
-
-// func shlVU(z, x []Word, s uint) (c Word)
-TEXT ·shlVU(SB),NOSPLIT,$0
-	MOVL z_len+4(FP), BX	// i = z_len
-	SUBL $1, BX		// i--
-	JL X8b			// i < 0	(n <= 0)
-
-	// n > 0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL s+24(FP), CX
-	MOVL (SI)(BX*4), AX	// w1 = x[n-1]
-	MOVL $0, DX
-	SHLL CX, DX:AX		// w1>>ŝ
-	MOVL DX, c+28(FP)
-
-	CMPL BX, $0
-	JLE X8a			// i <= 0
-
-	// i > 0
-L8:	MOVL AX, DX		// w = w1
-	MOVL -4(SI)(BX*4), AX	// w1 = x[i-1]
-	SHLL CX, DX:AX		// w<<s | w1>>ŝ
-	MOVL DX, (DI)(BX*4)	// z[i] = w<<s | w1>>ŝ
-	SUBL $1, BX		// i--
-	JG L8			// i > 0
-
-	// i <= 0
-X8a:	SHLL CX, AX		// w1<<s
-	MOVL AX, (DI)		// z[0] = w1<<s
-	RET
-
-X8b:	MOVL $0, c+28(FP)
-	RET
-
-
-// func shrVU(z, x []Word, s uint) (c Word)
-TEXT ·shrVU(SB),NOSPLIT,$0
-	MOVL z_len+4(FP), BP
-	SUBL $1, BP		// n--
-	JL X9b			// n < 0	(n <= 0)
-
-	// n > 0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL s+24(FP), CX
-	MOVL (SI), AX		// w1 = x[0]
-	MOVL $0, DX
-	SHRL CX, DX:AX		// w1<<ŝ
-	MOVL DX, c+28(FP)
-
-	MOVL $0, BX		// i = 0
-	JMP E9
-
-	// i < n-1
-L9:	MOVL AX, DX		// w = w1
-	MOVL 4(SI)(BX*4), AX	// w1 = x[i+1]
-	SHRL CX, DX:AX		// w>>s | w1<<ŝ
-	MOVL DX, (DI)(BX*4)	// z[i] = w>>s | w1<<ŝ
-	ADDL $1, BX		// i++
-	
-E9:	CMPL BX, BP
-	JL L9			// i < n-1
-
-	// i >= n-1
-X9a:	SHRL CX, AX		// w1>>s
-	MOVL AX, (DI)(BP*4)	// z[n-1] = w1>>s
-	RET
-
-X9b:	MOVL $0, c+28(FP)
-	RET
-
-
-// func mulAddVWW(z, x []Word, y, r Word) (c Word)
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL y+24(FP), BP
-	MOVL r+28(FP), CX	// c = r
-	MOVL z_len+4(FP), BX
-	LEAL (DI)(BX*4), DI
-	LEAL (SI)(BX*4), SI
-	NEGL BX			// i = -n
-	JMP E5
-
-L5:	MOVL (SI)(BX*4), AX
-	MULL BP
-	ADDL CX, AX
-	ADCL $0, DX
-	MOVL AX, (DI)(BX*4)
-	MOVL DX, CX
-	ADDL $1, BX		// i++
-
-E5:	CMPL BX, $0		// i < 0
-	JL L5
-
-	MOVL CX, c+32(FP)
-	RET
-
-
-// func addMulVVW(z, x []Word, y Word) (c Word)
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL x+12(FP), SI
-	MOVL y+24(FP), BP
-	MOVL z_len+4(FP), BX
-	LEAL (DI)(BX*4), DI
-	LEAL (SI)(BX*4), SI
-	NEGL BX			// i = -n
-	MOVL $0, CX		// c = 0
-	JMP E6
-
-L6:	MOVL (SI)(BX*4), AX
-	MULL BP
-	ADDL CX, AX
-	ADCL $0, DX
-	ADDL AX, (DI)(BX*4)
-	ADCL $0, DX
-	MOVL DX, CX
-	ADDL $1, BX		// i++
-
-E6:	CMPL BX, $0		// i < 0
-	JL L6
-
-	MOVL CX, c+28(FP)
-	RET
-
-
-// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
-TEXT ·divWVW(SB),NOSPLIT,$0
-	MOVL z+0(FP), DI
-	MOVL xn+12(FP), DX	// r = xn
-	MOVL x+16(FP), SI
-	MOVL y+28(FP), CX
-	MOVL z_len+4(FP), BX	// i = z_len
-	JMP E7
-
-L7:	MOVL (SI)(BX*4), AX
-	DIVL CX
-	MOVL AX, (DI)(BX*4)
-
-E7:	SUBL $1, BX		// i--
-	JGE L7			// i >= 0
-
-	MOVL DX, r+32(FP)
-	RET
-
-// func bitLen(x Word) (n int)
-TEXT ·bitLen(SB),NOSPLIT,$0
-	BSRL x+0(FP), AX
-	JZ Z1
-	INCL AX
-	MOVL AX, n+4(FP)
-	RET
-
-Z1:	MOVL $0, n+4(FP)
-	RET
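
bitLen in arith_386.s is just BSRL (the index of the highest set bit) plus one, with x == 0 special-cased to return 0. A simple portable cross-check of that contract, written a little differently from the bitLen_g cascade whose tail is removed near the start of the arith.go hunk above:

package main

import "fmt"

// bitLen returns the minimum number of bits needed to represent x,
// with bitLen(0) == 0 - the same contract as the BSRL-based assembly.
func bitLen(x uint32) (n int) {
	for ; x >= 1<<8; x >>= 8 {
		n += 8
	}
	for ; x > 0; x >>= 1 {
		n++
	}
	return
}

func main() {
	for _, x := range []uint32{0, 1, 2, 3, 1 << 31, ^uint32(0)} {
		fmt.Println(x, bitLen(x)) // bit lengths: 0, 1, 2, 2, 32, 32
	}
}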
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_amd64.s b/pkg/bootstrap/src/bootstrap/math/big/arith_amd64.s
deleted file mode 100644
index 70f7797..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_amd64.s
+++ /dev/null
@@ -1,466 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_amd64.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_amd64.s:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-// func mulWW(x, y Word) (z1, z0 Word)
-TEXT ·mulWW(SB),NOSPLIT,$0
-	MOVQ x+0(FP), AX
-	MULQ y+8(FP)
-	MOVQ DX, z1+16(FP)
-	MOVQ AX, z0+24(FP)
-	RET
-
-
-// func divWW(x1, x0, y Word) (q, r Word)
-TEXT ·divWW(SB),NOSPLIT,$0
-	MOVQ x1+0(FP), DX
-	MOVQ x0+8(FP), AX
-	DIVQ y+16(FP)
-	MOVQ AX, q+24(FP)
-	MOVQ DX, r+32(FP)
-	RET
-
-// The carry bit is saved with SBBQ Rx, Rx: if the carry was set, Rx is -1, otherwise it is 0.
-// It is restored with ADDQ Rx, Rx: if Rx was -1 the carry is set, otherwise it is cleared.
-// This is faster than using rotate instructions.
-//
-// CAUTION: Note that MOVQ $0, Rx is translated to XORQ Rx, Rx which clears the carry bit!
-
-// func addVV(z, x, y []Word) (c Word)
-TEXT ·addVV(SB),NOSPLIT,$0
-	MOVQ z_len+8(FP), DI
-	MOVQ x+24(FP), R8
-	MOVQ y+48(FP), R9
-	MOVQ z+0(FP), R10
-
-	MOVQ $0, CX		// c = 0
-	MOVQ $0, SI		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUBQ $4, DI		// n -= 4
-	JL V1			// if n < 0 goto V1
-
-U1:	// n >= 0
-	// regular loop body unrolled 4x
-	ADDQ CX, CX		// restore CF
-	MOVQ 0(R8)(SI*8), R11
-	MOVQ 8(R8)(SI*8), R12
-	MOVQ 16(R8)(SI*8), R13
-	MOVQ 24(R8)(SI*8), R14
-	ADCQ 0(R9)(SI*8), R11
-	ADCQ 8(R9)(SI*8), R12
-	ADCQ 16(R9)(SI*8), R13
-	ADCQ 24(R9)(SI*8), R14
-	MOVQ R11, 0(R10)(SI*8)
-	MOVQ R12, 8(R10)(SI*8)
-	MOVQ R13, 16(R10)(SI*8)
-	MOVQ R14, 24(R10)(SI*8)
-	SBBQ CX, CX		// save CF
-
-	ADDQ $4, SI		// i += 4
-	SUBQ $4, DI		// n -= 4
-	JGE U1			// if n >= 0 goto U1
-
-V1:	ADDQ $4, DI		// n += 4
-	JLE E1			// if n <= 0 goto E1
-
-L1:	// n > 0
-	ADDQ CX, CX		// restore CF
-	MOVQ 0(R8)(SI*8), R11
-	ADCQ 0(R9)(SI*8), R11
-	MOVQ R11, 0(R10)(SI*8)
-	SBBQ CX, CX		// save CF
-
-	ADDQ $1, SI		// i++
-	SUBQ $1, DI		// n--
-	JG L1			// if n > 0 goto L1
-
-E1:	NEGQ CX
-	MOVQ CX, c+72(FP)	// return c
-	RET
-
-
-// func subVV(z, x, y []Word) (c Word)
-// (same as addVV except for SBBQ instead of ADCQ and label names)
-TEXT ·subVV(SB),NOSPLIT,$0
-	MOVQ z_len+8(FP), DI
-	MOVQ x+24(FP), R8
-	MOVQ y+48(FP), R9
-	MOVQ z+0(FP), R10
-
-	MOVQ $0, CX		// c = 0
-	MOVQ $0, SI		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUBQ $4, DI		// n -= 4
-	JL V2			// if n < 0 goto V2
-
-U2:	// n >= 0
-	// regular loop body unrolled 4x
-	ADDQ CX, CX		// restore CF
-	MOVQ 0(R8)(SI*8), R11
-	MOVQ 8(R8)(SI*8), R12
-	MOVQ 16(R8)(SI*8), R13
-	MOVQ 24(R8)(SI*8), R14
-	SBBQ 0(R9)(SI*8), R11
-	SBBQ 8(R9)(SI*8), R12
-	SBBQ 16(R9)(SI*8), R13
-	SBBQ 24(R9)(SI*8), R14
-	MOVQ R11, 0(R10)(SI*8)
-	MOVQ R12, 8(R10)(SI*8)
-	MOVQ R13, 16(R10)(SI*8)
-	MOVQ R14, 24(R10)(SI*8)
-	SBBQ CX, CX		// save CF
-
-	ADDQ $4, SI		// i += 4
-	SUBQ $4, DI		// n -= 4
-	JGE U2			// if n >= 0 goto U2
-
-V2:	ADDQ $4, DI		// n += 4
-	JLE E2			// if n <= 0 goto E2
-
-L2:	// n > 0
-	ADDQ CX, CX		// restore CF
-	MOVQ 0(R8)(SI*8), R11
-	SBBQ 0(R9)(SI*8), R11
-	MOVQ R11, 0(R10)(SI*8)
-	SBBQ CX, CX		// save CF
-
-	ADDQ $1, SI		// i++
-	SUBQ $1, DI		// n--
-	JG L2			// if n > 0 goto L2
-
-E2:	NEGQ CX
-	MOVQ CX, c+72(FP)	// return c
-	RET
-
-
-// func addVW(z, x []Word, y Word) (c Word)
-TEXT ·addVW(SB),NOSPLIT,$0
-	MOVQ z_len+8(FP), DI
-	MOVQ x+24(FP), R8
-	MOVQ y+48(FP), CX	// c = y
-	MOVQ z+0(FP), R10
-
-	MOVQ $0, SI		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUBQ $4, DI		// n -= 4
-	JL V3			// if n < 4 goto V3
-
-U3:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVQ 0(R8)(SI*8), R11
-	MOVQ 8(R8)(SI*8), R12
-	MOVQ 16(R8)(SI*8), R13
-	MOVQ 24(R8)(SI*8), R14
-	ADDQ CX, R11
-	ADCQ $0, R12
-	ADCQ $0, R13
-	ADCQ $0, R14
-	SBBQ CX, CX		// save CF
-	NEGQ CX
-	MOVQ R11, 0(R10)(SI*8)
-	MOVQ R12, 8(R10)(SI*8)
-	MOVQ R13, 16(R10)(SI*8)
-	MOVQ R14, 24(R10)(SI*8)
-
-	ADDQ $4, SI		// i += 4
-	SUBQ $4, DI		// n -= 4
-	JGE U3			// if n >= 0 goto U3
-
-V3:	ADDQ $4, DI		// n += 4
-	JLE E3			// if n <= 0 goto E3
-
-L3:	// n > 0
-	ADDQ 0(R8)(SI*8), CX
-	MOVQ CX, 0(R10)(SI*8)
-	SBBQ CX, CX		// save CF
-	NEGQ CX
-
-	ADDQ $1, SI		// i++
-	SUBQ $1, DI		// n--
-	JG L3			// if n > 0 goto L3
-
-E3:	MOVQ CX, c+56(FP)	// return c
-	RET
-
-
-// func subVW(z, x []Word, y Word) (c Word)
-// (same as addVW except for SUBQ/SBBQ instead of ADDQ/ADCQ and label names)
-TEXT ·subVW(SB),NOSPLIT,$0
-	MOVQ z_len+8(FP), DI
-	MOVQ x+24(FP), R8
-	MOVQ y+48(FP), CX	// c = y
-	MOVQ z+0(FP), R10
-
-	MOVQ $0, SI		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUBQ $4, DI		// n -= 4
-	JL V4			// if n < 4 goto V4
-
-U4:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVQ 0(R8)(SI*8), R11
-	MOVQ 8(R8)(SI*8), R12
-	MOVQ 16(R8)(SI*8), R13
-	MOVQ 24(R8)(SI*8), R14
-	SUBQ CX, R11
-	SBBQ $0, R12
-	SBBQ $0, R13
-	SBBQ $0, R14
-	SBBQ CX, CX		// save CF
-	NEGQ CX
-	MOVQ R11, 0(R10)(SI*8)
-	MOVQ R12, 8(R10)(SI*8)
-	MOVQ R13, 16(R10)(SI*8)
-	MOVQ R14, 24(R10)(SI*8)
-
-	ADDQ $4, SI		// i += 4
-	SUBQ $4, DI		// n -= 4
-	JGE U4			// if n >= 0 goto U4
-
-V4:	ADDQ $4, DI		// n += 4
-	JLE E4			// if n <= 0 goto E4
-
-L4:	// n > 0
-	MOVQ 0(R8)(SI*8), R11
-	SUBQ CX, R11
-	MOVQ R11, 0(R10)(SI*8)
-	SBBQ CX, CX		// save CF
-	NEGQ CX
-
-	ADDQ $1, SI		// i++
-	SUBQ $1, DI		// n--
-	JG L4			// if n > 0 goto L4
-
-E4:	MOVQ CX, c+56(FP)	// return c
-	RET
-
-
-// func shlVU(z, x []Word, s uint) (c Word)
-TEXT ·shlVU(SB),NOSPLIT,$0
-	MOVQ z_len+8(FP), BX	// i = z_len

-	SUBQ $1, BX		// i--
-	JL X8b			// i < 0	(n <= 0)
-
-	// n > 0
-	MOVQ z+0(FP), R10
-	MOVQ x+24(FP), R8
-	MOVQ s+48(FP), CX
-	MOVQ (R8)(BX*8), AX	// w1 = x[n-1]
-	MOVQ $0, DX
-	SHLQ CX, DX:AX		// w1>>ŝ
-	MOVQ DX, c+56(FP)
-
-	CMPQ BX, $0
-	JLE X8a			// i <= 0
-
-	// i > 0
-L8:	MOVQ AX, DX		// w = w1
-	MOVQ -8(R8)(BX*8), AX	// w1 = x[i-1]
-	SHLQ CX, DX:AX		// w<<s | w1>>ŝ
-	MOVQ DX, (R10)(BX*8)	// z[i] = w<<s | w1>>ŝ
-	SUBQ $1, BX		// i--
-	JG L8			// i > 0
-
-	// i <= 0
-X8a:	SHLQ CX, AX		// w1<<s
-	MOVQ AX, (R10)		// z[0] = w1<<s
-	RET
-
-X8b:	MOVQ $0, c+56(FP)
-	RET
-
-
-// func shrVU(z, x []Word, s uint) (c Word)
-TEXT ·shrVU(SB),NOSPLIT,$0
-	MOVQ z_len+8(FP), R11
-	SUBQ $1, R11		// n--
-	JL X9b			// n < 0	(n <= 0)
-
-	// n > 0
-	MOVQ z+0(FP), R10
-	MOVQ x+24(FP), R8
-	MOVQ s+48(FP), CX
-	MOVQ (R8), AX		// w1 = x[0]
-	MOVQ $0, DX
-	SHRQ CX, DX:AX		// w1<<ŝ
-	MOVQ DX, c+56(FP)
-
-	MOVQ $0, BX		// i = 0
-	JMP E9
-
-	// i < n-1
-L9:	MOVQ AX, DX		// w = w1
-	MOVQ 8(R8)(BX*8), AX	// w1 = x[i+1]
-	SHRQ CX, DX:AX		// w>>s | w1<<ŝ
-	MOVQ DX, (R10)(BX*8)	// z[i] = w>>s | w1<<ŝ
-	ADDQ $1, BX		// i++
-
-E9:	CMPQ BX, R11
-	JL L9			// i < n-1
-
-	// i >= n-1
-X9a:	SHRQ CX, AX		// w1>>s
-	MOVQ AX, (R10)(R11*8)	// z[n-1] = w1>>s
-	RET
-
-X9b:	MOVQ $0, c+56(FP)
-	RET
-
-
-// func mulAddVWW(z, x []Word, y, r Word) (c Word)
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	MOVQ z+0(FP), R10
-	MOVQ x+24(FP), R8
-	MOVQ y+48(FP), R9
-	MOVQ r+56(FP), CX	// c = r
-	MOVQ z_len+8(FP), R11
-	MOVQ $0, BX		// i = 0
-	
-	CMPQ R11, $4
-	JL E5
-	
-U5:	// i+4 <= n
-	// regular loop body unrolled 4x
-	MOVQ (0*8)(R8)(BX*8), AX
-	MULQ R9
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ AX, (0*8)(R10)(BX*8)
-	MOVQ DX, CX
-	MOVQ (1*8)(R8)(BX*8), AX
-	MULQ R9
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ AX, (1*8)(R10)(BX*8)
-	MOVQ DX, CX
-	MOVQ (2*8)(R8)(BX*8), AX
-	MULQ R9
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ AX, (2*8)(R10)(BX*8)
-	MOVQ DX, CX
-	MOVQ (3*8)(R8)(BX*8), AX
-	MULQ R9
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ AX, (3*8)(R10)(BX*8)
-	MOVQ DX, CX
-	ADDQ $4, BX		// i += 4
-	
-	LEAQ 4(BX), DX
-	CMPQ DX, R11
-	JLE U5
-	JMP E5
-
-L5:	MOVQ (R8)(BX*8), AX
-	MULQ R9
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ AX, (R10)(BX*8)
-	MOVQ DX, CX
-	ADDQ $1, BX		// i++
-
-E5:	CMPQ BX, R11		// i < n
-	JL L5
-
-	MOVQ CX, c+64(FP)
-	RET
-
-
-// func addMulVVW(z, x []Word, y Word) (c Word)
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	MOVQ z+0(FP), R10
-	MOVQ x+24(FP), R8
-	MOVQ y+48(FP), R9
-	MOVQ z_len+8(FP), R11
-	MOVQ $0, BX		// i = 0
-	MOVQ $0, CX		// c = 0
-	MOVQ R11, R12
-	ANDQ $-2, R12
-	CMPQ R11, $2
-	JAE A6
-	JMP E6
-
-A6:
-	MOVQ (R8)(BX*8), AX
-	MULQ R9
-	ADDQ (R10)(BX*8), AX
-	ADCQ $0, DX
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ DX, CX
-	MOVQ AX, (R10)(BX*8)
-
-	MOVQ (8)(R8)(BX*8), AX
-	MULQ R9
-	ADDQ (8)(R10)(BX*8), AX
-	ADCQ $0, DX
-	ADDQ CX, AX
-	ADCQ $0, DX
-	MOVQ DX, CX
-	MOVQ AX, (8)(R10)(BX*8)
-
-	ADDQ $2, BX
-	CMPQ BX, R12
-	JL A6
-	JMP E6
-
-L6:	MOVQ (R8)(BX*8), AX
-	MULQ R9
-	ADDQ CX, AX
-	ADCQ $0, DX
-	ADDQ AX, (R10)(BX*8)
-	ADCQ $0, DX
-	MOVQ DX, CX
-	ADDQ $1, BX		// i++
-
-E6:	CMPQ BX, R11		// i < n
-	JL L6
-
-	MOVQ CX, c+56(FP)
-	RET
-
-
-// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
-TEXT ·divWVW(SB),NOSPLIT,$0
-	MOVQ z+0(FP), R10
-	MOVQ xn+24(FP), DX	// r = xn
-	MOVQ x+32(FP), R8
-	MOVQ y+56(FP), R9
-	MOVQ z_len+8(FP), BX	// i = z_len
-	JMP E7
-
-L7:	MOVQ (R8)(BX*8), AX
-	DIVQ R9
-	MOVQ AX, (R10)(BX*8)
-
-E7:	SUBQ $1, BX		// i--
-	JGE L7			// i >= 0
-
-	MOVQ DX, r+64(FP)
-	RET
-
-// func bitLen(x Word) (n int)
-TEXT ·bitLen(SB),NOSPLIT,$0
-	BSRQ x+0(FP), AX
-	JZ Z1
-	ADDQ $1, AX
-	MOVQ AX, n+8(FP)
-	RET
-
-Z1:	MOVQ $0, n+8(FP)
-	RET
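
The mulAddVWW and addMulVVW loops above all reduce to the same inner step: DX:AX = x[i]*y, the running carry is folded in with ADDQ/ADCQ, AX becomes z[i], and DX becomes the next carry. For readers more comfortable with Go than with amd64 assembly, that step can be written portably with math/bits; note that math/bits only exists in Go releases newer than the go1.8 prebuilt this change ships, so the sketch below (mulAddStep is a made-up name) is for illustration only:

package main

import (
	"fmt"
	"math/bits"
)

// mulAddStep returns the low word of x*y+c and the carry into the next
// word, mirroring the MULQ/ADDQ/ADCQ sequence in arith_amd64.s.
func mulAddStep(x, y, c uint64) (lo, carry uint64) {
	hi, lo := bits.Mul64(x, y) // DX:AX = x*y
	lo, cc := bits.Add64(lo, c, 0)
	return lo, hi + cc // hi+cc cannot overflow: hi is at most 2^64-2
}

func main() {
	lo, carry := mulAddStep(^uint64(0), ^uint64(0), ^uint64(0))
	fmt.Printf("%#x %#x\n", lo, carry) // 0x0 0xffffffffffffffff
}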
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_amd64p32.s b/pkg/bootstrap/src/bootstrap/math/big/arith_amd64p32.s
deleted file mode 100644
index 97f841b..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_amd64p32.s
+++ /dev/null
@@ -1,46 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_amd64p32.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_amd64p32.s:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go
-
-#include "textflag.h"
-
-TEXT ·mulWW(SB),NOSPLIT,$0
-	JMP ·mulWW_g(SB)
-
-TEXT ·divWW(SB),NOSPLIT,$0
-	JMP ·divWW_g(SB)
-
-TEXT ·addVV(SB),NOSPLIT,$0
-	JMP ·addVV_g(SB)
-
-TEXT ·subVV(SB),NOSPLIT,$0
-	JMP ·subVV_g(SB)
-
-TEXT ·addVW(SB),NOSPLIT,$0
-	JMP ·addVW_g(SB)
-
-TEXT ·subVW(SB),NOSPLIT,$0
-	JMP ·subVW_g(SB)
-
-TEXT ·shlVU(SB),NOSPLIT,$0
-	JMP ·shlVU_g(SB)
-
-TEXT ·shrVU(SB),NOSPLIT,$0
-	JMP ·shrVU_g(SB)
-
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	JMP ·mulAddVWW_g(SB)
-
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	JMP ·addMulVVW_g(SB)
-
-TEXT ·divWVW(SB),NOSPLIT,$0
-	JMP ·divWVW_g(SB)
-
-TEXT ·bitLen(SB),NOSPLIT,$0
-	JMP ·bitLen_g(SB)
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_arm.s b/pkg/bootstrap/src/bootstrap/math/big/arith_arm.s
deleted file mode 100644
index 20aeb6d..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_arm.s
+++ /dev/null
@@ -1,305 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_arm.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_arm.s:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-// func addVV(z, x, y []Word) (c Word)
-TEXT ·addVV(SB),NOSPLIT,$0
-	ADD.S	$0, R0		// clear carry flag
-	MOVW	z+0(FP), R1
-	MOVW	z_len+4(FP), R4
-	MOVW	x+12(FP), R2
-	MOVW	y+24(FP), R3
-	ADD	R4<<2, R1, R4
-	B E1
-L1:
-	MOVW.P	4(R2), R5
-	MOVW.P	4(R3), R6
-	ADC.S	R6, R5
-	MOVW.P	R5, 4(R1)
-E1:
-	TEQ	R1, R4
-	BNE L1
-
-	MOVW	$0, R0
-	MOVW.CS	$1, R0
-	MOVW	R0, c+36(FP)
-	RET
-
-
-// func subVV(z, x, y []Word) (c Word)
-// (same as addVV except for SBC instead of ADC and label names)
-TEXT ·subVV(SB),NOSPLIT,$0
-	SUB.S	$0, R0		// clear borrow flag
-	MOVW	z+0(FP), R1
-	MOVW	z_len+4(FP), R4
-	MOVW	x+12(FP), R2
-	MOVW	y+24(FP), R3
-	ADD	R4<<2, R1, R4
-	B E2
-L2:
-	MOVW.P	4(R2), R5
-	MOVW.P	4(R3), R6
-	SBC.S	R6, R5
-	MOVW.P	R5, 4(R1)
-E2:
-	TEQ	R1, R4
-	BNE L2
-
-	MOVW	$0, R0
-	MOVW.CC	$1, R0
-	MOVW	R0, c+36(FP)
-	RET
-
-
-// func addVW(z, x []Word, y Word) (c Word)
-TEXT ·addVW(SB),NOSPLIT,$0
-	MOVW	z+0(FP), R1
-	MOVW	z_len+4(FP), R4
-	MOVW	x+12(FP), R2
-	MOVW	y+24(FP), R3
-	ADD	R4<<2, R1, R4
-	TEQ	R1, R4
-	BNE L3a
-	MOVW	R3, c+28(FP)
-	RET
-L3a:
-	MOVW.P	4(R2), R5
-	ADD.S	R3, R5
-	MOVW.P	R5, 4(R1)
-	B	E3
-L3:
-	MOVW.P	4(R2), R5
-	ADC.S	$0, R5
-	MOVW.P	R5, 4(R1)
-E3:
-	TEQ	R1, R4
-	BNE	L3
-
-	MOVW	$0, R0
-	MOVW.CS	$1, R0
-	MOVW	R0, c+28(FP)
-	RET
-
-
-// func subVW(z, x []Word, y Word) (c Word)
-TEXT ·subVW(SB),NOSPLIT,$0
-	MOVW	z+0(FP), R1
-	MOVW	z_len+4(FP), R4
-	MOVW	x+12(FP), R2
-	MOVW	y+24(FP), R3
-	ADD	R4<<2, R1, R4
-	TEQ	R1, R4
-	BNE L4a
-	MOVW	R3, c+28(FP)
-	RET
-L4a:
-	MOVW.P	4(R2), R5
-	SUB.S	R3, R5
-	MOVW.P	R5, 4(R1)
-	B	E4
-L4:
-	MOVW.P	4(R2), R5
-	SBC.S	$0, R5
-	MOVW.P	R5, 4(R1)
-E4:
-	TEQ	R1, R4
-	BNE	L4
-
-	MOVW	$0, R0
-	MOVW.CC	$1, R0
-	MOVW	R0, c+28(FP)
-	RET
-
-
-// func shlVU(z, x []Word, s uint) (c Word)
-TEXT ·shlVU(SB),NOSPLIT,$0
-	MOVW	z_len+4(FP), R5
-	TEQ	$0, R5
-	BEQ	X7
-	
-	MOVW	z+0(FP), R1
-	MOVW	x+12(FP), R2
-	ADD	R5<<2, R2, R2
-	ADD	R5<<2, R1, R5
-	MOVW	s+24(FP), R3
-	TEQ	$0, R3	// shift 0 is special
-	BEQ	Y7
-	ADD	$4, R1	// stop one word early
-	MOVW	$32, R4
-	SUB	R3, R4
-	MOVW	$0, R7
-	
-	MOVW.W	-4(R2), R6
-	MOVW	R6<<R3, R7
-	MOVW	R6>>R4, R6
-	MOVW	R6, c+28(FP)
-	B E7
-
-L7:
-	MOVW.W	-4(R2), R6
-	ORR	R6>>R4, R7
-	MOVW.W	R7, -4(R5)
-	MOVW	R6<<R3, R7
-E7:
-	TEQ	R1, R5
-	BNE	L7
-
-	MOVW	R7, -4(R5)
-	RET
-
-Y7:	// copy loop, because shift 0 == shift 32
-	MOVW.W	-4(R2), R6
-	MOVW.W	R6, -4(R5)
-	TEQ	R1, R5
-	BNE Y7
-
-X7:
-	MOVW	$0, R1
-	MOVW	R1, c+28(FP)
-	RET
-
-
-// func shrVU(z, x []Word, s uint) (c Word)
-TEXT ·shrVU(SB),NOSPLIT,$0
-	MOVW	z_len+4(FP), R5
-	TEQ	$0, R5
-	BEQ	X6
-
-	MOVW	z+0(FP), R1
-	MOVW	x+12(FP), R2
-	ADD	R5<<2, R1, R5
-	MOVW	s+24(FP), R3
-	TEQ	$0, R3	// shift 0 is special
-	BEQ Y6
-	SUB	$4, R5	// stop one word early
-	MOVW	$32, R4
-	SUB	R3, R4
-	MOVW	$0, R7
-
-	// first word
-	MOVW.P	4(R2), R6
-	MOVW	R6>>R3, R7
-	MOVW	R6<<R4, R6
-	MOVW	R6, c+28(FP)
-	B E6
-
-	// word loop
-L6:
-	MOVW.P	4(R2), R6
-	ORR	R6<<R4, R7
-	MOVW.P	R7, 4(R1)
-	MOVW	R6>>R3, R7
-E6:
-	TEQ	R1, R5
-	BNE	L6
-
-	MOVW	R7, 0(R1)
-	RET
-
-Y6:	// copy loop, because shift 0 == shift 32
-	MOVW.P	4(R2), R6
-	MOVW.P	R6, 4(R1)
-	TEQ R1, R5
-	BNE Y6
-
-X6:
-	MOVW	$0, R1
-	MOVW	R1, c+28(FP)
-	RET
-
-
-// func mulAddVWW(z, x []Word, y, r Word) (c Word)
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	MOVW	$0, R0
-	MOVW	z+0(FP), R1
-	MOVW	z_len+4(FP), R5
-	MOVW	x+12(FP), R2
-	MOVW	y+24(FP), R3
-	MOVW	r+28(FP), R4
-	ADD	R5<<2, R1, R5
-	B E8
-
-	// word loop
-L8:
-	MOVW.P	4(R2), R6
-	MULLU	R6, R3, (R7, R6)
-	ADD.S	R4, R6
-	ADC	R0, R7
-	MOVW.P	R6, 4(R1)
-	MOVW	R7, R4
-E8:
-	TEQ	R1, R5
-	BNE	L8
-
-	MOVW	R4, c+32(FP)
-	RET
-
-
-// func addMulVVW(z, x []Word, y Word) (c Word)
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	MOVW	$0, R0
-	MOVW	z+0(FP), R1
-	MOVW	z_len+4(FP), R5
-	MOVW	x+12(FP), R2
-	MOVW	y+24(FP), R3
-	ADD	R5<<2, R1, R5
-	MOVW	$0, R4
-	B E9
-
-	// word loop
-L9:
-	MOVW.P	4(R2), R6
-	MULLU	R6, R3, (R7, R6)
-	ADD.S	R4, R6
-	ADC	R0, R7
-	MOVW	0(R1), R4
-	ADD.S	R4, R6
-	ADC	R0, R7
-	MOVW.P	R6, 4(R1)
-	MOVW	R7, R4
-E9:
-	TEQ	R1, R5
-	BNE	L9
-
-	MOVW	R4, c+28(FP)
-	RET
-
-
-// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
-TEXT ·divWVW(SB),NOSPLIT,$0
-	// ARM has no multiword division, so use portable code.
-	B ·divWVW_g(SB)
-
-
-// func divWW(x1, x0, y Word) (q, r Word)
-TEXT ·divWW(SB),NOSPLIT,$0
-	// ARM has no multiword division, so use portable code.
-	B ·divWW_g(SB)
-
-
-// func mulWW(x, y Word) (z1, z0 Word)
-TEXT ·mulWW(SB),NOSPLIT,$0
-	MOVW	x+0(FP), R1
-	MOVW	y+4(FP), R2
-	MULLU	R1, R2, (R4, R3)
-	MOVW	R4, z1+8(FP)
-	MOVW	R3, z0+12(FP)
-	RET
-
-// func bitLen(x Word) (n int)
-TEXT ·bitLen(SB),NOSPLIT,$0
-	MOVW	x+0(FP), R0
-	CLZ 	R0, R0
-	RSB	$32, R0
-	MOVW	R0, n+4(FP)
-	RET
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_decl.go b/pkg/bootstrap/src/bootstrap/math/big/arith_decl.go
deleted file mode 100644
index 199e5a0..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_decl.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_decl.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_decl.go:1
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go
-
-package big
-
-// implemented in arith_$GOARCH.s
-func mulWW(x, y Word) (z1, z0 Word)
-func divWW(x1, x0, y Word) (q, r Word)
-func addVV(z, x, y []Word) (c Word)
-func subVV(z, x, y []Word) (c Word)
-func addVW(z, x []Word, y Word) (c Word)
-func subVW(z, x []Word, y Word) (c Word)
-func shlVU(z, x []Word, s uint) (c Word)
-func shrVU(z, x []Word, s uint) (c Word)
-func mulAddVWW(z, x []Word, y, r Word) (c Word)
-func addMulVVW(z, x []Word, y Word) (c Word)
-func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
-func bitLen(x Word) (n int)
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_decl_pure.go b/pkg/bootstrap/src/bootstrap/math/big/arith_decl_pure.go
deleted file mode 100644
index 8826400..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_decl_pure.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_decl_pure.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_decl_pure.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build math_big_pure_go
-
-package big
-
-func mulWW(x, y Word) (z1, z0 Word) {
-	return mulWW_g(x, y)
-}
-
-func divWW(x1, x0, y Word) (q, r Word) {
-	return divWW_g(x1, x0, y)
-}
-
-func addVV(z, x, y []Word) (c Word) {
-	return addVV_g(z, x, y)
-}
-
-func subVV(z, x, y []Word) (c Word) {
-	return subVV_g(z, x, y)
-}
-
-func addVW(z, x []Word, y Word) (c Word) {
-	return addVW_g(z, x, y)
-}
-
-func subVW(z, x []Word, y Word) (c Word) {
-	return subVW_g(z, x, y)
-}
-
-func shlVU(z, x []Word, s uint) (c Word) {
-	return shlVU_g(z, x, s)
-}
-
-func shrVU(z, x []Word, s uint) (c Word) {
-	return shrVU_g(z, x, s)
-}
-
-func mulAddVWW(z, x []Word, y, r Word) (c Word) {
-	return mulAddVWW_g(z, x, y, r)
-}
-
-func addMulVVW(z, x []Word, y Word) (c Word) {
-	return addMulVVW_g(z, x, y)
-}
-
-func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) {
-	return divWVW_g(z, xn, x, y)
-}
-
-func bitLen(x Word) (n int) {
-	return bitLen_g(x)
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_decl_s390x.go b/pkg/bootstrap/src/bootstrap/math/big/arith_decl_s390x.go
deleted file mode 100644
index cbaf273..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_decl_s390x.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_decl_s390x.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_decl_s390x.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go
-
-package big
-
-func addVV_check(z, x, y []Word) (c Word)
-func addVV_vec(z, x, y []Word) (c Word)
-func addVV_novec(z, x, y []Word) (c Word)
-func subVV_check(z, x, y []Word) (c Word)
-func subVV_vec(z, x, y []Word) (c Word)
-func subVV_novec(z, x, y []Word) (c Word)
-func addVW_check(z, x []Word, y Word) (c Word)
-func addVW_vec(z, x []Word, y Word) (c Word)
-func addVW_novec(z, x []Word, y Word) (c Word)
-func subVW_check(z, x []Word, y Word) (c Word)
-func subVW_vec(z, x []Word, y Word) (c Word)
-func subVW_novec(z, x []Word, y Word) (c Word)
-func hasVectorFacility() bool
-
-var hasVX = hasVectorFacility()
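
arith_decl_s390x.go exists because the s390x port chooses between vector and non-vector code paths at run time: hasVectorFacility is probed once, and the _check entry points in arith_s390x.s (further below) overwrite a code pointer such as addvectorfacility with either the _vec or the _novec routine on first use. Expressed in plain Go, the same idea is a package-level function variable bound during init; the sketch below uses made-up names and a stubbed capability probe, so it illustrates the pattern rather than the s390x mechanism itself:

package main

import "fmt"

// addVW is bound once, at init time, to whichever implementation is
// appropriate for the current CPU.
var addVW func(z, x []uint64, y uint64) (c uint64)

// hasVectorFacility stands in for the real capability probe; it is a
// constant here so the example runs anywhere.
func hasVectorFacility() bool { return false }

func addVWGeneric(z, x []uint64, y uint64) (c uint64) {
	c = y
	for i, xi := range x[:len(z)] {
		zi := xi + c
		z[i] = zi
		if zi < xi { // unsigned wraparound means a carry into the next word
			c = 1
		} else {
			c = 0
		}
	}
	return
}

// addVWVector would use SIMD on real hardware; in this sketch it simply
// falls back to the generic code.
func addVWVector(z, x []uint64, y uint64) (c uint64) {
	return addVWGeneric(z, x, y)
}

func init() {
	if hasVectorFacility() {
		addVW = addVWVector
	} else {
		addVW = addVWGeneric
	}
}

func main() {
	z := make([]uint64, 2)
	c := addVW(z, []uint64{^uint64(0), 5}, 1)
	fmt.Println(z, c) // [0 6] 0
}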
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_mips64x.s b/pkg/bootstrap/src/bootstrap/math/big/arith_mips64x.s
deleted file mode 100644
index c1242b4..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_mips64x.s
+++ /dev/null
@@ -1,49 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_mips64x.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_mips64x.s:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go,mips64 !math_big_pure_go,mips64le
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-TEXT ·mulWW(SB),NOSPLIT,$0
-	JMP ·mulWW_g(SB)
-
-TEXT ·divWW(SB),NOSPLIT,$0
-	JMP ·divWW_g(SB)
-
-TEXT ·addVV(SB),NOSPLIT,$0
-	JMP ·addVV_g(SB)
-
-TEXT ·subVV(SB),NOSPLIT,$0
-	JMP ·subVV_g(SB)
-
-TEXT ·addVW(SB),NOSPLIT,$0
-	JMP ·addVW_g(SB)
-
-TEXT ·subVW(SB),NOSPLIT,$0
-	JMP ·subVW_g(SB)
-
-TEXT ·shlVU(SB),NOSPLIT,$0
-	JMP ·shlVU_g(SB)
-
-TEXT ·shrVU(SB),NOSPLIT,$0
-	JMP ·shrVU_g(SB)
-
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	JMP ·mulAddVWW_g(SB)
-
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	JMP ·addMulVVW_g(SB)
-
-TEXT ·divWVW(SB),NOSPLIT,$0
-	JMP ·divWVW_g(SB)
-
-TEXT ·bitLen(SB),NOSPLIT,$0
-	JMP ·bitLen_g(SB)
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_mipsx.s b/pkg/bootstrap/src/bootstrap/math/big/arith_mipsx.s
deleted file mode 100644
index 7ac1f16..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_mipsx.s
+++ /dev/null
@@ -1,49 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_mipsx.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_mipsx.s:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go,mips !math_big_pure_go,mipsle
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-TEXT ·mulWW(SB),NOSPLIT,$0
-	JMP	·mulWW_g(SB)
-
-TEXT ·divWW(SB),NOSPLIT,$0
-	JMP	·divWW_g(SB)
-
-TEXT ·addVV(SB),NOSPLIT,$0
-	JMP	·addVV_g(SB)
-
-TEXT ·subVV(SB),NOSPLIT,$0
-	JMP	·subVV_g(SB)
-
-TEXT ·addVW(SB),NOSPLIT,$0
-	JMP	·addVW_g(SB)
-
-TEXT ·subVW(SB),NOSPLIT,$0
-	JMP	·subVW_g(SB)
-
-TEXT ·shlVU(SB),NOSPLIT,$0
-	JMP	·shlVU_g(SB)
-
-TEXT ·shrVU(SB),NOSPLIT,$0
-	JMP	·shrVU_g(SB)
-
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	JMP	·mulAddVWW_g(SB)
-
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	JMP	·addMulVVW_g(SB)
-
-TEXT ·divWVW(SB),NOSPLIT,$0
-	JMP	·divWVW_g(SB)
-
-TEXT ·bitLen(SB),NOSPLIT,$0
-	JMP	·bitLen_g(SB)
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64.s b/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64.s
deleted file mode 100644
index 5b2a4a7..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64.s
+++ /dev/null
@@ -1,17 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_ppc64.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_ppc64.s:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go,ppc64
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-TEXT ·divWW(SB), NOSPLIT, $0
-	BR ·divWW_g(SB)
-
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64le.s b/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64le.s
deleted file mode 100644
index 5509950..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64le.s
+++ /dev/null
@@ -1,53 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_ppc64le.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_ppc64le.s:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go,ppc64le
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-// func divWW(x1, x0, y Word) (q, r Word)
-TEXT ·divWW(SB), NOSPLIT, $0
-	MOVD x1+0(FP), R4
-	MOVD x0+8(FP), R5
-	MOVD y+16(FP), R6
-
-	CMPU R4, R6
-	BGE  divbigger
-
-	// from the programmer's note in ch. 3 of the ISA manual, p.74
-	DIVDEU R6, R4, R3
-	DIVDU  R6, R5, R7
-	MULLD  R6, R3, R8
-	MULLD  R6, R7, R20
-	SUB    R20, R5, R10
-	ADD    R7, R3, R3
-	SUB    R8, R10, R4
-	CMPU   R4, R10
-	BLT    adjust
-	CMPU   R4, R6
-	BLT    end
-
-adjust:
-	MOVD $1, R21
-	ADD  R21, R3, R3
-	SUB  R6, R4, R4
-
-end:
-	MOVD R3, q+24(FP)
-	MOVD R4, r+32(FP)
-
-	RET
-
-divbigger:
-	MOVD $-1, R7
-	MOVD R7, q+24(FP)
-	MOVD R7, r+32(FP)
-	RET
-
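
The ppc64le divWW above implements the two-word-by-one-word division q, r = (x1<<64 + x0) / y, (x1<<64 + x0) % y, and like the other ports it is only meaningful when x1 < y (the divbigger path just returns all-ones, matching divWW_g). When x1 < y the numerator is below y<<64, so the quotient always fits in a single word. A quick check of that contract with math/big, using arbitrary example inputs:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Example inputs satisfying the divWW precondition x1 < y.
	x1, x0, y := uint64(3), uint64(0x123456789abcdef0), uint64(10)

	num := new(big.Int).Lsh(new(big.Int).SetUint64(x1), 64)
	num.Add(num, new(big.Int).SetUint64(x0))
	q, r := new(big.Int).QuoRem(num, new(big.Int).SetUint64(y), new(big.Int))

	// Because x1 < y, the quotient fits in one 64-bit word, which is
	// exactly what divWW returns alongside the remainder.
	fmt.Println(q.BitLen() <= 64, r.Uint64() < y) // true true
}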
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64x.s b/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64x.s
deleted file mode 100644
index e442840..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_ppc64x.s
+++ /dev/null
@@ -1,189 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_ppc64x.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_ppc64x.s:1
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go,ppc64 !math_big_pure_go,ppc64le
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-// func mulWW(x, y Word) (z1, z0 Word)
-TEXT ·mulWW(SB), NOSPLIT, $0
-	MOVD   x+0(FP), R4
-	MOVD   y+8(FP), R5
-	MULHDU R4, R5, R6
-	MULLD  R4, R5, R7
-	MOVD   R6, z1+16(FP)
-	MOVD   R7, z0+24(FP)
-	RET
-
-TEXT ·addVV(SB), NOSPLIT, $0
-	BR ·addVV_g(SB)
-
-// func subVV(z, x, y []Word) (c Word)
-// z[i] = x[i] - y[i] for all i, carrying
-TEXT ·subVV(SB), NOSPLIT, $0
-	MOVD z_len+8(FP), R7
-	MOVD x+24(FP), R8
-	MOVD y+48(FP), R9
-	MOVD z+0(FP), R10
-
-	MOVD $0, R4  // c = 0
-	MOVD $0, R5  // i = 0
-	MOVD $1, R29 // work around lack of ADDI
-	MOVD $8, R28 // work around lack of scaled addressing
-
-	SUBC R0, R0  // clear CA
-	JMP  sublend
-
-// amd64 saves and restores CF, but I believe they only have to do that because all of
-// their math operations clobber it - we should just be able to recover it at the end.
-subloop:
-	MULLD R5, R28, R6
-	MOVD  (R8)(R6), R11 // x[i]
-	MOVD  (R9)(R6), R12 // y[i]
-
-	SUBE R12, R11, R15
-	MOVD R15, (R10)(R6)
-
-	ADD R29, R5 // i++
-
-sublend:
-	CMP R5, R7
-	BLT subloop
-
-	ADDZE R4
-	XOR   R29, R4
-	MOVD  R4, c+72(FP)
-	RET
-
-TEXT ·addVW(SB), NOSPLIT, $0
-	BR ·addVW_g(SB)
-
-TEXT ·subVW(SB), NOSPLIT, $0
-	BR ·subVW_g(SB)
-
-TEXT ·shlVU(SB), NOSPLIT, $0
-	BR ·shlVU_g(SB)
-
-TEXT ·shrVU(SB), NOSPLIT, $0
-	BR ·shrVU_g(SB)
-
-// func mulAddVWW(z, x []Word, y, r Word) (c Word)
-TEXT ·mulAddVWW(SB), NOSPLIT, $0
-	MOVD z+0(FP), R10
-	MOVD x+24(FP), R8
-	MOVD y+48(FP), R9
-	MOVD r+56(FP), R4     // c = r
-	MOVD z_len+8(FP), R11
-	MOVD $0, R3           // i = 0
-	MOVD $8, R18
-	MOVD $1, R19
-
-	JMP e5
-
-l5:
-	MULLD  R18, R3, R5
-	MOVD   (R8)(R5), R20
-	MULLD  R9, R20, R6
-	MULHDU R9, R20, R7
-	ADDC   R4, R6
-	ADDZE  R7
-	MOVD   R6, (R10)(R5)
-	MOVD   R7, R4
-	ADD    R19, R3
-
-e5:
-	CMP R3, R11
-	BLT l5
-
-	MOVD R4, c+64(FP)
-	RET
-
-// func addMulVVW(z, x []Word, y Word) (c Word)
-TEXT ·addMulVVW(SB), NOSPLIT, $0
-	MOVD z+0(FP), R10
-	MOVD x+24(FP), R8
-	MOVD y+48(FP), R9
-	MOVD z_len+8(FP), R22
-
-	MOVD $0, R5   // i = 0
-	MOVD $0, R4   // c = 0
-	MOVD $8, R28
-	MOVD $-2, R23
-	AND  R22, R23 // mask the last bit of z.len
-	MOVD $2, R24
-	CMP  R23, R24
-	BGE  unrolled
-	JMP  end
-
-unrolled:
-	MOVD  $8, R19         // no (RA)(RB*8) on power
-	MULLD R5, R19
-	MOVD  (R10)(R19), R11 // R11 = z[i]
-	MOVD  (R8)(R19), R16  // R16 = x[i]
-	ADD   R28, R19, R25
-	MOVD  (R10)(R25), R17
-	MOVD  (R8)(R25), R18
-
-	MULLD  R9, R16, R12
-	MULHDU R9, R16, R14
-	MULLD  R9, R18, R6
-	MULHDU R9, R18, R7
-	ADDC   R4, R12
-	ADDZE  R14
-	ADDC   R11, R12        // z[i] = (x[i]*y) + z[i] + carry
-	ADDZE  R14             // carry = high order bits + add carry
-	MOVD   R12, (R10)(R19)
-	ADDC   R14, R6
-	ADDZE  R7
-	ADDC   R17, R6
-	ADDZE  R7
-	MOVD   R6, (R10)(R25)
-	MOVD   R7, R4
-
-	ADD R24, R5
-	CMP R5, R23
-	BLT unrolled
-	JMP end
-
-loop:
-	MOVD   $8, R19
-	MULLD  R5, R19
-	MOVD   (R10)(R19), R11
-	MOVD   (R8)(R19), R16
-	MULLD  R9, R16, R12
-	MULHDU R9, R16, R14
-	ADDC   R4, R12
-	ADDZE  R14
-	ADDC   R11, R12
-	ADDZE  R14
-	MOVD   R12, (R10)(R19)
-	MOVD   R14, R4
-
-	MOVD $1, R15
-	ADD  R15, R5
-
-end:
-	CMP R5, R22
-	BLT loop
-
-	MOVD R4, c+56(FP)
-	RET
-
-TEXT ·divWVW(SB), NOSPLIT, $0
-	BR ·divWVW_g(SB)
-
-// func bitLen(x Word) int
-TEXT ·bitLen(SB), NOSPLIT, $0
-	MOVD   x+0(FP), R4
-	CNTLZD R4, R4
-	MOVD   $64, R5
-	SUB    R4, R5
-	MOVD   R5, n+8(FP)
-	RET
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_s390x.s b/pkg/bootstrap/src/bootstrap/math/big/arith_s390x.s
deleted file mode 100644
index e291f04..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_s390x.s
+++ /dev/null
@@ -1,1252 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_s390x.s
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_s390x.s:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !math_big_pure_go,s390x
-
-#include "textflag.h"
-
-// This file provides fast assembly versions for the elementary
-// arithmetic operations on vectors implemented in arith.go.
-
-TEXT ·hasVectorFacility(SB),NOSPLIT,$24-1
-        MOVD    $x-24(SP), R1
-        XC      $24, 0(R1), 0(R1) // clear the storage
-        MOVD    $2, R0            // R0 is the number of double words stored -1
-        WORD    $0xB2B01000       // STFLE 0(R1)
-        XOR     R0, R0            // reset the value of R0
-        MOVBZ   z-8(SP), R1
-        AND     $0x40, R1
-        BEQ     novector
-vectorinstalled:
-        // check if the vector instruction has been enabled
-        VLEIB   $0, $0xF, V16
-        VLGVB   $0, V16, R1
-        CMPBNE  R1, $0xF, novector
-        MOVB    $1, ret+0(FP) // have vx
-        RET
-novector:
-        MOVB    $0, ret+0(FP) // no vx
-        RET
-
-TEXT ·mulWW(SB),NOSPLIT,$0
-	MOVD	x+0(FP), R3
-	MOVD	y+8(FP), R4
-	MULHDU	R3, R4
-	MOVD	R10, z1+16(FP)
-	MOVD	R11, z0+24(FP)
-	RET
-
-// func divWW(x1, x0, y Word) (q, r Word)
-TEXT ·divWW(SB),NOSPLIT,$0
-	MOVD	x1+0(FP), R10
-	MOVD	x0+8(FP), R11
-	MOVD	y+16(FP), R5
-	WORD	$0xb98700a5 // dlgr r10,r5
-	MOVD	R11, q+24(FP)
-	MOVD	R10, r+32(FP)
-	RET
-
-// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
-// func addVV(z, x, y []Word) (c Word)
-
-
-TEXT ·addVV(SB),NOSPLIT,$0
-	MOVD	addvectorfacility+0x00(SB),R1
-	BR	(R1)
-	
-TEXT ·addVV_check(SB),NOSPLIT, $0
-	MOVB	·hasVX(SB), R1
-	CMPBEQ	R1, $1, vectorimpl      // vectorfacility = 1, vector supported
-	MOVD	$addvectorfacility+0x00(SB), R1
-	MOVD	$·addVV_novec(SB), R2
-	MOVD	R2, 0(R1)
-	//MOVD	$·addVV_novec(SB), 0(R1)
-	BR	·addVV_novec(SB)
-vectorimpl:
-	MOVD	$addvectorfacility+0x00(SB), R1
-	MOVD	$·addVV_vec(SB), R2
-	MOVD	R2, 0(R1)
-	//MOVD	$·addVV_vec(SB), 0(R1)
-	BR	·addVV_vec(SB)
-
-GLOBL addvectorfacility+0x00(SB), NOPTR, $8
-DATA addvectorfacility+0x00(SB)/8, $·addVV_check(SB)
-
-TEXT ·addVV_vec(SB),NOSPLIT,$0
-	MOVD	z_len+8(FP), R3
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R9
-	MOVD	z+0(FP), R2
-
-	MOVD	$0, R4		// c = 0
-	MOVD	$0, R0		// make sure it's zero
-	MOVD	$0, R10		// i = 0
-
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB	$4, R3
-	BLT	v1
-	SUB     $12, R3                 // n -= 16
-        BLT     A1                      // if n < 0 goto A1
-       
-	MOVD	R8, R5
-	MOVD	R9, R6
-	MOVD	R2, R7
-	// n >= 0
-	// regular loop body unrolled 16x
-	VZERO	V0			// c = 0
-UU1:	VLM	0(R5), V1, V4		// 64-bytes into V1..V8
-	ADD	$64, R5
-	VPDI	$0x4,V1,V1,V1		// flip the doublewords to big-endian order
-	VPDI	$0x4,V2,V2,V2		// flip the doublewords to big-endian order
-
-
-	VLM	0(R6), V9, V12  	// 64-bytes into V9..V16
-	ADD	$64, R6
-	VPDI	$0x4,V9,V9,V9		// flip the doublewords to big-endian order
-	VPDI	$0x4,V10,V10,V10	// flip the doublewords to big-endian order
-
-	VACCCQ	V1, V9, V0, V25
-	VACQ	V1, V9, V0, V17
-	VACCCQ	V2, V10, V25, V26
-	VACQ	V2, V10, V25, V18
-
-
-	VLM	0(R5), V5, V6		// 32-bytes into V1..V8
-	VLM	0(R6), V13, V14  	// 32-bytes into V9..V16
-	ADD	$32, R5
-	ADD	$32, R6
-
-	VPDI	$0x4,V3,V3,V3		// flip the doublewords to big-endian order
-	VPDI	$0x4,V4,V4,V4		// flip the doublewords to big-endian order
-	VPDI	$0x4,V11,V11,V11	// flip the doublewords to big-endian order
-	VPDI	$0x4,V12,V12,V12	// flip the doublewords to big-endian order
-
-	VACCCQ	V3, V11, V26, V27
-	VACQ	V3, V11, V26, V19
-	VACCCQ	V4, V12, V27, V28
-	VACQ	V4, V12, V27, V20
-
-	VLM	0(R5), V7, V8		// 32-bytes into V1..V8
-	VLM	0(R6), V15, V16  	// 32-bytes into V9..V16
-	ADD	$32, R5
-	ADD	$32, R6
-
-	VPDI	$0x4,V5,V5,V5		// flip the doublewords to big-endian order
-	VPDI	$0x4,V6,V6,V6		// flip the doublewords to big-endian order
-	VPDI	$0x4,V13,V13,V13	// flip the doublewords to big-endian order
-	VPDI	$0x4,V14,V14,V14	// flip the doublewords to big-endian order
-
-	VACCCQ	V5, V13, V28, V29
-	VACQ	V5, V13, V28, V21
-	VACCCQ	V6, V14, V29, V30
-	VACQ	V6, V14, V29, V22
-
-	VPDI	$0x4,V7,V7,V7		// flip the doublewords to big-endian order
-	VPDI	$0x4,V8,V8,V8		// flip the doublewords to big-endian order
-	VPDI	$0x4,V15,V15,V15	// flip the doublewords to big-endian order
-	VPDI	$0x4,V16,V16,V16	// flip the doublewords to big-endian order
-
-	VACCCQ	V7, V15, V30, V31
-	VACQ	V7, V15, V30, V23
-	VACCCQ	V8, V16, V31, V0	//V0 has carry-over
-	VACQ	V8, V16, V31, V24
-
-	VPDI	$0x4,V17,V17,V17	// flip the doublewords to big-endian order
-	VPDI	$0x4,V18,V18,V18	// flip the doublewords to big-endian order
-	VPDI	$0x4,V19,V19,V19	// flip the doublewords to big-endian order
-	VPDI	$0x4,V20,V20,V20	// flip the doublewords to big-endian order
-	VPDI	$0x4,V21,V21,V21	// flip the doublewords to big-endian order
-	VPDI	$0x4,V22,V22,V22	// flip the doublewords to big-endian order
-	VPDI	$0x4,V23,V23,V23	// flip the doublewords to big-endian order
-	VPDI	$0x4,V24,V24,V24	// flip the doublewords to big-endian order
-	VSTM	V17, V24, 0(R7)  	// 128-bytes into z
-	ADD	$128, R7
-	ADD	$128, R10	// i += 16
-	SUB	$16,  R3	// n -= 16
-	BGE	UU1		// if n >= 0 goto U1
-	VLGVG	$1, V0, R4	// put cf into R4
-	NEG	R4, R4		// save cf
-
-A1:	ADD	$12, R3		// n += 16
-
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	BLT	v1		// if n < 0 goto v1
-
-U1:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD	0(R8)(R10*1), R5
-	MOVD	8(R8)(R10*1), R6
-	MOVD	16(R8)(R10*1), R7
-	MOVD	24(R8)(R10*1), R1
-	ADDC	R4, R4		// restore CF
-	MOVD	0(R9)(R10*1), R11
-	ADDE	R11, R5
-	MOVD	8(R9)(R10*1), R11
-	ADDE	R11, R6
-	MOVD	16(R9)(R10*1), R11
-	ADDE	R11, R7
-	MOVD	24(R9)(R10*1), R11
-	ADDE	R11, R1
-	MOVD	R0, R4
-	ADDE	R4, R4		// save CF
-	NEG	R4, R4
-	MOVD	R5, 0(R2)(R10*1)
-	MOVD	R6, 8(R2)(R10*1)
-	MOVD	R7, 16(R2)(R10*1)
-	MOVD	R1, 24(R2)(R10*1)
-
-
-	ADD	$32, R10	// i += 4
-	SUB	$4,  R3		// n -= 4
-	BGE	U1		// if n >= 0 goto U1
-
-v1:	ADD	$4, R3		// n += 4
-	BLE	E1		// if n <= 0 goto E1
-
-L1:	// n > 0
-	ADDC	R4, R4		// restore CF
-	MOVD	0(R8)(R10*1), R5
-	MOVD	0(R9)(R10*1), R11
-	ADDE	R11, R5
-	MOVD	R5, 0(R2)(R10*1)
-	MOVD	R0, R4
-	ADDE	R4, R4		// save CF
-	NEG 	R4, R4
-
-	ADD	$8, R10		// i++
-	SUB	$1, R3		// n--
-	BGT	L1		// if n > 0 goto L1
-
-E1:	NEG	R4, R4
-	MOVD	R4, c+72(FP)	// return c
-	RET
-
-TEXT ·addVV_novec(SB),NOSPLIT,$0
-novec:
-	MOVD	z_len+8(FP), R3
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R9
-	MOVD	z+0(FP), R2
-
-	MOVD	$0, R4		// c = 0
-	MOVD	$0, R0		// make sure it's zero
-	MOVD	$0, R10		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB	$4, R3		// n -= 4
-	BLT	v1n		// if n < 0 goto v1n
-U1n:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD	0(R8)(R10*1), R5
-	MOVD	8(R8)(R10*1), R6
-	MOVD	16(R8)(R10*1), R7
-	MOVD	24(R8)(R10*1), R1
-	ADDC	R4, R4		// restore CF
-	MOVD	0(R9)(R10*1), R11
-	ADDE	R11, R5
-	MOVD	8(R9)(R10*1), R11
-	ADDE	R11, R6
-	MOVD	16(R9)(R10*1), R11
-	ADDE	R11, R7
-	MOVD	24(R9)(R10*1), R11
-	ADDE	R11, R1
-	MOVD	R0, R4
-	ADDE	R4, R4		// save CF
-	NEG	R4, R4
-	MOVD	R5, 0(R2)(R10*1)
-	MOVD	R6, 8(R2)(R10*1)
-	MOVD	R7, 16(R2)(R10*1)
-	MOVD	R1, 24(R2)(R10*1)
-
-
-	ADD	$32, R10	// i += 4
-	SUB	$4,  R3		// n -= 4
-	BGE	U1n		// if n >= 0 goto U1n
-
-v1n:	ADD	$4, R3		// n += 4
-	BLE	E1n		// if n <= 0 goto E1n
-
-L1n:	// n > 0
-	ADDC	R4, R4		// restore CF
-	MOVD	0(R8)(R10*1), R5
-	MOVD	0(R9)(R10*1), R11
-	ADDE	R11, R5
-	MOVD	R5, 0(R2)(R10*1)
-	MOVD	R0, R4
-	ADDE	R4, R4		// save CF
-	NEG 	R4, R4
-
-	ADD	$8, R10		// i++
-	SUB	$1, R3		// n--
-	BGT L1n			// if n > 0 goto L1n
-
-E1n:	NEG	R4, R4
-	MOVD	R4, c+72(FP)	// return c
-	RET
-
-
-TEXT ·subVV(SB),NOSPLIT,$0
-	MOVD	subvectorfacility+0x00(SB),R1
-	BR	(R1)
-	
-TEXT ·subVV_check(SB),NOSPLIT,$0
-	MOVB	·hasVX(SB), R1
-	CMPBEQ	R1, $1, vectorimpl      // vectorfacility = 1, vector supported
-	MOVD	$subvectorfacility+0x00(SB), R1
-	MOVD	$·subVV_novec(SB), R2
-	MOVD	R2, 0(R1)
-	//MOVD	$·subVV_novec(SB), 0(R1)
-	BR	·subVV_novec(SB)
-vectorimpl:
-	MOVD	$subvectorfacility+0x00(SB), R1
-	MOVD    $·subVV_vec(SB), R2
-        MOVD    R2, 0(R1)
-	//MOVD	$·subVV_vec(SB), 0(R1)
-	BR	·subVV_vec(SB)
-
-GLOBL subvectorfacility+0x00(SB), NOPTR, $8
-DATA subvectorfacility+0x00(SB)/8, $·subVV_check(SB)
-
-// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
-// func subVV(z, x, y []Word) (c Word)
-// (same as addVV except for SUBC/SUBE instead of ADDC/ADDE and label names)
-TEXT ·subVV_vec(SB),NOSPLIT,$0
-	MOVD	z_len+8(FP), R3
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R9
-	MOVD	z+0(FP), R2
-	MOVD	$0, R4		// c = 0
-	MOVD	$0, R0		// make sure it's zero
-	MOVD	$0, R10		// i = 0
-	
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB	$4, R3		// n -= 4
-	BLT	v1		// if n < 0 goto v1
-	SUB     $12, R3         // n -= 16
-        BLT     A1              // if n < 0 goto A1
-
-	MOVD	R8, R5
-	MOVD	R9, R6
-	MOVD	R2, R7
-
-	// n >= 0
-	// regular loop body unrolled 16x
-	VZERO	V0		// cf = 0
-	MOVD	$1, R4		// for 390 subtraction cf starts as 1 (no borrow)
-	VLVGG	$1, R4, V0	//put carry into V0
-
-UU1:	VLM	0(R5), V1, V4		// 64-bytes into V1..V8
-	ADD	$64, R5
-	VPDI	$0x4,V1,V1,V1		// flip the doublewords to big-endian order
-	VPDI	$0x4,V2,V2,V2		// flip the doublewords to big-endian order
-
-
-	VLM	0(R6), V9, V12  	// 64-bytes into V9..V16
-	ADD	$64, R6
-	VPDI	$0x4,V9,V9,V9		// flip the doublewords to big-endian order
-	VPDI	$0x4,V10,V10,V10	// flip the doublewords to big-endian order
-
-	VSBCBIQ	V1, V9, V0, V25
-	VSBIQ	V1, V9, V0, V17
-	VSBCBIQ	V2, V10, V25, V26
-	VSBIQ	V2, V10, V25, V18
-
-
-	VLM	0(R5), V5, V6		// 32-bytes into V1..V8
-	VLM	0(R6), V13, V14  	// 32-bytes into V9..V16
-	ADD	$32, R5
-	ADD	$32, R6
-
-	VPDI	$0x4,V3,V3,V3		// flip the doublewords to big-endian order
-	VPDI	$0x4,V4,V4,V4		// flip the doublewords to big-endian order
-	VPDI	$0x4,V11,V11,V11	// flip the doublewords to big-endian order
-	VPDI	$0x4,V12,V12,V12	// flip the doublewords to big-endian order
-
-	VSBCBIQ	V3, V11, V26, V27
-	VSBIQ	V3, V11, V26, V19
-	VSBCBIQ	V4, V12, V27, V28
-	VSBIQ	V4, V12, V27, V20
-
-	VLM	0(R5), V7, V8		// 32-bytes into V1..V8
-	VLM	0(R6), V15, V16  	// 32-bytes into V9..V16
-	ADD	$32, R5
-	ADD	$32, R6
-
-	VPDI	$0x4,V5,V5,V5		// flip the doublewords to big-endian order
-	VPDI	$0x4,V6,V6,V6		// flip the doublewords to big-endian order
-	VPDI	$0x4,V13,V13,V13	// flip the doublewords to big-endian order
-	VPDI	$0x4,V14,V14,V14	// flip the doublewords to big-endian order
-
-	VSBCBIQ	V5, V13, V28, V29
-	VSBIQ	V5, V13, V28, V21
-	VSBCBIQ	V6, V14, V29, V30
-	VSBIQ	V6, V14, V29, V22
-
-	VPDI	$0x4,V7,V7,V7		// flip the doublewords to big-endian order
-	VPDI	$0x4,V8,V8,V8		// flip the doublewords to big-endian order
-	VPDI	$0x4,V15,V15,V15	// flip the doublewords to big-endian order
-	VPDI	$0x4,V16,V16,V16	// flip the doublewords to big-endian order
-
-	VSBCBIQ	V7, V15, V30, V31
-	VSBIQ	V7, V15, V30, V23
-	VSBCBIQ	V8, V16, V31, V0	//V0 has carry-over
-	VSBIQ	V8, V16, V31, V24
-
-	VPDI	$0x4,V17,V17,V17	// flip the doublewords to big-endian order
-	VPDI	$0x4,V18,V18,V18	// flip the doublewords to big-endian order
-	VPDI	$0x4,V19,V19,V19	// flip the doublewords to big-endian order
-	VPDI	$0x4,V20,V20,V20	// flip the doublewords to big-endian order
-	VPDI	$0x4,V21,V21,V21	// flip the doublewords to big-endian order
-	VPDI	$0x4,V22,V22,V22	// flip the doublewords to big-endian order
-	VPDI	$0x4,V23,V23,V23	// flip the doublewords to big-endian order
-	VPDI	$0x4,V24,V24,V24	// flip the doublewords to big-endian order
-	VSTM	V17, V24, 0(R7)   // 128-bytes into z
-	ADD	$128, R7
-	ADD	$128, R10	// i += 16
-	SUB	$16,  R3	// n -= 16
-	BGE	UU1		// if n >= 0 goto U1
-	VLGVG	$1, V0, R4	// put cf into R4
-	SUB	$1, R4		// save cf
-
-A1:	ADD	$12, R3		// n += 16
-	BLT	v1		// if n < 0 goto v1
-	
-U1:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD	0(R8)(R10*1), R5
-	MOVD	8(R8)(R10*1), R6
-	MOVD	16(R8)(R10*1), R7
-	MOVD	24(R8)(R10*1), R1
-	MOVD	R0, R11
-	SUBC	R4, R11		// restore CF
-	MOVD	0(R9)(R10*1), R11
-	SUBE	R11, R5
-	MOVD	8(R9)(R10*1), R11
-	SUBE	R11, R6
-	MOVD	16(R9)(R10*1), R11
-	SUBE	R11, R7
-	MOVD	24(R9)(R10*1), R11
-	SUBE	R11, R1
-	MOVD	R0, R4
-	SUBE	R4, R4		// save CF
-	MOVD	R5, 0(R2)(R10*1)
-	MOVD	R6, 8(R2)(R10*1)
-	MOVD	R7, 16(R2)(R10*1)
-	MOVD	R1, 24(R2)(R10*1)
-
-	ADD	$32, R10	// i += 4
-	SUB	$4,  R3		// n -= 4
-	BGE	U1		// if n >= 0 goto U1n
-
-v1:	ADD	$4, R3		// n += 4
-	BLE	E1		// if n <= 0 goto E1
-
-L1:	// n > 0
-	MOVD	R0, R11
-	SUBC	R4, R11		// restore CF
-	MOVD	0(R8)(R10*1), R5
-	MOVD	0(R9)(R10*1), R11
-	SUBE	R11, R5
-	MOVD	R5, 0(R2)(R10*1)
-	MOVD	R0, R4
-	SUBE	R4, R4		// save CF
-
-	ADD	$8, R10		// i++
-	SUB	$1, R3		// n--
-	BGT	L1		// if n > 0 goto L1n
-
-E1:	NEG	R4, R4
-	MOVD	R4, c+72(FP)	// return c
-	RET
-
-
-// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
-// func subVV(z, x, y []Word) (c Word)
-// (same as addVV except for SUBC/SUBE instead of ADDC/ADDE and label names)
-TEXT ·subVV_novec(SB),NOSPLIT,$0
-	MOVD z_len+8(FP), R3
-	MOVD x+24(FP), R8
-	MOVD y+48(FP), R9
-	MOVD z+0(FP), R2
-
-	MOVD $0, R4		// c = 0
-	MOVD $0, R0		// make sure it's zero
-	MOVD $0, R10		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB  $4, R3		// n -= 4
-	BLT v1			// if n < 0 goto v1
-
-U1:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD 0(R8)(R10*1), R5
-	MOVD 8(R8)(R10*1), R6
-	MOVD 16(R8)(R10*1), R7
-	MOVD 24(R8)(R10*1), R1
-	MOVD R0, R11
-	SUBC R4, R11		// restore CF
-	MOVD 0(R9)(R10*1), R11
-	SUBE R11, R5
-	MOVD 8(R9)(R10*1), R11
-	SUBE R11, R6
-	MOVD 16(R9)(R10*1), R11
-	SUBE R11, R7
-	MOVD 24(R9)(R10*1), R11
-	SUBE R11, R1
-	MOVD R0, R4
-	SUBE R4, R4		// save CF
-	MOVD R5, 0(R2)(R10*1)
-	MOVD R6, 8(R2)(R10*1)
-	MOVD R7, 16(R2)(R10*1)
-	MOVD R1, 24(R2)(R10*1)
-
-
-	ADD  $32, R10		// i += 4
-	SUB  $4,  R3		// n -= 4
-	BGE  U1			// if n >= 0 goto U1
-
-v1:	ADD  $4, R3		// n += 4
-	BLE E1			// if n <= 0 goto E1
-
-L1:	// n > 0
-	MOVD R0, R11
-	SUBC R4, R11		// restore CF
-	MOVD 0(R8)(R10*1), R5
-	MOVD 0(R9)(R10*1), R11
-	SUBE R11, R5
-	MOVD R5, 0(R2)(R10*1)
-	MOVD R0, R4
-	SUBE R4, R4		// save CF
-
-	ADD  $8, R10		// i++
-	SUB  $1, R3		// n--
-	BGT L1			// if n > 0 goto L1
-
-E1:	NEG  R4, R4
-	MOVD R4, c+72(FP)	// return c
-	RET
-
-TEXT ·addVW(SB),NOSPLIT,$0
-	MOVD	addwvectorfacility+0x00(SB),R1
-	BR	(R1)
-	
-TEXT ·addVW_check(SB),NOSPLIT,$0
-	MOVB	·hasVX(SB), R1
-	CMPBEQ	R1, $1, vectorimpl      // vectorfacility = 1, vector supported
-	MOVD	$addwvectorfacility+0x00(SB), R1
-	MOVD    $·addVW_novec(SB), R2
-        MOVD    R2, 0(R1)
-	//MOVD	$·addVW_novec(SB), 0(R1)
-	BR	·addVW_novec(SB)
-vectorimpl:
-	MOVD	$addwvectorfacility+0x00(SB), R1
-	MOVD    $·addVW_vec(SB), R2
-        MOVD    R2, 0(R1)
-	//MOVD	$·addVW_vec(SB), 0(R1)
-	BR	·addVW_vec(SB)
-
-GLOBL addwvectorfacility+0x00(SB), NOPTR, $8
-DATA addwvectorfacility+0x00(SB)/8, $·addVW_check(SB)
-
-
-// func addVW_vec(z, x []Word, y Word) (c Word)
-TEXT ·addVW_vec(SB),NOSPLIT,$0
-	MOVD	z_len+8(FP), R3
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R4	// c = y
-	MOVD	z+0(FP), R2
-
-	MOVD	$0, R0		// make sure it's zero
-	MOVD	$0, R10		// i = 0
-	MOVD	R8, R5
-	MOVD	R2, R7
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB	$4, R3			// n -= 4
-	BLT	v10			// if n < 0 goto v10
-	SUB	$12, R3
-	BLT	A10
-
-	// n >= 0
-	// regular loop body unrolled 16x
-
-	VZERO	V0			// prepare V0 to be final carry register
-	VZERO	V9			// to ensure upper half is zero
-	VLVGG	$1, R4, V9
-UU1:	VLM	0(R5), V1, V4		// 64-bytes into V1..V4
-	ADD	$64, R5
-	VPDI	$0x4,V1,V1,V1		// flip the doublewords to big-endian order
-	VPDI	$0x4,V2,V2,V2		// flip the doublewords to big-endian order
-
-
-	VACCCQ	V1, V9, V0, V25
-	VACQ	V1, V9, V0, V17
-	VZERO	V9
-	VACCCQ	V2, V9, V25, V26
-	VACQ	V2, V9, V25, V18
-
-
-	VLM	0(R5), V5, V6		// 32-bytes into V5..V6
-	ADD	$32, R5
-
-	VPDI	$0x4,V3,V3,V3		// flip the doublewords to big-endian order
-	VPDI	$0x4,V4,V4,V4		// flip the doublewords to big-endian order
-
-	VACCCQ	V3, V9, V26, V27
-	VACQ	V3, V9, V26, V19
-	VACCCQ	V4, V9, V27, V28
-	VACQ	V4, V9, V27, V20
-
-	VLM	0(R5), V7, V8		// 32-bytes into V7..V8
-	ADD	$32, R5
-
-	VPDI	$0x4,V5,V5,V5		// flip the doublewords to big-endian order
-	VPDI	$0x4,V6,V6,V6		// flip the doublewords to big-endian order
-
-	VACCCQ	V5, V9, V28, V29
-	VACQ	V5, V9, V28, V21
-	VACCCQ	V6, V9, V29, V30
-	VACQ	V6, V9, V29, V22
-
-	VPDI	$0x4,V7,V7,V7		// flip the doublewords to big-endian order
-	VPDI	$0x4,V8,V8,V8		// flip the doublewords to big-endian order
-
-	VACCCQ	V7, V9, V30, V31
-	VACQ	V7, V9, V30, V23
-	VACCCQ	V8, V9, V31, V0	//V0 has carry-over
-	VACQ	V8, V9, V31, V24
-
-	VPDI	$0x4,V17,V17,V17	// flip the doublewords to big-endian order
-	VPDI	$0x4,V18,V18,V18	// flip the doublewords to big-endian order
-	VPDI	$0x4,V19,V19,V19	// flip the doublewords to big-endian order
-	VPDI	$0x4,V20,V20,V20	// flip the doublewords to big-endian order
-	VPDI	$0x4,V21,V21,V21	// flip the doublewords to big-endian order
-	VPDI	$0x4,V22,V22,V22	// flip the doublewords to big-endian order
-	VPDI	$0x4,V23,V23,V23	// flip the doublewords to big-endian order
-	VPDI	$0x4,V24,V24,V24	// flip the doublewords to big-endian order
-	VSTM	V17, V24, 0(R7)   	// 128-bytes into z
-	ADD	$128, R7
-	ADD	$128, R10		// i += 16
-	SUB	$16,  R3		// n -= 16
-	BGE	UU1		// if n >= 0 goto U1
-	VLGVG	$1, V0, R4	// put cf into R4 in case we branch to v10
-
-A10:	ADD	$12, R3		// n += 16
-
-
-	// s/JL/JMP/ below to disable the unrolled loop
-
-	BLT	v10		// if n < 0 goto v10
-
-
-U4:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD 0(R8)(R10*1), R5
-	MOVD 8(R8)(R10*1), R6
-	MOVD 16(R8)(R10*1), R7
-	MOVD 24(R8)(R10*1), R1
-	ADDC R4, R5
-	ADDE R0, R6
-	ADDE R0, R7
-	ADDE R0, R1
-	ADDE R0, R0
-	MOVD R0, R4		// save CF
-	SUB  R0, R0
-	MOVD R5, 0(R2)(R10*1)
-	MOVD R6, 8(R2)(R10*1)
-	MOVD R7, 16(R2)(R10*1)
-	MOVD R1, 24(R2)(R10*1)
-
-	ADD $32, R10		// i += 4 -> i +=32
-	SUB $4, R3		// n -= 4
-	BGE U4			// if n >= 0 goto U4
-
-v10:	ADD $4, R3		// n += 4
-	BLE E10			// if n <= 0 goto E4
-
-
-L4:	// n > 0
-	MOVD	0(R8)(R10*1), R5
-	ADDC	R4, R5
-	ADDE	R0, R0
-	MOVD	R0, R4		// save CF
-	SUB 	R0, R0
-	MOVD	R5, 0(R2)(R10*1)
-
-	ADD	$8, R10		// i++
-	SUB	$1, R3		// n--
-	BGT	L4		// if n > 0 goto L4
-
-E10:	MOVD	R4, c+56(FP)	// return c
-
-	RET
-
-
-TEXT ·addVW_novec(SB),NOSPLIT,$0
-//DI = R3, CX = R4, SI = r10, r8 = r8, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0)
-	MOVD z_len+8(FP), R3
-	MOVD x+24(FP), R8
-	MOVD y+48(FP), R4	// c = y
-	MOVD z+0(FP), R2
-	MOVD $0, R0		// make sure it's 0
-	MOVD $0, R10		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB $4, R3		// n -= 4
-	BLT v4			// if n < 4 goto v4
-
-U4:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD 0(R8)(R10*1), R5
-	MOVD 8(R8)(R10*1), R6
-	MOVD 16(R8)(R10*1), R7
-	MOVD 24(R8)(R10*1), R1
-	ADDC R4, R5
-	ADDE R0, R6
-	ADDE R0, R7
-	ADDE R0, R1
-	ADDE R0, R0
-	MOVD R0, R4		// save CF
-	SUB  R0, R0
-	MOVD R5, 0(R2)(R10*1)
-	MOVD R6, 8(R2)(R10*1)
-	MOVD R7, 16(R2)(R10*1)
-	MOVD R1, 24(R2)(R10*1)
-
-	ADD $32, R10		// i += 4 -> i +=32
-	SUB $4, R3		// n -= 4
-	BGE U4			// if n >= 0 goto U4
-
-v4:	ADD $4, R3		// n += 4
-	BLE E4			// if n <= 0 goto E4
-
-L4:	// n > 0
-	MOVD 0(R8)(R10*1), R5
-	ADDC R4, R5
-	ADDE R0, R0
-	MOVD R0, R4		// save CF
-	SUB  R0, R0
-	MOVD R5, 0(R2)(R10*1)
-
-	ADD  $8, R10		// i++
-	SUB  $1, R3		// n--
-	BGT L4			// if n > 0 goto L4
-
-E4:	MOVD R4, c+56(FP)	// return c
-
-	RET
-
-TEXT ·subVW(SB),NOSPLIT,$0
-	MOVD	subwvectorfacility+0x00(SB),R1
-	BR	(R1)
-	
-TEXT ·subVW_check(SB),NOSPLIT,$0
-	MOVB	·hasVX(SB), R1
-	CMPBEQ	R1, $1, vectorimpl      // vectorfacility = 1, vector supported
-	MOVD	$subwvectorfacility+0x00(SB), R1
-	MOVD    $·subVW_novec(SB), R2
-        MOVD    R2, 0(R1)
-	//MOVD	$·subVW_novec(SB), 0(R1)
-	BR	·subVW_novec(SB)
-vectorimpl:
-	MOVD	$subwvectorfacility+0x00(SB), R1
-	MOVD    $·subVW_vec(SB), R2
-        MOVD    R2, 0(R1)
-	//MOVD	$·subVW_vec(SB), 0(R1)
-	BR	·subVW_vec(SB)
-
-GLOBL subwvectorfacility+0x00(SB), NOPTR, $8
-DATA subwvectorfacility+0x00(SB)/8, $·subVW_check(SB)
-
-// func subVW(z, x []Word, y Word) (c Word)
-TEXT ·subVW_vec(SB),NOSPLIT,$0
-	MOVD	z_len+8(FP), R3
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R4	// c = y
-	MOVD	z+0(FP), R2
-
-	MOVD	$0, R0		// make sure it's zero
-	MOVD	$0, R10		// i = 0
-	MOVD	R8, R5
-	MOVD	R2, R7
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB	$4, R3			// n -= 4
-	BLT	v11			// if n < 0 goto v11
-	SUB	$12, R3
-	BLT	A11
-
-	VZERO	V0
-	MOVD	$1, R6			// prepare V0 to be final carry register
-	VLVGG	$1, R6, V0		// borrow is initially "no borrow"
-	VZERO	V9			// to ensure upper half is zero
-	VLVGG	$1, R4, V9
-
-	// n >= 0
-	// regular loop body unrolled 16x
-
-
-UU1:	VLM	0(R5), V1, V4		// 64-bytes into V1..V4
-	ADD	$64, R5
-	VPDI	$0x4,V1,V1,V1		// flip the doublewords to big-endian order
-	VPDI	$0x4,V2,V2,V2		// flip the doublewords to big-endian order
-
-
-	VSBCBIQ	V1, V9, V0, V25
-	VSBIQ	V1, V9, V0, V17
-	VZERO	V9
-	VSBCBIQ	V2, V9, V25, V26
-	VSBIQ	V2, V9, V25, V18
-
-	VLM	0(R5), V5, V6		// 32-bytes into V5..V6
-	ADD	$32, R5
-
-	VPDI	$0x4,V3,V3,V3		// flip the doublewords to big-endian order
-	VPDI	$0x4,V4,V4,V4		// flip the doublewords to big-endian order
-
-
-	VSBCBIQ	V3, V9, V26, V27
-	VSBIQ	V3, V9, V26, V19
-	VSBCBIQ	V4, V9, V27, V28
-	VSBIQ	V4, V9, V27, V20
-
-	VLM	0(R5), V7, V8		// 32-bytes into V7..V8
-	ADD	$32, R5
-
-	VPDI	$0x4,V5,V5,V5		// flip the doublewords to big-endian order
-	VPDI	$0x4,V6,V6,V6		// flip the doublewords to big-endian order
-
-	VSBCBIQ	V5, V9, V28, V29
-	VSBIQ	V5, V9, V28, V21
-	VSBCBIQ	V6, V9, V29, V30
-	VSBIQ	V6, V9, V29, V22
-
-	VPDI	$0x4,V7,V7,V7		// flip the doublewords to big-endian order
-	VPDI	$0x4,V8,V8,V8		// flip the doublewords to big-endian order
-
-	VSBCBIQ	V7, V9, V30, V31
-	VSBIQ	V7, V9, V30, V23
-	VSBCBIQ	V8, V9, V31, V0	// V0 has carry-over
-	VSBIQ	V8, V9, V31, V24
-
-	VPDI	$0x4,V17,V17,V17	// flip the doublewords to big-endian order
-	VPDI	$0x4,V18,V18,V18	// flip the doublewords to big-endian order
-	VPDI	$0x4,V19,V19,V19	// flip the doublewords to big-endian order
-	VPDI	$0x4,V20,V20,V20	// flip the doublewords to big-endian order
-	VPDI	$0x4,V21,V21,V21	// flip the doublewords to big-endian order
-	VPDI	$0x4,V22,V22,V22	// flip the doublewords to big-endian order
-	VPDI	$0x4,V23,V23,V23	// flip the doublewords to big-endian order
-	VPDI	$0x4,V24,V24,V24	// flip the doublewords to big-endian order
-	VSTM	V17, V24, 0(R7)   	// 128-bytes into z
-	ADD	$128, R7
-	ADD	$128, R10		// i += 16
-	SUB	$16,  R3		// n -= 16
-	BGE	UU1			// if n >= 0 goto UU1
-	VLGVG	$1, V0, R4		// put cf into R4 in case we branch to v10
-	SUB	$1, R4			// save cf
-	NEG	R4, R4
-A11:	ADD	$12, R3			// n += 16
-
-	BLT	v11			// if n < 0 goto v11
-
-	// n >= 0
-	// regular loop body unrolled 4x
-
-U4:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD 0(R8)(R10*1), R5
-	MOVD 8(R8)(R10*1), R6
-	MOVD 16(R8)(R10*1), R7
-	MOVD 24(R8)(R10*1), R1
-	SUBC R4, R5 //SLGR  -> SUBC
-	SUBE R0, R6 //SLBGR -> SUBE
-	SUBE R0, R7
-	SUBE R0, R1
-	SUBE R4, R4		// save CF
-	NEG  R4, R4
-	MOVD R5, 0(R2)(R10*1)
-	MOVD R6, 8(R2)(R10*1)
-	MOVD R7, 16(R2)(R10*1)
-	MOVD R1, 24(R2)(R10*1)
-
-	ADD $32, R10		// i += 4 -> i +=32
-	SUB $4, R3		// n -= 4
-	BGE U4			// if n >= 0 goto U4
-
-v11:	ADD $4, R3		// n += 4
-	BLE	E11			// if n <= 0 goto E11
-
-L4:	// n > 0
-
-	MOVD	0(R8)(R10*1), R5
-	SUBC	R4, R5
-	SUBE	R4, R4		// save CF
-	NEG	R4, R4
-	MOVD	R5, 0(R2)(R10*1)
-
-	ADD	$8, R10		// i++
-	SUB	$1, R3		// n--
-	BGT	L4		// if n > 0 goto L4
-
-E11:	MOVD	R4, c+56(FP)	// return c
-
-	RET
-
-//DI = R3, CX = R4, SI = r10, r8 = r8, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0)
-// func subVW(z, x []Word, y Word) (c Word)
-// (same as addVW except for SUBC/SUBE instead of ADDC/ADDE and label names)
-TEXT ·subVW_novec(SB),NOSPLIT,$0
-	MOVD z_len+8(FP), R3
-	MOVD x+24(FP), R8
-	MOVD y+48(FP), R4	// c = y
-	MOVD z+0(FP), R2
-	MOVD $0, R0		// make sure it's 0
-	MOVD $0, R10		// i = 0
-
-	// s/JL/JMP/ below to disable the unrolled loop
-	SUB $4, R3		// n -= 4
-	BLT v4			// if n < 4 goto v4
-
-U4:	// n >= 0
-	// regular loop body unrolled 4x
-	MOVD 0(R8)(R10*1), R5
-	MOVD 8(R8)(R10*1), R6
-	MOVD 16(R8)(R10*1), R7
-	MOVD 24(R8)(R10*1), R1
-	SUBC R4, R5 //SLGR  -> SUBC
-	SUBE R0, R6 //SLBGR -> SUBE
-	SUBE R0, R7
-	SUBE R0, R1
-	SUBE R4, R4		// save CF
-	NEG  R4, R4
-	MOVD R5, 0(R2)(R10*1)
-	MOVD R6, 8(R2)(R10*1)
-	MOVD R7, 16(R2)(R10*1)
-	MOVD R1, 24(R2)(R10*1)
-
-	ADD $32, R10		// i += 4 -> i +=32
-	SUB $4, R3		// n -= 4
-	BGE U4			// if n >= 0 goto U4
-
-v4:	ADD $4, R3		// n += 4
-	BLE E4			// if n <= 0 goto E4
-
-L4:	// n > 0
-	MOVD 0(R8)(R10*1), R5
-	SUBC R4, R5
-	SUBE R4, R4		// save CF
-	NEG  R4, R4
-	MOVD R5, 0(R2)(R10*1)
-
-	ADD  $8, R10		// i++
-	SUB  $1, R3		// n--
-	BGT L4			// if n > 0 goto L4
-
-E4:	MOVD R4, c+56(FP)	// return c
-
-	RET
-
-// func shlVU(z, x []Word, s uint) (c Word)
-TEXT ·shlVU(SB),NOSPLIT,$0
-	MOVD	z_len+8(FP), R5
-	MOVD	$0, R0
-	SUB	$1, R5             // n--
-	BLT	X8b                // n < 0        (n <= 0)
-
-	// n > 0
-	MOVD	s+48(FP), R4
-	CMPBEQ	R0, R4, Z80	   //handle 0 case beq
-	MOVD	$64, R6
-	CMPBEQ	R6, R4, Z864	   //handle 64 case beq
-	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	SLD	$3, R5             // n = n*8
-	SUB	R4, R6, R7
-	MOVD	(R8)(R5*1), R10    // w1 = x[i-1]
-	SRD	R7, R10, R3
-	MOVD	R3, c+56(FP)
-
-	MOVD	$0, R1             // i = 0
-	BR	E8
-
-	// i < n-1
-L8:	MOVD	R10, R3             // w = w1
-	MOVD	-8(R8)(R5*1), R10   // w1 = x[i+1]
-
-	SLD	R4,  R3             // w<<s | w1>>ŝ
-	SRD	R7, R10, R6
-	OR 	R6, R3
-	MOVD	R3, (R2)(R5*1)      // z[i] = w<<s | w1>>ŝ
-	SUB	$8, R5              // i--
-
-E8:	CMPBGT	R5, R0, L8	    // i < n-1
-
-	// i >= n-1
-X8a:	SLD	R4, R10             // w1<<s
-	MOVD	R10, (R2)           // z[0] = w1<<s
-	RET
-
-X8b:	MOVD	R0, c+56(FP)
-	RET
-
-Z80:	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	SLD	$3, R5             // n = n*8
-
-	MOVD	(R8), R10
-	MOVD	$0, R3
-	MOVD	R3, c+56(FP)
-
-	MOVD	$0, R1             // i = 0
-	BR	E8Z
-
-	// i < n-1
-L8Z:	MOVD	R10, R3
-	MOVD	8(R8)(R1*1), R10
-
-	MOVD	R3, (R2)(R1*1)
-	ADD 	$8, R1
-
-E8Z:	CMPBLT	R1, R5, L8Z
-
-	// i >= n-1
-	MOVD	R10, (R2)(R5*1)
-	RET
-
-Z864:	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	SLD	$3, R5             // n = n*8
-	MOVD	(R8)(R5*1), R3     // w1 = x[n-1]
-	MOVD	R3, c+56(FP)       // z[i] = x[n-1]
-
-	BR	E864
-
-	// i < n-1
-L864:	MOVD	-8(R8)(R5*1), R3
-
-	MOVD	R3, (R2)(R5*1)     // z[i] = x[n-1]
-	SUB	$8, R5             // i--
-
-E864:	CMPBGT	R5, R0, L864       // i < n-1
-
-	MOVD	R0, (R2)           // z[n-1] = 0
-	RET
-
-
-// CX = R4, r8 = r8, r10 = r2 , r11 = r5, DX = r3, AX = r10 , BX = R1 , 64-count = r7 (R0 set to 0) temp = R6
-// func shrVU(z, x []Word, s uint) (c Word)
-TEXT ·shrVU(SB),NOSPLIT,$0
-	MOVD	z_len+8(FP), R5
-	MOVD	$0, R0
-	SUB	$1, R5             // n--
-	BLT	X9b                // n < 0        (n <= 0)
-
-	// n > 0
-	MOVD	s+48(FP), R4
-	CMPBEQ	R0, R4, ZB0	//handle 0 case beq
-	MOVD	$64, R6
-	CMPBEQ 	R6, R4, ZB64	//handle 64 case beq
-	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	SLD	$3, R5		// n = n*8
-	SUB	R4, R6, R7
-	MOVD	(R8), R10	// w1 = x[0]
-	SLD	R7, R10, R3
-	MOVD	R3, c+56(FP)
-
-	MOVD	$0, R1		// i = 0
-	BR 	E9
-
-	// i < n-1
-L9:	MOVD	R10, R3		// w = w1
-	MOVD	8(R8)(R1*1), R10	// w1 = x[i+1]
-
-	SRD	R4,  R3		// w>>s | w1<<s
-	SLD	R7, R10, R6
-	OR	R6, R3
-	MOVD	R3, (R2)(R1*1)	// z[i] = w>>s | w1<<s
-	ADD	$8, R1		// i++
-
-E9:	CMPBLT	R1, R5, L9	// i < n-1
-
-	// i >= n-1
-X9a:	SRD	R4, R10		// w1>>s
-	MOVD	R10, (R2)(R5*1)	// z[n-1] = w1>>s
-	RET
-
-X9b:	MOVD	R0, c+56(FP)
-	RET
-
-ZB0:	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	SLD	$3, R5		// n = n*8
-
-	MOVD	(R8), R10	// w1 = x[0]
-	MOVD	$0, R3		// R10 << 64
-	MOVD	R3, c+56(FP)
-
-	MOVD	$0, R1		// i = 0
-	BR	E9Z
-
-	// i < n-1
-L9Z:	MOVD	R10, R3		// w = w1
-	MOVD	8(R8)(R1*1), R10	// w1 = x[i+1]
-
-	MOVD	R3, (R2)(R1*1)	// z[i] = w>>s | w1<<s
-	ADD	$8, R1		// i++
-
-E9Z:	CMPBLT	R1, R5, L9Z	// i < n-1
-
-	// i >= n-1
-	MOVD	R10, (R2)(R5*1)	// z[n-1] = w1>>s
-	RET
-
-ZB64:	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	SLD	$3, R5		// n = n*8
-	MOVD	(R8), R3	// w1 = x[0]
-	MOVD	R3, c+56(FP)
-
-	MOVD	$0, R1		// i = 0
-	BR	E964
-
-	// i < n-1
-L964:	MOVD	8(R8)(R1*1), R3	// w1 = x[i+1]
-
-	MOVD	R3, (R2)(R1*1)	// z[i] = w>>s | w1<<s
-	ADD	$8, R1		// i++
-
-E964:	CMPBLT	R1, R5, L964	// i < n-1
-
-	// i >= n-1
-	MOVD	$0, R10            // w1>>s
-	MOVD	R10, (R2)(R5*1)    // z[n-1] = w1>>s
-	RET
-
-// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, DX = r3, AX = r6 , BX = R1 , (R0 set to 0) + use R11 + use R7 for i
-// func mulAddVWW(z, x []Word, y, r Word) (c Word)
-TEXT ·mulAddVWW(SB),NOSPLIT,$0
-	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R9
-	MOVD	r+56(FP), R4	// c = r
-	MOVD	z_len+8(FP), R5
-	MOVD	$0, R1		// i = 0
-	MOVD	$0, R7		// i*8 = 0
-	MOVD	$0, R0		// make sure it's zero
-	BR	E5
-
-L5:	MOVD	(R8)(R1*1), R6
-	MULHDU	R9, R6
-	ADDC	R4, R11 	//add to low order bits
-	ADDE	R0, R6
-	MOVD	R11, (R2)(R1*1)
-	MOVD	R6, R4
-	ADD	$8, R1		// i*8 + 8
-	ADD	$1, R7		// i++
-
-E5:	CMPBLT	R7, R5, L5	// i < n
-
-	MOVD	R4, c+64(FP)
-	RET
-
-// func addMulVVW(z, x []Word, y Word) (c Word)
-// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1 , (R0 set to 0) + use R11 + use R7 for i
-TEXT ·addMulVVW(SB),NOSPLIT,$0
-	MOVD	z+0(FP), R2
-	MOVD	x+24(FP), R8
-	MOVD	y+48(FP), R9
-	MOVD	z_len+8(FP), R5
-
-	MOVD	$0, R1		// i*8 = 0
-	MOVD	$0, R7		// i = 0
-	MOVD	$0, R0		// make sure it's zero
-	MOVD	$0, R4		// c = 0
-
-	MOVD	R5, R12
-	AND	$-2, R12
-	CMPBGE	R5, $2, A6
-	BR	E6
-
-A6:	MOVD	(R8)(R1*1), R6
-	MULHDU	R9, R6
-	MOVD	(R2)(R1*1), R10
-	ADDC	R10, R11	//add to low order bits
-	ADDE	R0, R6
-	ADDC	R4, R11
-	ADDE	R0, R6
-	MOVD	R6, R4
-	MOVD	R11, (R2)(R1*1)
-
-	MOVD	(8)(R8)(R1*1), R6
-	MULHDU	R9, R6
-	MOVD	(8)(R2)(R1*1), R10
-	ADDC	R10, R11	//add to low order bits
-	ADDE	R0, R6
-	ADDC	R4, R11
-	ADDE	R0, R6
-	MOVD	R6, R4
-	MOVD	R11, (8)(R2)(R1*1)
-
-	ADD	$16, R1		// i*8 + 8
-	ADD	$2, R7		// i++
-
-	CMPBLT	R7, R12, A6
-	BR	E6
-
-L6:	MOVD	(R8)(R1*1), R6
-	MULHDU	R9, R6
-	MOVD	(R2)(R1*1), R10
-	ADDC	R10, R11	//add to low order bits
-	ADDE	R0, R6
-	ADDC	R4, R11
-	ADDE	R0, R6
-	MOVD	R6, R4
-	MOVD	R11, (R2)(R1*1)
-
-	ADD	$8, R1		// i*8 + 8
-	ADD	$1, R7		// i++
-
-E6:	CMPBLT	R7, R5, L6	// i < n
-
-	MOVD	R4, c+56(FP)
-	RET
-
-// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
-// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1(*8) , (R0 set to 0) + use R11 + use R7 for i
-TEXT ·divWVW(SB),NOSPLIT,$0
-	MOVD	z+0(FP), R2
-	MOVD	xn+24(FP), R10	// r = xn
-	MOVD	x+32(FP), R8
-	MOVD	y+56(FP), R9
-	MOVD	z_len+8(FP), R7	// i = len(z)
-	SLD	$3, R7, R1		// i*8
-	MOVD	$0, R0		// make sure it's zero
-	BR	E7
-
-L7:	MOVD	(R8)(R1*1), R11
-	WORD	$0xB98700A9	//DLGR R10,R9
-	MOVD	R11, (R2)(R1*1)
-
-E7:	SUB	$1, R7		// i--
-	SUB	$8, R1
-	BGE	L7		// i >= 0
-
-	MOVD	R10, r+64(FP)
-	RET
-
-// func bitLen(x Word) (n int)
-TEXT ·bitLen(SB),NOSPLIT,$0
-	MOVD  x+0(FP), R2
-	FLOGR R2, R2 // clobbers R3
-	MOVD  $64, R3
-	SUB   R2, R3
-	MOVD  R3, n+8(FP)
-	RET
-
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_s390x_test.go b/pkg/bootstrap/src/bootstrap/math/big/arith_s390x_test.go
deleted file mode 100644
index f515976..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_s390x_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_s390x_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_s390x_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build s390x !math_big_pure_go
-
-package big
-
-import (
-	"testing"
-)
-
-// Tests whether the non-vector routines are working, even when the tests are run on a
-// vector-capable machine.
-
-func TestFunVVnovec(t *testing.T) {
-	if hasVX == true {
-		for _, a := range sumVV {
-			arg := a
-			testFunVV(t, "addVV_novec", addVV_novec, arg)
-
-			arg = argVV{a.z, a.y, a.x, a.c}
-			testFunVV(t, "addVV_novec symmetric", addVV_novec, arg)
-
-			arg = argVV{a.x, a.z, a.y, a.c}
-			testFunVV(t, "subVV_novec", subVV_novec, arg)
-
-			arg = argVV{a.y, a.z, a.x, a.c}
-			testFunVV(t, "subVV_novec symmetric", subVV_novec, arg)
-		}
-	}
-}
-
-func TestFunVWnovec(t *testing.T) {
-	if hasVX == true {
-		for _, a := range sumVW {
-			arg := a
-			testFunVW(t, "addVW_novec", addVW_novec, arg)
-
-			arg = argVW{a.x, a.z, a.y, a.c}
-			testFunVW(t, "subVW_novec", subVW_novec, arg)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/arith_test.go b/pkg/bootstrap/src/bootstrap/math/big/arith_test.go
deleted file mode 100644
index 0c75abd..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/arith_test.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/arith_test.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"fmt"
-	"internal/testenv"
-	"math/rand"
-	"strings"
-	"testing"
-)
-
-var isRaceBuilder = strings.HasSuffix(testenv.Builder(), "-race")
-
-type funWW func(x, y, c Word) (z1, z0 Word)
-type argWW struct {
-	x, y, c, z1, z0 Word
-}
-
-var sumWW = []argWW{
-	{0, 0, 0, 0, 0},
-	{0, 1, 0, 0, 1},
-	{0, 0, 1, 0, 1},
-	{0, 1, 1, 0, 2},
-	{12345, 67890, 0, 0, 80235},
-	{12345, 67890, 1, 0, 80236},
-	{_M, 1, 0, 1, 0},
-	{_M, 0, 1, 1, 0},
-	{_M, 1, 1, 1, 1},
-	{_M, _M, 0, 1, _M - 1},
-	{_M, _M, 1, 1, _M},
-}
-
-func testFunWW(t *testing.T, msg string, f funWW, a argWW) {
-	z1, z0 := f(a.x, a.y, a.c)
-	if z1 != a.z1 || z0 != a.z0 {
-		t.Errorf("%s%+v\n\tgot z1:z0 = %#x:%#x; want %#x:%#x", msg, a, z1, z0, a.z1, a.z0)
-	}
-}
-
-func TestFunWW(t *testing.T) {
-	for _, a := range sumWW {
-		arg := a
-		testFunWW(t, "addWW_g", addWW_g, arg)
-
-		arg = argWW{a.y, a.x, a.c, a.z1, a.z0}
-		testFunWW(t, "addWW_g symmetric", addWW_g, arg)
-
-		arg = argWW{a.z0, a.x, a.c, a.z1, a.y}
-		testFunWW(t, "subWW_g", subWW_g, arg)
-
-		arg = argWW{a.z0, a.y, a.c, a.z1, a.x}
-		testFunWW(t, "subWW_g symmetric", subWW_g, arg)
-	}
-}
-
-type funVV func(z, x, y []Word) (c Word)
-type argVV struct {
-	z, x, y nat
-	c       Word
-}
-
-var sumVV = []argVV{
-	{},
-	{nat{0}, nat{0}, nat{0}, 0},
-	{nat{1}, nat{1}, nat{0}, 0},
-	{nat{0}, nat{_M}, nat{1}, 1},
-	{nat{80235}, nat{12345}, nat{67890}, 0},
-	{nat{_M - 1}, nat{_M}, nat{_M}, 1},
-	{nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, nat{1, 0, 0, 0}, 1},
-	{nat{0, 0, 0, _M}, nat{_M, _M, _M, _M - 1}, nat{1, 0, 0, 0}, 0},
-	{nat{0, 0, 0, 0}, nat{_M, 0, _M, 0}, nat{1, _M, 0, _M}, 1},
-}
-
-func testFunVV(t *testing.T, msg string, f funVV, a argVV) {
-	z := make(nat, len(a.z))
-	c := f(z, a.x, a.y)
-	for i, zi := range z {
-		if zi != a.z[i] {
-			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
-			break
-		}
-	}
-	if c != a.c {
-		t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
-	}
-}
-
-func TestFunVV(t *testing.T) {
-	for _, a := range sumVV {
-		arg := a
-		testFunVV(t, "addVV_g", addVV_g, arg)
-		testFunVV(t, "addVV", addVV, arg)
-
-		arg = argVV{a.z, a.y, a.x, a.c}
-		testFunVV(t, "addVV_g symmetric", addVV_g, arg)
-		testFunVV(t, "addVV symmetric", addVV, arg)
-
-		arg = argVV{a.x, a.z, a.y, a.c}
-		testFunVV(t, "subVV_g", subVV_g, arg)
-		testFunVV(t, "subVV", subVV, arg)
-
-		arg = argVV{a.y, a.z, a.x, a.c}
-		testFunVV(t, "subVV_g symmetric", subVV_g, arg)
-		testFunVV(t, "subVV symmetric", subVV, arg)
-	}
-}
-
-// Always the same seed for reproducible results.
-var rnd = rand.New(rand.NewSource(0))
-
-func rndW() Word {
-	return Word(rnd.Int63()<<1 | rnd.Int63n(2))
-}
-
-func rndV(n int) []Word {
-	v := make([]Word, n)
-	for i := range v {
-		v[i] = rndW()
-	}
-	return v
-}
-
-var benchSizes = []int{1, 2, 3, 4, 5, 1e1, 1e2, 1e3, 1e4, 1e5}
-
-func BenchmarkAddVV(b *testing.B) {
-	for _, n := range benchSizes {
-		if isRaceBuilder && n > 1e3 {
-			continue
-		}
-		x := rndV(n)
-		y := rndV(n)
-		z := make([]Word, n)
-		b.Run(fmt.Sprint(n), func(b *testing.B) {
-			b.SetBytes(int64(n * _W))
-			for i := 0; i < b.N; i++ {
-				addVV(z, x, y)
-			}
-		})
-	}
-}
-
-type funVW func(z, x []Word, y Word) (c Word)
-type argVW struct {
-	z, x nat
-	y    Word
-	c    Word
-}
-
-var sumVW = []argVW{
-	{},
-	{nil, nil, 2, 2},
-	{nat{0}, nat{0}, 0, 0},
-	{nat{1}, nat{0}, 1, 0},
-	{nat{1}, nat{1}, 0, 0},
-	{nat{0}, nat{_M}, 1, 1},
-	{nat{0, 0, 0, 0}, nat{_M, _M, _M, _M}, 1, 1},
-	{nat{585}, nat{314}, 271, 0},
-}
-
-var lshVW = []argVW{
-	{},
-	{nat{0}, nat{0}, 0, 0},
-	{nat{0}, nat{0}, 1, 0},
-	{nat{0}, nat{0}, 20, 0},
-
-	{nat{_M}, nat{_M}, 0, 0},
-	{nat{_M << 1 & _M}, nat{_M}, 1, 1},
-	{nat{_M << 20 & _M}, nat{_M}, 20, _M >> (_W - 20)},
-
-	{nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
-	{nat{_M << 1 & _M, _M, _M}, nat{_M, _M, _M}, 1, 1},
-	{nat{_M << 20 & _M, _M, _M}, nat{_M, _M, _M}, 20, _M >> (_W - 20)},
-}
-
-var rshVW = []argVW{
-	{},
-	{nat{0}, nat{0}, 0, 0},
-	{nat{0}, nat{0}, 1, 0},
-	{nat{0}, nat{0}, 20, 0},
-
-	{nat{_M}, nat{_M}, 0, 0},
-	{nat{_M >> 1}, nat{_M}, 1, _M << (_W - 1) & _M},
-	{nat{_M >> 20}, nat{_M}, 20, _M << (_W - 20) & _M},
-
-	{nat{_M, _M, _M}, nat{_M, _M, _M}, 0, 0},
-	{nat{_M, _M, _M >> 1}, nat{_M, _M, _M}, 1, _M << (_W - 1) & _M},
-	{nat{_M, _M, _M >> 20}, nat{_M, _M, _M}, 20, _M << (_W - 20) & _M},
-}
-
-func testFunVW(t *testing.T, msg string, f funVW, a argVW) {
-	z := make(nat, len(a.z))
-	c := f(z, a.x, a.y)
-	for i, zi := range z {
-		if zi != a.z[i] {
-			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
-			break
-		}
-	}
-	if c != a.c {
-		t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
-	}
-}
-
-func makeFunVW(f func(z, x []Word, s uint) (c Word)) funVW {
-	return func(z, x []Word, s Word) (c Word) {
-		return f(z, x, uint(s))
-	}
-}
-
-func TestFunVW(t *testing.T) {
-	for _, a := range sumVW {
-		arg := a
-		testFunVW(t, "addVW_g", addVW_g, arg)
-		testFunVW(t, "addVW", addVW, arg)
-
-		arg = argVW{a.x, a.z, a.y, a.c}
-		testFunVW(t, "subVW_g", subVW_g, arg)
-		testFunVW(t, "subVW", subVW, arg)
-	}
-
-	shlVW_g := makeFunVW(shlVU_g)
-	shlVW := makeFunVW(shlVU)
-	for _, a := range lshVW {
-		arg := a
-		testFunVW(t, "shlVU_g", shlVW_g, arg)
-		testFunVW(t, "shlVU", shlVW, arg)
-	}
-
-	shrVW_g := makeFunVW(shrVU_g)
-	shrVW := makeFunVW(shrVU)
-	for _, a := range rshVW {
-		arg := a
-		testFunVW(t, "shrVU_g", shrVW_g, arg)
-		testFunVW(t, "shrVU", shrVW, arg)
-	}
-}
-
-func BenchmarkAddVW(b *testing.B) {
-	for _, n := range benchSizes {
-		if isRaceBuilder && n > 1e3 {
-			continue
-		}
-		x := rndV(n)
-		y := rndW()
-		z := make([]Word, n)
-		b.Run(fmt.Sprint(n), func(b *testing.B) {
-			b.SetBytes(int64(n * _S))
-			for i := 0; i < b.N; i++ {
-				addVW(z, x, y)
-			}
-		})
-	}
-}
-
-type funVWW func(z, x []Word, y, r Word) (c Word)
-type argVWW struct {
-	z, x nat
-	y, r Word
-	c    Word
-}
-
-var prodVWW = []argVWW{
-	{},
-	{nat{0}, nat{0}, 0, 0, 0},
-	{nat{991}, nat{0}, 0, 991, 0},
-	{nat{0}, nat{_M}, 0, 0, 0},
-	{nat{991}, nat{_M}, 0, 991, 0},
-	{nat{0}, nat{0}, _M, 0, 0},
-	{nat{991}, nat{0}, _M, 991, 0},
-	{nat{1}, nat{1}, 1, 0, 0},
-	{nat{992}, nat{1}, 1, 991, 0},
-	{nat{22793}, nat{991}, 23, 0, 0},
-	{nat{22800}, nat{991}, 23, 7, 0},
-	{nat{0, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 0, 0},
-	{nat{7, 0, 0, 22793}, nat{0, 0, 0, 991}, 23, 7, 0},
-	{nat{0, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 0, 0},
-	{nat{991, 0, 0, 0}, nat{7893475, 7395495, 798547395, 68943}, 0, 991, 0},
-	{nat{0, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 0, 0},
-	{nat{991, 0, 0, 0}, nat{0, 0, 0, 0}, 894375984, 991, 0},
-	{nat{_M << 1 & _M}, nat{_M}, 1 << 1, 0, _M >> (_W - 1)},
-	{nat{_M<<1&_M + 1}, nat{_M}, 1 << 1, 1, _M >> (_W - 1)},
-	{nat{_M << 7 & _M}, nat{_M}, 1 << 7, 0, _M >> (_W - 7)},
-	{nat{_M<<7&_M + 1<<6}, nat{_M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
-	{nat{_M << 7 & _M, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 0, _M >> (_W - 7)},
-	{nat{_M<<7&_M + 1<<6, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
-}
-
-func testFunVWW(t *testing.T, msg string, f funVWW, a argVWW) {
-	z := make(nat, len(a.z))
-	c := f(z, a.x, a.y, a.r)
-	for i, zi := range z {
-		if zi != a.z[i] {
-			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
-			break
-		}
-	}
-	if c != a.c {
-		t.Errorf("%s%+v\n\tgot c = %#x; want %#x", msg, a, c, a.c)
-	}
-}
-
-// TODO(gri) mulAddVWW and divWVW are symmetric operations but
-//           their signature is not symmetric. Try to unify.
-
-type funWVW func(z []Word, xn Word, x []Word, y Word) (r Word)
-type argWVW struct {
-	z  nat
-	xn Word
-	x  nat
-	y  Word
-	r  Word
-}
-
-func testFunWVW(t *testing.T, msg string, f funWVW, a argWVW) {
-	z := make(nat, len(a.z))
-	r := f(z, a.xn, a.x, a.y)
-	for i, zi := range z {
-		if zi != a.z[i] {
-			t.Errorf("%s%+v\n\tgot z[%d] = %#x; want %#x", msg, a, i, zi, a.z[i])
-			break
-		}
-	}
-	if r != a.r {
-		t.Errorf("%s%+v\n\tgot r = %#x; want %#x", msg, a, r, a.r)
-	}
-}
-
-func TestFunVWW(t *testing.T) {
-	for _, a := range prodVWW {
-		arg := a
-		testFunVWW(t, "mulAddVWW_g", mulAddVWW_g, arg)
-		testFunVWW(t, "mulAddVWW", mulAddVWW, arg)
-
-		if a.y != 0 && a.r < a.y {
-			arg := argWVW{a.x, a.c, a.z, a.y, a.r}
-			testFunWVW(t, "divWVW_g", divWVW_g, arg)
-			testFunWVW(t, "divWVW", divWVW, arg)
-		}
-	}
-}
-
-var mulWWTests = []struct {
-	x, y Word
-	q, r Word
-}{
-	{_M, _M, _M - 1, 1},
-	// 32 bit only: {0xc47dfa8c, 50911, 0x98a4, 0x998587f4},
-}
-
-func TestMulWW(t *testing.T) {
-	for i, test := range mulWWTests {
-		q, r := mulWW_g(test.x, test.y)
-		if q != test.q || r != test.r {
-			t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
-		}
-	}
-}
-
-var mulAddWWWTests = []struct {
-	x, y, c Word
-	q, r    Word
-}{
-	// TODO(agl): These will only work on 64-bit platforms.
-	// {15064310297182388543, 0xe7df04d2d35d5d80, 13537600649892366549, 13644450054494335067, 10832252001440893781},
-	// {15064310297182388543, 0xdab2f18048baa68d, 13644450054494335067, 12869334219691522700, 14233854684711418382},
-	{_M, _M, 0, _M - 1, 1},
-	{_M, _M, _M, _M, 0},
-}
-
-func TestMulAddWWW(t *testing.T) {
-	for i, test := range mulAddWWWTests {
-		q, r := mulAddWWW_g(test.x, test.y, test.c)
-		if q != test.q || r != test.r {
-			t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r)
-		}
-	}
-}
-
-func BenchmarkAddMulVVW(b *testing.B) {
-	for _, n := range benchSizes {
-		if isRaceBuilder && n > 1e3 {
-			continue
-		}
-		x := rndV(n)
-		y := rndW()
-		z := make([]Word, n)
-		b.Run(fmt.Sprint(n), func(b *testing.B) {
-			b.SetBytes(int64(n * _W))
-			for i := 0; i < b.N; i++ {
-				addMulVVW(z, x, y)
-			}
-		})
-	}
-}
-
-func testWordBitLen(t *testing.T, fname string, f func(Word) int) {
-	for i := 0; i <= _W; i++ {
-		x := Word(1) << uint(i-1) // i == 0 => x == 0
-		n := f(x)
-		if n != i {
-			t.Errorf("got %d; want %d for %s(%#x)", n, i, fname, x)
-		}
-	}
-}
-
-func TestWordBitLen(t *testing.T) {
-	testWordBitLen(t, "bitLen", bitLen)
-	testWordBitLen(t, "bitLen_g", bitLen_g)
-}
-
-// runs b.N iterations of bitLen called on a Word containing (1 << nbits)-1.
-func BenchmarkBitLen(b *testing.B) {
-	// Individual bitLen tests. Numbers chosen to examine both sides
-	// of powers-of-two boundaries.
-	for _, nbits := range []uint{0, 1, 2, 3, 4, 5, 8, 9, 16, 17, 31} {
-		testword := Word((uint64(1) << nbits) - 1)
-		b.Run(fmt.Sprint(nbits), func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				bitLen(testword)
-			}
-		})
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/bits_test.go b/pkg/bootstrap/src/bootstrap/math/big/bits_test.go
deleted file mode 100644
index 9a35a09..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/bits_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/bits_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/bits_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the Bits type used for testing Float operations
-// via an independent (albeit slower) representation for floating-point
-// numbers.
-
-package big
-
-import (
-	"fmt"
-	"sort"
-	"testing"
-)
-
-// A Bits value b represents a finite floating-point number x of the form
-//
-//	x = 2**b[0] + 2**b[1] + ... 2**b[len(b)-1]
-//
-// The order of slice elements is not significant. Negative elements may be
-// used to form fractions. A Bits value is normalized if each b[i] occurs at
-// most once. For instance Bits{0, 0, 1} is not normalized but represents the
-// same floating-point number as Bits{2}, which is normalized. The zero (nil)
-// value of Bits is a ready to use Bits value and represents the value 0.
-type Bits []int
-
-func (x Bits) add(y Bits) Bits {
-	return append(x, y...)
-}
-
-func (x Bits) mul(y Bits) Bits {
-	var p Bits
-	for _, x := range x {
-		for _, y := range y {
-			p = append(p, x+y)
-		}
-	}
-	return p
-}
-
-func TestMulBits(t *testing.T) {
-	for _, test := range []struct {
-		x, y, want Bits
-	}{
-		{nil, nil, nil},
-		{Bits{}, Bits{}, nil},
-		{Bits{0}, Bits{0}, Bits{0}},
-		{Bits{0}, Bits{1}, Bits{1}},
-		{Bits{1}, Bits{1, 2, 3}, Bits{2, 3, 4}},
-		{Bits{-1}, Bits{1}, Bits{0}},
-		{Bits{-10, -1, 0, 1, 10}, Bits{1, 2, 3}, Bits{-9, -8, -7, 0, 1, 2, 1, 2, 3, 2, 3, 4, 11, 12, 13}},
-	} {
-		got := fmt.Sprintf("%v", test.x.mul(test.y))
-		want := fmt.Sprintf("%v", test.want)
-		if got != want {
-			t.Errorf("%v * %v = %s; want %s", test.x, test.y, got, want)
-		}
-
-	}
-}
-
-// norm returns the normalized bits for x: It removes multiple equal entries
-// by treating them as an addition (e.g., Bits{5, 5} => Bits{6}), and it sorts
-// the result list for reproducible results.
-func (x Bits) norm() Bits {
-	m := make(map[int]bool)
-	for _, b := range x {
-		for m[b] {
-			m[b] = false
-			b++
-		}
-		m[b] = true
-	}
-	var z Bits
-	for b, set := range m {
-		if set {
-			z = append(z, b)
-		}
-	}
-	sort.Ints([]int(z))
-	return z
-}
-
-func TestNormBits(t *testing.T) {
-	for _, test := range []struct {
-		x, want Bits
-	}{
-		{nil, nil},
-		{Bits{}, Bits{}},
-		{Bits{0}, Bits{0}},
-		{Bits{0, 0}, Bits{1}},
-		{Bits{3, 1, 1}, Bits{2, 3}},
-		{Bits{10, 9, 8, 7, 6, 6}, Bits{11}},
-	} {
-		got := fmt.Sprintf("%v", test.x.norm())
-		want := fmt.Sprintf("%v", test.want)
-		if got != want {
-			t.Errorf("normBits(%v) = %s; want %s", test.x, got, want)
-		}
-
-	}
-}
-
-// round returns the Float value corresponding to x after rounding x
-// to prec bits according to mode.
-func (x Bits) round(prec uint, mode RoundingMode) *Float {
-	x = x.norm()
-
-	// determine range
-	var min, max int
-	for i, b := range x {
-		if i == 0 || b < min {
-			min = b
-		}
-		if i == 0 || b > max {
-			max = b
-		}
-	}
-	prec0 := uint(max + 1 - min)
-	if prec >= prec0 {
-		return x.Float()
-	}
-	// prec < prec0
-
-	// determine bit 0, rounding, and sticky bit, and result bits z
-	var bit0, rbit, sbit uint
-	var z Bits
-	r := max - int(prec)
-	for _, b := range x {
-		switch {
-		case b == r:
-			rbit = 1
-		case b < r:
-			sbit = 1
-		default:
-			// b > r
-			if b == r+1 {
-				bit0 = 1
-			}
-			z = append(z, b)
-		}
-	}
-
-	// round
-	f := z.Float() // rounded to zero
-	if mode == ToNearestAway {
-		panic("not yet implemented")
-	}
-	if mode == ToNearestEven && rbit == 1 && (sbit == 1 || sbit == 0 && bit0 != 0) || mode == AwayFromZero {
-		// round away from zero
-		f.SetMode(ToZero).SetPrec(prec)
-		f.Add(f, Bits{int(r) + 1}.Float())
-	}
-	return f
-}
-
-// Float returns the *Float z of the smallest possible precision such that
-// z = sum(2**bits[i]), with i = range bits. If multiple bits[i] are equal,
-// they are added: Bits{0, 1, 0}.Float() == 2**0 + 2**1 + 2**0 = 4.
-func (bits Bits) Float() *Float {
-	// handle 0
-	if len(bits) == 0 {
-		return new(Float)
-	}
-	// len(bits) > 0
-
-	// determine lsb exponent
-	var min int
-	for i, b := range bits {
-		if i == 0 || b < min {
-			min = b
-		}
-	}
-
-	// create bit pattern
-	x := NewInt(0)
-	for _, b := range bits {
-		badj := b - min
-		// propagate carry if necessary
-		for x.Bit(badj) != 0 {
-			x.SetBit(x, badj, 0)
-			badj++
-		}
-		x.SetBit(x, badj, 1)
-	}
-
-	// create corresponding float
-	z := new(Float).SetInt(x) // normalized
-	if e := int64(z.exp) + int64(min); MinExp <= e && e <= MaxExp {
-		z.exp = int32(e)
-	} else {
-		// this should never happen for our test cases
-		panic("exponent out of range")
-	}
-	return z
-}
-
-func TestFromBits(t *testing.T) {
-	for _, test := range []struct {
-		bits Bits
-		want string
-	}{
-		// all different bit numbers
-		{nil, "0"},
-		{Bits{0}, "0x.8p+1"},
-		{Bits{1}, "0x.8p+2"},
-		{Bits{-1}, "0x.8p+0"},
-		{Bits{63}, "0x.8p+64"},
-		{Bits{33, -30}, "0x.8000000000000001p+34"},
-		{Bits{255, 0}, "0x.8000000000000000000000000000000000000000000000000000000000000001p+256"},
-
-		// multiple equal bit numbers
-		{Bits{0, 0}, "0x.8p+2"},
-		{Bits{0, 0, 0, 0}, "0x.8p+3"},
-		{Bits{0, 1, 0}, "0x.8p+3"},
-		{append(Bits{2, 1, 0} /* 7 */, Bits{3, 1} /* 10 */ ...), "0x.88p+5" /* 17 */},
-	} {
-		f := test.bits.Float()
-		if got := f.Text('p', 0); got != test.want {
-			t.Errorf("setBits(%v) = %s; want %s", test.bits, got, test.want)
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/calibrate_test.go b/pkg/bootstrap/src/bootstrap/math/big/calibrate_test.go
deleted file mode 100644
index d9bce08..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/calibrate_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/calibrate_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/calibrate_test.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file prints execution times for the Mul benchmark
-// given different Karatsuba thresholds. The result may be
-// used to manually fine-tune the threshold constant. The
-// results are somewhat fragile; use repeated runs to get
-// a clear picture.
-
-// Usage: go test -run=TestCalibrate -calibrate
-
-package big
-
-import (
-	"flag"
-	"fmt"
-	"testing"
-	"time"
-)
-
-var calibrate = flag.Bool("calibrate", false, "run calibration test")
-
-func karatsubaLoad(b *testing.B) {
-	BenchmarkMul(b)
-}
-
-// measureKaratsuba returns the time to run a Karatsuba-relevant benchmark
-// given Karatsuba threshold th.
-func measureKaratsuba(th int) time.Duration {
-	th, karatsubaThreshold = karatsubaThreshold, th
-	res := testing.Benchmark(karatsubaLoad)
-	karatsubaThreshold = th
-	return time.Duration(res.NsPerOp())
-}
-
-func computeThresholds() {
-	fmt.Printf("Multiplication times for varying Karatsuba thresholds\n")
-	fmt.Printf("(run repeatedly for good results)\n")
-
-	// determine Tk, the work load execution time using basic multiplication
-	Tb := measureKaratsuba(1e9) // th == 1e9 => Karatsuba multiplication disabled
-	fmt.Printf("Tb = %10s\n", Tb)
-
-	// thresholds
-	th := 4
-	th1 := -1
-	th2 := -1
-
-	var deltaOld time.Duration
-	for count := -1; count != 0 && th < 128; count-- {
-		// determine Tk, the work load execution time using Karatsuba multiplication
-		Tk := measureKaratsuba(th)
-
-		// improvement over Tb
-		delta := (Tb - Tk) * 100 / Tb
-
-		fmt.Printf("th = %3d  Tk = %10s  %4d%%", th, Tk, delta)
-
-		// determine break-even point
-		if Tk < Tb && th1 < 0 {
-			th1 = th
-			fmt.Print("  break-even point")
-		}
-
-		// determine diminishing return
-		if 0 < delta && delta < deltaOld && th2 < 0 {
-			th2 = th
-			fmt.Print("  diminishing return")
-		}
-		deltaOld = delta
-
-		fmt.Println()
-
-		// trigger counter
-		if th1 >= 0 && th2 >= 0 && count < 0 {
-			count = 10 // this many extra measurements after we got both thresholds
-		}
-
-		th++
-	}
-}
-
-func TestCalibrate(t *testing.T) {
-	if *calibrate {
-		computeThresholds()
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/decimal.go b/pkg/bootstrap/src/bootstrap/math/big/decimal.go
deleted file mode 100644
index 378980a..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/decimal.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/decimal.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/decimal.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements multi-precision decimal numbers.
-// The implementation is for float to decimal conversion only;
-// not general purpose use.
-// The only operations are precise conversion from binary to
-// decimal and rounding.
-//
-// The key observation and some code (shr) is borrowed from
-// strconv/decimal.go: conversion of binary fractional values can be done
-// precisely in multi-precision decimal because 2 divides 10 (required for
-// >> of mantissa); but conversion of decimal floating-point values cannot
-// be done precisely in binary representation.
-//
-// In contrast to strconv/decimal.go, only right shift is implemented in
-// decimal format - left shift can be done precisely in binary format.
-
-package big
-
-// A decimal represents an unsigned floating-point number in decimal representation.
-// The value of a non-zero decimal d is d.mant * 10**d.exp with 0.5 <= d.mant < 1,
-// with the most-significant mantissa digit at index 0. For the zero decimal, the
-// mantissa length and exponent are 0.
-// The zero value for decimal represents a ready-to-use 0.0.
-type decimal struct {
-	mant []byte // mantissa ASCII digits, big-endian
-	exp  int    // exponent
-}
-
-// at returns the i'th mantissa digit, starting with the most significant digit at 0.
-func (d *decimal) at(i int) byte {
-	if 0 <= i && i < len(d.mant) {
-		return d.mant[i]
-	}
-	return '0'
-}
-
-// Maximum shift amount that can be done in one pass without overflow.
-// A Word has _W bits and (1<<maxShift - 1)*10 + 9 must fit into Word.
-const maxShift = _W - 4
-
-// TODO(gri) Since we know the desired decimal precision when converting
-// a floating-point number, we may be able to limit the number of decimal
-// digits that need to be computed by init by providing an additional
-// precision argument and keeping track of when a number was truncated early
-// (equivalent of "sticky bit" in binary rounding).
-
-// TODO(gri) Along the same lines, enforce some limit to shift magnitudes
-// to avoid "infinitely" long running conversions (until we run out of space).
-
-// Init initializes x to the decimal representation of m << shift (for
-// shift >= 0), or m >> -shift (for shift < 0).
-func (x *decimal) init(m nat, shift int) {
-	// special case 0
-	if len(m) == 0 {
-		x.mant = x.mant[:0]
-		x.exp = 0
-		return
-	}
-
-	// Optimization: If we need to shift right, first remove any trailing
-	// zero bits from m to reduce shift amount that needs to be done in
-	// decimal format (since that is likely slower).
-	if shift < 0 {
-		ntz := m.trailingZeroBits()
-		s := uint(-shift)
-		if s >= ntz {
-			s = ntz // shift at most ntz bits
-		}
-		m = nat(nil).shr(m, s)
-		shift += int(s)
-	}
-
-	// Do any shift left in binary representation.
-	if shift > 0 {
-		m = nat(nil).shl(m, uint(shift))
-		shift = 0
-	}
-
-	// Convert mantissa into decimal representation.
-	s := m.utoa(10)
-	n := len(s)
-	x.exp = n
-	// Trim trailing zeros; instead the exponent is tracking
-	// the decimal point independent of the number of digits.
-	for n > 0 && s[n-1] == '0' {
-		n--
-	}
-	x.mant = append(x.mant[:0], s[:n]...)
-
-	// Do any (remaining) shift right in decimal representation.
-	if shift < 0 {
-		for shift < -maxShift {
-			shr(x, maxShift)
-			shift += maxShift
-		}
-		shr(x, uint(-shift))
-	}
-}
-
-// shr implements x >> s, for s <= maxShift.
-func shr(x *decimal, s uint) {
-	// Division by 1<<s using shift-and-subtract algorithm.
-
-	// pick up enough leading digits to cover first shift
-	r := 0 // read index
-	var n Word
-	for n>>s == 0 && r < len(x.mant) {
-		ch := Word(x.mant[r])
-		r++
-		n = n*10 + ch - '0'
-	}
-	if n == 0 {
-		// x == 0; shouldn't get here, but handle anyway
-		x.mant = x.mant[:0]
-		return
-	}
-	for n>>s == 0 {
-		r++
-		n *= 10
-	}
-	x.exp += 1 - r
-
-	// read a digit, write a digit
-	w := 0 // write index
-	mask := Word(1)<<s - 1
-	for r < len(x.mant) {
-		ch := Word(x.mant[r])
-		r++
-		d := n >> s
-		n &= mask // n -= d << s
-		x.mant[w] = byte(d + '0')
-		w++
-		n = n*10 + ch - '0'
-	}
-
-	// write extra digits that still fit
-	for n > 0 && w < len(x.mant) {
-		d := n >> s
-		n &= mask
-		x.mant[w] = byte(d + '0')
-		w++
-		n = n * 10
-	}
-	x.mant = x.mant[:w] // the number may be shorter (e.g. 1024 >> 10)
-
-	// append additional digits that didn't fit
-	for n > 0 {
-		d := n >> s
-		n &= mask
-		x.mant = append(x.mant, byte(d+'0'))
-		n = n * 10
-	}
-
-	trim(x)
-}
-
-func (x *decimal) String() string {
-	if len(x.mant) == 0 {
-		return "0"
-	}
-
-	var buf []byte
-	switch {
-	case x.exp <= 0:
-		// 0.00ddd
-		buf = append(buf, "0."...)
-		buf = appendZeros(buf, -x.exp)
-		buf = append(buf, x.mant...)
-
-	case /* 0 < */ x.exp < len(x.mant):
-		// dd.ddd
-		buf = append(buf, x.mant[:x.exp]...)
-		buf = append(buf, '.')
-		buf = append(buf, x.mant[x.exp:]...)
-
-	default: // len(x.mant) <= x.exp
-		// ddd00
-		buf = append(buf, x.mant...)
-		buf = appendZeros(buf, x.exp-len(x.mant))
-	}
-
-	return string(buf)
-}
-
-// appendZeros appends n 0 digits to buf and returns buf.
-func appendZeros(buf []byte, n int) []byte {
-	for ; n > 0; n-- {
-		buf = append(buf, '0')
-	}
-	return buf
-}
-
-// shouldRoundUp reports if x should be rounded up
-// if shortened to n digits. n must be a valid index
-// for x.mant.
-func shouldRoundUp(x *decimal, n int) bool {
-	if x.mant[n] == '5' && n+1 == len(x.mant) {
-		// exactly halfway - round to even
-		return n > 0 && (x.mant[n-1]-'0')&1 != 0
-	}
-	// not halfway - digit tells all (x.mant has no trailing zeros)
-	return x.mant[n] >= '5'
-}
-
-// round sets x to (at most) n mantissa digits by rounding it
-// to the nearest even value with n (or fewer) mantissa digits.
-// If n < 0, x remains unchanged.
-func (x *decimal) round(n int) {
-	if n < 0 || n >= len(x.mant) {
-		return // nothing to do
-	}
-
-	if shouldRoundUp(x, n) {
-		x.roundUp(n)
-	} else {
-		x.roundDown(n)
-	}
-}
-
-func (x *decimal) roundUp(n int) {
-	if n < 0 || n >= len(x.mant) {
-		return // nothing to do
-	}
-	// 0 <= n < len(x.mant)
-
-	// find first digit < '9'
-	for n > 0 && x.mant[n-1] >= '9' {
-		n--
-	}
-
-	if n == 0 {
-		// all digits are '9's => round up to '1' and update exponent
-		x.mant[0] = '1' // ok since len(x.mant) > n
-		x.mant = x.mant[:1]
-		x.exp++
-		return
-	}
-
-	// n > 0 && x.mant[n-1] < '9'
-	x.mant[n-1]++
-	x.mant = x.mant[:n]
-	// x already trimmed
-}
-
-func (x *decimal) roundDown(n int) {
-	if n < 0 || n >= len(x.mant) {
-		return // nothing to do
-	}
-	x.mant = x.mant[:n]
-	trim(x)
-}
-
-// trim cuts off any trailing zeros from x's mantissa;
-// they are meaningless for the value of x.
-func trim(x *decimal) {
-	i := len(x.mant)
-	for i > 0 && x.mant[i-1] == '0' {
-		i--
-	}
-	x.mant = x.mant[:i]
-	if i == 0 {
-		x.exp = 0
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/decimal_test.go b/pkg/bootstrap/src/bootstrap/math/big/decimal_test.go
deleted file mode 100644
index f11981c..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/decimal_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/decimal_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/decimal_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"fmt"
-	"testing"
-)
-
-func TestDecimalString(t *testing.T) {
-	for _, test := range []struct {
-		x    decimal
-		want string
-	}{
-		{want: "0"},
-		{decimal{nil, 1000}, "0"}, // exponent of 0 is ignored
-		{decimal{[]byte("12345"), 0}, "0.12345"},
-		{decimal{[]byte("12345"), -3}, "0.00012345"},
-		{decimal{[]byte("12345"), +3}, "123.45"},
-		{decimal{[]byte("12345"), +10}, "1234500000"},
-	} {
-		if got := test.x.String(); got != test.want {
-			t.Errorf("%v == %s; want %s", test.x, got, test.want)
-		}
-	}
-}
-
-func TestDecimalInit(t *testing.T) {
-	for _, test := range []struct {
-		x     Word
-		shift int
-		want  string
-	}{
-		{0, 0, "0"},
-		{0, -100, "0"},
-		{0, 100, "0"},
-		{1, 0, "1"},
-		{1, 10, "1024"},
-		{1, 100, "1267650600228229401496703205376"},
-		{1, -100, "0.0000000000000000000000000000007888609052210118054117285652827862296732064351090230047702789306640625"},
-		{12345678, 8, "3160493568"},
-		{12345678, -8, "48225.3046875"},
-		{195312, 9, "99999744"},
-		{1953125, 9, "1000000000"},
-	} {
-		var d decimal
-		d.init(nat{test.x}.norm(), test.shift)
-		if got := d.String(); got != test.want {
-			t.Errorf("%d << %d == %s; want %s", test.x, test.shift, got, test.want)
-		}
-	}
-}
-
-func TestDecimalRounding(t *testing.T) {
-	for _, test := range []struct {
-		x              uint64
-		n              int
-		down, even, up string
-	}{
-		{0, 0, "0", "0", "0"},
-		{0, 1, "0", "0", "0"},
-
-		{1, 0, "0", "0", "10"},
-		{5, 0, "0", "0", "10"},
-		{9, 0, "0", "10", "10"},
-
-		{15, 1, "10", "20", "20"},
-		{45, 1, "40", "40", "50"},
-		{95, 1, "90", "100", "100"},
-
-		{12344999, 4, "12340000", "12340000", "12350000"},
-		{12345000, 4, "12340000", "12340000", "12350000"},
-		{12345001, 4, "12340000", "12350000", "12350000"},
-		{23454999, 4, "23450000", "23450000", "23460000"},
-		{23455000, 4, "23450000", "23460000", "23460000"},
-		{23455001, 4, "23450000", "23460000", "23460000"},
-
-		{99994999, 4, "99990000", "99990000", "100000000"},
-		{99995000, 4, "99990000", "100000000", "100000000"},
-		{99999999, 4, "99990000", "100000000", "100000000"},
-
-		{12994999, 4, "12990000", "12990000", "13000000"},
-		{12995000, 4, "12990000", "13000000", "13000000"},
-		{12999999, 4, "12990000", "13000000", "13000000"},
-	} {
-		x := nat(nil).setUint64(test.x)
-
-		var d decimal
-		d.init(x, 0)
-		d.roundDown(test.n)
-		if got := d.String(); got != test.down {
-			t.Errorf("roundDown(%d, %d) = %s; want %s", test.x, test.n, got, test.down)
-		}
-
-		d.init(x, 0)
-		d.round(test.n)
-		if got := d.String(); got != test.even {
-			t.Errorf("round(%d, %d) = %s; want %s", test.x, test.n, got, test.even)
-		}
-
-		d.init(x, 0)
-		d.roundUp(test.n)
-		if got := d.String(); got != test.up {
-			t.Errorf("roundUp(%d, %d) = %s; want %s", test.x, test.n, got, test.up)
-		}
-	}
-}
-
-var sink string
-
-func BenchmarkDecimalConversion(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		for shift := -100; shift <= +100; shift++ {
-			var d decimal
-			d.init(natOne, shift)
-			sink = d.String()
-		}
-	}
-}
-
-func BenchmarkFloatString(b *testing.B) {
-	x := new(Float)
-	for _, prec := range []uint{1e2, 1e3, 1e4, 1e5} {
-		x.SetPrec(prec).SetRat(NewRat(1, 3))
-		b.Run(fmt.Sprintf("%v", prec), func(b *testing.B) {
-			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
-				sink = x.String()
-			}
-		})
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/doc.go b/pkg/bootstrap/src/bootstrap/math/big/doc.go
deleted file mode 100644
index d7b96d0..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/doc.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/doc.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/doc.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package big implements arbitrary-precision arithmetic (big numbers).
-The following numeric types are supported:
-
-	Int    signed integers
-	Rat    rational numbers
-	Float  floating-point numbers
-
-The zero value for an Int, Rat, or Float corresponds to 0. Thus, new
-values can be declared in the usual ways and denote 0 without further
-initialization:
-
-	var x Int        // &x is an *Int of value 0
-	var r = &Rat{}   // r is a *Rat of value 0
-	y := new(Float)  // y is a *Float of value 0
-
-Alternatively, new values can be allocated and initialized with factory
-functions of the form:
-
-	func NewT(v V) *T
-
-For instance, NewInt(x) returns an *Int set to the value of the int64
-argument x, NewRat(a, b) returns a *Rat set to the fraction a/b where
-a and b are int64 values, and NewFloat(f) returns a *Float initialized
-to the float64 argument f. More flexibility is provided with explicit
-setters, for instance:
-
-	var z1 Int
-	z1.SetUint64(123)                 // z1 := 123
-	z2 := new(Rat).SetFloat64(1.25)   // z2 := 5/4
-	z3 := new(Float).SetInt(z1)       // z3 := 123.0
-
-Setters, numeric operations and predicates are represented as methods of
-the form:
-
-	func (z *T) SetV(v V) *T          // z = v
-	func (z *T) Unary(x *T) *T        // z = unary x
-	func (z *T) Binary(x, y *T) *T    // z = x binary y
-	func (x *T) Pred() P              // p = pred(x)
-
-with T one of Int, Rat, or Float. For unary and binary operations, the
-result is the receiver (usually named z in that case; see below); if it
-is one of the operands x or y it may be safely overwritten (and its memory
-reused).
-
-Arithmetic expressions are typically written as a sequence of individual
-method calls, with each call corresponding to an operation. The receiver
-denotes the result and the method arguments are the operation's operands.
-For instance, given three *Int values a, b and c, the invocation
-
-	c.Add(a, b)
-
-computes the sum a + b and stores the result in c, overwriting whatever
-value was held in c before. Unless specified otherwise, operations permit
-aliasing of parameters, so it is perfectly ok to write
-
-	sum.Add(sum, x)
-
-to accumulate values x in a sum.
-
-(By always passing in a result value via the receiver, memory use can be
-much better controlled. Instead of having to allocate new memory for each
-result, an operation can reuse the space allocated for the result value,
-and overwrite that value with the new result in the process.)
-
-Notational convention: Incoming method parameters (including the receiver)
-are named consistently in the API to clarify their use. Incoming operands
-are usually named x, y, a, b, and so on, but never z. A parameter specifying
-the result is named z (typically the receiver).
-
-For instance, the arguments for (*Int).Add are named x and y, and because
-the receiver specifies the result destination, it is called z:
-
-	func (z *Int) Add(x, y *Int) *Int
-
-Methods of this form typically return the incoming receiver as well, to
-enable simple call chaining.
-
-Methods which don't require a result value to be passed in (for instance,
-Int.Sign), simply return the result. In this case, the receiver is typically
-the first operand, named x:
-
-	func (x *Int) Sign() int
-
-Various methods support conversions between strings and corresponding
-numeric values, and vice versa: *Int, *Rat, and *Float values implement
-the Stringer interface for a (default) string representation of the value,
-but also provide SetString methods to initialize a value from a string in
-a variety of supported formats (see the respective SetString documentation).
-
-Finally, *Int, *Rat, and *Float satisfy the fmt package's Scanner interface
-for scanning and (except for *Rat) the Formatter interface for formatted
-printing.
-*/
-package big
diff --git a/pkg/bootstrap/src/bootstrap/math/big/example_rat_test.go b/pkg/bootstrap/src/bootstrap/math/big/example_rat_test.go
deleted file mode 100644
index 0a0063d..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/example_rat_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/example_rat_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/example_rat_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big_test
-
-import (
-	"fmt"
-	"bootstrap/math/big"
-)
-
-// Use the classic continued fraction for e
-//     e = [1; 0, 1, 1, 2, 1, 1, ... 2n, 1, 1, ...]
-// i.e., for the nth term, use
-//     1          if   n mod 3 != 1
-//  (n-1)/3 * 2   if   n mod 3 == 1
-func recur(n, lim int64) *big.Rat {
-	term := new(big.Rat)
-	if n%3 != 1 {
-		term.SetInt64(1)
-	} else {
-		term.SetInt64((n - 1) / 3 * 2)
-	}
-
-	if n > lim {
-		return term
-	}
-
-	// Directly initialize frac as the fractional
-	// inverse of the result of recur.
-	frac := new(big.Rat).Inv(recur(n+1, lim))
-
-	return term.Add(term, frac)
-}
-
-// This example demonstrates how to use big.Rat to compute the
-// first 15 terms in the sequence of rational convergents for
-// the constant e (base of natural logarithm).
-func Example_eConvergents() {
-	for i := 1; i <= 15; i++ {
-		r := recur(0, int64(i))
-
-		// Print r both as a fraction and as a floating-point number.
-		// Since big.Rat implements fmt.Formatter, we can use %-13s to
-		// get a left-aligned string representation of the fraction.
-		fmt.Printf("%-13s = %s\n", r, r.FloatString(8))
-	}
-
-	// Output:
-	// 2/1           = 2.00000000
-	// 3/1           = 3.00000000
-	// 8/3           = 2.66666667
-	// 11/4          = 2.75000000
-	// 19/7          = 2.71428571
-	// 87/32         = 2.71875000
-	// 106/39        = 2.71794872
-	// 193/71        = 2.71830986
-	// 1264/465      = 2.71827957
-	// 1457/536      = 2.71828358
-	// 2721/1001     = 2.71828172
-	// 23225/8544    = 2.71828184
-	// 25946/9545    = 2.71828182
-	// 49171/18089   = 2.71828183
-	// 517656/190435 = 2.71828183
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/example_test.go b/pkg/bootstrap/src/bootstrap/math/big/example_test.go
deleted file mode 100644
index b7d71a9..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/example_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/example_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/example_test.go:1
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big_test
-
-import (
-	"fmt"
-	"log"
-	"math"
-	"bootstrap/math/big"
-)
-
-func ExampleRat_SetString() {
-	r := new(big.Rat)
-	r.SetString("355/113")
-	fmt.Println(r.FloatString(3))
-	// Output: 3.142
-}
-
-func ExampleInt_SetString() {
-	i := new(big.Int)
-	i.SetString("644", 8) // octal
-	fmt.Println(i)
-	// Output: 420
-}
-
-func ExampleRat_Scan() {
-	// The Scan function is rarely used directly;
-	// the fmt package recognizes it as an implementation of fmt.Scanner.
-	r := new(big.Rat)
-	_, err := fmt.Sscan("1.5000", r)
-	if err != nil {
-		log.Println("error scanning value:", err)
-	} else {
-		fmt.Println(r)
-	}
-	// Output: 3/2
-}
-
-func ExampleInt_Scan() {
-	// The Scan function is rarely used directly;
-	// the fmt package recognizes it as an implementation of fmt.Scanner.
-	i := new(big.Int)
-	_, err := fmt.Sscan("18446744073709551617", i)
-	if err != nil {
-		log.Println("error scanning value:", err)
-	} else {
-		fmt.Println(i)
-	}
-	// Output: 18446744073709551617
-}
-
-func ExampleFloat_Scan() {
-	// The Scan function is rarely used directly;
-	// the fmt package recognizes it as an implementation of fmt.Scanner.
-	f := new(big.Float)
-	_, err := fmt.Sscan("1.19282e99", f)
-	if err != nil {
-		log.Println("error scanning value:", err)
-	} else {
-		fmt.Println(f)
-	}
-	// Output: 1.19282e+99
-}
-
-// This example demonstrates how to use big.Int to compute the smallest
-// Fibonacci number with 100 decimal digits and to test whether it is prime.
-func Example_fibonacci() {
-	// Initialize two big ints with the first two numbers in the sequence.
-	a := big.NewInt(0)
-	b := big.NewInt(1)
-
-	// Initialize limit as 10^99, the smallest integer with 100 digits.
-	var limit big.Int
-	limit.Exp(big.NewInt(10), big.NewInt(99), nil)
-
-	// Loop while a is smaller than 1e100.
-	for a.Cmp(&limit) < 0 {
-		// Compute the next Fibonacci number, storing it in a.
-		a.Add(a, b)
-		// Swap a and b so that b is the next number in the sequence.
-		a, b = b, a
-	}
-	fmt.Println(a) // 100-digit Fibonacci number
-
-	// Test a for primality.
-	// (ProbablyPrimes' argument sets the number of Miller-Rabin
-	// rounds to be performed. 20 is a good value.)
-	fmt.Println(a.ProbablyPrime(20))
-
-	// Output:
-	// 1344719667586153181419716641724567886890850696275767987106294472017884974410332069524504824747437757
-	// false
-}
-
-// This example shows how to use big.Float to compute the square root of 2 with
-// a precision of 200 bits, and how to print the result as a decimal number.
-func Example_sqrt2() {
-	// We'll do computations with 200 bits of precision in the mantissa.
-	const prec = 200
-
-	// Compute the square root of 2 using Newton's Method. We start with
-	// an initial estimate for sqrt(2), and then iterate:
-	//     x_{n+1} = 1/2 * ( x_n + (2.0 / x_n) )
-
-	// Since Newton's Method doubles the number of correct digits at each
-	// iteration, we need at least log_2(prec) steps.
-	steps := int(math.Log2(prec))
-
-	// Initialize values we need for the computation.
-	two := new(big.Float).SetPrec(prec).SetInt64(2)
-	half := new(big.Float).SetPrec(prec).SetFloat64(0.5)
-
-	// Use 1 as the initial estimate.
-	x := new(big.Float).SetPrec(prec).SetInt64(1)
-
-	// We use t as a temporary variable. There's no need to set its precision
-	// since big.Float values with unset (== 0) precision automatically assume
-	// the largest precision of the arguments when used as the result (receiver)
-	// of a big.Float operation.
-	t := new(big.Float)
-
-	// Iterate.
-	for i := 0; i <= steps; i++ {
-		t.Quo(two, x)  // t = 2.0 / x_n
-		t.Add(x, t)    // t = x_n + (2.0 / x_n)
-		x.Mul(half, t) // x_{n+1} = 0.5 * t
-	}
-
-	// We can use the usual fmt.Printf verbs since big.Float implements fmt.Formatter
-	fmt.Printf("sqrt(2) = %.50f\n", x)
-
-	// Print the error between 2 and x*x.
-	t.Mul(x, x) // t = x*x
-	fmt.Printf("error = %e\n", t.Sub(two, t))
-
-	// Output:
-	// sqrt(2) = 1.41421356237309504880168872420969807856967187537695
-	// error = 0.000000e+00
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/float.go b/pkg/bootstrap/src/bootstrap/math/big/float.go
deleted file mode 100644
index cf605b0..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/float.go
+++ /dev/null
@@ -1,1706 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/float.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/float.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements multi-precision floating-point numbers.
-// Like in the GNU MPFR library (http://www.mpfr.org/), operands
-// can be of mixed precision. Unlike MPFR, the rounding mode is
-// not specified with each operation, but with each operand. The
-// rounding mode of the result operand determines the rounding
-// mode of an operation. This is a from-scratch implementation.
-
-package big
-
-import (
-	"fmt"
-	"math"
-)
-
-const debugFloat = false // enable for debugging
-
-// A nonzero finite Float represents a multi-precision floating point number
-//
-//   sign × mantissa × 2**exponent
-//
-// with 0.5 <= mantissa < 1.0, and MinExp <= exponent <= MaxExp.
-// A Float may also be zero (+0, -0) or infinite (+Inf, -Inf).
-// All Floats are ordered, and the ordering of two Floats x and y
-// is defined by x.Cmp(y).
-//
-// Each Float value also has a precision, rounding mode, and accuracy.
-// The precision is the maximum number of mantissa bits available to
-// represent the value. The rounding mode specifies how a result should
-// be rounded to fit into the mantissa bits, and accuracy describes the
-// rounding error with respect to the exact result.
-//
-// Unless specified otherwise, all operations (including setters) that
-// specify a *Float variable for the result (usually via the receiver
-// with the exception of MantExp), round the numeric result according
-// to the precision and rounding mode of the result variable.
-//
-// If the provided result precision is 0 (see below), it is set to the
-// precision of the argument with the largest precision value before any
-// rounding takes place, and the rounding mode remains unchanged. Thus,
-// uninitialized Floats provided as result arguments will have their
-// precision set to a reasonable value determined by the operands and
-// their mode is the zero value for RoundingMode (ToNearestEven).
-//
-// By setting the desired precision to 24 or 53 and using matching rounding
-// mode (typically ToNearestEven), Float operations produce the same results
-// as the corresponding float32 or float64 IEEE-754 arithmetic for operands
-// that correspond to normal (i.e., not denormal) float32 or float64 numbers.
-// Exponent underflow and overflow lead to a 0 or an Infinity for different
-// values than IEEE-754 because Float exponents have a much larger range.
-//
-// The zero (uninitialized) value for a Float is ready to use and represents
-// the number +0.0 exactly, with precision 0 and rounding mode ToNearestEven.
-//
-type Float struct {
-	prec uint32
-	mode RoundingMode
-	acc  Accuracy
-	form form
-	neg  bool
-	mant nat
-	exp  int32
-}
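
The doc comment above attaches precision and rounding mode to the result operand rather than to the operation itself. A minimal sketch against the public math/big package (which this bootstrap copy mirrors) shows the same quotient rounded under two receiver precisions:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 1/3 has no finite binary representation; the receiver's precision
	// decides how the quotient is rounded.
	one := big.NewFloat(1)
	three := big.NewFloat(3)

	third24 := new(big.Float).SetPrec(24).Quo(one, three)
	third53 := new(big.Float).SetPrec(53).Quo(one, three)

	fmt.Println(third24.Text('g', 10), third24.Acc()) // rounded to 24 mantissa bits
	fmt.Println(third53.Text('g', 20), third53.Acc()) // rounded to 53 mantissa bits
}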
-
-// An ErrNaN panic is raised by a Float operation that would lead to
-// a NaN under IEEE-754 rules. An ErrNaN implements the error interface.
-type ErrNaN struct {
-	msg string
-}
-
-func (err ErrNaN) Error() string {
-	return err.msg
-}
-
-// NewFloat allocates and returns a new Float set to x,
-// with precision 53 and rounding mode ToNearestEven.
-// NewFloat panics with ErrNaN if x is a NaN.
-func NewFloat(x float64) *Float {
-	if math.IsNaN(x) {
-		panic(ErrNaN{"NewFloat(NaN)"})
-	}
-	return new(Float).SetFloat64(x)
-}
-
-// Exponent and precision limits.
-const (
-	MaxExp  = math.MaxInt32  // largest supported exponent
-	MinExp  = math.MinInt32  // smallest supported exponent
-	MaxPrec = math.MaxUint32 // largest (theoretically) supported precision; likely memory-limited
-)
-
-// Internal representation: The mantissa bits x.mant of a nonzero finite
-// Float x are stored in a nat slice long enough to hold up to x.prec bits;
-// the slice may (but doesn't have to) be shorter if the mantissa contains
-// trailing 0 bits. x.mant is normalized if the msb of x.mant == 1 (i.e.,
-// the msb is shifted all the way "to the left"). Thus, if the mantissa has
-// trailing 0 bits or x.prec is not a multiple of the Word size _W,
-// x.mant[0] has trailing zero bits. The msb of the mantissa corresponds
-// to the value 0.5; the exponent x.exp shifts the binary point as needed.
-//
-// A zero or non-finite Float x ignores x.mant and x.exp.
-//
-// x                 form      neg      mant         exp
-// ----------------------------------------------------------
-// ±0                zero      sign     -            -
-// 0 < |x| < +Inf    finite    sign     mantissa     exponent
-// ±Inf              inf       sign     -            -
-
-// A form value describes the internal representation.
-type form byte
-
-// The form value order is relevant - do not change!
-const (
-	zero form = iota
-	finite
-	inf
-)
-
-// RoundingMode determines how a Float value is rounded to the
-// desired precision. Rounding may change the Float value; the
-// rounding error is described by the Float's Accuracy.
-type RoundingMode byte
-
-// These constants define supported rounding modes.
-const (
-	ToNearestEven RoundingMode = iota // == IEEE 754-2008 roundTiesToEven
-	ToNearestAway                     // == IEEE 754-2008 roundTiesToAway
-	ToZero                            // == IEEE 754-2008 roundTowardZero
-	AwayFromZero                      // no IEEE 754-2008 equivalent
-	ToNegativeInf                     // == IEEE 754-2008 roundTowardNegative
-	ToPositiveInf                     // == IEEE 754-2008 roundTowardPositive
-)
-
-//go:generate stringer -type=RoundingMode
-
-// Accuracy describes the rounding error produced by the most recent
-// operation that generated a Float value, relative to the exact value.
-type Accuracy int8
-
-// Constants describing the Accuracy of a Float.
-const (
-	Below Accuracy = -1
-	Exact Accuracy = 0
-	Above Accuracy = +1
-)
-
-//go:generate stringer -type=Accuracy
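
Rounding mode and accuracy work as a pair: the receiver's mode picks the rounding direction, and Acc reports which side of the exact value the rounded result landed on. A small sketch, assuming the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Round 1.5 to a single mantissa bit under several modes and report
	// which side of the exact value the result ended up on.
	for _, mode := range []big.RoundingMode{
		big.ToNearestEven, big.ToNearestAway, big.ToZero, big.AwayFromZero,
	} {
		x := new(big.Float).SetPrec(1).SetMode(mode).SetFloat64(1.5)
		fmt.Printf("%-13v %s (%s)\n", mode, x.Text('g', 10), x.Acc())
	}
}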
-
-// SetPrec sets z's precision to prec and returns the (possibly) rounded
-// value of z. Rounding occurs according to z's rounding mode if the mantissa
-// cannot be represented in prec bits without loss of precision.
-// SetPrec(0) maps all finite values to ±0; infinite values remain unchanged.
-// If prec > MaxPrec, it is set to MaxPrec.
-func (z *Float) SetPrec(prec uint) *Float {
-	z.acc = Exact // optimistically assume no rounding is needed
-
-	// special case
-	if prec == 0 {
-		z.prec = 0
-		if z.form == finite {
-			// truncate z to 0
-			z.acc = makeAcc(z.neg)
-			z.form = zero
-		}
-		return z
-	}
-
-	// general case
-	if prec > MaxPrec {
-		prec = MaxPrec
-	}
-	old := z.prec
-	z.prec = uint32(prec)
-	if z.prec < old {
-		z.round(0)
-	}
-	return z
-}
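
SetPrec re-rounds a value already stored in the receiver, and SetPrec(0) collapses finite values to ±0 as described above. A brief sketch using the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewFloat(3.14159) // precision 53, mode ToNearestEven
	fmt.Println(x.Prec(), x.Text('g', 10))

	x.SetPrec(8) // re-round the stored value to 8 mantissa bits
	fmt.Println(x.Prec(), x.Text('g', 10), x.Acc())

	x.SetPrec(0) // finite values collapse to ±0
	fmt.Println(x.Prec(), x.Text('g', 10), x.Acc())
}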
-
-func makeAcc(above bool) Accuracy {
-	if above {
-		return Above
-	}
-	return Below
-}
-
-// SetMode sets z's rounding mode to mode and returns an exact z.
-// z remains unchanged otherwise.
-// z.SetMode(z.Mode()) is a cheap way to set z's accuracy to Exact.
-func (z *Float) SetMode(mode RoundingMode) *Float {
-	z.mode = mode
-	z.acc = Exact
-	return z
-}
-
-// Prec returns the mantissa precision of x in bits.
-// The result may be 0 for |x| == 0 and |x| == Inf.
-func (x *Float) Prec() uint {
-	return uint(x.prec)
-}
-
-// MinPrec returns the minimum precision required to represent x exactly
-// (i.e., the smallest prec before x.SetPrec(prec) would start rounding x).
-// The result is 0 for |x| == 0 and |x| == Inf.
-func (x *Float) MinPrec() uint {
-	if x.form != finite {
-		return 0
-	}
-	return uint(len(x.mant))*_W - x.mant.trailingZeroBits()
-}
-
-// Mode returns the rounding mode of x.
-func (x *Float) Mode() RoundingMode {
-	return x.mode
-}
-
-// Acc returns the accuracy of x produced by the most recent operation.
-func (x *Float) Acc() Accuracy {
-	return x.acc
-}
-
-// Sign returns:
-//
-//	-1 if x <   0
-//	 0 if x is ±0
-//	+1 if x >   0
-//
-func (x *Float) Sign() int {
-	if debugFloat {
-		x.validate()
-	}
-	if x.form == zero {
-		return 0
-	}
-	if x.neg {
-		return -1
-	}
-	return 1
-}
-
-// MantExp breaks x into its mantissa and exponent components
-// and returns the exponent. If a non-nil mant argument is
-// provided its value is set to the mantissa of x, with the
-// same precision and rounding mode as x. The components
-// satisfy x == mant × 2**exp, with 0.5 <= |mant| < 1.0.
-// Calling MantExp with a nil argument is an efficient way to
-// get the exponent of the receiver.
-//
-// Special cases are:
-//
-//	(  ±0).MantExp(mant) = 0, with mant set to   ±0
-//	(±Inf).MantExp(mant) = 0, with mant set to ±Inf
-//
-// x and mant may be the same in which case x is set to its
-// mantissa value.
-func (x *Float) MantExp(mant *Float) (exp int) {
-	if debugFloat {
-		x.validate()
-	}
-	if x.form == finite {
-		exp = int(x.exp)
-	}
-	if mant != nil {
-		mant.Copy(x)
-		if mant.form == finite {
-			mant.exp = 0
-		}
-	}
-	return
-}
-
-func (z *Float) setExpAndRound(exp int64, sbit uint) {
-	if exp < MinExp {
-		// underflow
-		z.acc = makeAcc(z.neg)
-		z.form = zero
-		return
-	}
-
-	if exp > MaxExp {
-		// overflow
-		z.acc = makeAcc(!z.neg)
-		z.form = inf
-		return
-	}
-
-	z.form = finite
-	z.exp = int32(exp)
-	z.round(sbit)
-}
-
-// SetMantExp sets z to mant × 2**exp and returns z.
-// The result z has the same precision and rounding mode
-// as mant. SetMantExp is an inverse of MantExp but does
-// not require 0.5 <= |mant| < 1.0. Specifically:
-//
-//	mant := new(Float)
-//	new(Float).SetMantExp(mant, x.MantExp(mant)).Cmp(x) == 0
-//
-// Special cases are:
-//
-//	z.SetMantExp(  ±0, exp) =   ±0
-//	z.SetMantExp(±Inf, exp) = ±Inf
-//
-// z and mant may be the same in which case z's exponent
-// is set to exp.
-func (z *Float) SetMantExp(mant *Float, exp int) *Float {
-	if debugFloat {
-		z.validate()
-		mant.validate()
-	}
-	z.Copy(mant)
-	if z.form != finite {
-		return z
-	}
-	z.setExpAndRound(int64(z.exp)+int64(exp), 0)
-	return z
-}
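
MantExp and SetMantExp decompose and reassemble a value as mant × 2**exp. A short sketch of the round trip, assuming the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewFloat(12.375) // == 0.7734375 × 2**4
	mant := new(big.Float)
	exp := x.MantExp(mant)
	fmt.Println(mant.Text('g', 10), exp) // 0.7734375 4

	// SetMantExp reverses the decomposition.
	y := new(big.Float).SetMantExp(mant, exp)
	fmt.Println(y.Cmp(x) == 0) // true
}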
-
-// Signbit returns true if x is negative or negative zero.
-func (x *Float) Signbit() bool {
-	return x.neg
-}
-
-// IsInf reports whether x is +Inf or -Inf.
-func (x *Float) IsInf() bool {
-	return x.form == inf
-}
-
-// IsInt reports whether x is an integer.
-// ±Inf values are not integers.
-func (x *Float) IsInt() bool {
-	if debugFloat {
-		x.validate()
-	}
-	// special cases
-	if x.form != finite {
-		return x.form == zero
-	}
-	// x.form == finite
-	if x.exp <= 0 {
-		return false
-	}
-	// x.exp > 0
-	return x.prec <= uint32(x.exp) || x.MinPrec() <= uint(x.exp) // not enough bits for fractional mantissa
-}
-
-// debugging support
-func (x *Float) validate() {
-	if !debugFloat {
-		// avoid performance bugs
-		panic("validate called but debugFloat is not set")
-	}
-	if x.form != finite {
-		return
-	}
-	m := len(x.mant)
-	if m == 0 {
-		panic("nonzero finite number with empty mantissa")
-	}
-	const msb = 1 << (_W - 1)
-	if x.mant[m-1]&msb == 0 {
-		panic(fmt.Sprintf("msb not set in last word %#x of %s", x.mant[m-1], x.Text('p', 0)))
-	}
-	if x.prec == 0 {
-		panic("zero precision finite number")
-	}
-}
-
-// round rounds z according to z.mode to z.prec bits and sets z.acc accordingly.
-// sbit must be 0 or 1 and summarizes any "sticky bit" information one might
-// have before calling round. z's mantissa must be normalized (with the msb set)
-// or empty.
-//
-// CAUTION: The rounding modes ToNegativeInf, ToPositiveInf are affected by the
-// sign of z. For correct rounding, the sign of z must be set correctly before
-// calling round.
-func (z *Float) round(sbit uint) {
-	if debugFloat {
-		z.validate()
-	}
-
-	z.acc = Exact
-	if z.form != finite {
-		// ±0 or ±Inf => nothing left to do
-		return
-	}
-	// z.form == finite && len(z.mant) > 0
-	// m > 0 implies z.prec > 0 (checked by validate)
-
-	m := uint32(len(z.mant)) // present mantissa length in words
-	bits := m * _W           // present mantissa bits; bits > 0
-	if bits <= z.prec {
-		// mantissa fits => nothing to do
-		return
-	}
-	// bits > z.prec
-
-	// Rounding is based on two bits: the rounding bit (rbit) and the
-	// sticky bit (sbit). The rbit is the bit immediately before the
-	// z.prec leading mantissa bits (the "0.5"). The sbit is set if any
-	// of the bits before the rbit are set (the "0.25", "0.125", etc.):
-	//
-	//   rbit  sbit  => "fractional part"
-	//
-	//   0     0        == 0
-	//   0     1        >  0  , < 0.5
-	//   1     0        == 0.5
-	//   1     1        >  0.5, < 1.0
-
-	// bits > z.prec: mantissa too large => round
-	r := uint(bits - z.prec - 1) // rounding bit position; r >= 0
-	rbit := z.mant.bit(r) & 1    // rounding bit; be safe and ensure it's a single bit
-	if sbit == 0 {
-		// TODO(gri) if rbit != 0 we don't need to compute sbit for some rounding modes (optimization)
-		sbit = z.mant.sticky(r)
-	}
-	sbit &= 1 // be safe and ensure it's a single bit
-
-	// cut off extra words
-	n := (z.prec + (_W - 1)) / _W // mantissa length in words for desired precision
-	if m > n {
-		copy(z.mant, z.mant[m-n:]) // move n last words to front
-		z.mant = z.mant[:n]
-	}
-
-	// determine number of trailing zero bits (ntz) and compute lsb mask of mantissa's least-significant word
-	ntz := n*_W - z.prec // 0 <= ntz < _W
-	lsb := Word(1) << ntz
-
-	// round if result is inexact
-	if rbit|sbit != 0 {
-		// Make rounding decision: The result mantissa is truncated ("rounded down")
-		// by default. Decide if we need to increment, or "round up", the (unsigned)
-		// mantissa.
-		inc := false
-		switch z.mode {
-		case ToNegativeInf:
-			inc = z.neg
-		case ToZero:
-			// nothing to do
-		case ToNearestEven:
-			inc = rbit != 0 && (sbit != 0 || z.mant[0]&lsb != 0)
-		case ToNearestAway:
-			inc = rbit != 0
-		case AwayFromZero:
-			inc = true
-		case ToPositiveInf:
-			inc = !z.neg
-		default:
-			panic("unreachable")
-		}
-
-		// A positive result (!z.neg) is Above the exact result if we increment,
-		// and it's Below if we truncate (Exact results require no rounding).
-		// For a negative result (z.neg) it is exactly the opposite.
-		z.acc = makeAcc(inc != z.neg)
-
-		if inc {
-			// add 1 to mantissa
-			if addVW(z.mant, z.mant, lsb) != 0 {
-				// mantissa overflow => adjust exponent
-				if z.exp >= MaxExp {
-					// exponent overflow
-					z.form = inf
-					return
-				}
-				z.exp++
-				// adjust mantissa: divide by 2 to compensate for exponent adjustment
-				shrVU(z.mant, z.mant, 1)
-				// set msb == carry == 1 from the mantissa overflow above
-				const msb = 1 << (_W - 1)
-				z.mant[n-1] |= msb
-			}
-		}
-	}
-
-	// zero out trailing bits in least-significant word
-	z.mant[0] &^= lsb - 1
-
-	if debugFloat {
-		z.validate()
-	}
-}
-
-func (z *Float) setBits64(neg bool, x uint64) *Float {
-	if z.prec == 0 {
-		z.prec = 64
-	}
-	z.acc = Exact
-	z.neg = neg
-	if x == 0 {
-		z.form = zero
-		return z
-	}
-	// x != 0
-	z.form = finite
-	s := nlz64(x)
-	z.mant = z.mant.setUint64(x << s)
-	z.exp = int32(64 - s) // always fits
-	if z.prec < 64 {
-		z.round(0)
-	}
-	return z
-}
-
-// SetUint64 sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to 64 (and rounding will have
-// no effect).
-func (z *Float) SetUint64(x uint64) *Float {
-	return z.setBits64(false, x)
-}
-
-// SetInt64 sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to 64 (and rounding will have
-// no effect).
-func (z *Float) SetInt64(x int64) *Float {
-	u := x
-	if u < 0 {
-		u = -u
-	}
-	// We cannot simply call z.SetUint64(uint64(u)) and change
-	// the sign afterwards because the sign affects rounding.
-	return z.setBits64(x < 0, uint64(u))
-}
-
-// SetFloat64 sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to 53 (and rounding will have
-// no effect). SetFloat64 panics with ErrNaN if x is a NaN.
-func (z *Float) SetFloat64(x float64) *Float {
-	if z.prec == 0 {
-		z.prec = 53
-	}
-	if math.IsNaN(x) {
-		panic(ErrNaN{"Float.SetFloat64(NaN)"})
-	}
-	z.acc = Exact
-	z.neg = math.Signbit(x) // handle -0, -Inf correctly
-	if x == 0 {
-		z.form = zero
-		return z
-	}
-	if math.IsInf(x, 0) {
-		z.form = inf
-		return z
-	}
-	// normalized x != 0
-	z.form = finite
-	fmant, exp := math.Frexp(x) // get normalized mantissa
-	z.mant = z.mant.setUint64(1<<63 | math.Float64bits(fmant)<<11)
-	z.exp = int32(exp) // always fits
-	if z.prec < 53 {
-		z.round(0)
-	}
-	return z
-}
-
-// fnorm normalizes mantissa m by shifting it to the left
-// such that the msb of the most-significant word (msw) is 1.
-// It returns the shift amount. It assumes that len(m) != 0.
-func fnorm(m nat) int64 {
-	if debugFloat && (len(m) == 0 || m[len(m)-1] == 0) {
-		panic("msw of mantissa is 0")
-	}
-	s := nlz(m[len(m)-1])
-	if s > 0 {
-		c := shlVU(m, m, s)
-		if debugFloat && c != 0 {
-			panic("nlz or shlVU incorrect")
-		}
-	}
-	return int64(s)
-}
-
-// SetInt sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to the larger of x.BitLen()
-// or 64 (and rounding will have no effect).
-func (z *Float) SetInt(x *Int) *Float {
-	// TODO(gri) can be more efficient if z.prec > 0
-	// but small compared to the size of x, or if there
-	// are many trailing 0's.
-	bits := uint32(x.BitLen())
-	if z.prec == 0 {
-		z.prec = umax32(bits, 64)
-	}
-	z.acc = Exact
-	z.neg = x.neg
-	if len(x.abs) == 0 {
-		z.form = zero
-		return z
-	}
-	// x != 0
-	z.mant = z.mant.set(x.abs)
-	fnorm(z.mant)
-	z.setExpAndRound(int64(bits), 0)
-	return z
-}
-
-// SetRat sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to the largest of a.BitLen(),
-// b.BitLen(), or 64; with x = a/b.
-func (z *Float) SetRat(x *Rat) *Float {
-	if x.IsInt() {
-		return z.SetInt(x.Num())
-	}
-	var a, b Float
-	a.SetInt(x.Num())
-	b.SetInt(x.Denom())
-	if z.prec == 0 {
-		z.prec = umax32(a.prec, b.prec)
-	}
-	return z.Quo(&a, &b)
-}
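
SetInt and SetRat pick a default precision from their operands when the receiver's precision is 0, and SetRat rounds when the rational has no finite binary expansion. A minimal sketch with the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// SetInt defaults the precision to max(x.BitLen(), 64), so 2**100
	// is stored exactly with 101 bits.
	n := new(big.Int).Lsh(big.NewInt(1), 100)
	f := new(big.Float).SetInt(n)
	fmt.Println(f.Prec(), f.Acc())

	// 1/3 cannot be stored exactly; SetRat rounds to the chosen precision.
	g := new(big.Float).SetPrec(64).SetRat(big.NewRat(1, 3))
	fmt.Println(g.Text('g', 25), g.Acc())
}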
-
-// SetInf sets z to the infinite Float -Inf if signbit is
-// set, or +Inf if signbit is not set, and returns z. The
-// precision of z is unchanged and the result is always
-// Exact.
-func (z *Float) SetInf(signbit bool) *Float {
-	z.acc = Exact
-	z.form = inf
-	z.neg = signbit
-	return z
-}
-
-// Set sets z to the (possibly rounded) value of x and returns z.
-// If z's precision is 0, it is changed to the precision of x
-// before setting z (and rounding will have no effect).
-// Rounding is performed according to z's precision and rounding
-// mode; and z's accuracy reports the result error relative to the
-// exact (not rounded) result.
-func (z *Float) Set(x *Float) *Float {
-	if debugFloat {
-		x.validate()
-	}
-	z.acc = Exact
-	if z != x {
-		z.form = x.form
-		z.neg = x.neg
-		if x.form == finite {
-			z.exp = x.exp
-			z.mant = z.mant.set(x.mant)
-		}
-		if z.prec == 0 {
-			z.prec = x.prec
-		} else if z.prec < x.prec {
-			z.round(0)
-		}
-	}
-	return z
-}
-
-// Copy sets z to x, with the same precision, rounding mode, and
-// accuracy as x, and returns z. x is not changed even if z and
-// x are the same.
-func (z *Float) Copy(x *Float) *Float {
-	if debugFloat {
-		x.validate()
-	}
-	if z != x {
-		z.prec = x.prec
-		z.mode = x.mode
-		z.acc = x.acc
-		z.form = x.form
-		z.neg = x.neg
-		if z.form == finite {
-			z.mant = z.mant.set(x.mant)
-			z.exp = x.exp
-		}
-	}
-	return z
-}
-
-// msb32 returns the 32 most significant bits of x.
-func msb32(x nat) uint32 {
-	i := len(x) - 1
-	if i < 0 {
-		return 0
-	}
-	if debugFloat && x[i]&(1<<(_W-1)) == 0 {
-		panic("x not normalized")
-	}
-	switch _W {
-	case 32:
-		return uint32(x[i])
-	case 64:
-		return uint32(x[i] >> 32)
-	}
-	panic("unreachable")
-}
-
-// msb64 returns the 64 most significant bits of x.
-func msb64(x nat) uint64 {
-	i := len(x) - 1
-	if i < 0 {
-		return 0
-	}
-	if debugFloat && x[i]&(1<<(_W-1)) == 0 {
-		panic("x not normalized")
-	}
-	switch _W {
-	case 32:
-		v := uint64(x[i]) << 32
-		if i > 0 {
-			v |= uint64(x[i-1])
-		}
-		return v
-	case 64:
-		return uint64(x[i])
-	}
-	panic("unreachable")
-}
-
-// Uint64 returns the unsigned integer resulting from truncating x
-// towards zero. If 0 <= x <= math.MaxUint64, the result is Exact
-// if x is an integer and Below otherwise.
-// The result is (0, Above) for x < 0, and (math.MaxUint64, Below)
-// for x > math.MaxUint64.
-func (x *Float) Uint64() (uint64, Accuracy) {
-	if debugFloat {
-		x.validate()
-	}
-
-	switch x.form {
-	case finite:
-		if x.neg {
-			return 0, Above
-		}
-		// 0 < x < +Inf
-		if x.exp <= 0 {
-			// 0 < x < 1
-			return 0, Below
-		}
-		// 1 <= x < Inf
-		if x.exp <= 64 {
-			// u = trunc(x) fits into a uint64
-			u := msb64(x.mant) >> (64 - uint32(x.exp))
-			if x.MinPrec() <= 64 {
-				return u, Exact
-			}
-			return u, Below // x truncated
-		}
-		// x too large
-		return math.MaxUint64, Below
-
-	case zero:
-		return 0, Exact
-
-	case inf:
-		if x.neg {
-			return 0, Above
-		}
-		return math.MaxUint64, Below
-	}
-
-	panic("unreachable")
-}
-
-// Int64 returns the integer resulting from truncating x towards zero.
-// If math.MinInt64 <= x <= math.MaxInt64, the result is Exact if x is
-// an integer, and Above (x < 0) or Below (x > 0) otherwise.
-// The result is (math.MinInt64, Above) for x < math.MinInt64,
-// and (math.MaxInt64, Below) for x > math.MaxInt64.
-func (x *Float) Int64() (int64, Accuracy) {
-	if debugFloat {
-		x.validate()
-	}
-
-	switch x.form {
-	case finite:
-		// 0 < |x| < +Inf
-		acc := makeAcc(x.neg)
-		if x.exp <= 0 {
-			// 0 < |x| < 1
-			return 0, acc
-		}
-		// x.exp > 0
-
-		// 1 <= |x| < +Inf
-		if x.exp <= 63 {
-			// i = trunc(x) fits into an int64 (excluding math.MinInt64)
-			i := int64(msb64(x.mant) >> (64 - uint32(x.exp)))
-			if x.neg {
-				i = -i
-			}
-			if x.MinPrec() <= uint(x.exp) {
-				return i, Exact
-			}
-			return i, acc // x truncated
-		}
-		if x.neg {
-			// check for special case x == math.MinInt64 (i.e., x == -(0.5 << 64))
-			if x.exp == 64 && x.MinPrec() == 1 {
-				acc = Exact
-			}
-			return math.MinInt64, acc
-		}
-		// x too large
-		return math.MaxInt64, Below
-
-	case zero:
-		return 0, Exact
-
-	case inf:
-		if x.neg {
-			return math.MinInt64, Above
-		}
-		return math.MaxInt64, Below
-	}
-
-	panic("unreachable")
-}
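
Uint64 and Int64 truncate toward zero and report the truncation direction through the returned Accuracy. A small sketch, assuming the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	u, acc := big.NewFloat(255.75).Uint64()
	fmt.Println(u, acc) // 255 Below: truncation toward zero lowers a positive value

	i, acc2 := big.NewFloat(-3.25).Int64()
	fmt.Println(i, acc2) // -3 Above: truncation toward zero raises a negative value
}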
-
-// Float32 returns the float32 value nearest to x. If x is too small to be
-// represented by a float32 (|x| < math.SmallestNonzeroFloat32), the result
-// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
-// If x is too large to be represented by a float32 (|x| > math.MaxFloat32),
-// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
-func (x *Float) Float32() (float32, Accuracy) {
-	if debugFloat {
-		x.validate()
-	}
-
-	switch x.form {
-	case finite:
-		// 0 < |x| < +Inf
-
-		const (
-			fbits = 32                //        float size
-			mbits = 23                //        mantissa size (excluding implicit msb)
-			ebits = fbits - mbits - 1 //     8  exponent size
-			bias  = 1<<(ebits-1) - 1  //   127  exponent bias
-			dmin  = 1 - bias - mbits  //  -149  smallest unbiased exponent (denormal)
-			emin  = 1 - bias          //  -126  smallest unbiased exponent (normal)
-			emax  = bias              //   127  largest unbiased exponent (normal)
-		)
-
-		// Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float32 mantissa.
-		e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
-
-		// Compute precision p for float32 mantissa.
-		// If the exponent is too small, we have a denormal number before
-		// rounding and fewer than p mantissa bits of precision available
-		// (the exponent remains fixed but the mantissa gets shifted right).
-		p := mbits + 1 // precision of normal float
-		if e < emin {
-			// recompute precision
-			p = mbits + 1 - emin + int(e)
-			// If p == 0, the mantissa of x is shifted so much to the right
-			// that its msb falls immediately to the right of the float32
-			// mantissa space. In other words, if the smallest denormal is
-			// considered "1.0", for p == 0, the mantissa value m is >= 0.5.
-			// If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
-			// If m == 0.5, it is rounded down to even, i.e., 0.0.
-			// If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
-			if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
-				// underflow to ±0
-				if x.neg {
-					var z float32
-					return -z, Above
-				}
-				return 0.0, Below
-			}
-			// otherwise, round up
-			// We handle p == 0 explicitly because it's easy and because
-			// Float.round doesn't support rounding to 0 bits of precision.
-			if p == 0 {
-				if x.neg {
-					return -math.SmallestNonzeroFloat32, Below
-				}
-				return math.SmallestNonzeroFloat32, Above
-			}
-		}
-		// p > 0
-
-		// round
-		var r Float
-		r.prec = uint32(p)
-		r.Set(x)
-		e = r.exp - 1
-
-		// Rounding may have caused r to overflow to ±Inf
-		// (rounding never causes underflows to 0).
-		// If the exponent is too large, also overflow to ±Inf.
-		if r.form == inf || e > emax {
-			// overflow
-			if x.neg {
-				return float32(math.Inf(-1)), Below
-			}
-			return float32(math.Inf(+1)), Above
-		}
-		// e <= emax
-
-		// Determine sign, biased exponent, and mantissa.
-		var sign, bexp, mant uint32
-		if x.neg {
-			sign = 1 << (fbits - 1)
-		}
-
-		// Rounding may have caused a denormal number to
-		// become normal. Check again.
-		if e < emin {
-			// denormal number: recompute precision
-			// Since rounding may have at best increased precision
-			// and we have eliminated p <= 0 early, we know p > 0.
-			// bexp == 0 for denormals
-			p = mbits + 1 - emin + int(e)
-			mant = msb32(r.mant) >> uint(fbits-p)
-		} else {
-			// normal number: emin <= e <= emax
-			bexp = uint32(e+bias) << mbits
-			mant = msb32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
-		}
-
-		return math.Float32frombits(sign | bexp | mant), r.acc
-
-	case zero:
-		if x.neg {
-			var z float32
-			return -z, Exact
-		}
-		return 0.0, Exact
-
-	case inf:
-		if x.neg {
-			return float32(math.Inf(-1)), Exact
-		}
-		return float32(math.Inf(+1)), Exact
-	}
-
-	panic("unreachable")
-}
-
-// Float64 returns the float64 value nearest to x. If x is too small to be
-// represented by a float64 (|x| < math.SmallestNonzeroFloat64), the result
-// is (0, Below) or (-0, Above), respectively, depending on the sign of x.
-// If x is too large to be represented by a float64 (|x| > math.MaxFloat64),
-// the result is (+Inf, Above) or (-Inf, Below), depending on the sign of x.
-func (x *Float) Float64() (float64, Accuracy) {
-	if debugFloat {
-		x.validate()
-	}
-
-	switch x.form {
-	case finite:
-		// 0 < |x| < +Inf
-
-		const (
-			fbits = 64                //        float size
-			mbits = 52                //        mantissa size (excluding implicit msb)
-			ebits = fbits - mbits - 1 //    11  exponent size
-			bias  = 1<<(ebits-1) - 1  //  1023  exponent bias
-			dmin  = 1 - bias - mbits  // -1074  smallest unbiased exponent (denormal)
-			emin  = 1 - bias          // -1022  smallest unbiased exponent (normal)
-			emax  = bias              //  1023  largest unbiased exponent (normal)
-		)
-
-		// Float mantissa m is 0.5 <= m < 1.0; compute exponent e for float64 mantissa.
-		e := x.exp - 1 // exponent for normal mantissa m with 1.0 <= m < 2.0
-
-		// Compute precision p for float64 mantissa.
-		// If the exponent is too small, we have a denormal number before
-		// rounding and fewer than p mantissa bits of precision available
-		// (the exponent remains fixed but the mantissa gets shifted right).
-		p := mbits + 1 // precision of normal float
-		if e < emin {
-			// recompute precision
-			p = mbits + 1 - emin + int(e)
-			// If p == 0, the mantissa of x is shifted so much to the right
-			// that its msb falls immediately to the right of the float64
-			// mantissa space. In other words, if the smallest denormal is
-			// considered "1.0", for p == 0, the mantissa value m is >= 0.5.
-			// If m > 0.5, it is rounded up to 1.0; i.e., the smallest denormal.
-			// If m == 0.5, it is rounded down to even, i.e., 0.0.
-			// If p < 0, the mantissa value m is <= "0.25" which is never rounded up.
-			if p < 0 /* m <= 0.25 */ || p == 0 && x.mant.sticky(uint(len(x.mant))*_W-1) == 0 /* m == 0.5 */ {
-				// underflow to ±0
-				if x.neg {
-					var z float64
-					return -z, Above
-				}
-				return 0.0, Below
-			}
-			// otherwise, round up
-			// We handle p == 0 explicitly because it's easy and because
-			// Float.round doesn't support rounding to 0 bits of precision.
-			if p == 0 {
-				if x.neg {
-					return -math.SmallestNonzeroFloat64, Below
-				}
-				return math.SmallestNonzeroFloat64, Above
-			}
-		}
-		// p > 0
-
-		// round
-		var r Float
-		r.prec = uint32(p)
-		r.Set(x)
-		e = r.exp - 1
-
-		// Rounding may have caused r to overflow to ±Inf
-		// (rounding never causes underflows to 0).
-		// If the exponent is too large, also overflow to ±Inf.
-		if r.form == inf || e > emax {
-			// overflow
-			if x.neg {
-				return math.Inf(-1), Below
-			}
-			return math.Inf(+1), Above
-		}
-		// e <= emax
-
-		// Determine sign, biased exponent, and mantissa.
-		var sign, bexp, mant uint64
-		if x.neg {
-			sign = 1 << (fbits - 1)
-		}
-
-		// Rounding may have caused a denormal number to
-		// become normal. Check again.
-		if e < emin {
-			// denormal number: recompute precision
-			// Since rounding may have at best increased precision
-			// and we have eliminated p <= 0 early, we know p > 0.
-			// bexp == 0 for denormals
-			p = mbits + 1 - emin + int(e)
-			mant = msb64(r.mant) >> uint(fbits-p)
-		} else {
-			// normal number: emin <= e <= emax
-			bexp = uint64(e+bias) << mbits
-			mant = msb64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
-		}
-
-		return math.Float64frombits(sign | bexp | mant), r.acc
-
-	case zero:
-		if x.neg {
-			var z float64
-			return -z, Exact
-		}
-		return 0.0, Exact
-
-	case inf:
-		if x.neg {
-			return math.Inf(-1), Exact
-		}
-		return math.Inf(+1), Exact
-	}
-
-	panic("unreachable")
-}
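
Float64 (like Float32 above) returns the nearest IEEE value together with the rounding direction, and overflows to ±Inf when the exponent is out of range. A brief sketch against the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Compute 1/10 with 200 bits, then ask for the nearest float64.
	// The accuracy reports whether the conversion rounded up or down.
	tenth := new(big.Float).SetPrec(200).Quo(big.NewFloat(1), big.NewFloat(10))
	f, acc := tenth.Float64()
	fmt.Println(f, acc)

	// Values beyond the float64 range overflow to ±Inf.
	huge := new(big.Float).SetMantExp(big.NewFloat(1), 5000) // ≈ 2**5000
	g, acc := huge.Float64()
	fmt.Println(g, acc) // +Inf Above
}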
-
-// Int returns the result of truncating x towards zero;
-// or nil if x is an infinity.
-// The result is Exact if x.IsInt(); otherwise it is Below
-// for x > 0, and Above for x < 0.
-// If a non-nil *Int argument z is provided, Int stores
-// the result in z instead of allocating a new Int.
-func (x *Float) Int(z *Int) (*Int, Accuracy) {
-	if debugFloat {
-		x.validate()
-	}
-
-	if z == nil && x.form <= finite {
-		z = new(Int)
-	}
-
-	switch x.form {
-	case finite:
-		// 0 < |x| < +Inf
-		acc := makeAcc(x.neg)
-		if x.exp <= 0 {
-			// 0 < |x| < 1
-			return z.SetInt64(0), acc
-		}
-		// x.exp > 0
-
-		// 1 <= |x| < +Inf
-		// determine minimum required precision for x
-		allBits := uint(len(x.mant)) * _W
-		exp := uint(x.exp)
-		if x.MinPrec() <= exp {
-			acc = Exact
-		}
-		// shift mantissa as needed
-		if z == nil {
-			z = new(Int)
-		}
-		z.neg = x.neg
-		switch {
-		case exp > allBits:
-			z.abs = z.abs.shl(x.mant, exp-allBits)
-		default:
-			z.abs = z.abs.set(x.mant)
-		case exp < allBits:
-			z.abs = z.abs.shr(x.mant, allBits-exp)
-		}
-		return z, acc
-
-	case zero:
-		return z.SetInt64(0), Exact
-
-	case inf:
-		return nil, makeAcc(x.neg)
-	}
-
-	panic("unreachable")
-}
-
-// Rat returns the rational number corresponding to x;
-// or nil if x is an infinity.
-// The result is Exact if x is not an Inf.
-// If a non-nil *Rat argument z is provided, Rat stores
-// the result in z instead of allocating a new Rat.
-func (x *Float) Rat(z *Rat) (*Rat, Accuracy) {
-	if debugFloat {
-		x.validate()
-	}
-
-	if z == nil && x.form <= finite {
-		z = new(Rat)
-	}
-
-	switch x.form {
-	case finite:
-		// 0 < |x| < +Inf
-		allBits := int32(len(x.mant)) * _W
-		// build up numerator and denominator
-		z.a.neg = x.neg
-		switch {
-		case x.exp > allBits:
-			z.a.abs = z.a.abs.shl(x.mant, uint(x.exp-allBits))
-			z.b.abs = z.b.abs[:0] // == 1 (see Rat)
-			// z already in normal form
-		default:
-			z.a.abs = z.a.abs.set(x.mant)
-			z.b.abs = z.b.abs[:0] // == 1 (see Rat)
-			// z already in normal form
-		case x.exp < allBits:
-			z.a.abs = z.a.abs.set(x.mant)
-			t := z.b.abs.setUint64(1)
-			z.b.abs = t.shl(t, uint(allBits-x.exp))
-			z.norm()
-		}
-		return z, Exact
-
-	case zero:
-		return z.SetInt64(0), Exact
-
-	case inf:
-		return nil, makeAcc(x.neg)
-	}
-
-	panic("unreachable")
-}
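
Int truncates toward zero while Rat is exact for every finite Float, since a finite Float is a rational with a power-of-two denominator. A minimal sketch using the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewFloat(2.75)

	i, acc := x.Int(nil) // truncate toward zero
	fmt.Println(i, acc)  // 2 Below

	r, acc2 := x.Rat(nil) // every finite Float is an exact rational
	fmt.Println(r, acc2)  // 11/4 Exact
}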
-
-// Abs sets z to the (possibly rounded) value |x| (the absolute value of x)
-// and returns z.
-func (z *Float) Abs(x *Float) *Float {
-	z.Set(x)
-	z.neg = false
-	return z
-}
-
-// Neg sets z to the (possibly rounded) value of x with its sign negated,
-// and returns z.
-func (z *Float) Neg(x *Float) *Float {
-	z.Set(x)
-	z.neg = !z.neg
-	return z
-}
-
-func validateBinaryOperands(x, y *Float) {
-	if !debugFloat {
-		// avoid performance bugs
-		panic("validateBinaryOperands called but debugFloat is not set")
-	}
-	if len(x.mant) == 0 {
-		panic("empty mantissa for x")
-	}
-	if len(y.mant) == 0 {
-		panic("empty mantissa for y")
-	}
-}
-
-// z = x + y, ignoring signs of x and y for the addition
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) uadd(x, y *Float) {
-	// Note: This implementation requires 2 shifts most of the
-	// time. It is also inefficient if exponents or precisions
-	// differ by wide margins. The following article describes
-	// an efficient (but much more complicated) implementation
-	// compatible with the internal representation used here:
-	//
-	// Vincent Lefèvre: "The Generic Multiple-Precision Floating-
-	// Point Addition With Exact Rounding (as in the MPFR Library)"
-	// http://www.vinc17.net/research/papers/rnc6.pdf
-
-	if debugFloat {
-		validateBinaryOperands(x, y)
-	}
-
-	// compute exponents ex, ey for mantissa with "binary point"
-	// on the right (mantissa.0) - use int64 to avoid overflow
-	ex := int64(x.exp) - int64(len(x.mant))*_W
-	ey := int64(y.exp) - int64(len(y.mant))*_W
-
-	al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
-
-	// TODO(gri) having a combined add-and-shift primitive
-	//           could make this code significantly faster
-	switch {
-	case ex < ey:
-		if al {
-			t := nat(nil).shl(y.mant, uint(ey-ex))
-			z.mant = z.mant.add(x.mant, t)
-		} else {
-			z.mant = z.mant.shl(y.mant, uint(ey-ex))
-			z.mant = z.mant.add(x.mant, z.mant)
-		}
-	default:
-		// ex == ey, no shift needed
-		z.mant = z.mant.add(x.mant, y.mant)
-	case ex > ey:
-		if al {
-			t := nat(nil).shl(x.mant, uint(ex-ey))
-			z.mant = z.mant.add(t, y.mant)
-		} else {
-			z.mant = z.mant.shl(x.mant, uint(ex-ey))
-			z.mant = z.mant.add(z.mant, y.mant)
-		}
-		ex = ey
-	}
-	// len(z.mant) > 0
-
-	z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
-}
-
-// z = x - y for |x| > |y|, ignoring signs of x and y for the subtraction
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) usub(x, y *Float) {
-	// This code is symmetric to uadd.
-	// We have not factored the common code out because
-	// eventually uadd (and usub) should be optimized
-	// by special-casing, and the code will diverge.
-
-	if debugFloat {
-		validateBinaryOperands(x, y)
-	}
-
-	ex := int64(x.exp) - int64(len(x.mant))*_W
-	ey := int64(y.exp) - int64(len(y.mant))*_W
-
-	al := alias(z.mant, x.mant) || alias(z.mant, y.mant)
-
-	switch {
-	case ex < ey:
-		if al {
-			t := nat(nil).shl(y.mant, uint(ey-ex))
-			z.mant = t.sub(x.mant, t)
-		} else {
-			z.mant = z.mant.shl(y.mant, uint(ey-ex))
-			z.mant = z.mant.sub(x.mant, z.mant)
-		}
-	default:
-		// ex == ey, no shift needed
-		z.mant = z.mant.sub(x.mant, y.mant)
-	case ex > ey:
-		if al {
-			t := nat(nil).shl(x.mant, uint(ex-ey))
-			z.mant = t.sub(t, y.mant)
-		} else {
-			z.mant = z.mant.shl(x.mant, uint(ex-ey))
-			z.mant = z.mant.sub(z.mant, y.mant)
-		}
-		ex = ey
-	}
-
-	// operands may have canceled each other out
-	if len(z.mant) == 0 {
-		z.acc = Exact
-		z.form = zero
-		z.neg = false
-		return
-	}
-	// len(z.mant) > 0
-
-	z.setExpAndRound(ex+int64(len(z.mant))*_W-fnorm(z.mant), 0)
-}
-
-// z = x * y, ignoring signs of x and y for the multiplication
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) umul(x, y *Float) {
-	if debugFloat {
-		validateBinaryOperands(x, y)
-	}
-
-	// Note: This is doing too much work if the precision
-	// of z is less than the sum of the precisions of x
-	// and y which is often the case (e.g., if all floats
-	// have the same precision).
-	// TODO(gri) Optimize this for the common case.
-
-	e := int64(x.exp) + int64(y.exp)
-	z.mant = z.mant.mul(x.mant, y.mant)
-
-	z.setExpAndRound(e-fnorm(z.mant), 0)
-}
-
-// z = x / y, ignoring signs of x and y for the division
-// but using the sign of z for rounding the result.
-// x and y must have a non-empty mantissa and valid exponent.
-func (z *Float) uquo(x, y *Float) {
-	if debugFloat {
-		validateBinaryOperands(x, y)
-	}
-
-	// mantissa length in words for desired result precision + 1
-	// (at least one extra bit so we get the rounding bit after
-	// the division)
-	n := int(z.prec/_W) + 1
-
-	// compute adjusted x.mant such that we get enough result precision
-	xadj := x.mant
-	if d := n - len(x.mant) + len(y.mant); d > 0 {
-		// d extra words needed => add d "0 digits" to x
-		xadj = make(nat, len(x.mant)+d)
-		copy(xadj[d:], x.mant)
-	}
-	// TODO(gri): If we have too many digits (d < 0), we should be able
-	// to shorten x for faster division. But we must be extra careful
-	// with rounding in that case.
-
-	// Compute d before division since there may be aliasing of x.mant
-	// (via xadj) or y.mant with z.mant.
-	d := len(xadj) - len(y.mant)
-
-	// divide
-	var r nat
-	z.mant, r = z.mant.div(nil, xadj, y.mant)
-	e := int64(x.exp) - int64(y.exp) - int64(d-len(z.mant))*_W
-
-	// The result is long enough to include (at least) the rounding bit.
-	// If there's a non-zero remainder, the corresponding fractional part
-	// (if it were computed), would have a non-zero sticky bit (if it were
-	// zero, it couldn't have a non-zero remainder).
-	var sbit uint
-	if len(r) > 0 {
-		sbit = 1
-	}
-
-	z.setExpAndRound(e-fnorm(z.mant), sbit)
-}
-
-// ucmp returns -1, 0, or +1, depending on whether
-// |x| < |y|, |x| == |y|, or |x| > |y|.
-// x and y must have a non-empty mantissa and valid exponent.
-func (x *Float) ucmp(y *Float) int {
-	if debugFloat {
-		validateBinaryOperands(x, y)
-	}
-
-	switch {
-	case x.exp < y.exp:
-		return -1
-	case x.exp > y.exp:
-		return +1
-	}
-	// x.exp == y.exp
-
-	// compare mantissas
-	i := len(x.mant)
-	j := len(y.mant)
-	for i > 0 || j > 0 {
-		var xm, ym Word
-		if i > 0 {
-			i--
-			xm = x.mant[i]
-		}
-		if j > 0 {
-			j--
-			ym = y.mant[j]
-		}
-		switch {
-		case xm < ym:
-			return -1
-		case xm > ym:
-			return +1
-		}
-	}
-
-	return 0
-}
-
-// Handling of sign bit as defined by IEEE 754-2008, section 6.3:
-//
-// When neither the inputs nor result are NaN, the sign of a product or
-// quotient is the exclusive OR of the operands’ signs; the sign of a sum,
-// or of a difference x−y regarded as a sum x+(−y), differs from at most
-// one of the addends’ signs; and the sign of the result of conversions,
-// the quantize operation, the roundToIntegral operations, and the
-// roundToIntegralExact (see 5.3.1) is the sign of the first or only operand.
-// These rules shall apply even when operands or results are zero or infinite.
-//
-// When the sum of two operands with opposite signs (or the difference of
-// two operands with like signs) is exactly zero, the sign of that sum (or
-// difference) shall be +0 in all rounding-direction attributes except
-// roundTowardNegative; under that attribute, the sign of an exact zero
-// sum (or difference) shall be −0. However, x+x = x−(−x) retains the same
-// sign as x even when x is zero.
-//
-// See also: https://play.golang.org/p/RtH3UCt5IH
-
-// Add sets z to the rounded sum x+y and returns z. If z's precision is 0,
-// it is changed to the larger of x's or y's precision before the operation.
-// Rounding is performed according to z's precision and rounding mode; and
-// z's accuracy reports the result error relative to the exact (not rounded)
-// result. Add panics with ErrNaN if x and y are infinities with opposite
-// signs. The value of z is undefined in that case.
-//
-// BUG(gri) When rounding ToNegativeInf, the sign of Float values rounded to 0 is incorrect.
-func (z *Float) Add(x, y *Float) *Float {
-	if debugFloat {
-		x.validate()
-		y.validate()
-	}
-
-	if z.prec == 0 {
-		z.prec = umax32(x.prec, y.prec)
-	}
-
-	if x.form == finite && y.form == finite {
-		// x + y (common case)
-		z.neg = x.neg
-		if x.neg == y.neg {
-			// x + y == x + y
-			// (-x) + (-y) == -(x + y)
-			z.uadd(x, y)
-		} else {
-			// x + (-y) == x - y == -(y - x)
-			// (-x) + y == y - x == -(x - y)
-			if x.ucmp(y) > 0 {
-				z.usub(x, y)
-			} else {
-				z.neg = !z.neg
-				z.usub(y, x)
-			}
-		}
-		return z
-	}
-
-	if x.form == inf && y.form == inf && x.neg != y.neg {
-		// +Inf + -Inf
-		// -Inf + +Inf
-		// value of z is undefined but make sure it's valid
-		z.acc = Exact
-		z.form = zero
-		z.neg = false
-		panic(ErrNaN{"addition of infinities with opposite signs"})
-	}
-
-	if x.form == zero && y.form == zero {
-		// ±0 + ±0
-		z.acc = Exact
-		z.form = zero
-		z.neg = x.neg && y.neg // -0 + -0 == -0
-		return z
-	}
-
-	if x.form == inf || y.form == zero {
-		// ±Inf + y
-		// x + ±0
-		return z.Set(x)
-	}
-
-	// ±0 + y
-	// x + ±Inf
-	return z.Set(y)
-}
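
Add panics with ErrNaN when the sum has no Float representation (infinities of opposite sign); since ErrNaN implements error, the panic value can be recovered and inspected. A short sketch, assuming the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	defer func() {
		if r := recover(); r != nil {
			if nan, ok := r.(big.ErrNaN); ok {
				fmt.Println("caught:", nan.Error())
				return
			}
			panic(r) // some other panic; re-raise
		}
	}()

	posInf := new(big.Float).SetInf(false)
	negInf := new(big.Float).SetInf(true)
	new(big.Float).Add(posInf, negInf) // would be NaN under IEEE-754
}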
-
-// Sub sets z to the rounded difference x-y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Sub panics with ErrNaN if x and y are infinities with equal
-// signs. The value of z is undefined in that case.
-func (z *Float) Sub(x, y *Float) *Float {
-	if debugFloat {
-		x.validate()
-		y.validate()
-	}
-
-	if z.prec == 0 {
-		z.prec = umax32(x.prec, y.prec)
-	}
-
-	if x.form == finite && y.form == finite {
-		// x - y (common case)
-		z.neg = x.neg
-		if x.neg != y.neg {
-			// x - (-y) == x + y
-			// (-x) - y == -(x + y)
-			z.uadd(x, y)
-		} else {
-			// x - y == x - y == -(y - x)
-			// (-x) - (-y) == y - x == -(x - y)
-			if x.ucmp(y) > 0 {
-				z.usub(x, y)
-			} else {
-				z.neg = !z.neg
-				z.usub(y, x)
-			}
-		}
-		return z
-	}
-
-	if x.form == inf && y.form == inf && x.neg == y.neg {
-		// +Inf - +Inf
-		// -Inf - -Inf
-		// value of z is undefined but make sure it's valid
-		z.acc = Exact
-		z.form = zero
-		z.neg = false
-		panic(ErrNaN{"subtraction of infinities with equal signs"})
-	}
-
-	if x.form == zero && y.form == zero {
-		// ±0 - ±0
-		z.acc = Exact
-		z.form = zero
-		z.neg = x.neg && !y.neg // -0 - +0 == -0
-		return z
-	}
-
-	if x.form == inf || y.form == zero {
-		// ±Inf - y
-		// x - ±0
-		return z.Set(x)
-	}
-
-	// ±0 - y
-	// x - ±Inf
-	return z.Neg(y)
-}
-
-// Mul sets z to the rounded product x*y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Mul panics with ErrNaN if one operand is zero and the other
-// operand an infinity. The value of z is undefined in that case.
-func (z *Float) Mul(x, y *Float) *Float {
-	if debugFloat {
-		x.validate()
-		y.validate()
-	}
-
-	if z.prec == 0 {
-		z.prec = umax32(x.prec, y.prec)
-	}
-
-	z.neg = x.neg != y.neg
-
-	if x.form == finite && y.form == finite {
-		// x * y (common case)
-		z.umul(x, y)
-		return z
-	}
-
-	z.acc = Exact
-	if x.form == zero && y.form == inf || x.form == inf && y.form == zero {
-		// ±0 * ±Inf
-		// ±Inf * ±0
-		// value of z is undefined but make sure it's valid
-		z.form = zero
-		z.neg = false
-		panic(ErrNaN{"multiplication of zero with infinity"})
-	}
-
-	if x.form == inf || y.form == inf {
-		// ±Inf * y
-		// x * ±Inf
-		z.form = inf
-		return z
-	}
-
-	// ±0 * y
-	// x * ±0
-	z.form = zero
-	return z
-}
-
-// Quo sets z to the rounded quotient x/y and returns z.
-// Precision, rounding, and accuracy reporting are as for Add.
-// Quo panics with ErrNaN if both operands are zero or infinities.
-// The value of z is undefined in that case.
-func (z *Float) Quo(x, y *Float) *Float {
-	if debugFloat {
-		x.validate()
-		y.validate()
-	}
-
-	if z.prec == 0 {
-		z.prec = umax32(x.prec, y.prec)
-	}
-
-	z.neg = x.neg != y.neg
-
-	if x.form == finite && y.form == finite {
-		// x / y (common case)
-		z.uquo(x, y)
-		return z
-	}
-
-	z.acc = Exact
-	if x.form == zero && y.form == zero || x.form == inf && y.form == inf {
-		// ±0 / ±0
-		// ±Inf / ±Inf
-		// value of z is undefined but make sure it's valid
-		z.form = zero
-		z.neg = false
-		panic(ErrNaN{"division of zero by zero or infinity by infinity"})
-	}
-
-	if x.form == zero || y.form == inf {
-		// ±0 / y
-		// x / ±Inf
-		z.form = zero
-		return z
-	}
-
-	// x / ±0
-	// ±Inf / y
-	z.form = inf
-	return z
-}
-
-// Cmp compares x and y and returns:
-//
-//   -1 if x <  y
-//    0 if x == y (incl. -0 == 0, -Inf == -Inf, and +Inf == +Inf)
-//   +1 if x >  y
-//
-func (x *Float) Cmp(y *Float) int {
-	if debugFloat {
-		x.validate()
-		y.validate()
-	}
-
-	mx := x.ord()
-	my := y.ord()
-	switch {
-	case mx < my:
-		return -1
-	case mx > my:
-		return +1
-	}
-	// mx == my
-
-	// only if |mx| == 1 do we have to compare the mantissae
-	switch mx {
-	case -1:
-		return y.ucmp(x)
-	case +1:
-		return x.ucmp(y)
-	}
-
-	return 0
-}
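
Cmp orders all Floats, treating -0 and +0 as equal and placing ±Inf at the ends of the ordering; Signbit still distinguishes the zeros. A brief sketch with the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	negZero := new(big.Float).Neg(big.NewFloat(0)) // -0
	fmt.Println(negZero.Cmp(big.NewFloat(0)))      // 0: -0 and +0 compare equal
	fmt.Println(negZero.Signbit())                 // true: the sign is still observable

	negInf := new(big.Float).SetInf(true)
	fmt.Println(negInf.Cmp(big.NewFloat(-1e300))) // -1: -Inf is below every finite value
}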
-
-// ord classifies x and returns:
-//
-//	-2 if -Inf == x
-//	-1 if -Inf < x < 0
-//	 0 if x == 0 (signed or unsigned)
-//	+1 if 0 < x < +Inf
-//	+2 if x == +Inf
-//
-func (x *Float) ord() int {
-	var m int
-	switch x.form {
-	case finite:
-		m = 1
-	case zero:
-		return 0
-	case inf:
-		m = 2
-	}
-	if x.neg {
-		m = -m
-	}
-	return m
-}
-
-func umax32(x, y uint32) uint32 {
-	if x > y {
-		return x
-	}
-	return y
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/float_test.go b/pkg/bootstrap/src/bootstrap/math/big/float_test.go
deleted file mode 100644
index ed469a8..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/float_test.go
+++ /dev/null
@@ -1,1808 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/float_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/float_test.go:1
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"flag"
-	"fmt"
-	"math"
-	"strconv"
-	"strings"
-	"testing"
-)
-
-// Verify that ErrNaN implements the error interface.
-var _ error = ErrNaN{}
-
-func (x *Float) uint64() uint64 {
-	u, acc := x.Uint64()
-	if acc != Exact {
-		panic(fmt.Sprintf("%s is not a uint64", x.Text('g', 10)))
-	}
-	return u
-}
-
-func (x *Float) int64() int64 {
-	i, acc := x.Int64()
-	if acc != Exact {
-		panic(fmt.Sprintf("%s is not an int64", x.Text('g', 10)))
-	}
-	return i
-}
-
-func TestFloatZeroValue(t *testing.T) {
-	// zero (uninitialized) value is a ready-to-use 0.0
-	var x Float
-	if s := x.Text('f', 1); s != "0.0" {
-		t.Errorf("zero value = %s; want 0.0", s)
-	}
-
-	// zero value has precision 0
-	if prec := x.Prec(); prec != 0 {
-		t.Errorf("prec = %d; want 0", prec)
-	}
-
-	// zero value can be used in any and all positions of binary operations
-	make := func(x int) *Float {
-		var f Float
-		if x != 0 {
-			f.SetInt64(int64(x))
-		}
-		// x == 0 translates into the zero value
-		return &f
-	}
-	for _, test := range []struct {
-		z, x, y, want int
-		opname        rune
-		op            func(z, x, y *Float) *Float
-	}{
-		{0, 0, 0, 0, '+', (*Float).Add},
-		{0, 1, 2, 3, '+', (*Float).Add},
-		{1, 2, 0, 2, '+', (*Float).Add},
-		{2, 0, 1, 1, '+', (*Float).Add},
-
-		{0, 0, 0, 0, '-', (*Float).Sub},
-		{0, 1, 2, -1, '-', (*Float).Sub},
-		{1, 2, 0, 2, '-', (*Float).Sub},
-		{2, 0, 1, -1, '-', (*Float).Sub},
-
-		{0, 0, 0, 0, '*', (*Float).Mul},
-		{0, 1, 2, 2, '*', (*Float).Mul},
-		{1, 2, 0, 0, '*', (*Float).Mul},
-		{2, 0, 1, 0, '*', (*Float).Mul},
-
-		// {0, 0, 0, 0, '/', (*Float).Quo}, // panics
-		{0, 2, 1, 2, '/', (*Float).Quo},
-		{1, 2, 0, 0, '/', (*Float).Quo}, // = +Inf
-		{2, 0, 1, 0, '/', (*Float).Quo},
-	} {
-		z := make(test.z)
-		test.op(z, make(test.x), make(test.y))
-		got := 0
-		if !z.IsInf() {
-			got = int(z.int64())
-		}
-		if got != test.want {
-			t.Errorf("%d %c %d = %d; want %d", test.x, test.opname, test.y, got, test.want)
-		}
-	}
-
-	// TODO(gri) test how precision is set for zero value results
-}
-
-func makeFloat(s string) *Float {
-	x, _, err := ParseFloat(s, 0, 1000, ToNearestEven)
-	if err != nil {
-		panic(err)
-	}
-	return x
-}
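
makeFloat relies on ParseFloat with base 0, which infers hexadecimal-mantissa versus decimal input from the string itself. A minimal sketch of the exported equivalent in the public math/big package:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Base 0 lets ParseFloat detect the base, so both the "0x...p..."
	// and decimal forms used in these tests parse the same way.
	f, base, err := big.ParseFloat("0x8001p-16", 0, 64, big.ToNearestEven)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Text('g', 10), base) // 0.5000152588 16
}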
-
-func TestFloatSetPrec(t *testing.T) {
-	for _, test := range []struct {
-		x    string
-		prec uint
-		want string
-		acc  Accuracy
-	}{
-		// prec 0
-		{"0", 0, "0", Exact},
-		{"-0", 0, "-0", Exact},
-		{"-Inf", 0, "-Inf", Exact},
-		{"+Inf", 0, "+Inf", Exact},
-		{"123", 0, "0", Below},
-		{"-123", 0, "-0", Above},
-
-		// prec at upper limit
-		{"0", MaxPrec, "0", Exact},
-		{"-0", MaxPrec, "-0", Exact},
-		{"-Inf", MaxPrec, "-Inf", Exact},
-		{"+Inf", MaxPrec, "+Inf", Exact},
-
-		// just a few regular cases - general rounding is tested elsewhere
-		{"1.5", 1, "2", Above},
-		{"-1.5", 1, "-2", Below},
-		{"123", 1e6, "123", Exact},
-		{"-123", 1e6, "-123", Exact},
-	} {
-		x := makeFloat(test.x).SetPrec(test.prec)
-		prec := test.prec
-		if prec > MaxPrec {
-			prec = MaxPrec
-		}
-		if got := x.Prec(); got != prec {
-			t.Errorf("%s.SetPrec(%d).Prec() == %d; want %d", test.x, test.prec, got, prec)
-		}
-		if got, acc := x.String(), x.Acc(); got != test.want || acc != test.acc {
-			t.Errorf("%s.SetPrec(%d) = %s (%s); want %s (%s)", test.x, test.prec, got, acc, test.want, test.acc)
-		}
-	}
-}
-
-func TestFloatMinPrec(t *testing.T) {
-	const max = 100
-	for _, test := range []struct {
-		x    string
-		want uint
-	}{
-		{"0", 0},
-		{"-0", 0},
-		{"+Inf", 0},
-		{"-Inf", 0},
-		{"1", 1},
-		{"2", 1},
-		{"3", 2},
-		{"0x8001", 16},
-		{"0x8001p-1000", 16},
-		{"0x8001p+1000", 16},
-		{"0.1", max},
-	} {
-		x := makeFloat(test.x).SetPrec(max)
-		if got := x.MinPrec(); got != test.want {
-			t.Errorf("%s.MinPrec() = %d; want %d", test.x, got, test.want)
-		}
-	}
-}
-
-func TestFloatSign(t *testing.T) {
-	for _, test := range []struct {
-		x string
-		s int
-	}{
-		{"-Inf", -1},
-		{"-1", -1},
-		{"-0", 0},
-		{"+0", 0},
-		{"+1", +1},
-		{"+Inf", +1},
-	} {
-		x := makeFloat(test.x)
-		s := x.Sign()
-		if s != test.s {
-			t.Errorf("%s.Sign() = %d; want %d", test.x, s, test.s)
-		}
-	}
-}
-
-// alike(x, y) is like x.Cmp(y) == 0 but also considers the sign of 0 (0 != -0).
-func alike(x, y *Float) bool {
-	return x.Cmp(y) == 0 && x.Signbit() == y.Signbit()
-}
-
-func alike32(x, y float32) bool {
-	// we can ignore NaNs
-	return x == y && math.Signbit(float64(x)) == math.Signbit(float64(y))
-
-}
-
-func alike64(x, y float64) bool {
-	// we can ignore NaNs
-	return x == y && math.Signbit(x) == math.Signbit(y)
-
-}
-
-func TestFloatMantExp(t *testing.T) {
-	for _, test := range []struct {
-		x    string
-		mant string
-		exp  int
-	}{
-		{"0", "0", 0},
-		{"+0", "0", 0},
-		{"-0", "-0", 0},
-		{"Inf", "+Inf", 0},
-		{"+Inf", "+Inf", 0},
-		{"-Inf", "-Inf", 0},
-		{"1.5", "0.75", 1},
-		{"1.024e3", "0.5", 11},
-		{"-0.125", "-0.5", -2},
-	} {
-		x := makeFloat(test.x)
-		mant := makeFloat(test.mant)
-		m := new(Float)
-		e := x.MantExp(m)
-		if !alike(m, mant) || e != test.exp {
-			t.Errorf("%s.MantExp() = %s, %d; want %s, %d", test.x, m.Text('g', 10), e, test.mant, test.exp)
-		}
-	}
-}
-
-func TestFloatMantExpAliasing(t *testing.T) {
-	x := makeFloat("0.5p10")
-	if e := x.MantExp(x); e != 10 {
-		t.Fatalf("Float.MantExp aliasing error: got %d; want 10", e)
-	}
-	if want := makeFloat("0.5"); !alike(x, want) {
-		t.Fatalf("Float.MantExp aliasing error: got %s; want %s", x.Text('g', 10), want.Text('g', 10))
-	}
-}
-
-func TestFloatSetMantExp(t *testing.T) {
-	for _, test := range []struct {
-		frac string
-		exp  int
-		z    string
-	}{
-		{"0", 0, "0"},
-		{"+0", 0, "0"},
-		{"-0", 0, "-0"},
-		{"Inf", 1234, "+Inf"},
-		{"+Inf", -1234, "+Inf"},
-		{"-Inf", -1234, "-Inf"},
-		{"0", MinExp, "0"},
-		{"0.25", MinExp, "+0"},    // exponent underflow
-		{"-0.25", MinExp, "-0"},   // exponent underflow
-		{"1", MaxExp, "+Inf"},     // exponent overflow
-		{"2", MaxExp - 1, "+Inf"}, // exponent overflow
-		{"0.75", 1, "1.5"},
-		{"0.5", 11, "1024"},
-		{"-0.5", -2, "-0.125"},
-		{"32", 5, "1024"},
-		{"1024", -10, "1"},
-	} {
-		frac := makeFloat(test.frac)
-		want := makeFloat(test.z)
-		var z Float
-		z.SetMantExp(frac, test.exp)
-		if !alike(&z, want) {
-			t.Errorf("SetMantExp(%s, %d) = %s; want %s", test.frac, test.exp, z.Text('g', 10), test.z)
-		}
-		// test inverse property
-		mant := new(Float)
-		if z.SetMantExp(mant, want.MantExp(mant)).Cmp(want) != 0 {
-			t.Errorf("Inverse property not satisfied: got %s; want %s", z.Text('g', 10), test.z)
-		}
-	}
-}
-
-func TestFloatPredicates(t *testing.T) {
-	for _, test := range []struct {
-		x            string
-		sign         int
-		signbit, inf bool
-	}{
-		{x: "-Inf", sign: -1, signbit: true, inf: true},
-		{x: "-1", sign: -1, signbit: true},
-		{x: "-0", signbit: true},
-		{x: "0"},
-		{x: "1", sign: 1},
-		{x: "+Inf", sign: 1, inf: true},
-	} {
-		x := makeFloat(test.x)
-		if got := x.Signbit(); got != test.signbit {
-			t.Errorf("(%s).Signbit() = %v; want %v", test.x, got, test.signbit)
-		}
-		if got := x.Sign(); got != test.sign {
-			t.Errorf("(%s).Sign() = %d; want %d", test.x, got, test.sign)
-		}
-		if got := x.IsInf(); got != test.inf {
-			t.Errorf("(%s).IsInf() = %v; want %v", test.x, got, test.inf)
-		}
-	}
-}
-
-func TestFloatIsInt(t *testing.T) {
-	for _, test := range []string{
-		"0 int",
-		"-0 int",
-		"1 int",
-		"-1 int",
-		"0.5",
-		"1.23",
-		"1.23e1",
-		"1.23e2 int",
-		"0.000000001e+8",
-		"0.000000001e+9 int",
-		"1.2345e200 int",
-		"Inf",
-		"+Inf",
-		"-Inf",
-	} {
-		s := strings.TrimSuffix(test, " int")
-		want := s != test
-		if got := makeFloat(s).IsInt(); got != want {
-			t.Errorf("%s.IsInt() == %t", s, got)
-		}
-	}
-}
-
-func fromBinary(s string) int64 {
-	x, err := strconv.ParseInt(s, 2, 64)
-	if err != nil {
-		panic(err)
-	}
-	return x
-}
-
-func toBinary(x int64) string {
-	return strconv.FormatInt(x, 2)
-}
-
-func testFloatRound(t *testing.T, x, r int64, prec uint, mode RoundingMode) {
-	// verify test data
-	var ok bool
-	switch mode {
-	case ToNearestEven, ToNearestAway:
-		ok = true // nothing to do for now
-	case ToZero:
-		if x < 0 {
-			ok = r >= x
-		} else {
-			ok = r <= x
-		}
-	case AwayFromZero:
-		if x < 0 {
-			ok = r <= x
-		} else {
-			ok = r >= x
-		}
-	case ToNegativeInf:
-		ok = r <= x
-	case ToPositiveInf:
-		ok = r >= x
-	default:
-		panic("unreachable")
-	}
-	if !ok {
-		t.Fatalf("incorrect test data for prec = %d, %s: x = %s, r = %s", prec, mode, toBinary(x), toBinary(r))
-	}
-
-	// compute expected accuracy
-	a := Exact
-	switch {
-	case r < x:
-		a = Below
-	case r > x:
-		a = Above
-	}
-
-	// round
-	f := new(Float).SetMode(mode).SetInt64(x).SetPrec(prec)
-
-	// check result
-	r1 := f.int64()
-	p1 := f.Prec()
-	a1 := f.Acc()
-	if r1 != r || p1 != prec || a1 != a {
-		t.Errorf("round %s (%d bits, %s) incorrect: got %s (%d bits, %s); want %s (%d bits, %s)",
-			toBinary(x), prec, mode,
-			toBinary(r1), p1, a1,
-			toBinary(r), prec, a)
-		return
-	}
-
-	// g and f should be the same
-	// (rounding by SetPrec after SetInt64 using default precision
-	// should be the same as rounding by SetInt64 after setting the
-	// precision)
-	g := new(Float).SetMode(mode).SetPrec(prec).SetInt64(x)
-	if !alike(g, f) {
-		t.Errorf("round %s (%d bits, %s) not symmetric: got %s and %s; want %s",
-			toBinary(x), prec, mode,
-			toBinary(g.int64()),
-			toBinary(r1),
-			toBinary(r),
-		)
-		return
-	}
-
-	// h and f should be the same
-	// (repeated rounding should be idempotent)
-	h := new(Float).SetMode(mode).SetPrec(prec).Set(f)
-	if !alike(h, f) {
-		t.Errorf("round %s (%d bits, %s) not idempotent: got %s and %s; want %s",
-			toBinary(x), prec, mode,
-			toBinary(h.int64()),
-			toBinary(r1),
-			toBinary(r),
-		)
-		return
-	}
-}
-
-// TestFloatRound tests basic rounding.
-func TestFloatRound(t *testing.T) {
-	for _, test := range []struct {
-		prec                        uint
-		x, zero, neven, naway, away string // input, results rounded to prec bits
-	}{
-		{5, "1000", "1000", "1000", "1000", "1000"},
-		{5, "1001", "1001", "1001", "1001", "1001"},
-		{5, "1010", "1010", "1010", "1010", "1010"},
-		{5, "1011", "1011", "1011", "1011", "1011"},
-		{5, "1100", "1100", "1100", "1100", "1100"},
-		{5, "1101", "1101", "1101", "1101", "1101"},
-		{5, "1110", "1110", "1110", "1110", "1110"},
-		{5, "1111", "1111", "1111", "1111", "1111"},
-
-		{4, "1000", "1000", "1000", "1000", "1000"},
-		{4, "1001", "1001", "1001", "1001", "1001"},
-		{4, "1010", "1010", "1010", "1010", "1010"},
-		{4, "1011", "1011", "1011", "1011", "1011"},
-		{4, "1100", "1100", "1100", "1100", "1100"},
-		{4, "1101", "1101", "1101", "1101", "1101"},
-		{4, "1110", "1110", "1110", "1110", "1110"},
-		{4, "1111", "1111", "1111", "1111", "1111"},
-
-		{3, "1000", "1000", "1000", "1000", "1000"},
-		{3, "1001", "1000", "1000", "1010", "1010"},
-		{3, "1010", "1010", "1010", "1010", "1010"},
-		{3, "1011", "1010", "1100", "1100", "1100"},
-		{3, "1100", "1100", "1100", "1100", "1100"},
-		{3, "1101", "1100", "1100", "1110", "1110"},
-		{3, "1110", "1110", "1110", "1110", "1110"},
-		{3, "1111", "1110", "10000", "10000", "10000"},
-
-		{3, "1000001", "1000000", "1000000", "1000000", "1010000"},
-		{3, "1001001", "1000000", "1010000", "1010000", "1010000"},
-		{3, "1010001", "1010000", "1010000", "1010000", "1100000"},
-		{3, "1011001", "1010000", "1100000", "1100000", "1100000"},
-		{3, "1100001", "1100000", "1100000", "1100000", "1110000"},
-		{3, "1101001", "1100000", "1110000", "1110000", "1110000"},
-		{3, "1110001", "1110000", "1110000", "1110000", "10000000"},
-		{3, "1111001", "1110000", "10000000", "10000000", "10000000"},
-
-		{2, "1000", "1000", "1000", "1000", "1000"},
-		{2, "1001", "1000", "1000", "1000", "1100"},
-		{2, "1010", "1000", "1000", "1100", "1100"},
-		{2, "1011", "1000", "1100", "1100", "1100"},
-		{2, "1100", "1100", "1100", "1100", "1100"},
-		{2, "1101", "1100", "1100", "1100", "10000"},
-		{2, "1110", "1100", "10000", "10000", "10000"},
-		{2, "1111", "1100", "10000", "10000", "10000"},
-
-		{2, "1000001", "1000000", "1000000", "1000000", "1100000"},
-		{2, "1001001", "1000000", "1000000", "1000000", "1100000"},
-		{2, "1010001", "1000000", "1100000", "1100000", "1100000"},
-		{2, "1011001", "1000000", "1100000", "1100000", "1100000"},
-		{2, "1100001", "1100000", "1100000", "1100000", "10000000"},
-		{2, "1101001", "1100000", "1100000", "1100000", "10000000"},
-		{2, "1110001", "1100000", "10000000", "10000000", "10000000"},
-		{2, "1111001", "1100000", "10000000", "10000000", "10000000"},
-
-		{1, "1000", "1000", "1000", "1000", "1000"},
-		{1, "1001", "1000", "1000", "1000", "10000"},
-		{1, "1010", "1000", "1000", "1000", "10000"},
-		{1, "1011", "1000", "1000", "1000", "10000"},
-		{1, "1100", "1000", "10000", "10000", "10000"},
-		{1, "1101", "1000", "10000", "10000", "10000"},
-		{1, "1110", "1000", "10000", "10000", "10000"},
-		{1, "1111", "1000", "10000", "10000", "10000"},
-
-		{1, "1000001", "1000000", "1000000", "1000000", "10000000"},
-		{1, "1001001", "1000000", "1000000", "1000000", "10000000"},
-		{1, "1010001", "1000000", "1000000", "1000000", "10000000"},
-		{1, "1011001", "1000000", "1000000", "1000000", "10000000"},
-		{1, "1100001", "1000000", "10000000", "10000000", "10000000"},
-		{1, "1101001", "1000000", "10000000", "10000000", "10000000"},
-		{1, "1110001", "1000000", "10000000", "10000000", "10000000"},
-		{1, "1111001", "1000000", "10000000", "10000000", "10000000"},
-	} {
-		x := fromBinary(test.x)
-		z := fromBinary(test.zero)
-		e := fromBinary(test.neven)
-		n := fromBinary(test.naway)
-		a := fromBinary(test.away)
-		prec := test.prec
-
-		testFloatRound(t, x, z, prec, ToZero)
-		testFloatRound(t, x, e, prec, ToNearestEven)
-		testFloatRound(t, x, n, prec, ToNearestAway)
-		testFloatRound(t, x, a, prec, AwayFromZero)
-
-		testFloatRound(t, x, z, prec, ToNegativeInf)
-		testFloatRound(t, x, a, prec, ToPositiveInf)
-
-		testFloatRound(t, -x, -a, prec, ToNegativeInf)
-		testFloatRound(t, -x, -z, prec, ToPositiveInf)
-	}
-}
-
-// TestFloatRound24 tests that rounding a float64 to 24 bits
-// matches IEEE-754 rounding to nearest when converting a
-// float64 to a float32 (excluding denormal numbers).
-func TestFloatRound24(t *testing.T) {
-	const x0 = 1<<26 - 0x10 // 11...110000 (26 bits)
-	for d := 0; d <= 0x10; d++ {
-		x := float64(x0 + d)
-		f := new(Float).SetPrec(24).SetFloat64(x)
-		got, _ := f.Float32()
-		want := float32(x)
-		if got != want {
-			t.Errorf("Round(%g, 24) = %g; want %g", x, got, want)
-		}
-	}
-}
-
-func TestFloatSetUint64(t *testing.T) {
-	for _, want := range []uint64{
-		0,
-		1,
-		2,
-		10,
-		100,
-		1<<32 - 1,
-		1 << 32,
-		1<<64 - 1,
-	} {
-		var f Float
-		f.SetUint64(want)
-		if got := f.uint64(); got != want {
-			t.Errorf("got %#x (%s); want %#x", got, f.Text('p', 0), want)
-		}
-	}
-
-	// test basic rounding behavior (exhaustive rounding testing is done elsewhere)
-	const x uint64 = 0x8765432187654321 // 64 bits needed
-	for prec := uint(1); prec <= 64; prec++ {
-		f := new(Float).SetPrec(prec).SetMode(ToZero).SetUint64(x)
-		got := f.uint64()
-		want := x &^ (1<<(64-prec) - 1) // cut off (round to zero) low 64-prec bits
-		if got != want {
-			t.Errorf("got %#x (%s); want %#x", got, f.Text('p', 0), want)
-		}
-	}
-}
-
-func TestFloatSetInt64(t *testing.T) {
-	for _, want := range []int64{
-		0,
-		1,
-		2,
-		10,
-		100,
-		1<<32 - 1,
-		1 << 32,
-		1<<63 - 1,
-	} {
-		for i := range [2]int{} {
-			if i&1 != 0 {
-				want = -want
-			}
-			var f Float
-			f.SetInt64(want)
-			if got := f.int64(); got != want {
-				t.Errorf("got %#x (%s); want %#x", got, f.Text('p', 0), want)
-			}
-		}
-	}
-
-	// test basic rounding behavior (exhaustive rounding testing is done elsewhere)
-	const x int64 = 0x7654321076543210 // 63 bits needed
-	for prec := uint(1); prec <= 63; prec++ {
-		f := new(Float).SetPrec(prec).SetMode(ToZero).SetInt64(x)
-		got := f.int64()
-		want := x &^ (1<<(63-prec) - 1) // cut off (round to zero) low 63-prec bits
-		if got != want {
-			t.Errorf("got %#x (%s); want %#x", got, f.Text('p', 0), want)
-		}
-	}
-}
-
-func TestFloatSetFloat64(t *testing.T) {
-	for _, want := range []float64{
-		0,
-		1,
-		2,
-		12345,
-		1e10,
-		1e100,
-		3.14159265e10,
-		2.718281828e-123,
-		1.0 / 3,
-		math.MaxFloat32,
-		math.MaxFloat64,
-		math.SmallestNonzeroFloat32,
-		math.SmallestNonzeroFloat64,
-		math.Inf(-1),
-		math.Inf(0),
-		-math.Inf(1),
-	} {
-		for i := range [2]int{} {
-			if i&1 != 0 {
-				want = -want
-			}
-			var f Float
-			f.SetFloat64(want)
-			if got, acc := f.Float64(); got != want || acc != Exact {
-				t.Errorf("got %g (%s, %s); want %g (Exact)", got, f.Text('p', 0), acc, want)
-			}
-		}
-	}
-
-	// test basic rounding behavior (exhaustive rounding testing is done elsewhere)
-	const x uint64 = 0x8765432143218 // 53 bits needed
-	for prec := uint(1); prec <= 52; prec++ {
-		f := new(Float).SetPrec(prec).SetMode(ToZero).SetFloat64(float64(x))
-		got, _ := f.Float64()
-		want := float64(x &^ (1<<(52-prec) - 1)) // cut off (round to zero) low 53-prec bits
-		if got != want {
-			t.Errorf("got %g (%s); want %g", got, f.Text('p', 0), want)
-		}
-	}
-
-	// test NaN
-	defer func() {
-		if p, ok := recover().(ErrNaN); !ok {
-			t.Errorf("got %v; want ErrNaN panic", p)
-		}
-	}()
-	var f Float
-	f.SetFloat64(math.NaN())
-	// should not reach here
-	t.Errorf("got %s; want ErrNaN panic", f.Text('p', 0))
-}
-
-func TestFloatSetInt(t *testing.T) {
-	for _, want := range []string{
-		"0",
-		"1",
-		"-1",
-		"1234567890",
-		"123456789012345678901234567890",
-		"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890",
-	} {
-		var x Int
-		_, ok := x.SetString(want, 0)
-		if !ok {
-			t.Errorf("invalid integer %s", want)
-			continue
-		}
-		n := x.BitLen()
-
-		var f Float
-		f.SetInt(&x)
-
-		// check precision
-		if n < 64 {
-			n = 64
-		}
-		if prec := f.Prec(); prec != uint(n) {
-			t.Errorf("got prec = %d; want %d", prec, n)
-		}
-
-		// check value
-		got := f.Text('g', 100)
-		if got != want {
-			t.Errorf("got %s (%s); want %s", got, f.Text('p', 0), want)
-		}
-	}
-
-	// TODO(gri) test basic rounding behavior
-}
-
-func TestFloatSetRat(t *testing.T) {
-	for _, want := range []string{
-		"0",
-		"1",
-		"-1",
-		"1234567890",
-		"123456789012345678901234567890",
-		"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890",
-		"1.2",
-		"3.14159265",
-		// TODO(gri) expand
-	} {
-		var x Rat
-		_, ok := x.SetString(want)
-		if !ok {
-			t.Errorf("invalid fraction %s", want)
-			continue
-		}
-		n := max(x.Num().BitLen(), x.Denom().BitLen())
-
-		var f1, f2 Float
-		f2.SetPrec(1000)
-		f1.SetRat(&x)
-		f2.SetRat(&x)
-
-		// check precision when set automatically
-		if n < 64 {
-			n = 64
-		}
-		if prec := f1.Prec(); prec != uint(n) {
-			t.Errorf("got prec = %d; want %d", prec, n)
-		}
-
-		got := f2.Text('g', 100)
-		if got != want {
-			t.Errorf("got %s (%s); want %s", got, f2.Text('p', 0), want)
-		}
-	}
-}
-
-func TestFloatSetInf(t *testing.T) {
-	var f Float
-	for _, test := range []struct {
-		signbit bool
-		prec    uint
-		want    string
-	}{
-		{false, 0, "+Inf"},
-		{true, 0, "-Inf"},
-		{false, 10, "+Inf"},
-		{true, 30, "-Inf"},
-	} {
-		x := f.SetPrec(test.prec).SetInf(test.signbit)
-		if got := x.String(); got != test.want || x.Prec() != test.prec {
-			t.Errorf("SetInf(%v) = %s (prec = %d); want %s (prec = %d)", test.signbit, got, x.Prec(), test.want, test.prec)
-		}
-	}
-}
-
-func TestFloatUint64(t *testing.T) {
-	for _, test := range []struct {
-		x   string
-		out uint64
-		acc Accuracy
-	}{
-		{"-Inf", 0, Above},
-		{"-1", 0, Above},
-		{"-1e-1000", 0, Above},
-		{"-0", 0, Exact},
-		{"0", 0, Exact},
-		{"1e-1000", 0, Below},
-		{"1", 1, Exact},
-		{"1.000000000000000000001", 1, Below},
-		{"12345.0", 12345, Exact},
-		{"12345.000000000000000000001", 12345, Below},
-		{"18446744073709551615", 18446744073709551615, Exact},
-		{"18446744073709551615.000000000000000000001", math.MaxUint64, Below},
-		{"18446744073709551616", math.MaxUint64, Below},
-		{"1e10000", math.MaxUint64, Below},
-		{"+Inf", math.MaxUint64, Below},
-	} {
-		x := makeFloat(test.x)
-		out, acc := x.Uint64()
-		if out != test.out || acc != test.acc {
-			t.Errorf("%s: got %d (%s); want %d (%s)", test.x, out, acc, test.out, test.acc)
-		}
-	}
-}
-
-func TestFloatInt64(t *testing.T) {
-	for _, test := range []struct {
-		x   string
-		out int64
-		acc Accuracy
-	}{
-		{"-Inf", math.MinInt64, Above},
-		{"-1e10000", math.MinInt64, Above},
-		{"-9223372036854775809", math.MinInt64, Above},
-		{"-9223372036854775808.000000000000000000001", math.MinInt64, Above},
-		{"-9223372036854775808", -9223372036854775808, Exact},
-		{"-9223372036854775807.000000000000000000001", -9223372036854775807, Above},
-		{"-9223372036854775807", -9223372036854775807, Exact},
-		{"-12345.000000000000000000001", -12345, Above},
-		{"-12345.0", -12345, Exact},
-		{"-1.000000000000000000001", -1, Above},
-		{"-1.5", -1, Above},
-		{"-1", -1, Exact},
-		{"-1e-1000", 0, Above},
-		{"0", 0, Exact},
-		{"1e-1000", 0, Below},
-		{"1", 1, Exact},
-		{"1.000000000000000000001", 1, Below},
-		{"1.5", 1, Below},
-		{"12345.0", 12345, Exact},
-		{"12345.000000000000000000001", 12345, Below},
-		{"9223372036854775807", 9223372036854775807, Exact},
-		{"9223372036854775807.000000000000000000001", math.MaxInt64, Below},
-		{"9223372036854775808", math.MaxInt64, Below},
-		{"1e10000", math.MaxInt64, Below},
-		{"+Inf", math.MaxInt64, Below},
-	} {
-		x := makeFloat(test.x)
-		out, acc := x.Int64()
-		if out != test.out || acc != test.acc {
-			t.Errorf("%s: got %d (%s); want %d (%s)", test.x, out, acc, test.out, test.acc)
-		}
-	}
-}
-
-func TestFloatFloat32(t *testing.T) {
-	for _, test := range []struct {
-		x   string
-		out float32
-		acc Accuracy
-	}{
-		{"0", 0, Exact},
-
-		// underflow to zero
-		{"1e-1000", 0, Below},
-		{"0x0.000002p-127", 0, Below},
-		{"0x.0000010p-126", 0, Below},
-
-		// denormals
-		{"1.401298464e-45", math.SmallestNonzeroFloat32, Above}, // rounded up to smallest denormal
-		{"0x.ffffff8p-149", math.SmallestNonzeroFloat32, Above}, // rounded up to smallest denormal
-		{"0x.0000018p-126", math.SmallestNonzeroFloat32, Above}, // rounded up to smallest denormal
-		{"0x.0000020p-126", math.SmallestNonzeroFloat32, Exact},
-		{"0x.8p-148", math.SmallestNonzeroFloat32, Exact},
-		{"1p-149", math.SmallestNonzeroFloat32, Exact},
-		{"0x.fffffep-126", math.Float32frombits(0x7fffff), Exact}, // largest denormal
-
-		// special denormal cases (see issues 14553, 14651)
-		{"0x0.0000001p-126", math.Float32frombits(0x00000000), Below}, // underflow to zero
-		{"0x0.0000008p-126", math.Float32frombits(0x00000000), Below}, // underflow to zero
-		{"0x0.0000010p-126", math.Float32frombits(0x00000000), Below}, // rounded down to even
-		{"0x0.0000011p-126", math.Float32frombits(0x00000001), Above}, // rounded up to smallest denormal
-		{"0x0.0000018p-126", math.Float32frombits(0x00000001), Above}, // rounded up to smallest denormal
-
-		{"0x1.0000000p-149", math.Float32frombits(0x00000001), Exact}, // smallest denormal
-		{"0x0.0000020p-126", math.Float32frombits(0x00000001), Exact}, // smallest denormal
-		{"0x0.fffffe0p-126", math.Float32frombits(0x007fffff), Exact}, // largest denormal
-		{"0x1.0000000p-126", math.Float32frombits(0x00800000), Exact}, // smallest normal
-
-		{"0x0.8p-149", math.Float32frombits(0x000000000), Below}, // rounded down to even
-		{"0x0.9p-149", math.Float32frombits(0x000000001), Above}, // rounded up to smallest denormal
-		{"0x0.ap-149", math.Float32frombits(0x000000001), Above}, // rounded up to smallest denormal
-		{"0x0.bp-149", math.Float32frombits(0x000000001), Above}, // rounded up to smallest denormal
-		{"0x0.cp-149", math.Float32frombits(0x000000001), Above}, // rounded up to smallest denormal
-
-		{"0x1.0p-149", math.Float32frombits(0x000000001), Exact}, // smallest denormal
-		{"0x1.7p-149", math.Float32frombits(0x000000001), Below},
-		{"0x1.8p-149", math.Float32frombits(0x000000002), Above},
-		{"0x1.9p-149", math.Float32frombits(0x000000002), Above},
-
-		{"0x2.0p-149", math.Float32frombits(0x000000002), Exact},
-		{"0x2.8p-149", math.Float32frombits(0x000000002), Below}, // rounded down to even
-		{"0x2.9p-149", math.Float32frombits(0x000000003), Above},
-
-		{"0x3.0p-149", math.Float32frombits(0x000000003), Exact},
-		{"0x3.7p-149", math.Float32frombits(0x000000003), Below},
-		{"0x3.8p-149", math.Float32frombits(0x000000004), Above}, // rounded up to even
-
-		{"0x4.0p-149", math.Float32frombits(0x000000004), Exact},
-		{"0x4.8p-149", math.Float32frombits(0x000000004), Below}, // rounded down to even
-		{"0x4.9p-149", math.Float32frombits(0x000000005), Above},
-
-		// specific case from issue 14553
-		{"0x7.7p-149", math.Float32frombits(0x000000007), Below},
-		{"0x7.8p-149", math.Float32frombits(0x000000008), Above},
-		{"0x7.9p-149", math.Float32frombits(0x000000008), Above},
-
-		// normals
-		{"0x.ffffffp-126", math.Float32frombits(0x00800000), Above}, // rounded up to smallest normal
-		{"1p-126", math.Float32frombits(0x00800000), Exact},         // smallest normal
-		{"0x1.fffffep-126", math.Float32frombits(0x00ffffff), Exact},
-		{"0x1.ffffffp-126", math.Float32frombits(0x01000000), Above}, // rounded up
-		{"1", 1, Exact},
-		{"1.000000000000000000001", 1, Below},
-		{"12345.0", 12345, Exact},
-		{"12345.000000000000000000001", 12345, Below},
-		{"0x1.fffffe0p127", math.MaxFloat32, Exact},
-		{"0x1.fffffe8p127", math.MaxFloat32, Below},
-
-		// overflow
-		{"0x1.ffffff0p127", float32(math.Inf(+1)), Above},
-		{"0x1p128", float32(math.Inf(+1)), Above},
-		{"1e10000", float32(math.Inf(+1)), Above},
-		{"0x1.ffffff0p2147483646", float32(math.Inf(+1)), Above}, // overflow in rounding
-
-		// inf
-		{"Inf", float32(math.Inf(+1)), Exact},
-	} {
-		for i := 0; i < 2; i++ {
-			// test both signs
-			tx, tout, tacc := test.x, test.out, test.acc
-			if i != 0 {
-				tx = "-" + tx
-				tout = -tout
-				tacc = -tacc
-			}
-
-			// conversion should match strconv where syntax is agreeable
-			if f, err := strconv.ParseFloat(tx, 32); err == nil && !alike32(float32(f), tout) {
-				t.Errorf("%s: got %g; want %g (incorrect test data)", tx, f, tout)
-			}
-
-			x := makeFloat(tx)
-			out, acc := x.Float32()
-			if !alike32(out, tout) || acc != tacc {
-				t.Errorf("%s: got %g (%#08x, %s); want %g (%#08x, %s)", tx, out, math.Float32bits(out), acc, test.out, math.Float32bits(test.out), tacc)
-			}
-
-			// test that x.SetFloat64(float64(f)).Float32() == f
-			var x2 Float
-			out2, acc2 := x2.SetFloat64(float64(out)).Float32()
-			if !alike32(out2, out) || acc2 != Exact {
-				t.Errorf("idempotency test: got %g (%s); want %g (Exact)", out2, acc2, out)
-			}
-		}
-	}
-}
-
-func TestFloatFloat64(t *testing.T) {
-	const smallestNormalFloat64 = 2.2250738585072014e-308 // 1p-1022
-	for _, test := range []struct {
-		x   string
-		out float64
-		acc Accuracy
-	}{
-		{"0", 0, Exact},
-
-		// underflow to zero
-		{"1e-1000", 0, Below},
-		{"0x0.0000000000001p-1023", 0, Below},
-		{"0x0.00000000000008p-1022", 0, Below},
-
-		// denormals
-		{"0x0.0000000000000cp-1022", math.SmallestNonzeroFloat64, Above}, // rounded up to smallest denormal
-		{"0x0.00000000000010p-1022", math.SmallestNonzeroFloat64, Exact}, // smallest denormal
-		{"0x.8p-1073", math.SmallestNonzeroFloat64, Exact},
-		{"1p-1074", math.SmallestNonzeroFloat64, Exact},
-		{"0x.fffffffffffffp-1022", math.Float64frombits(0x000fffffffffffff), Exact}, // largest denormal
-
-		// special denormal cases (see issues 14553, 14651)
-		{"0x0.00000000000001p-1022", math.Float64frombits(0x00000000000000000), Below}, // underflow to zero
-		{"0x0.00000000000004p-1022", math.Float64frombits(0x00000000000000000), Below}, // underflow to zero
-		{"0x0.00000000000008p-1022", math.Float64frombits(0x00000000000000000), Below}, // rounded down to even
-		{"0x0.00000000000009p-1022", math.Float64frombits(0x00000000000000001), Above}, // rounded up to smallest denormal
-		{"0x0.0000000000000ap-1022", math.Float64frombits(0x00000000000000001), Above}, // rounded up to smallest denormal
-
-		{"0x0.8p-1074", math.Float64frombits(0x00000000000000000), Below}, // rounded down to even
-		{"0x0.9p-1074", math.Float64frombits(0x00000000000000001), Above}, // rounded up to smallest denormal
-		{"0x0.ap-1074", math.Float64frombits(0x00000000000000001), Above}, // rounded up to smallest denormal
-		{"0x0.bp-1074", math.Float64frombits(0x00000000000000001), Above}, // rounded up to smallest denormal
-		{"0x0.cp-1074", math.Float64frombits(0x00000000000000001), Above}, // rounded up to smallest denormal
-
-		{"0x1.0p-1074", math.Float64frombits(0x00000000000000001), Exact},
-		{"0x1.7p-1074", math.Float64frombits(0x00000000000000001), Below},
-		{"0x1.8p-1074", math.Float64frombits(0x00000000000000002), Above},
-		{"0x1.9p-1074", math.Float64frombits(0x00000000000000002), Above},
-
-		{"0x2.0p-1074", math.Float64frombits(0x00000000000000002), Exact},
-		{"0x2.8p-1074", math.Float64frombits(0x00000000000000002), Below}, // rounded down to even
-		{"0x2.9p-1074", math.Float64frombits(0x00000000000000003), Above},
-
-		{"0x3.0p-1074", math.Float64frombits(0x00000000000000003), Exact},
-		{"0x3.7p-1074", math.Float64frombits(0x00000000000000003), Below},
-		{"0x3.8p-1074", math.Float64frombits(0x00000000000000004), Above}, // rounded up to even
-
-		{"0x4.0p-1074", math.Float64frombits(0x00000000000000004), Exact},
-		{"0x4.8p-1074", math.Float64frombits(0x00000000000000004), Below}, // rounded down to even
-		{"0x4.9p-1074", math.Float64frombits(0x00000000000000005), Above},
-
-		// normals
-		{"0x.fffffffffffff8p-1022", math.Float64frombits(0x0010000000000000), Above}, // rounded up to smallest normal
-		{"1p-1022", math.Float64frombits(0x0010000000000000), Exact},                 // smallest normal
-		{"1", 1, Exact},
-		{"1.000000000000000000001", 1, Below},
-		{"12345.0", 12345, Exact},
-		{"12345.000000000000000000001", 12345, Below},
-		{"0x1.fffffffffffff0p1023", math.MaxFloat64, Exact},
-		{"0x1.fffffffffffff4p1023", math.MaxFloat64, Below},
-
-		// overflow
-		{"0x1.fffffffffffff8p1023", math.Inf(+1), Above},
-		{"0x1p1024", math.Inf(+1), Above},
-		{"1e10000", math.Inf(+1), Above},
-		{"0x1.fffffffffffff8p2147483646", math.Inf(+1), Above}, // overflow in rounding
-		{"Inf", math.Inf(+1), Exact},
-
-		// selected denormalized values that were handled incorrectly in the past
-		{"0x.fffffffffffffp-1022", smallestNormalFloat64 - math.SmallestNonzeroFloat64, Exact},
-		{"4503599627370495p-1074", smallestNormalFloat64 - math.SmallestNonzeroFloat64, Exact},
-
-		// http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
-		{"2.2250738585072011e-308", 2.225073858507201e-308, Below},
-		// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
-		{"2.2250738585072012e-308", 2.2250738585072014e-308, Above},
-	} {
-		for i := 0; i < 2; i++ {
-			// test both signs
-			tx, tout, tacc := test.x, test.out, test.acc
-			if i != 0 {
-				tx = "-" + tx
-				tout = -tout
-				tacc = -tacc
-			}
-
-			// conversion should match strconv where syntax is agreeable
-			if f, err := strconv.ParseFloat(tx, 64); err == nil && !alike64(f, tout) {
-				t.Errorf("%s: got %g; want %g (incorrect test data)", tx, f, tout)
-			}
-
-			x := makeFloat(tx)
-			out, acc := x.Float64()
-			if !alike64(out, tout) || acc != tacc {
-				t.Errorf("%s: got %g (%#016x, %s); want %g (%#016x, %s)", tx, out, math.Float64bits(out), acc, test.out, math.Float64bits(test.out), tacc)
-			}
-
-			// test that x.SetFloat64(f).Float64() == f
-			var x2 Float
-			out2, acc2 := x2.SetFloat64(out).Float64()
-			if !alike64(out2, out) || acc2 != Exact {
-				t.Errorf("idempotency test: got %g (%s); want %g (Exact)", out2, acc2, out)
-			}
-		}
-	}
-}
-
-func TestFloatInt(t *testing.T) {
-	for _, test := range []struct {
-		x    string
-		want string
-		acc  Accuracy
-	}{
-		{"0", "0", Exact},
-		{"+0", "0", Exact},
-		{"-0", "0", Exact},
-		{"Inf", "nil", Below},
-		{"+Inf", "nil", Below},
-		{"-Inf", "nil", Above},
-		{"1", "1", Exact},
-		{"-1", "-1", Exact},
-		{"1.23", "1", Below},
-		{"-1.23", "-1", Above},
-		{"123e-2", "1", Below},
-		{"123e-3", "0", Below},
-		{"123e-4", "0", Below},
-		{"1e-1000", "0", Below},
-		{"-1e-1000", "0", Above},
-		{"1e+10", "10000000000", Exact},
-		{"1e+100", "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", Exact},
-	} {
-		x := makeFloat(test.x)
-		res, acc := x.Int(nil)
-		got := "nil"
-		if res != nil {
-			got = res.String()
-		}
-		if got != test.want || acc != test.acc {
-			t.Errorf("%s: got %s (%s); want %s (%s)", test.x, got, acc, test.want, test.acc)
-		}
-	}
-
-	// check that supplied *Int is used
-	for _, f := range []string{"0", "1", "-1", "1234"} {
-		x := makeFloat(f)
-		i := new(Int)
-		if res, _ := x.Int(i); res != i {
-			t.Errorf("(%s).Int is not using supplied *Int", f)
-		}
-	}
-}
-
-func TestFloatRat(t *testing.T) {
-	for _, test := range []struct {
-		x, want string
-		acc     Accuracy
-	}{
-		{"0", "0/1", Exact},
-		{"+0", "0/1", Exact},
-		{"-0", "0/1", Exact},
-		{"Inf", "nil", Below},
-		{"+Inf", "nil", Below},
-		{"-Inf", "nil", Above},
-		{"1", "1/1", Exact},
-		{"-1", "-1/1", Exact},
-		{"1.25", "5/4", Exact},
-		{"-1.25", "-5/4", Exact},
-		{"1e10", "10000000000/1", Exact},
-		{"1p10", "1024/1", Exact},
-		{"-1p-10", "-1/1024", Exact},
-		{"3.14159265", "7244019449799623199/2305843009213693952", Exact},
-	} {
-		x := makeFloat(test.x).SetPrec(64)
-		res, acc := x.Rat(nil)
-		got := "nil"
-		if res != nil {
-			got = res.String()
-		}
-		if got != test.want {
-			t.Errorf("%s: got %s; want %s", test.x, got, test.want)
-			continue
-		}
-		if acc != test.acc {
-			t.Errorf("%s: got %s; want %s", test.x, acc, test.acc)
-			continue
-		}
-
-		// inverse conversion
-		if res != nil {
-			got := new(Float).SetPrec(64).SetRat(res)
-			if got.Cmp(x) != 0 {
-				t.Errorf("%s: got %s; want %s", test.x, got, x)
-			}
-		}
-	}
-
-	// check that supplied *Rat is used
-	for _, f := range []string{"0", "1", "-1", "1234"} {
-		x := makeFloat(f)
-		r := new(Rat)
-		if res, _ := x.Rat(r); res != r {
-			t.Errorf("(%s).Rat is not using supplied *Rat", f)
-		}
-	}
-}
-
-func TestFloatAbs(t *testing.T) {
-	for _, test := range []string{
-		"0",
-		"1",
-		"1234",
-		"1.23e-2",
-		"1e-1000",
-		"1e1000",
-		"Inf",
-	} {
-		p := makeFloat(test)
-		a := new(Float).Abs(p)
-		if !alike(a, p) {
-			t.Errorf("%s: got %s; want %s", test, a.Text('g', 10), test)
-		}
-
-		n := makeFloat("-" + test)
-		a.Abs(n)
-		if !alike(a, p) {
-			t.Errorf("-%s: got %s; want %s", test, a.Text('g', 10), test)
-		}
-	}
-}
-
-func TestFloatNeg(t *testing.T) {
-	for _, test := range []string{
-		"0",
-		"1",
-		"1234",
-		"1.23e-2",
-		"1e-1000",
-		"1e1000",
-		"Inf",
-	} {
-		p1 := makeFloat(test)
-		n1 := makeFloat("-" + test)
-		n2 := new(Float).Neg(p1)
-		p2 := new(Float).Neg(n2)
-		if !alike(n2, n1) {
-			t.Errorf("%s: got %s; want %s", test, n2.Text('g', 10), n1.Text('g', 10))
-		}
-		if !alike(p2, p1) {
-			t.Errorf("%s: got %s; want %s", test, p2.Text('g', 10), p1.Text('g', 10))
-		}
-	}
-}
-
-func TestFloatInc(t *testing.T) {
-	const n = 10
-	for _, prec := range precList {
-		if 1<<prec < n {
-			continue // prec must be large enough to hold all numbers from 0 to n
-		}
-		var x, one Float
-		x.SetPrec(prec)
-		one.SetInt64(1)
-		for i := 0; i < n; i++ {
-			x.Add(&x, &one)
-		}
-		if x.Cmp(new(Float).SetInt64(n)) != 0 {
-			t.Errorf("prec = %d: got %s; want %d", prec, &x, n)
-		}
-	}
-}
-
-// Selected precisions with which to run various tests.
-var precList = [...]uint{1, 2, 5, 8, 10, 16, 23, 24, 32, 50, 53, 64, 100, 128, 500, 511, 512, 513, 1000, 10000}
-
-// Selected bits with which to run various tests.
-// Each entry is a list of bits representing a floating-point number (see fromBits).
-var bitsList = [...]Bits{
-	{},           // = 0
-	{0},          // = 1
-	{1},          // = 2
-	{-1},         // = 1/2
-	{10},         // = 2**10 == 1024
-	{-10},        // = 2**-10 == 1/1024
-	{100, 10, 1}, // = 2**100 + 2**10 + 2**1
-	{0, -1, -2, -10},
-	// TODO(gri) add more test cases
-}
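
A rough standalone sketch of the Bits convention above (bitsValue is a hypothetical helper for illustration only, not the test's own fromBits/Float machinery): each entry is a binary exponent e, and the represented value is the sum of 2**e over all entries.

package main

import (
	"fmt"
	"math/big"
)

// bitsValue interprets a list of exponents as Σ 2**e, the same reading the
// bitsList comment above gives for each Bits entry.
func bitsValue(exps []int) *big.Float {
	sum := new(big.Float).SetPrec(200) // generous precision, exact for these examples
	t := new(big.Float).SetPrec(200)
	for _, e := range exps {
		t.SetInt64(1)
		t.SetMantExp(t, e) // t = 2**e
		sum.Add(sum, t)
	}
	return sum
}

func main() {
	fmt.Println(bitsValue([]int{100, 10, 1})) // 2**100 + 2**10 + 2**1
	fmt.Println(bitsValue([]int{-1}))         // 0.5
	fmt.Println(bitsValue(nil))               // 0
}
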
-
-// TestFloatAdd tests Float.Add/Sub by comparing the result of a "manual"
-// addition/subtraction of arguments represented by Bits values with the
-// respective Float addition/subtraction for a variety of precisions
-// and rounding modes.
-func TestFloatAdd(t *testing.T) {
-	for _, xbits := range bitsList {
-		for _, ybits := range bitsList {
-			// exact values
-			x := xbits.Float()
-			y := ybits.Float()
-			zbits := xbits.add(ybits)
-			z := zbits.Float()
-
-			for i, mode := range [...]RoundingMode{ToZero, ToNearestEven, AwayFromZero} {
-				for _, prec := range precList {
-					got := new(Float).SetPrec(prec).SetMode(mode)
-					got.Add(x, y)
-					want := zbits.round(prec, mode)
-					if got.Cmp(want) != 0 {
-						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t+    %s %v\n\t=    %s\n\twant %s",
-							i, prec, mode, x, xbits, y, ybits, got, want)
-					}
-
-					got.Sub(z, x)
-					want = ybits.round(prec, mode)
-					if got.Cmp(want) != 0 {
-						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t-    %s %v\n\t=    %s\n\twant %s",
-							i, prec, mode, z, zbits, x, xbits, got, want)
-					}
-				}
-			}
-		}
-	}
-}
-
-// TestFloatAdd32 tests that Float.Add/Sub of numbers with
-// 24bit mantissa behaves like float32 addition/subtraction
-// (excluding denormal numbers).
-func TestFloatAdd32(t *testing.T) {
-	// choose a base such that we cross the mantissa precision limit
-	const base = 1<<26 - 0x10 // 11...110000 (26 bits)
-	for d := 0; d <= 0x10; d++ {
-		for i := range [2]int{} {
-			x0, y0 := float64(base), float64(d)
-			if i&1 != 0 {
-				x0, y0 = y0, x0
-			}
-
-			x := NewFloat(x0)
-			y := NewFloat(y0)
-			z := new(Float).SetPrec(24)
-
-			z.Add(x, y)
-			got, acc := z.Float32()
-			want := float32(y0) + float32(x0)
-			if got != want || acc != Exact {
-				t.Errorf("d = %d: %g + %g = %g (%s); want %g (Exact)", d, x0, y0, got, acc, want)
-			}
-
-			z.Sub(z, y)
-			got, acc = z.Float32()
-			want = float32(want) - float32(y0)
-			if got != want || acc != Exact {
-				t.Errorf("d = %d: %g - %g = %g (%s); want %g (Exact)", d, x0+y0, y0, got, acc, want)
-			}
-		}
-	}
-}
-
-// TestFloatAdd64 tests that Float.Add/Sub of numbers with
-// 53bit mantissa behaves like float64 addition/subtraction.
-func TestFloatAdd64(t *testing.T) {
-	// choose a base such that we cross the mantissa precision limit
-	const base = 1<<55 - 0x10 // 11...110000 (55 bits)
-	for d := 0; d <= 0x10; d++ {
-		for i := range [2]int{} {
-			x0, y0 := float64(base), float64(d)
-			if i&1 != 0 {
-				x0, y0 = y0, x0
-			}
-
-			x := NewFloat(x0)
-			y := NewFloat(y0)
-			z := new(Float).SetPrec(53)
-
-			z.Add(x, y)
-			got, acc := z.Float64()
-			want := x0 + y0
-			if got != want || acc != Exact {
-				t.Errorf("d = %d: %g + %g = %g (%s); want %g (Exact)", d, x0, y0, got, acc, want)
-			}
-
-			z.Sub(z, y)
-			got, acc = z.Float64()
-			want -= y0
-			if got != want || acc != Exact {
-				t.Errorf("d = %d: %g - %g = %g (%s); want %g (Exact)", d, x0+y0, y0, got, acc, want)
-			}
-		}
-	}
-}
-
-// TestFloatMul tests Float.Mul/Quo by comparing the result of a "manual"
-// multiplication/division of arguments represented by Bits values with the
-// respective Float multiplication/division for a variety of precisions
-// and rounding modes.
-func TestFloatMul(t *testing.T) {
-	for _, xbits := range bitsList {
-		for _, ybits := range bitsList {
-			// exact values
-			x := xbits.Float()
-			y := ybits.Float()
-			zbits := xbits.mul(ybits)
-			z := zbits.Float()
-
-			for i, mode := range [...]RoundingMode{ToZero, ToNearestEven, AwayFromZero} {
-				for _, prec := range precList {
-					got := new(Float).SetPrec(prec).SetMode(mode)
-					got.Mul(x, y)
-					want := zbits.round(prec, mode)
-					if got.Cmp(want) != 0 {
-						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t*    %s %v\n\t=    %s\n\twant %s",
-							i, prec, mode, x, xbits, y, ybits, got, want)
-					}
-
-					if x.Sign() == 0 {
-						continue // ignore div-0 case (not invertible)
-					}
-					got.Quo(z, x)
-					want = ybits.round(prec, mode)
-					if got.Cmp(want) != 0 {
-						t.Errorf("i = %d, prec = %d, %s:\n\t     %s %v\n\t/    %s %v\n\t=    %s\n\twant %s",
-							i, prec, mode, z, zbits, x, xbits, got, want)
-					}
-				}
-			}
-		}
-	}
-}
-
-// TestFloatMul64 tests that Float.Mul/Quo of numbers with
-// 53bit mantissa behaves like float64 multiplication/division.
-func TestFloatMul64(t *testing.T) {
-	for _, test := range []struct {
-		x, y float64
-	}{
-		{0, 0},
-		{0, 1},
-		{1, 1},
-		{1, 1.5},
-		{1.234, 0.5678},
-		{2.718281828, 3.14159265358979},
-		{2.718281828e10, 3.14159265358979e-32},
-		{1.0 / 3, 1e200},
-	} {
-		for i := range [8]int{} {
-			x0, y0 := test.x, test.y
-			if i&1 != 0 {
-				x0 = -x0
-			}
-			if i&2 != 0 {
-				y0 = -y0
-			}
-			if i&4 != 0 {
-				x0, y0 = y0, x0
-			}
-
-			x := NewFloat(x0)
-			y := NewFloat(y0)
-			z := new(Float).SetPrec(53)
-
-			z.Mul(x, y)
-			got, _ := z.Float64()
-			want := x0 * y0
-			if got != want {
-				t.Errorf("%g * %g = %g; want %g", x0, y0, got, want)
-			}
-
-			if y0 == 0 {
-				continue // avoid division-by-zero
-			}
-			z.Quo(z, y)
-			got, _ = z.Float64()
-			want /= y0
-			if got != want {
-				t.Errorf("%g / %g = %g; want %g", x0*y0, y0, got, want)
-			}
-		}
-	}
-}
-
-func TestIssue6866(t *testing.T) {
-	for _, prec := range precList {
-		two := new(Float).SetPrec(prec).SetInt64(2)
-		one := new(Float).SetPrec(prec).SetInt64(1)
-		three := new(Float).SetPrec(prec).SetInt64(3)
-		msix := new(Float).SetPrec(prec).SetInt64(-6)
-		psix := new(Float).SetPrec(prec).SetInt64(+6)
-
-		p := new(Float).SetPrec(prec)
-		z1 := new(Float).SetPrec(prec)
-		z2 := new(Float).SetPrec(prec)
-
-		// z1 = 2 + 1.0/3*-6
-		p.Quo(one, three)
-		p.Mul(p, msix)
-		z1.Add(two, p)
-
-		// z2 = 2 - 1.0/3*+6
-		p.Quo(one, three)
-		p.Mul(p, psix)
-		z2.Sub(two, p)
-
-		if z1.Cmp(z2) != 0 {
-			t.Fatalf("prec %d: got z1 = %s != z2 = %s; want z1 == z2\n", prec, z1, z2)
-		}
-		if z1.Sign() != 0 {
-			t.Errorf("prec %d: got z1 = %s; want 0", prec, z1)
-		}
-		if z2.Sign() != 0 {
-			t.Errorf("prec %d: got z2 = %s; want 0", prec, z2)
-		}
-	}
-}
-
-func TestFloatQuo(t *testing.T) {
-	// TODO(gri) make the test vary these precisions
-	preci := 200 // precision of integer part
-	precf := 20  // precision of fractional part
-
-	for i := 0; i < 8; i++ {
-		// compute accurate (not rounded) result z
-		bits := Bits{preci - 1}
-		if i&3 != 0 {
-			bits = append(bits, 0)
-		}
-		if i&2 != 0 {
-			bits = append(bits, -1)
-		}
-		if i&1 != 0 {
-			bits = append(bits, -precf)
-		}
-		z := bits.Float()
-
-		// compute accurate x as z*y
-		y := NewFloat(3.14159265358979323e123)
-
-		x := new(Float).SetPrec(z.Prec() + y.Prec()).SetMode(ToZero)
-		x.Mul(z, y)
-
-		// leave for debugging
-		// fmt.Printf("x = %s\ny = %s\nz = %s\n", x, y, z)
-
-		if got := x.Acc(); got != Exact {
-			t.Errorf("got acc = %s; want exact", got)
-		}
-
-		// round accurate z for a variety of precisions and
-		// modes and compare against result of x / y.
-		for _, mode := range [...]RoundingMode{ToZero, ToNearestEven, AwayFromZero} {
-			for d := -5; d < 5; d++ {
-				prec := uint(preci + d)
-				got := new(Float).SetPrec(prec).SetMode(mode).Quo(x, y)
-				want := bits.round(prec, mode)
-				if got.Cmp(want) != 0 {
-					t.Errorf("i = %d, prec = %d, %s:\n\t     %s\n\t/    %s\n\t=    %s\n\twant %s",
-						i, prec, mode, x, y, got, want)
-				}
-			}
-		}
-	}
-}
-
-var long = flag.Bool("long", false, "run very long tests")
-
-// TestFloatQuoSmoke tests all divisions x/y for values x, y in the range [-n, +n];
-// it serves as a smoke test for basic correctness of division.
-func TestFloatQuoSmoke(t *testing.T) {
-	n := 10
-	if *long {
-		n = 1000
-	}
-
-	const dprec = 3         // max. precision variation
-	const prec = 10 + dprec // enough bits to hold n precisely
-	for x := -n; x <= n; x++ {
-		for y := -n; y < n; y++ {
-			if y == 0 {
-				continue
-			}
-
-			a := float64(x)
-			b := float64(y)
-			c := a / b
-
-			// vary operand precision (only ok as long as a, b can be represented correctly)
-			for ad := -dprec; ad <= dprec; ad++ {
-				for bd := -dprec; bd <= dprec; bd++ {
-					A := new(Float).SetPrec(uint(prec + ad)).SetFloat64(a)
-					B := new(Float).SetPrec(uint(prec + bd)).SetFloat64(b)
-					C := new(Float).SetPrec(53).Quo(A, B) // C has float64 mantissa width
-
-					cc, acc := C.Float64()
-					if cc != c {
-						t.Errorf("%g/%g = %s; want %.5g\n", a, b, C.Text('g', 5), c)
-						continue
-					}
-					if acc != Exact {
-						t.Errorf("%g/%g got %s result; want exact result", a, b, acc)
-					}
-				}
-			}
-		}
-	}
-}
-
-// TestFloatArithmeticSpecialValues tests that Float operations produce the
-// correct results for combinations of zero (±0), finite (±1 and ±2.71828),
-// and infinite (±Inf) operands.
-func TestFloatArithmeticSpecialValues(t *testing.T) {
-	zero := 0.0
-	args := []float64{math.Inf(-1), -2.71828, -1, -zero, zero, 1, 2.71828, math.Inf(1)}
-	xx := new(Float)
-	yy := new(Float)
-	got := new(Float)
-	want := new(Float)
-	for i := 0; i < 4; i++ {
-		for _, x := range args {
-			xx.SetFloat64(x)
-			// check conversion is correct
-			// (no need to do this for y, since we see exactly the
-			// same values there)
-			if got, acc := xx.Float64(); got != x || acc != Exact {
-				t.Errorf("Float(%g) == %g (%s)", x, got, acc)
-			}
-			for _, y := range args {
-				yy.SetFloat64(y)
-				var (
-					op string
-					z  float64
-					f  func(z, x, y *Float) *Float
-				)
-				switch i {
-				case 0:
-					op = "+"
-					z = x + y
-					f = (*Float).Add
-				case 1:
-					op = "-"
-					z = x - y
-					f = (*Float).Sub
-				case 2:
-					op = "*"
-					z = x * y
-					f = (*Float).Mul
-				case 3:
-					op = "/"
-					z = x / y
-					f = (*Float).Quo
-				default:
-					panic("unreachable")
-				}
-				var errnan bool // set if execution of f panicked with ErrNaN
-				// protect execution of f
-				func() {
-					defer func() {
-						if p := recover(); p != nil {
-							_ = p.(ErrNaN) // re-panic if not ErrNaN
-							errnan = true
-						}
-					}()
-					f(got, xx, yy)
-				}()
-				if math.IsNaN(z) {
-					if !errnan {
-						t.Errorf("%5g %s %5g = %5s; want ErrNaN panic", x, op, y, got)
-					}
-					continue
-				}
-				if errnan {
-					t.Errorf("%5g %s %5g panicked with ErrNaN; want %5s", x, op, y, want)
-					continue
-				}
-				want.SetFloat64(z)
-				if !alike(got, want) {
-					t.Errorf("%5g %s %5g = %5s; want %5s", x, op, y, got, want)
-				}
-			}
-		}
-	}
-}
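
The special-value behavior exercised here is that Float has no NaN: any operation whose float64 counterpart would yield NaN panics with ErrNaN instead. A minimal standalone illustration of that contract, using only the public math/big API:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	defer func() {
		if _, ok := recover().(big.ErrNaN); ok {
			fmt.Println("+Inf - +Inf panicked with ErrNaN, as expected")
		}
	}()
	inf := new(big.Float).SetInf(false) // +Inf
	new(big.Float).Sub(inf, inf)        // would be NaN; panics with ErrNaN
}
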
-
-func TestFloatArithmeticOverflow(t *testing.T) {
-	for _, test := range []struct {
-		prec       uint
-		mode       RoundingMode
-		op         byte
-		x, y, want string
-		acc        Accuracy
-	}{
-		{4, ToNearestEven, '+', "0", "0", "0", Exact},                   // smoke test
-		{4, ToNearestEven, '+', "0x.8p+0", "0x.8p+0", "0x.8p+1", Exact}, // smoke test
-
-		{4, ToNearestEven, '+', "0", "0x.8p2147483647", "0x.8p+2147483647", Exact},
-		{4, ToNearestEven, '+', "0x.8p2147483500", "0x.8p2147483647", "0x.8p+2147483647", Below}, // rounded to zero
-		{4, ToNearestEven, '+', "0x.8p2147483647", "0x.8p2147483647", "+Inf", Above},             // exponent overflow in +
-		{4, ToNearestEven, '+', "-0x.8p2147483647", "-0x.8p2147483647", "-Inf", Below},           // exponent overflow in +
-		{4, ToNearestEven, '-', "-0x.8p2147483647", "0x.8p2147483647", "-Inf", Below},            // exponent overflow in -
-
-		{4, ToZero, '+', "0x.fp2147483647", "0x.8p2147483643", "0x.fp+2147483647", Below}, // rounded to zero
-		{4, ToNearestEven, '+', "0x.fp2147483647", "0x.8p2147483643", "+Inf", Above},      // exponent overflow in rounding
-		{4, AwayFromZero, '+', "0x.fp2147483647", "0x.8p2147483643", "+Inf", Above},       // exponent overflow in rounding
-
-		{4, AwayFromZero, '-', "-0x.fp2147483647", "0x.8p2147483644", "-Inf", Below},        // exponent overflow in rounding
-		{4, ToNearestEven, '-', "-0x.fp2147483647", "0x.8p2147483643", "-Inf", Below},       // exponent overflow in rounding
-		{4, ToZero, '-', "-0x.fp2147483647", "0x.8p2147483643", "-0x.fp+2147483647", Above}, // rounded to zero
-
-		{4, ToNearestEven, '+', "0", "0x.8p-2147483648", "0x.8p-2147483648", Exact},
-		{4, ToNearestEven, '+', "0x.8p-2147483648", "0x.8p-2147483648", "0x.8p-2147483647", Exact},
-
-		{4, ToNearestEven, '*', "1", "0x.8p2147483647", "0x.8p+2147483647", Exact},
-		{4, ToNearestEven, '*', "2", "0x.8p2147483647", "+Inf", Above},  // exponent overflow in *
-		{4, ToNearestEven, '*', "-2", "0x.8p2147483647", "-Inf", Below}, // exponent overflow in *
-
-		{4, ToNearestEven, '/', "0.5", "0x.8p2147483647", "0x.8p-2147483646", Exact},
-		{4, ToNearestEven, '/', "0x.8p+0", "0x.8p2147483647", "0x.8p-2147483646", Exact},
-		{4, ToNearestEven, '/', "0x.8p-1", "0x.8p2147483647", "0x.8p-2147483647", Exact},
-		{4, ToNearestEven, '/', "0x.8p-2", "0x.8p2147483647", "0x.8p-2147483648", Exact},
-		{4, ToNearestEven, '/', "0x.8p-3", "0x.8p2147483647", "0", Below}, // exponent underflow in /
-	} {
-		x := makeFloat(test.x)
-		y := makeFloat(test.y)
-		z := new(Float).SetPrec(test.prec).SetMode(test.mode)
-		switch test.op {
-		case '+':
-			z.Add(x, y)
-		case '-':
-			z.Sub(x, y)
-		case '*':
-			z.Mul(x, y)
-		case '/':
-			z.Quo(x, y)
-		default:
-			panic("unreachable")
-		}
-		if got := z.Text('p', 0); got != test.want || z.Acc() != test.acc {
-			t.Errorf(
-				"prec = %d (%s): %s %c %s = %s (%s); want %s (%s)",
-				test.prec, test.mode, x.Text('p', 0), test.op, y.Text('p', 0), got, z.Acc(), test.want, test.acc,
-			)
-		}
-	}
-}
-
-// TODO(gri) Add tests that check correctness in the presence of aliasing.
-
-// For rounding modes ToNegativeInf and ToPositiveInf, rounding is affected
-// by the sign of the value to be rounded. Test that rounding happens after
-// the sign of a result has been set.
-// This test uses specific values that are known to fail if rounding is
-// "factored" out before setting the result sign.
-func TestFloatArithmeticRounding(t *testing.T) {
-	for _, test := range []struct {
-		mode       RoundingMode
-		prec       uint
-		x, y, want int64
-		op         byte
-	}{
-		{ToZero, 3, -0x8, -0x1, -0x8, '+'},
-		{AwayFromZero, 3, -0x8, -0x1, -0xa, '+'},
-		{ToNegativeInf, 3, -0x8, -0x1, -0xa, '+'},
-
-		{ToZero, 3, -0x8, 0x1, -0x8, '-'},
-		{AwayFromZero, 3, -0x8, 0x1, -0xa, '-'},
-		{ToNegativeInf, 3, -0x8, 0x1, -0xa, '-'},
-
-		{ToZero, 3, -0x9, 0x1, -0x8, '*'},
-		{AwayFromZero, 3, -0x9, 0x1, -0xa, '*'},
-		{ToNegativeInf, 3, -0x9, 0x1, -0xa, '*'},
-
-		{ToZero, 3, -0x9, 0x1, -0x8, '/'},
-		{AwayFromZero, 3, -0x9, 0x1, -0xa, '/'},
-		{ToNegativeInf, 3, -0x9, 0x1, -0xa, '/'},
-	} {
-		var x, y, z Float
-		x.SetInt64(test.x)
-		y.SetInt64(test.y)
-		z.SetPrec(test.prec).SetMode(test.mode)
-		switch test.op {
-		case '+':
-			z.Add(&x, &y)
-		case '-':
-			z.Sub(&x, &y)
-		case '*':
-			z.Mul(&x, &y)
-		case '/':
-			z.Quo(&x, &y)
-		default:
-			panic("unreachable")
-		}
-		if got, acc := z.Int64(); got != test.want || acc != Exact {
-			t.Errorf("%s, %d bits: %d %c %d = %d (%s); want %d (Exact)",
-				test.mode, test.prec, test.x, test.op, test.y, got, acc, test.want,
-			)
-		}
-	}
-}
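
A small standalone check of the property described above (illustrative, public math/big API only): at 3 bits of precision -9 is not representable, and ToNegativeInf must round the already-negative product down to -10, whereas ToZero truncates it toward zero to -8.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x, y := big.NewFloat(-9), big.NewFloat(1)

	down := new(big.Float).SetPrec(3).SetMode(big.ToNegativeInf).Mul(x, y)
	trunc := new(big.Float).SetPrec(3).SetMode(big.ToZero).Mul(x, y)

	fmt.Println(down, trunc) // -10 -8
}
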
-
-// TestFloatCmpSpecialValues tests that Cmp produces the correct results for
-// combinations of zero (±0), finite (±1 and ±2.71828), and infinite (±Inf)
-// operands.
-func TestFloatCmpSpecialValues(t *testing.T) {
-	zero := 0.0
-	args := []float64{math.Inf(-1), -2.71828, -1, -zero, zero, 1, 2.71828, math.Inf(1)}
-	xx := new(Float)
-	yy := new(Float)
-	for i := 0; i < 4; i++ {
-		for _, x := range args {
-			xx.SetFloat64(x)
-			// check conversion is correct
-			// (no need to do this for y, since we see exactly the
-			// same values there)
-			if got, acc := xx.Float64(); got != x || acc != Exact {
-				t.Errorf("Float(%g) == %g (%s)", x, got, acc)
-			}
-			for _, y := range args {
-				yy.SetFloat64(y)
-				got := xx.Cmp(yy)
-				want := 0
-				switch {
-				case x < y:
-					want = -1
-				case x > y:
-					want = +1
-				}
-				if got != want {
-					t.Errorf("(%g).Cmp(%g) = %v; want %v", x, y, got, want)
-				}
-			}
-		}
-	}
-}
-
-func BenchmarkFloatAdd(b *testing.B) {
-	x := new(Float)
-	y := new(Float)
-	z := new(Float)
-
-	for _, prec := range []uint{10, 1e2, 1e3, 1e4, 1e5} {
-		x.SetPrec(prec).SetRat(NewRat(1, 3))
-		y.SetPrec(prec).SetRat(NewRat(1, 6))
-		z.SetPrec(prec)
-
-		b.Run(fmt.Sprintf("%v", prec), func(b *testing.B) {
-			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
-				z.Add(x, y)
-			}
-		})
-	}
-}
-
-func BenchmarkFloatSub(b *testing.B) {
-	x := new(Float)
-	y := new(Float)
-	z := new(Float)
-
-	for _, prec := range []uint{10, 1e2, 1e3, 1e4, 1e5} {
-		x.SetPrec(prec).SetRat(NewRat(1, 3))
-		y.SetPrec(prec).SetRat(NewRat(1, 6))
-		z.SetPrec(prec)
-
-		b.Run(fmt.Sprintf("%v", prec), func(b *testing.B) {
-			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
-				z.Sub(x, y)
-			}
-		})
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/floatconv.go b/pkg/bootstrap/src/bootstrap/math/big/floatconv.go
deleted file mode 100644
index 7becc88..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/floatconv.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatconv.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatconv.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements string-to-Float conversion functions.
-
-package big
-
-import (
-	"fmt"
-	"io"
-	"strings"
-)
-
-var floatZero Float
-
-// SetString sets z to the value of s and returns z and a boolean indicating
-// success. s must be a floating-point number of the same format as accepted
-// by Parse, with base argument 0. The entire string (not just a prefix) must
-// be valid for success. If the operation failed, the value of z is undefined
-// but the returned value is nil.
-func (z *Float) SetString(s string) (*Float, bool) {
-	if f, _, err := z.Parse(s, 0); err == nil {
-		return f, true
-	}
-	return nil, false
-}
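
For reference, a minimal usage sketch of SetString through the public math/big package (illustrative inputs; as noted above, the precision defaults to 64 when the receiver's precision is 0):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	if f, ok := new(big.Float).SetString("3.14159e-2"); ok {
		fmt.Println(f) // ≈ 0.0314159
	}
	if _, ok := new(big.Float).SetString("not a number"); !ok {
		fmt.Println("parse error, as expected")
	}
}
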
-
-// scan is like Parse but reads the longest possible prefix representing a valid
-// floating point number from an io.ByteScanner rather than a string. It serves
-// as the implementation of Parse. It does not recognize ±Inf and does not expect
-// EOF at the end.
-func (z *Float) scan(r io.ByteScanner, base int) (f *Float, b int, err error) {
-	prec := z.prec
-	if prec == 0 {
-		prec = 64
-	}
-
-	// A reasonable value in case of an error.
-	z.form = zero
-
-	// sign
-	z.neg, err = scanSign(r)
-	if err != nil {
-		return
-	}
-
-	// mantissa
-	var fcount int // fractional digit count; valid if <= 0
-	z.mant, b, fcount, err = z.mant.scan(r, base, true)
-	if err != nil {
-		return
-	}
-
-	// exponent
-	var exp int64
-	var ebase int
-	exp, ebase, err = scanExponent(r, true)
-	if err != nil {
-		return
-	}
-
-	// special-case 0
-	if len(z.mant) == 0 {
-		z.prec = prec
-		z.acc = Exact
-		z.form = zero
-		f = z
-		return
-	}
-	// len(z.mant) > 0
-
-	// The mantissa may have a decimal point (fcount <= 0) and there
-	// may be a nonzero exponent exp. The decimal point amounts to a
-	// division by b**(-fcount). An exponent means multiplication by
-	// ebase**exp. Finally, mantissa normalization (shift left) requires
-	// a correcting multiplication by 2**(-shiftcount). Multiplications
-	// are commutative, so we can apply them in any order as long as there
-	// is no loss of precision. We only have powers of 2 and 10, and
-	// we split powers of 10 into the product of the same powers of
-	// 2 and 5. This reduces the size of the multiplication factor
-	// needed for base-10 exponents.
-
-	// normalize mantissa and determine initial exponent contributions
-	exp2 := int64(len(z.mant))*_W - fnorm(z.mant)
-	exp5 := int64(0)
-
-	// determine binary or decimal exponent contribution of decimal point
-	if fcount < 0 {
-		// The mantissa has a "decimal" point ddd.dddd; and
-		// -fcount is the number of digits to the right of '.'.
-		// Adjust relevant exponent accordingly.
-		d := int64(fcount)
-		switch b {
-		case 10:
-			exp5 = d
-			fallthrough // 10**e == 5**e * 2**e
-		case 2:
-			exp2 += d
-		case 16:
-			exp2 += d * 4 // hexadecimal digits are 4 bits each
-		default:
-			panic("unexpected mantissa base")
-		}
-		// fcount consumed - not needed anymore
-	}
-
-	// take actual exponent into account
-	switch ebase {
-	case 10:
-		exp5 += exp
-		fallthrough
-	case 2:
-		exp2 += exp
-	default:
-		panic("unexpected exponent base")
-	}
-	// exp consumed - not needed anymore
-
-	// apply 2**exp2
-	if MinExp <= exp2 && exp2 <= MaxExp {
-		z.prec = prec
-		z.form = finite
-		z.exp = int32(exp2)
-		f = z
-	} else {
-		err = fmt.Errorf("exponent overflow")
-		return
-	}
-
-	if exp5 == 0 {
-		// no decimal exponent contribution
-		z.round(0)
-		return
-	}
-	// exp5 != 0
-
-	// apply 5**exp5
-	p := new(Float).SetPrec(z.Prec() + 64) // use more bits for p -- TODO(gri) what is the right number?
-	if exp5 < 0 {
-		z.Quo(z, p.pow5(uint64(-exp5)))
-	} else {
-		z.Mul(z, p.pow5(uint64(exp5)))
-	}
-
-	return
-}
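
A concrete instance of the exponent bookkeeping above (illustrative numbers): parsing "0.5e3" in base 10 scans the mantissa digits as the integer 5 with fcount = -1 and a decimal exponent of 3, so exp5 = -1 + 3 = 2 and exp2 picks up the same -1 + 3 = 2 on top of the normalization shift. The value is then 5 * 2**2 * 5**2 = 5 * 4 * 25 = 500, as expected, and only the 5**exp5 factor requires the extra-precision multiplication at the end of scan.
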
-
-// These powers of 5 fit into a uint64.
-//
-//	for p, q := uint64(0), uint64(1); p < q; p, q = q, q*5 {
-//		fmt.Println(q)
-//	}
-//
-var pow5tab = [...]uint64{
-	1,
-	5,
-	25,
-	125,
-	625,
-	3125,
-	15625,
-	78125,
-	390625,
-	1953125,
-	9765625,
-	48828125,
-	244140625,
-	1220703125,
-	6103515625,
-	30517578125,
-	152587890625,
-	762939453125,
-	3814697265625,
-	19073486328125,
-	95367431640625,
-	476837158203125,
-	2384185791015625,
-	11920928955078125,
-	59604644775390625,
-	298023223876953125,
-	1490116119384765625,
-	7450580596923828125,
-}
-
-// pow5 sets z to 5**n and returns z.
-// n must not be negative.
-func (z *Float) pow5(n uint64) *Float {
-	const m = uint64(len(pow5tab) - 1)
-	if n <= m {
-		return z.SetUint64(pow5tab[n])
-	}
-	// n > m
-
-	z.SetUint64(pow5tab[m])
-	n -= m
-
-	// use more bits for f than for z
-	// TODO(gri) what is the right number?
-	f := new(Float).SetPrec(z.Prec() + 64).SetUint64(5)
-
-	for n > 0 {
-		if n&1 != 0 {
-			z.Mul(z, f)
-		}
-		f.Mul(f, f)
-		n >>= 1
-	}
-
-	return z
-}
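
The loop in pow5 is ordinary binary (square-and-multiply) exponentiation. The same scheme for machine integers, as a rough standalone sketch (pow is a hypothetical helper; it overflows silently once the result exceeds 64 bits):

package main

import "fmt"

// pow returns b**n by square-and-multiply, mirroring the structure of the
// pow5 loop above.
func pow(b, n uint64) uint64 {
	r := uint64(1)
	for n > 0 {
		if n&1 != 0 {
			r *= b
		}
		b *= b
		n >>= 1
	}
	return r
}

func main() {
	fmt.Println(pow(5, 13)) // 1220703125, matching pow5tab[13]
}
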
-
-// Parse parses s which must contain a text representation of a floating-
-// point number with a mantissa in the given conversion base (the exponent
-// is always a decimal number), or a string representing an infinite value.
-//
-// It sets z to the (possibly rounded) value of the corresponding floating-
-// point number, and returns z, the actual base b, and an error err, if any.
-// The entire string (not just a prefix) must be consumed for success.
-// If z's precision is 0, it is changed to 64 before rounding takes effect.
-// The number must be of the form:
-//
-//	number   = [ sign ] [ prefix ] mantissa [ exponent ] | infinity .
-//	sign     = "+" | "-" .
-//	prefix   = "0" ( "x" | "X" | "b" | "B" ) .
-//	mantissa = digits | digits "." [ digits ] | "." digits .
-//	exponent = ( "E" | "e" | "p" ) [ sign ] digits .
-//	digits   = digit { digit } .
-//	digit    = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
-//	infinity = [ sign ] ( "inf" | "Inf" ) .
-//
-// The base argument must be 0, 2, 10, or 16. Providing an invalid base
-// argument will lead to a run-time panic.
-//
-// For base 0, the number prefix determines the actual base: A prefix of
-// "0x" or "0X" selects base 16, and a "0b" or "0B" prefix selects
-// base 2; otherwise, the actual base is 10 and no prefix is accepted.
-// The octal prefix "0" is not supported (a leading "0" is simply
-// considered a "0").
-//
-// A "p" exponent indicates a binary (rather than decimal) exponent;
-// for instance "0x1.fffffffffffffp1023" (using base 0) represents the
-// maximum float64 value. For hexadecimal mantissae, the exponent must
-// be binary, if present (an "e" or "E" exponent indicator cannot be
-// distinguished from a mantissa digit).
-//
-// The returned *Float f is nil and the value of z is valid but not
-// defined if an error is reported.
-//
-func (z *Float) Parse(s string, base int) (f *Float, b int, err error) {
-	// scan doesn't handle ±Inf
-	if len(s) == 3 && (s == "Inf" || s == "inf") {
-		f = z.SetInf(false)
-		return
-	}
-	if len(s) == 4 && (s[0] == '+' || s[0] == '-') && (s[1:] == "Inf" || s[1:] == "inf") {
-		f = z.SetInf(s[0] == '-')
-		return
-	}
-
-	r := strings.NewReader(s)
-	if f, b, err = z.scan(r, base); err != nil {
-		return
-	}
-
-	// entire string must have been consumed
-	if ch, err2 := r.ReadByte(); err2 == nil {
-		err = fmt.Errorf("expected end of string, found %q", ch)
-	} else if err2 != io.EOF {
-		err = err2
-	}
-
-	return
-}
-
-// ParseFloat is like f.Parse(s, base) with f set to the given precision
-// and rounding mode.
-func ParseFloat(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error) {
-	return new(Float).SetPrec(prec).SetMode(mode).Parse(s, base)
-}
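
A small usage sketch of the grammar documented above, via the exported ParseFloat (illustrative input; with base 0 the "0x" prefix selects a hexadecimal mantissa, which requires the binary "p" exponent):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	f, b, err := big.ParseFloat("0x1.8p3", 0, 53, big.ToNearestEven)
	fmt.Println(f, b, err) // 12 16 <nil>  (0x1.8 = 1.5, scaled by 2**3)
}
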
-
-var _ fmt.Scanner = &floatZero // *Float must implement fmt.Scanner
-
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
-// the scanned number. It accepts formats whose verbs are supported by
-// fmt.Scan for floating point values, which are:
-// 'b' (binary), 'e', 'E', 'f', 'F', 'g' and 'G'.
-// Scan doesn't handle ±Inf.
-func (z *Float) Scan(s fmt.ScanState, ch rune) error {
-	s.SkipSpace()
-	_, _, err := z.scan(byteReader{s}, 0)
-	return err
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/floatconv_test.go b/pkg/bootstrap/src/bootstrap/math/big/floatconv_test.go
deleted file mode 100644
index 3e104bf..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/floatconv_test.go
+++ /dev/null
@@ -1,722 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatconv_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatconv_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"fmt"
-	"math"
-	"strconv"
-	"testing"
-)
-
-func TestFloatSetFloat64String(t *testing.T) {
-	inf := math.Inf(0)
-	nan := math.NaN()
-
-	for _, test := range []struct {
-		s string
-		x float64 // NaNs represent invalid inputs
-	}{
-		// basics
-		{"0", 0},
-		{"-0", -0},
-		{"+0", 0},
-		{"1", 1},
-		{"-1", -1},
-		{"+1", 1},
-		{"1.234", 1.234},
-		{"-1.234", -1.234},
-		{"+1.234", 1.234},
-		{".1", 0.1},
-		{"1.", 1},
-		{"+1.", 1},
-
-		// various zeros
-		{"0e100", 0},
-		{"-0e+100", 0},
-		{"+0e-100", 0},
-		{"0E100", 0},
-		{"-0E+100", 0},
-		{"+0E-100", 0},
-
-		// various decimal exponent formats
-		{"1.e10", 1e10},
-		{"1e+10", 1e10},
-		{"+1e-10", 1e-10},
-		{"1E10", 1e10},
-		{"1.E+10", 1e10},
-		{"+1E-10", 1e-10},
-
-		// infinities
-		{"Inf", inf},
-		{"+Inf", inf},
-		{"-Inf", -inf},
-		{"inf", inf},
-		{"+inf", inf},
-		{"-inf", -inf},
-
-		// invalid numbers
-		{"", nan},
-		{"-", nan},
-		{"0x", nan},
-		{"0e", nan},
-		{"1.2ef", nan},
-		{"2..3", nan},
-		{"123..", nan},
-		{"infinity", nan},
-		{"foobar", nan},
-
-		// misc decimal values
-		{"3.14159265", 3.14159265},
-		{"-687436.79457e-245", -687436.79457e-245},
-		{"-687436.79457E245", -687436.79457e245},
-		{".0000000000000000000000000000000000000001", 1e-40},
-		{"+10000000000000000000000000000000000000000e-0", 1e40},
-
-		// decimal mantissa, binary exponent
-		{"0p0", 0},
-		{"-0p0", -0},
-		{"1p10", 1 << 10},
-		{"1p+10", 1 << 10},
-		{"+1p-10", 1.0 / (1 << 10)},
-		{"1024p-12", 0.25},
-		{"-1p10", -1024},
-		{"1.5p1", 3},
-
-		// binary mantissa, decimal exponent
-		{"0b0", 0},
-		{"-0b0", -0},
-		{"0b0e+10", 0},
-		{"-0b0e-10", -0},
-		{"0b1010", 10},
-		{"0B1010E2", 1000},
-		{"0b.1", 0.5},
-		{"0b.001", 0.125},
-		{"0b.001e3", 125},
-
-		// binary mantissa, binary exponent
-		{"0b0p+10", 0},
-		{"-0b0p-10", -0},
-		{"0b.1010p4", 10},
-		{"0b1p-1", 0.5},
-		{"0b001p-3", 0.125},
-		{"0b.001p3", 1},
-		{"0b0.01p2", 1},
-
-		// hexadecimal mantissa and exponent
-		{"0x0", 0},
-		{"-0x0", -0},
-		{"0x0p+10", 0},
-		{"-0x0p-10", -0},
-		{"0xff", 255},
-		{"0X.8p1", 1},
-		{"-0X0.00008p16", -0.5},
-		{"0x0.0000000000001p-1022", math.SmallestNonzeroFloat64},
-		{"0x1.fffffffffffffp1023", math.MaxFloat64},
-	} {
-		var x Float
-		x.SetPrec(53)
-		_, ok := x.SetString(test.s)
-		if math.IsNaN(test.x) {
-			// test.s is invalid
-			if ok {
-				t.Errorf("%s: want parse error", test.s)
-			}
-			continue
-		}
-		// test.s is valid
-		if !ok {
-			t.Errorf("%s: got parse error", test.s)
-			continue
-		}
-		f, _ := x.Float64()
-		want := new(Float).SetFloat64(test.x)
-		if x.Cmp(want) != 0 {
-			t.Errorf("%s: got %s (%v); want %v", test.s, &x, f, test.x)
-		}
-	}
-}
-
-func fdiv(a, b float64) float64 { return a / b }
-
-const (
-	below1e23 = 99999999999999974834176
-	above1e23 = 100000000000000008388608
-)
-
-func TestFloat64Text(t *testing.T) {
-	for _, test := range []struct {
-		x      float64
-		format byte
-		prec   int
-		want   string
-	}{
-		{0, 'f', 0, "0"},
-		{math.Copysign(0, -1), 'f', 0, "-0"},
-		{1, 'f', 0, "1"},
-		{-1, 'f', 0, "-1"},
-
-		{0.001, 'e', 0, "1e-03"},
-		{0.459, 'e', 0, "5e-01"},
-		{1.459, 'e', 0, "1e+00"},
-		{2.459, 'e', 1, "2.5e+00"},
-		{3.459, 'e', 2, "3.46e+00"},
-		{4.459, 'e', 3, "4.459e+00"},
-		{5.459, 'e', 4, "5.4590e+00"},
-
-		{0.001, 'f', 0, "0"},
-		{0.459, 'f', 0, "0"},
-		{1.459, 'f', 0, "1"},
-		{2.459, 'f', 1, "2.5"},
-		{3.459, 'f', 2, "3.46"},
-		{4.459, 'f', 3, "4.459"},
-		{5.459, 'f', 4, "5.4590"},
-
-		{0, 'b', 0, "0"},
-		{math.Copysign(0, -1), 'b', 0, "-0"},
-		{1.0, 'b', 0, "4503599627370496p-52"},
-		{-1.0, 'b', 0, "-4503599627370496p-52"},
-		{4503599627370496, 'b', 0, "4503599627370496p+0"},
-
-		{0, 'p', 0, "0"},
-		{math.Copysign(0, -1), 'p', 0, "-0"},
-		{1024.0, 'p', 0, "0x.8p+11"},
-		{-1024.0, 'p', 0, "-0x.8p+11"},
-
-		// all test cases below from strconv/ftoa_test.go
-		{1, 'e', 5, "1.00000e+00"},
-		{1, 'f', 5, "1.00000"},
-		{1, 'g', 5, "1"},
-		{1, 'g', -1, "1"},
-		{20, 'g', -1, "20"},
-		{1234567.8, 'g', -1, "1.2345678e+06"},
-		{200000, 'g', -1, "200000"},
-		{2000000, 'g', -1, "2e+06"},
-
-		// g conversion and zero suppression
-		{400, 'g', 2, "4e+02"},
-		{40, 'g', 2, "40"},
-		{4, 'g', 2, "4"},
-		{.4, 'g', 2, "0.4"},
-		{.04, 'g', 2, "0.04"},
-		{.004, 'g', 2, "0.004"},
-		{.0004, 'g', 2, "0.0004"},
-		{.00004, 'g', 2, "4e-05"},
-		{.000004, 'g', 2, "4e-06"},
-
-		{0, 'e', 5, "0.00000e+00"},
-		{0, 'f', 5, "0.00000"},
-		{0, 'g', 5, "0"},
-		{0, 'g', -1, "0"},
-
-		{-1, 'e', 5, "-1.00000e+00"},
-		{-1, 'f', 5, "-1.00000"},
-		{-1, 'g', 5, "-1"},
-		{-1, 'g', -1, "-1"},
-
-		{12, 'e', 5, "1.20000e+01"},
-		{12, 'f', 5, "12.00000"},
-		{12, 'g', 5, "12"},
-		{12, 'g', -1, "12"},
-
-		{123456700, 'e', 5, "1.23457e+08"},
-		{123456700, 'f', 5, "123456700.00000"},
-		{123456700, 'g', 5, "1.2346e+08"},
-		{123456700, 'g', -1, "1.234567e+08"},
-
-		{1.2345e6, 'e', 5, "1.23450e+06"},
-		{1.2345e6, 'f', 5, "1234500.00000"},
-		{1.2345e6, 'g', 5, "1.2345e+06"},
-
-		{1e23, 'e', 17, "9.99999999999999916e+22"},
-		{1e23, 'f', 17, "99999999999999991611392.00000000000000000"},
-		{1e23, 'g', 17, "9.9999999999999992e+22"},
-
-		{1e23, 'e', -1, "1e+23"},
-		{1e23, 'f', -1, "100000000000000000000000"},
-		{1e23, 'g', -1, "1e+23"},
-
-		{below1e23, 'e', 17, "9.99999999999999748e+22"},
-		{below1e23, 'f', 17, "99999999999999974834176.00000000000000000"},
-		{below1e23, 'g', 17, "9.9999999999999975e+22"},
-
-		{below1e23, 'e', -1, "9.999999999999997e+22"},
-		{below1e23, 'f', -1, "99999999999999970000000"},
-		{below1e23, 'g', -1, "9.999999999999997e+22"},
-
-		{above1e23, 'e', 17, "1.00000000000000008e+23"},
-		{above1e23, 'f', 17, "100000000000000008388608.00000000000000000"},
-		{above1e23, 'g', 17, "1.0000000000000001e+23"},
-
-		{above1e23, 'e', -1, "1.0000000000000001e+23"},
-		{above1e23, 'f', -1, "100000000000000010000000"},
-		{above1e23, 'g', -1, "1.0000000000000001e+23"},
-
-		{5e-304 / 1e20, 'g', -1, "5e-324"},
-		{-5e-304 / 1e20, 'g', -1, "-5e-324"},
-		{fdiv(5e-304, 1e20), 'g', -1, "5e-324"},   // avoid constant arithmetic
-		{fdiv(-5e-304, 1e20), 'g', -1, "-5e-324"}, // avoid constant arithmetic
-
-		{32, 'g', -1, "32"},
-		{32, 'g', 0, "3e+01"},
-
-		{100, 'x', -1, "%x"},
-
-		// {math.NaN(), 'g', -1, "NaN"},  // Float doesn't support NaNs
-		// {-math.NaN(), 'g', -1, "NaN"}, // Float doesn't support NaNs
-		{math.Inf(0), 'g', -1, "+Inf"},
-		{math.Inf(-1), 'g', -1, "-Inf"},
-		{-math.Inf(0), 'g', -1, "-Inf"},
-
-		{-1, 'b', -1, "-4503599627370496p-52"},
-
-		// fixed bugs
-		{0.9, 'f', 1, "0.9"},
-		{0.09, 'f', 1, "0.1"},
-		{0.0999, 'f', 1, "0.1"},
-		{0.05, 'f', 1, "0.1"},
-		{0.05, 'f', 0, "0"},
-		{0.5, 'f', 1, "0.5"},
-		{0.5, 'f', 0, "0"},
-		{1.5, 'f', 0, "2"},
-
-		// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
-		{2.2250738585072012e-308, 'g', -1, "2.2250738585072014e-308"},
-		// http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
-		{2.2250738585072011e-308, 'g', -1, "2.225073858507201e-308"},
-
-		// Issue 2625.
-		{383260575764816448, 'f', 0, "383260575764816448"},
-		{383260575764816448, 'g', -1, "3.8326057576481645e+17"},
-
-		// Issue 15918.
-		{1, 'f', -10, "1"},
-		{1, 'f', -11, "1"},
-		{1, 'f', -12, "1"},
-	} {
-		// The test cases are from the strconv package which tests float64 values.
-		// When formatting values with prec = -1 (shortest representation),
-		// the actually available mantissa precision matters.
-		// For denormalized values, that precision is < 53 (SetFloat64 default).
-		// Compute and set the actual precision explicitly.
-		f := new(Float).SetPrec(actualPrec(test.x)).SetFloat64(test.x)
-		got := f.Text(test.format, test.prec)
-		if got != test.want {
-			t.Errorf("%v: got %s; want %s", test, got, test.want)
-			continue
-		}
-
-		if test.format == 'b' && test.x == 0 {
-			continue // 'b' format in strconv.FormatFloat requires knowledge of bias for 0.0
-		}
-		if test.format == 'p' {
-			continue // 'p' format not supported by strconv.FormatFloat
-		}
-
-		// verify that Float format matches strconv format
-		want := strconv.FormatFloat(test.x, test.format, test.prec, 64)
-		if got != want {
-			t.Errorf("%v: got %s; want %s (strconv)", test, got, want)
-		}
-	}
-}
-
-// actualPrec returns the number of actually used mantissa bits.
-func actualPrec(x float64) uint {
-	if bits := math.Float64bits(x); x != 0 && bits&(0x7ff<<52) == 0 {
-		// x is denormalized
-		return 64 - nlz64(bits&(1<<52-1))
-	}
-	return 53
-}
-
-func TestFloatText(t *testing.T) {
-	for _, test := range []struct {
-		x      string
-		prec   uint
-		format byte
-		digits int
-		want   string
-	}{
-		{"0", 10, 'f', 0, "0"},
-		{"-0", 10, 'f', 0, "-0"},
-		{"1", 10, 'f', 0, "1"},
-		{"-1", 10, 'f', 0, "-1"},
-
-		{"1.459", 100, 'e', 0, "1e+00"},
-		{"2.459", 100, 'e', 1, "2.5e+00"},
-		{"3.459", 100, 'e', 2, "3.46e+00"},
-		{"4.459", 100, 'e', 3, "4.459e+00"},
-		{"5.459", 100, 'e', 4, "5.4590e+00"},
-
-		{"1.459", 100, 'E', 0, "1E+00"},
-		{"2.459", 100, 'E', 1, "2.5E+00"},
-		{"3.459", 100, 'E', 2, "3.46E+00"},
-		{"4.459", 100, 'E', 3, "4.459E+00"},
-		{"5.459", 100, 'E', 4, "5.4590E+00"},
-
-		{"1.459", 100, 'f', 0, "1"},
-		{"2.459", 100, 'f', 1, "2.5"},
-		{"3.459", 100, 'f', 2, "3.46"},
-		{"4.459", 100, 'f', 3, "4.459"},
-		{"5.459", 100, 'f', 4, "5.4590"},
-
-		{"1.459", 100, 'g', 0, "1"},
-		{"2.459", 100, 'g', 1, "2"},
-		{"3.459", 100, 'g', 2, "3.5"},
-		{"4.459", 100, 'g', 3, "4.46"},
-		{"5.459", 100, 'g', 4, "5.459"},
-
-		{"1459", 53, 'g', 0, "1e+03"},
-		{"2459", 53, 'g', 1, "2e+03"},
-		{"3459", 53, 'g', 2, "3.5e+03"},
-		{"4459", 53, 'g', 3, "4.46e+03"},
-		{"5459", 53, 'g', 4, "5459"},
-
-		{"1459", 53, 'G', 0, "1E+03"},
-		{"2459", 53, 'G', 1, "2E+03"},
-		{"3459", 53, 'G', 2, "3.5E+03"},
-		{"4459", 53, 'G', 3, "4.46E+03"},
-		{"5459", 53, 'G', 4, "5459"},
-
-		{"3", 10, 'e', 40, "3.0000000000000000000000000000000000000000e+00"},
-		{"3", 10, 'f', 40, "3.0000000000000000000000000000000000000000"},
-		{"3", 10, 'g', 40, "3"},
-
-		{"3e40", 100, 'e', 40, "3.0000000000000000000000000000000000000000e+40"},
-		{"3e40", 100, 'f', 4, "30000000000000000000000000000000000000000.0000"},
-		{"3e40", 100, 'g', 40, "3e+40"},
-
-		// make sure "stupid" exponents don't stall the machine
-		{"1e1000000", 64, 'p', 0, "0x.88b3a28a05eade3ap+3321929"},
-		{"1e646456992", 64, 'p', 0, "0x.e883a0c5c8c7c42ap+2147483644"},
-		{"1e646456993", 64, 'p', 0, "+Inf"},
-		{"1e1000000000", 64, 'p', 0, "+Inf"},
-		{"1e-1000000", 64, 'p', 0, "0x.efb4542cc8ca418ap-3321928"},
-		{"1e-646456993", 64, 'p', 0, "0x.e17c8956983d9d59p-2147483647"},
-		{"1e-646456994", 64, 'p', 0, "0"},
-		{"1e-1000000000", 64, 'p', 0, "0"},
-
-		// minimum and maximum values
-		{"1p2147483646", 64, 'p', 0, "0x.8p+2147483647"},
-		{"0x.8p2147483647", 64, 'p', 0, "0x.8p+2147483647"},
-		{"0x.8p-2147483647", 64, 'p', 0, "0x.8p-2147483647"},
-		{"1p-2147483649", 64, 'p', 0, "0x.8p-2147483648"},
-
-		// TODO(gri) need tests for actual large Floats
-
-		{"0", 53, 'b', 0, "0"},
-		{"-0", 53, 'b', 0, "-0"},
-		{"1.0", 53, 'b', 0, "4503599627370496p-52"},
-		{"-1.0", 53, 'b', 0, "-4503599627370496p-52"},
-		{"4503599627370496", 53, 'b', 0, "4503599627370496p+0"},
-
-		// issue 9939
-		{"3", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
-		{"03", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
-		{"3.", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
-		{"3.0", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
-		{"3.00", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
-		{"3.000", 350, 'b', 0, "1720123961992553633708115671476565205597423741876210842803191629540192157066363606052513914832594264915968p-348"},
-
-		{"3", 350, 'p', 0, "0x.cp+2"},
-		{"03", 350, 'p', 0, "0x.cp+2"},
-		{"3.", 350, 'p', 0, "0x.cp+2"},
-		{"3.0", 350, 'p', 0, "0x.cp+2"},
-		{"3.00", 350, 'p', 0, "0x.cp+2"},
-		{"3.000", 350, 'p', 0, "0x.cp+2"},
-
-		{"0", 64, 'p', 0, "0"},
-		{"-0", 64, 'p', 0, "-0"},
-		{"1024.0", 64, 'p', 0, "0x.8p+11"},
-		{"-1024.0", 64, 'p', 0, "-0x.8p+11"},
-
-		// unsupported format
-		{"3.14", 64, 'x', 0, "%x"},
-		{"-3.14", 64, 'x', 0, "%x"},
-	} {
-		f, _, err := ParseFloat(test.x, 0, test.prec, ToNearestEven)
-		if err != nil {
-			t.Errorf("%v: %s", test, err)
-			continue
-		}
-
-		got := f.Text(test.format, test.digits)
-		if got != test.want {
-			t.Errorf("%v: got %s; want %s", test, got, test.want)
-		}
-
-		// compare with strconv.FormatFloat output if possible
-		// ('p' format is not supported by strconv.FormatFloat,
-		// and its output for 0.0 prints a biased exponent value
-		// as in 0p-1074 which makes no sense to emulate here)
-		if test.prec == 53 && test.format != 'p' && f.Sign() != 0 {
-			f64, acc := f.Float64()
-			if acc != Exact {
-				t.Errorf("%v: expected exact conversion to float64", test)
-				continue
-			}
-			got := strconv.FormatFloat(f64, test.format, test.digits, 64)
-			if got != test.want {
-				t.Errorf("%v: got %s; want %s", test, got, test.want)
-			}
-		}
-	}
-}
-
-func TestFloatFormat(t *testing.T) {
-	for _, test := range []struct {
-		format string
-		value  interface{} // float32, float64, or string (== 512bit *Float)
-		want   string
-	}{
-		// from fmt/fmt_test.go
-		{"%+.3e", 0.0, "+0.000e+00"},
-		{"%+.3e", 1.0, "+1.000e+00"},
-		{"%+.3f", -1.0, "-1.000"},
-		{"%+.3F", -1.0, "-1.000"},
-		{"%+.3F", float32(-1.0), "-1.000"},
-		{"%+07.2f", 1.0, "+001.00"},
-		{"%+07.2f", -1.0, "-001.00"},
-		{"%+10.2f", +1.0, "     +1.00"},
-		{"%+10.2f", -1.0, "     -1.00"},
-		{"% .3E", -1.0, "-1.000E+00"},
-		{"% .3e", 1.0, " 1.000e+00"},
-		{"%+.3g", 0.0, "+0"},
-		{"%+.3g", 1.0, "+1"},
-		{"%+.3g", -1.0, "-1"},
-		{"% .3g", -1.0, "-1"},
-		{"% .3g", 1.0, " 1"},
-		{"%b", float32(1.0), "8388608p-23"},
-		{"%b", 1.0, "4503599627370496p-52"},
-
-		// from fmt/fmt_test.go: old test/fmt_test.go
-		{"%e", 1.0, "1.000000e+00"},
-		{"%e", 1234.5678e3, "1.234568e+06"},
-		{"%e", 1234.5678e-8, "1.234568e-05"},
-		{"%e", -7.0, "-7.000000e+00"},
-		{"%e", -1e-9, "-1.000000e-09"},
-		{"%f", 1234.5678e3, "1234567.800000"},
-		{"%f", 1234.5678e-8, "0.000012"},
-		{"%f", -7.0, "-7.000000"},
-		{"%f", -1e-9, "-0.000000"},
-		{"%g", 1234.5678e3, "1.2345678e+06"},
-		{"%g", float32(1234.5678e3), "1.2345678e+06"},
-		{"%g", 1234.5678e-8, "1.2345678e-05"},
-		{"%g", -7.0, "-7"},
-		{"%g", -1e-9, "-1e-09"},
-		{"%g", float32(-1e-9), "-1e-09"},
-		{"%E", 1.0, "1.000000E+00"},
-		{"%E", 1234.5678e3, "1.234568E+06"},
-		{"%E", 1234.5678e-8, "1.234568E-05"},
-		{"%E", -7.0, "-7.000000E+00"},
-		{"%E", -1e-9, "-1.000000E-09"},
-		{"%G", 1234.5678e3, "1.2345678E+06"},
-		{"%G", float32(1234.5678e3), "1.2345678E+06"},
-		{"%G", 1234.5678e-8, "1.2345678E-05"},
-		{"%G", -7.0, "-7"},
-		{"%G", -1e-9, "-1E-09"},
-		{"%G", float32(-1e-9), "-1E-09"},
-
-		{"%20.6e", 1.2345e3, "        1.234500e+03"},
-		{"%20.6e", 1.2345e-3, "        1.234500e-03"},
-		{"%20e", 1.2345e3, "        1.234500e+03"},
-		{"%20e", 1.2345e-3, "        1.234500e-03"},
-		{"%20.8e", 1.2345e3, "      1.23450000e+03"},
-		{"%20f", 1.23456789e3, "         1234.567890"},
-		{"%20f", 1.23456789e-3, "            0.001235"},
-		{"%20f", 12345678901.23456789, "  12345678901.234568"},
-		{"%-20f", 1.23456789e3, "1234.567890         "},
-		{"%20.8f", 1.23456789e3, "       1234.56789000"},
-		{"%20.8f", 1.23456789e-3, "          0.00123457"},
-		{"%g", 1.23456789e3, "1234.56789"},
-		{"%g", 1.23456789e-3, "0.00123456789"},
-		{"%g", 1.23456789e20, "1.23456789e+20"},
-		{"%20e", math.Inf(1), "                +Inf"},
-		{"%-20f", math.Inf(-1), "-Inf                "},
-
-		// from fmt/fmt_test.go: comparison of padding rules with C printf
-		{"%.2f", 1.0, "1.00"},
-		{"%.2f", -1.0, "-1.00"},
-		{"% .2f", 1.0, " 1.00"},
-		{"% .2f", -1.0, "-1.00"},
-		{"%+.2f", 1.0, "+1.00"},
-		{"%+.2f", -1.0, "-1.00"},
-		{"%7.2f", 1.0, "   1.00"},
-		{"%7.2f", -1.0, "  -1.00"},
-		{"% 7.2f", 1.0, "   1.00"},
-		{"% 7.2f", -1.0, "  -1.00"},
-		{"%+7.2f", 1.0, "  +1.00"},
-		{"%+7.2f", -1.0, "  -1.00"},
-		{"%07.2f", 1.0, "0001.00"},
-		{"%07.2f", -1.0, "-001.00"},
-		{"% 07.2f", 1.0, " 001.00"},
-		{"% 07.2f", -1.0, "-001.00"},
-		{"%+07.2f", 1.0, "+001.00"},
-		{"%+07.2f", -1.0, "-001.00"},
-
-		// from fmt/fmt_test.go: zero padding does not apply to infinities
-		{"%020f", math.Inf(-1), "                -Inf"},
-		{"%020f", math.Inf(+1), "                +Inf"},
-		{"% 020f", math.Inf(-1), "                -Inf"},
-		{"% 020f", math.Inf(+1), "                 Inf"},
-		{"%+020f", math.Inf(-1), "                -Inf"},
-		{"%+020f", math.Inf(+1), "                +Inf"},
-		{"%20f", -1.0, "           -1.000000"},
-
-		// handle %v like %g
-		{"%v", 0.0, "0"},
-		{"%v", -7.0, "-7"},
-		{"%v", -1e-9, "-1e-09"},
-		{"%v", float32(-1e-9), "-1e-09"},
-		{"%010v", 0.0, "0000000000"},
-
-		// *Float cases
-		{"%.20f", "1e-20", "0.00000000000000000001"},
-		{"%.20f", "-1e-20", "-0.00000000000000000001"},
-		{"%30.20f", "-1e-20", "       -0.00000000000000000001"},
-		{"%030.20f", "-1e-20", "-00000000.00000000000000000001"},
-		{"%030.20f", "+1e-20", "000000000.00000000000000000001"},
-		{"% 030.20f", "+1e-20", " 00000000.00000000000000000001"},
-
-		// erroneous formats
-		{"%s", 1.0, "%!s(*big.Float=1)"},
-	} {
-		value := new(Float)
-		switch v := test.value.(type) {
-		case float32:
-			value.SetPrec(24).SetFloat64(float64(v))
-		case float64:
-			value.SetPrec(53).SetFloat64(v)
-		case string:
-			value.SetPrec(512).Parse(v, 0)
-		default:
-			t.Fatalf("unsupported test value: %v (%T)", v, v)
-		}
-
-		if got := fmt.Sprintf(test.format, value); got != test.want {
-			t.Errorf("%v: got %q; want %q", test, got, test.want)
-		}
-	}
-}
-
-func BenchmarkParseFloatSmallExp(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		for _, s := range []string{
-			"1e0",
-			"1e-1",
-			"1e-2",
-			"1e-3",
-			"1e-4",
-			"1e-5",
-			"1e-10",
-			"1e-20",
-			"1e-50",
-			"1e1",
-			"1e2",
-			"1e3",
-			"1e4",
-			"1e5",
-			"1e10",
-			"1e20",
-			"1e50",
-		} {
-			var x Float
-			_, _, err := x.Parse(s, 0)
-			if err != nil {
-				b.Fatalf("%s: %v", s, err)
-			}
-		}
-	}
-}
-
-func BenchmarkParseFloatLargeExp(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		for _, s := range []string{
-			"1e0",
-			"1e-10",
-			"1e-20",
-			"1e-30",
-			"1e-40",
-			"1e-50",
-			"1e-100",
-			"1e-500",
-			"1e-1000",
-			"1e-5000",
-			"1e-10000",
-			"1e10",
-			"1e20",
-			"1e30",
-			"1e40",
-			"1e50",
-			"1e100",
-			"1e500",
-			"1e1000",
-			"1e5000",
-			"1e10000",
-		} {
-			var x Float
-			_, _, err := x.Parse(s, 0)
-			if err != nil {
-				b.Fatalf("%s: %v", s, err)
-			}
-		}
-	}
-}
-
-func TestFloatScan(t *testing.T) {
-	var floatScanTests = []struct {
-		input     string
-		format    string
-		output    string
-		remaining int
-		wantErr   bool
-	}{
-		0: {"10.0", "%f", "10", 0, false},
-		1: {"23.98+2.0", "%v", "23.98", 4, false},
-		2: {"-1+1", "%v", "-1", 2, false},
-		3: {" 00000", "%v", "0", 0, false},
-		4: {"-123456p-78", "%b", "-4.084816388e-19", 0, false},
-		5: {"+123", "%b", "123", 0, false},
-		6: {"-1.234e+56", "%e", "-1.234e+56", 0, false},
-		7: {"-1.234E-56", "%E", "-1.234e-56", 0, false},
-		8: {"-1.234e+567", "%g", "-1.234e+567", 0, false},
-		9: {"+1234567891011.234", "%G", "1.234567891e+12", 0, false},
-
-		// Scan doesn't handle ±Inf.
-		10: {"Inf", "%v", "", 3, true},
-		11: {"-Inf", "%v", "", 3, true},
-		12: {"-Inf", "%v", "", 3, true},
-	}
-
-	var buf bytes.Buffer
-	for i, test := range floatScanTests {
-		x := new(Float)
-		buf.Reset()
-		buf.WriteString(test.input)
-		_, err := fmt.Fscanf(&buf, test.format, x)
-		if test.wantErr {
-			if err == nil {
-				t.Errorf("#%d want non-nil err", i)
-			}
-			continue
-		}
-
-		if err != nil {
-			t.Errorf("#%d error: %s", i, err)
-		}
-
-		if x.String() != test.output {
-			t.Errorf("#%d got %s; want %s", i, x.String(), test.output)
-		}
-		if buf.Len() != test.remaining {
-			t.Errorf("#%d got %d bytes remaining; want %d", i, buf.Len(), test.remaining)
-		}
-	}
-}
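For reference, a minimal sketch (not part of the deleted sources) of the scanning behavior that TestFloatScan above exercises: *big.Float implements fmt.Scanner, so the usual fmt verbs parse a decimal mantissa with an optional exponent, and ±Inf is rejected.

package main

import (
	"fmt"
	"math/big"
	"strings"
)

func main() {
	x := new(big.Float)
	// %e (like %v, %f, %g) scans a decimal mantissa with an optional exponent.
	if _, err := fmt.Fscanf(strings.NewReader("-1.234e+56"), "%e", x); err != nil {
		panic(err)
	}
	fmt.Println(x) // -1.234e+56

	// Scan does not accept Inf, matching test cases 10-12 above.
	if _, err := fmt.Fscanf(strings.NewReader("Inf"), "%v", new(big.Float)); err != nil {
		fmt.Println("scan error:", err)
	}
}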
diff --git a/pkg/bootstrap/src/bootstrap/math/big/floatexample_test.go b/pkg/bootstrap/src/bootstrap/math/big/floatexample_test.go
deleted file mode 100644
index 99dc03e..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/floatexample_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatexample_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatexample_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big_test
-
-import (
-	"fmt"
-	"math"
-	"bootstrap/math/big"
-)
-
-func ExampleFloat_Add() {
-	// Operate on numbers of different precision.
-	var x, y, z big.Float
-	x.SetInt64(1000)          // x is automatically set to 64bit precision
-	y.SetFloat64(2.718281828) // y is automatically set to 53bit precision
-	z.SetPrec(32)
-	z.Add(&x, &y)
-	fmt.Printf("x = %.10g (%s, prec = %d, acc = %s)\n", &x, x.Text('p', 0), x.Prec(), x.Acc())
-	fmt.Printf("y = %.10g (%s, prec = %d, acc = %s)\n", &y, y.Text('p', 0), y.Prec(), y.Acc())
-	fmt.Printf("z = %.10g (%s, prec = %d, acc = %s)\n", &z, z.Text('p', 0), z.Prec(), z.Acc())
-	// Output:
-	// x = 1000 (0x.fap+10, prec = 64, acc = Exact)
-	// y = 2.718281828 (0x.adf85458248cd8p+2, prec = 53, acc = Exact)
-	// z = 1002.718282 (0x.faadf854p+10, prec = 32, acc = Below)
-}
-
-func ExampleFloat_shift() {
-	// Implement Float "shift" by modifying the (binary) exponents directly.
-	for s := -5; s <= 5; s++ {
-		x := big.NewFloat(0.5)
-		x.SetMantExp(x, x.MantExp(nil)+s) // shift x by s
-		fmt.Println(x)
-	}
-	// Output:
-	// 0.015625
-	// 0.03125
-	// 0.0625
-	// 0.125
-	// 0.25
-	// 0.5
-	// 1
-	// 2
-	// 4
-	// 8
-	// 16
-}
-
-func ExampleFloat_Cmp() {
-	inf := math.Inf(1)
-	zero := 0.0
-
-	operands := []float64{-inf, -1.2, -zero, 0, +1.2, +inf}
-
-	fmt.Println("   x     y  cmp")
-	fmt.Println("---------------")
-	for _, x64 := range operands {
-		x := big.NewFloat(x64)
-		for _, y64 := range operands {
-			y := big.NewFloat(y64)
-			fmt.Printf("%4g  %4g  %3d\n", x, y, x.Cmp(y))
-		}
-		fmt.Println()
-	}
-
-	// Output:
-	//    x     y  cmp
-	// ---------------
-	// -Inf  -Inf    0
-	// -Inf  -1.2   -1
-	// -Inf    -0   -1
-	// -Inf     0   -1
-	// -Inf   1.2   -1
-	// -Inf  +Inf   -1
-	//
-	// -1.2  -Inf    1
-	// -1.2  -1.2    0
-	// -1.2    -0   -1
-	// -1.2     0   -1
-	// -1.2   1.2   -1
-	// -1.2  +Inf   -1
-	//
-	//   -0  -Inf    1
-	//   -0  -1.2    1
-	//   -0    -0    0
-	//   -0     0    0
-	//   -0   1.2   -1
-	//   -0  +Inf   -1
-	//
-	//    0  -Inf    1
-	//    0  -1.2    1
-	//    0    -0    0
-	//    0     0    0
-	//    0   1.2   -1
-	//    0  +Inf   -1
-	//
-	//  1.2  -Inf    1
-	//  1.2  -1.2    1
-	//  1.2    -0    1
-	//  1.2     0    1
-	//  1.2   1.2    0
-	//  1.2  +Inf   -1
-	//
-	// +Inf  -Inf    1
-	// +Inf  -1.2    1
-	// +Inf    -0    1
-	// +Inf     0    1
-	// +Inf   1.2    1
-	// +Inf  +Inf    0
-}
-
-func ExampleRoundingMode() {
-	operands := []float64{2.6, 2.5, 2.1, -2.1, -2.5, -2.6}
-
-	fmt.Print("   x")
-	for mode := big.ToNearestEven; mode <= big.ToPositiveInf; mode++ {
-		fmt.Printf("  %s", mode)
-	}
-	fmt.Println()
-
-	for _, f64 := range operands {
-		fmt.Printf("%4g", f64)
-		for mode := big.ToNearestEven; mode <= big.ToPositiveInf; mode++ {
-			// sample operands above require 2 bits to represent mantissa
-			// set binary precision to 2 to round them to integer values
-			f := new(big.Float).SetPrec(2).SetMode(mode).SetFloat64(f64)
-			fmt.Printf("  %*g", len(mode.String()), f)
-		}
-		fmt.Println()
-	}
-
-	// Output:
-	//    x  ToNearestEven  ToNearestAway  ToZero  AwayFromZero  ToNegativeInf  ToPositiveInf
-	//  2.6              3              3       2             3              2              3
-	//  2.5              2              3       2             3              2              3
-	//  2.1              2              2       2             3              2              3
-	// -2.1             -2             -2      -2            -3             -3             -2
-	// -2.5             -2             -3      -2            -3             -3             -2
-	// -2.6             -3             -3      -2            -3             -3             -2
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/floatmarsh.go b/pkg/bootstrap/src/bootstrap/math/big/floatmarsh.go
deleted file mode 100644
index dcd2dbf..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/floatmarsh.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatmarsh.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatmarsh.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements encoding/decoding of Floats.
-
-package big
-
-import (
-	"encoding/binary"
-	"fmt"
-)
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const floatGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-// The Float value and all its attributes (precision,
-// rounding mode, accuracy) are marshaled.
-func (x *Float) GobEncode() ([]byte, error) {
-	if x == nil {
-		return nil, nil
-	}
-
-	// determine max. space (bytes) required for encoding
-	sz := 1 + 1 + 4 // version + mode|acc|form|neg (3+2+2+1bit) + prec
-	n := 0          // number of mantissa words
-	if x.form == finite {
-		// add space for mantissa and exponent
-		n = int((x.prec + (_W - 1)) / _W) // required mantissa length in words for given precision
-		// actual mantissa slice could be shorter (trailing 0's) or longer (unused bits):
-		// - if shorter, only encode the words present
-		// - if longer, cut off unused words when encoding in bytes
-		//   (in practice, this should never happen since rounding
-		//   takes care of it, but be safe and do it always)
-		if len(x.mant) < n {
-			n = len(x.mant)
-		}
-		// len(x.mant) >= n
-		sz += 4 + n*_S // exp + mant
-	}
-	buf := make([]byte, sz)
-
-	buf[0] = floatGobVersion
-	b := byte(x.mode&7)<<5 | byte((x.acc+1)&3)<<3 | byte(x.form&3)<<1
-	if x.neg {
-		b |= 1
-	}
-	buf[1] = b
-	binary.BigEndian.PutUint32(buf[2:], x.prec)
-
-	if x.form == finite {
-		binary.BigEndian.PutUint32(buf[6:], uint32(x.exp))
-		x.mant[len(x.mant)-n:].bytes(buf[10:]) // cut off unused trailing words
-	}
-
-	return buf, nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-// The result is rounded per the precision and rounding mode of
-// z unless z's precision is 0, in which case z is set exactly
-// to the decoded value.
-func (z *Float) GobDecode(buf []byte) error {
-	if len(buf) == 0 {
-		// Other side sent a nil or default value.
-		*z = Float{}
-		return nil
-	}
-
-	if buf[0] != floatGobVersion {
-		return fmt.Errorf("Float.GobDecode: encoding version %d not supported", buf[0])
-	}
-
-	oldPrec := z.prec
-	oldMode := z.mode
-
-	b := buf[1]
-	z.mode = RoundingMode((b >> 5) & 7)
-	z.acc = Accuracy((b>>3)&3) - 1
-	z.form = form((b >> 1) & 3)
-	z.neg = b&1 != 0
-	z.prec = binary.BigEndian.Uint32(buf[2:])
-
-	if z.form == finite {
-		z.exp = int32(binary.BigEndian.Uint32(buf[6:]))
-		z.mant = z.mant.setBytes(buf[10:])
-	}
-
-	if oldPrec != 0 {
-		z.mode = oldMode
-		z.SetPrec(uint(oldPrec))
-	}
-
-	return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// Only the Float value is marshaled (in full precision), other
-// attributes such as precision or accuracy are ignored.
-func (x *Float) MarshalText() (text []byte, err error) {
-	if x == nil {
-		return []byte("<nil>"), nil
-	}
-	var buf []byte
-	return x.Append(buf, 'g', -1), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The result is rounded per the precision and rounding mode of z.
-// If z's precision is 0, it is changed to 64 before rounding takes
-// effect.
-func (z *Float) UnmarshalText(text []byte) error {
-	// TODO(gri): get rid of the []byte/string conversion
-	_, _, err := z.Parse(string(text), 0)
-	if err != nil {
-		err = fmt.Errorf("math/big: cannot unmarshal %q into a *big.Float (%v)", text, err)
-	}
-	return err
-}
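For reference, a minimal sketch (not part of the deleted sources) of the Float marshaling API documented in floatmarsh.go above: gob preserves precision, rounding mode, and accuracy, while MarshalText keeps only the value.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"math/big"
)

func main() {
	x := new(big.Float).SetPrec(100).SetMode(big.ToZero)
	if _, ok := x.SetString("3.14159265358979323846"); !ok {
		panic("bad constant")
	}

	// Gob round trip: precision and rounding mode survive.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(x); err != nil {
		panic(err)
	}
	y := new(big.Float)
	if err := gob.NewDecoder(&buf).Decode(y); err != nil {
		panic(err)
	}
	fmt.Println(y.Prec(), y.Mode()) // 100 ToZero
	fmt.Println(y)

	// Text marshaling keeps only the value (full precision, 'g' format).
	txt, _ := x.MarshalText()
	fmt.Println(string(txt))
}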
diff --git a/pkg/bootstrap/src/bootstrap/math/big/floatmarsh_test.go b/pkg/bootstrap/src/bootstrap/math/big/floatmarsh_test.go
deleted file mode 100644
index a1c5174..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/floatmarsh_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatmarsh_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/floatmarsh_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"io"
-	"testing"
-)
-
-var floatVals = []string{
-	"0",
-	"1",
-	"0.1",
-	"2.71828",
-	"1234567890",
-	"3.14e1234",
-	"3.14e-1234",
-	"0.738957395793475734757349579759957975985497e100",
-	"0.73895739579347546656564656573475734957975995797598589749859834759476745986795497e100",
-	"inf",
-	"Inf",
-}
-
-func TestFloatGobEncoding(t *testing.T) {
-	var medium bytes.Buffer
-	enc := gob.NewEncoder(&medium)
-	dec := gob.NewDecoder(&medium)
-	for _, test := range floatVals {
-		for _, sign := range []string{"", "+", "-"} {
-			for _, prec := range []uint{0, 1, 2, 10, 53, 64, 100, 1000} {
-				for _, mode := range []RoundingMode{ToNearestEven, ToNearestAway, ToZero, AwayFromZero, ToNegativeInf, ToPositiveInf} {
-					medium.Reset() // empty buffer for each test case (in case of failures)
-					x := sign + test
-
-					var tx Float
-					_, _, err := tx.SetPrec(prec).SetMode(mode).Parse(x, 0)
-					if err != nil {
-						t.Errorf("parsing of %s (%dbits, %v) failed (invalid test case): %v", x, prec, mode, err)
-						continue
-					}
-
-					// If tx was set to prec == 0, tx.Parse(x, 0) assumes precision 64. Correct it.
-					if prec == 0 {
-						tx.SetPrec(0)
-					}
-
-					if err := enc.Encode(&tx); err != nil {
-						t.Errorf("encoding of %v (%dbits, %v) failed: %v", &tx, prec, mode, err)
-						continue
-					}
-
-					var rx Float
-					if err := dec.Decode(&rx); err != nil {
-						t.Errorf("decoding of %v (%dbits, %v) failed: %v", &tx, prec, mode, err)
-						continue
-					}
-
-					if rx.Cmp(&tx) != 0 {
-						t.Errorf("transmission of %s failed: got %s want %s", x, rx.String(), tx.String())
-						continue
-					}
-
-					if rx.Prec() != prec {
-						t.Errorf("transmission of %s's prec failed: got %d want %d", x, rx.Prec(), prec)
-					}
-
-					if rx.Mode() != mode {
-						t.Errorf("transmission of %s's mode failed: got %s want %s", x, rx.Mode(), mode)
-					}
-
-					if rx.Acc() != tx.Acc() {
-						t.Errorf("transmission of %s's accuracy failed: got %s want %s", x, rx.Acc(), tx.Acc())
-					}
-				}
-			}
-		}
-	}
-}
-
-func TestFloatCorruptGob(t *testing.T) {
-	var buf bytes.Buffer
-	tx := NewFloat(4 / 3).SetPrec(1000).SetMode(ToPositiveInf)
-	if err := gob.NewEncoder(&buf).Encode(tx); err != nil {
-		t.Fatal(err)
-	}
-	b := buf.Bytes()
-
-	var rx Float
-	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&rx); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := gob.NewDecoder(bytes.NewReader(b[:10])).Decode(&rx); err != io.ErrUnexpectedEOF {
-		t.Errorf("got %v want EOF", err)
-	}
-
-	b[1] = 0
-	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&rx); err == nil {
-		t.Fatal("got nil want version error")
-	}
-}
-
-func TestFloatJSONEncoding(t *testing.T) {
-	for _, test := range floatVals {
-		for _, sign := range []string{"", "+", "-"} {
-			for _, prec := range []uint{0, 1, 2, 10, 53, 64, 100, 1000} {
-				x := sign + test
-				var tx Float
-				_, _, err := tx.SetPrec(prec).Parse(x, 0)
-				if err != nil {
-					t.Errorf("parsing of %s (prec = %d) failed (invalid test case): %v", x, prec, err)
-					continue
-				}
-				b, err := json.Marshal(&tx)
-				if err != nil {
-					t.Errorf("marshaling of %v (prec = %d) failed: %v", &tx, prec, err)
-					continue
-				}
-				var rx Float
-				rx.SetPrec(prec)
-				if err := json.Unmarshal(b, &rx); err != nil {
-					t.Errorf("unmarshaling of %v (prec = %d) failed: %v", &tx, prec, err)
-					continue
-				}
-				if rx.Cmp(&tx) != 0 {
-					t.Errorf("JSON encoding of %v (prec = %d) failed: got %v want %v", &tx, prec, &rx, &tx)
-				}
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/ftoa.go b/pkg/bootstrap/src/bootstrap/math/big/ftoa.go
deleted file mode 100644
index 366df64..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/ftoa.go
+++ /dev/null
@@ -1,464 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ftoa.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ftoa.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements Float-to-string conversion functions.
-// It closely follows the corresponding implementation
-// in strconv/ftoa.go, but modified and simplified for Float.
-
-package big
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-)
-
-// Text converts the floating-point number x to a string according
-// to the given format and precision prec. The format is one of:
-//
-//	'e'	-d.dddde±dd, decimal exponent, at least two (possibly 0) exponent digits
-//	'E'	-d.ddddE±dd, decimal exponent, at least two (possibly 0) exponent digits
-//	'f'	-ddddd.dddd, no exponent
-//	'g'	like 'e' for large exponents, like 'f' otherwise
-//	'G'	like 'E' for large exponents, like 'f' otherwise
-//	'b'	-ddddddp±dd, binary exponent
-//	'p'	-0x.dddp±dd, binary exponent, hexadecimal mantissa
-//
-// For the binary exponent formats, the mantissa is printed in normalized form:
-//
-//	'b'	decimal integer mantissa using x.Prec() bits, or -0
-//	'p'	hexadecimal fraction with 0.5 <= 0.mantissa < 1.0, or -0
-//
-// If format is a different character, Text returns a "%" followed by the
-// unrecognized format character.
-//
-// The precision prec controls the number of digits (excluding the exponent)
-// printed by the 'e', 'E', 'f', 'g', and 'G' formats. For 'e', 'E', and 'f'
-// it is the number of digits after the decimal point. For 'g' and 'G' it is
-// the total number of digits. A negative precision selects the smallest
-// number of decimal digits necessary to identify the value x uniquely using
-// x.Prec() mantissa bits.
-// The prec value is ignored for the 'b' or 'p' format.
-func (x *Float) Text(format byte, prec int) string {
-	cap := 10 // TODO(gri) determine a good/better value here
-	if prec > 0 {
-		cap += prec
-	}
-	return string(x.Append(make([]byte, 0, cap), format, prec))
-}
-
-// String formats x like x.Text('g', 10).
-// (String must be called explicitly, Float.Format does not support %s verb.)
-func (x *Float) String() string {
-	return x.Text('g', 10)
-}
-
-// Append appends to buf the string form of the floating-point number x,
-// as generated by x.Text, and returns the extended buffer.
-func (x *Float) Append(buf []byte, fmt byte, prec int) []byte {
-	// sign
-	if x.neg {
-		buf = append(buf, '-')
-	}
-
-	// Inf
-	if x.form == inf {
-		if !x.neg {
-			buf = append(buf, '+')
-		}
-		return append(buf, "Inf"...)
-	}
-
-	// pick off easy formats
-	switch fmt {
-	case 'b':
-		return x.fmtB(buf)
-	case 'p':
-		return x.fmtP(buf)
-	}
-
-	// Algorithm:
-	//   1) convert Float to multiprecision decimal
-	//   2) round to desired precision
-	//   3) read digits out and format
-
-	// 1) convert Float to multiprecision decimal
-	var d decimal // == 0.0
-	if x.form == finite {
-		// x != 0
-		d.init(x.mant, int(x.exp)-x.mant.bitLen())
-	}
-
-	// 2) round to desired precision
-	shortest := false
-	if prec < 0 {
-		shortest = true
-		roundShortest(&d, x)
-		// Precision for shortest representation mode.
-		switch fmt {
-		case 'e', 'E':
-			prec = len(d.mant) - 1
-		case 'f':
-			prec = max(len(d.mant)-d.exp, 0)
-		case 'g', 'G':
-			prec = len(d.mant)
-		}
-	} else {
-		// round appropriately
-		switch fmt {
-		case 'e', 'E':
-			// one digit before and number of digits after decimal point
-			d.round(1 + prec)
-		case 'f':
-			// number of digits before and after decimal point
-			d.round(d.exp + prec)
-		case 'g', 'G':
-			if prec == 0 {
-				prec = 1
-			}
-			d.round(prec)
-		}
-	}
-
-	// 3) read digits out and format
-	switch fmt {
-	case 'e', 'E':
-		return fmtE(buf, fmt, prec, d)
-	case 'f':
-		return fmtF(buf, prec, d)
-	case 'g', 'G':
-		// trim trailing fractional zeros in %e format
-		eprec := prec
-		if eprec > len(d.mant) && len(d.mant) >= d.exp {
-			eprec = len(d.mant)
-		}
-		// %e is used if the exponent from the conversion
-		// is less than -4 or greater than or equal to the precision.
-		// If precision was the shortest possible, use eprec = 6 for
-		// this decision.
-		if shortest {
-			eprec = 6
-		}
-		exp := d.exp - 1
-		if exp < -4 || exp >= eprec {
-			if prec > len(d.mant) {
-				prec = len(d.mant)
-			}
-			return fmtE(buf, fmt+'e'-'g', prec-1, d)
-		}
-		if prec > d.exp {
-			prec = len(d.mant)
-		}
-		return fmtF(buf, max(prec-d.exp, 0), d)
-	}
-
-	// unknown format
-	if x.neg {
-		buf = buf[:len(buf)-1] // sign was added prematurely - remove it again
-	}
-	return append(buf, '%', fmt)
-}
-
-func roundShortest(d *decimal, x *Float) {
-	// if the mantissa is zero, the number is zero - stop now
-	if len(d.mant) == 0 {
-		return
-	}
-
-	// Approach: All numbers in the interval [x - 1/2ulp, x + 1/2ulp]
-	// (possibly exclusive) round to x for the given precision of x.
-	// Compute the lower and upper bound in decimal form and find the
-	// shortest decimal number d such that lower <= d <= upper.
-
-	// TODO(gri) strconv/ftoa.go describes a shortcut in some cases.
-	// See if we can use it (in adjusted form) here as well.
-
-	// 1) Compute normalized mantissa mant and exponent exp for x such
-	// that the lsb of mant corresponds to 1/2 ulp for the precision of
-	// x (i.e., for mant we want x.prec + 1 bits).
-	mant := nat(nil).set(x.mant)
-	exp := int(x.exp) - mant.bitLen()
-	s := mant.bitLen() - int(x.prec+1)
-	switch {
-	case s < 0:
-		mant = mant.shl(mant, uint(-s))
-	case s > 0:
-		mant = mant.shr(mant, uint(+s))
-	}
-	exp += s
-	// x = mant * 2**exp with lsb(mant) == 1/2 ulp of x.prec
-
-	// 2) Compute lower bound by subtracting 1/2 ulp.
-	var lower decimal
-	var tmp nat
-	lower.init(tmp.sub(mant, natOne), exp)
-
-	// 3) Compute upper bound by adding 1/2 ulp.
-	var upper decimal
-	upper.init(tmp.add(mant, natOne), exp)
-
-	// The upper and lower bounds are possible outputs only if
-	// the original mantissa is even, so that ToNearestEven rounding
-	// would round to the original mantissa and not the neighbors.
-	inclusive := mant[0]&2 == 0 // test bit 1 since original mantissa was shifted by 1
-
-	// Now we can figure out the minimum number of digits required.
-	// Walk along until d has distinguished itself from upper and lower.
-	for i, m := range d.mant {
-		l := lower.at(i)
-		u := upper.at(i)
-
-		// Okay to round down (truncate) if lower has a different digit
-		// or if lower is inclusive and is exactly the result of rounding
-		// down (i.e., and we have reached the final digit of lower).
-		okdown := l != m || inclusive && i+1 == len(lower.mant)
-
-		// Okay to round up if upper has a different digit and either upper
-		// is inclusive or upper is bigger than the result of rounding up.
-		okup := m != u && (inclusive || m+1 < u || i+1 < len(upper.mant))
-
-		// If it's okay to do either, then round to the nearest one.
-		// If it's okay to do only one, do it.
-		switch {
-		case okdown && okup:
-			d.round(i + 1)
-			return
-		case okdown:
-			d.roundDown(i + 1)
-			return
-		case okup:
-			d.roundUp(i + 1)
-			return
-		}
-	}
-}
-
-// %e: d.ddddde±dd
-func fmtE(buf []byte, fmt byte, prec int, d decimal) []byte {
-	// first digit
-	ch := byte('0')
-	if len(d.mant) > 0 {
-		ch = d.mant[0]
-	}
-	buf = append(buf, ch)
-
-	// .moredigits
-	if prec > 0 {
-		buf = append(buf, '.')
-		i := 1
-		m := min(len(d.mant), prec+1)
-		if i < m {
-			buf = append(buf, d.mant[i:m]...)
-			i = m
-		}
-		for ; i <= prec; i++ {
-			buf = append(buf, '0')
-		}
-	}
-
-	// e±
-	buf = append(buf, fmt)
-	var exp int64
-	if len(d.mant) > 0 {
-		exp = int64(d.exp) - 1 // -1 because first digit was printed before '.'
-	}
-	if exp < 0 {
-		ch = '-'
-		exp = -exp
-	} else {
-		ch = '+'
-	}
-	buf = append(buf, ch)
-
-	// dd...d
-	if exp < 10 {
-		buf = append(buf, '0') // at least 2 exponent digits
-	}
-	return strconv.AppendInt(buf, exp, 10)
-}
-
-// %f: ddddddd.ddddd
-func fmtF(buf []byte, prec int, d decimal) []byte {
-	// integer, padded with zeros as needed
-	if d.exp > 0 {
-		m := min(len(d.mant), d.exp)
-		buf = append(buf, d.mant[:m]...)
-		for ; m < d.exp; m++ {
-			buf = append(buf, '0')
-		}
-	} else {
-		buf = append(buf, '0')
-	}
-
-	// fraction
-	if prec > 0 {
-		buf = append(buf, '.')
-		for i := 0; i < prec; i++ {
-			buf = append(buf, d.at(d.exp+i))
-		}
-	}
-
-	return buf
-}
-
-// fmtB appends the string of x in the format mantissa "p" exponent
-// with a decimal mantissa and a binary exponent, or "0" if x is zero,
-// and returns the extended buffer.
-// The mantissa is normalized such that it uses x.Prec() bits in binary
-// representation.
-// The sign of x is ignored, and x must not be an Inf.
-func (x *Float) fmtB(buf []byte) []byte {
-	if x.form == zero {
-		return append(buf, '0')
-	}
-
-	if debugFloat && x.form != finite {
-		panic("non-finite float")
-	}
-	// x != 0
-
-	// adjust mantissa to use exactly x.prec bits
-	m := x.mant
-	switch w := uint32(len(x.mant)) * _W; {
-	case w < x.prec:
-		m = nat(nil).shl(m, uint(x.prec-w))
-	case w > x.prec:
-		m = nat(nil).shr(m, uint(w-x.prec))
-	}
-
-	buf = append(buf, m.utoa(10)...)
-	buf = append(buf, 'p')
-	e := int64(x.exp) - int64(x.prec)
-	if e >= 0 {
-		buf = append(buf, '+')
-	}
-	return strconv.AppendInt(buf, e, 10)
-}
-
-// fmtP appends the string of x in the format "0x." mantissa "p" exponent
-// with a hexadecimal mantissa and a binary exponent, or "0" if x is zero,
-// and returns the extended buffer.
-// The mantissa is normalized such that 0.5 <= 0.mantissa < 1.0.
-// The sign of x is ignored, and x must not be an Inf.
-func (x *Float) fmtP(buf []byte) []byte {
-	if x.form == zero {
-		return append(buf, '0')
-	}
-
-	if debugFloat && x.form != finite {
-		panic("non-finite float")
-	}
-	// x != 0
-
-	// remove trailing 0 words early
-	// (no need to convert to hex 0's and trim later)
-	m := x.mant
-	i := 0
-	for i < len(m) && m[i] == 0 {
-		i++
-	}
-	m = m[i:]
-
-	buf = append(buf, "0x."...)
-	buf = append(buf, bytes.TrimRight(m.utoa(16), "0")...)
-	buf = append(buf, 'p')
-	if x.exp >= 0 {
-		buf = append(buf, '+')
-	}
-	return strconv.AppendInt(buf, int64(x.exp), 10)
-}
-
-func min(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}
-
-var _ fmt.Formatter = &floatZero // *Float must implement fmt.Formatter
-
-// Format implements fmt.Formatter. It accepts all the regular
-// formats for floating-point numbers ('b', 'e', 'E', 'f', 'F',
-// 'g', 'G') as well as 'p' and 'v'. See (*Float).Text for the
-// interpretation of 'p'. The 'v' format is handled like 'g'.
-// Format also supports specification of the minimum precision
-// in digits, the output field width, as well as the format flags
-// '+' and ' ' for sign control, '0' for space or zero padding,
-// and '-' for left or right justification. See the fmt package
-// for details.
-func (x *Float) Format(s fmt.State, format rune) {
-	prec, hasPrec := s.Precision()
-	if !hasPrec {
-		prec = 6 // default precision for 'e', 'f'
-	}
-
-	switch format {
-	case 'e', 'E', 'f', 'b', 'p':
-		// nothing to do
-	case 'F':
-		// (*Float).Text doesn't support 'F'; handle like 'f'
-		format = 'f'
-	case 'v':
-		// handle like 'g'
-		format = 'g'
-		fallthrough
-	case 'g', 'G':
-		if !hasPrec {
-			prec = -1 // default precision for 'g', 'G'
-		}
-	default:
-		fmt.Fprintf(s, "%%!%c(*big.Float=%s)", format, x.String())
-		return
-	}
-	var buf []byte
-	buf = x.Append(buf, byte(format), prec)
-	if len(buf) == 0 {
-		buf = []byte("?") // should never happen, but don't crash
-	}
-	// len(buf) > 0
-
-	var sign string
-	switch {
-	case buf[0] == '-':
-		sign = "-"
-		buf = buf[1:]
-	case buf[0] == '+':
-		// +Inf
-		sign = "+"
-		if s.Flag(' ') {
-			sign = " "
-		}
-		buf = buf[1:]
-	case s.Flag('+'):
-		sign = "+"
-	case s.Flag(' '):
-		sign = " "
-	}
-
-	var padding int
-	if width, hasWidth := s.Width(); hasWidth && width > len(sign)+len(buf) {
-		padding = width - len(sign) - len(buf)
-	}
-
-	switch {
-	case s.Flag('0') && !x.IsInf():
-		// 0-padding on left
-		writeMultiple(s, sign, 1)
-		writeMultiple(s, "0", padding)
-		s.Write(buf)
-	case s.Flag('-'):
-		// padding on right
-		writeMultiple(s, sign, 1)
-		s.Write(buf)
-		writeMultiple(s, " ", padding)
-	default:
-		// padding on left
-		writeMultiple(s, " ", padding)
-		writeMultiple(s, sign, 1)
-		s.Write(buf)
-	}
-}
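For reference, a minimal sketch (not part of the deleted sources) of the formatting entry points documented in ftoa.go above: Text, String, and the fmt.Formatter support.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewFloat(1234.5678)

	fmt.Println(x.Text('e', 3))  // 1.235e+03
	fmt.Println(x.Text('f', 2))  // 1234.57
	fmt.Println(x.Text('g', -1)) // shortest decimal that identifies x at x.Prec() bits
	fmt.Println(x.Text('p', 0))  // hexadecimal mantissa, binary exponent ('p' is Float-specific)
	fmt.Println(x.String())      // equivalent to x.Text('g', 10)

	// Float implements fmt.Formatter, so sign, width, and precision flags
	// behave as they do for float64 values.
	fmt.Printf("%+10.2f|%-10.2e|\n", x, x)
}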
diff --git a/pkg/bootstrap/src/bootstrap/math/big/gcd_test.go b/pkg/bootstrap/src/bootstrap/math/big/gcd_test.go
deleted file mode 100644
index 08abc9a..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/gcd_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/gcd_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/gcd_test.go:1
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a GCD benchmark.
-// Usage: go test math/big -test.bench GCD
-
-package big
-
-import (
-	"math/rand"
-	"testing"
-)
-
-// randInt returns a pseudo-random Int in the range [1<<(size-1), (1<<size) - 1]
-func randInt(r *rand.Rand, size uint) *Int {
-	n := new(Int).Lsh(intOne, size-1)
-	x := new(Int).Rand(r, n)
-	return x.Add(x, n) // make sure result > 1<<(size-1)
-}
-
-func runGCD(b *testing.B, aSize, bSize uint) {
-	if isRaceBuilder && (aSize > 1000 || bSize > 1000) {
-		b.Skip("skipping on race builder")
-	}
-	b.Run("WithoutXY", func(b *testing.B) {
-		runGCDExt(b, aSize, bSize, false)
-	})
-	b.Run("WithXY", func(b *testing.B) {
-		runGCDExt(b, aSize, bSize, true)
-	})
-}
-
-func runGCDExt(b *testing.B, aSize, bSize uint, calcXY bool) {
-	b.StopTimer()
-	var r = rand.New(rand.NewSource(1234))
-	aa := randInt(r, aSize)
-	bb := randInt(r, bSize)
-	var x, y *Int
-	if calcXY {
-		x = new(Int)
-		y = new(Int)
-	}
-	b.StartTimer()
-	for i := 0; i < b.N; i++ {
-		new(Int).GCD(x, y, aa, bb)
-	}
-}
-
-func BenchmarkGCD10x10(b *testing.B)         { runGCD(b, 10, 10) }
-func BenchmarkGCD10x100(b *testing.B)        { runGCD(b, 10, 100) }
-func BenchmarkGCD10x1000(b *testing.B)       { runGCD(b, 10, 1000) }
-func BenchmarkGCD10x10000(b *testing.B)      { runGCD(b, 10, 10000) }
-func BenchmarkGCD10x100000(b *testing.B)     { runGCD(b, 10, 100000) }
-func BenchmarkGCD100x100(b *testing.B)       { runGCD(b, 100, 100) }
-func BenchmarkGCD100x1000(b *testing.B)      { runGCD(b, 100, 1000) }
-func BenchmarkGCD100x10000(b *testing.B)     { runGCD(b, 100, 10000) }
-func BenchmarkGCD100x100000(b *testing.B)    { runGCD(b, 100, 100000) }
-func BenchmarkGCD1000x1000(b *testing.B)     { runGCD(b, 1000, 1000) }
-func BenchmarkGCD1000x10000(b *testing.B)    { runGCD(b, 1000, 10000) }
-func BenchmarkGCD1000x100000(b *testing.B)   { runGCD(b, 1000, 100000) }
-func BenchmarkGCD10000x10000(b *testing.B)   { runGCD(b, 10000, 10000) }
-func BenchmarkGCD10000x100000(b *testing.B)  { runGCD(b, 10000, 100000) }
-func BenchmarkGCD100000x100000(b *testing.B) { runGCD(b, 100000, 100000) }
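For reference, a minimal sketch (not part of the deleted sources) of the Int.GCD API that these benchmarks drive, including the extended cofactors x and y.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	a := big.NewInt(252)
	b := big.NewInt(105)

	x, y, z := new(big.Int), new(big.Int), new(big.Int)
	z.GCD(x, y, a, b) // z = gcd(a, b); cofactors satisfy z == a*x + b*y
	fmt.Println(z)    // 21
	fmt.Println(new(big.Int).Add(new(big.Int).Mul(a, x), new(big.Int).Mul(b, y))) // 21
}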
diff --git a/pkg/bootstrap/src/bootstrap/math/big/hilbert_test.go b/pkg/bootstrap/src/bootstrap/math/big/hilbert_test.go
deleted file mode 100644
index 14c878a..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/hilbert_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/hilbert_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/hilbert_test.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// A little test program and benchmark for rational arithmetic.
-// Computes a Hilbert matrix, its inverse, multiplies them
-// and verifies that the product is the identity matrix.
-
-package big
-
-import (
-	"fmt"
-	"testing"
-)
-
-type matrix struct {
-	n, m int
-	a    []*Rat
-}
-
-func (a *matrix) at(i, j int) *Rat {
-	if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
-		panic("index out of range")
-	}
-	return a.a[i*a.m+j]
-}
-
-func (a *matrix) set(i, j int, x *Rat) {
-	if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
-		panic("index out of range")
-	}
-	a.a[i*a.m+j] = x
-}
-
-func newMatrix(n, m int) *matrix {
-	if !(0 <= n && 0 <= m) {
-		panic("illegal matrix")
-	}
-	a := new(matrix)
-	a.n = n
-	a.m = m
-	a.a = make([]*Rat, n*m)
-	return a
-}
-
-func newUnit(n int) *matrix {
-	a := newMatrix(n, n)
-	for i := 0; i < n; i++ {
-		for j := 0; j < n; j++ {
-			x := NewRat(0, 1)
-			if i == j {
-				x.SetInt64(1)
-			}
-			a.set(i, j, x)
-		}
-	}
-	return a
-}
-
-func newHilbert(n int) *matrix {
-	a := newMatrix(n, n)
-	for i := 0; i < n; i++ {
-		for j := 0; j < n; j++ {
-			a.set(i, j, NewRat(1, int64(i+j+1)))
-		}
-	}
-	return a
-}
-
-func newInverseHilbert(n int) *matrix {
-	a := newMatrix(n, n)
-	for i := 0; i < n; i++ {
-		for j := 0; j < n; j++ {
-			x1 := new(Rat).SetInt64(int64(i + j + 1))
-			x2 := new(Rat).SetInt(new(Int).Binomial(int64(n+i), int64(n-j-1)))
-			x3 := new(Rat).SetInt(new(Int).Binomial(int64(n+j), int64(n-i-1)))
-			x4 := new(Rat).SetInt(new(Int).Binomial(int64(i+j), int64(i)))
-
-			x1.Mul(x1, x2)
-			x1.Mul(x1, x3)
-			x1.Mul(x1, x4)
-			x1.Mul(x1, x4)
-
-			if (i+j)&1 != 0 {
-				x1.Neg(x1)
-			}
-
-			a.set(i, j, x1)
-		}
-	}
-	return a
-}
-
-func (a *matrix) mul(b *matrix) *matrix {
-	if a.m != b.n {
-		panic("illegal matrix multiply")
-	}
-	c := newMatrix(a.n, b.m)
-	for i := 0; i < c.n; i++ {
-		for j := 0; j < c.m; j++ {
-			x := NewRat(0, 1)
-			for k := 0; k < a.m; k++ {
-				x.Add(x, new(Rat).Mul(a.at(i, k), b.at(k, j)))
-			}
-			c.set(i, j, x)
-		}
-	}
-	return c
-}
-
-func (a *matrix) eql(b *matrix) bool {
-	if a.n != b.n || a.m != b.m {
-		return false
-	}
-	for i := 0; i < a.n; i++ {
-		for j := 0; j < a.m; j++ {
-			if a.at(i, j).Cmp(b.at(i, j)) != 0 {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func (a *matrix) String() string {
-	s := ""
-	for i := 0; i < a.n; i++ {
-		for j := 0; j < a.m; j++ {
-			s += fmt.Sprintf("\t%s", a.at(i, j))
-		}
-		s += "\n"
-	}
-	return s
-}
-
-func doHilbert(t *testing.T, n int) {
-	a := newHilbert(n)
-	b := newInverseHilbert(n)
-	I := newUnit(n)
-	ab := a.mul(b)
-	if !ab.eql(I) {
-		if t == nil {
-			panic("Hilbert failed")
-		}
-		t.Errorf("a   = %s\n", a)
-		t.Errorf("b   = %s\n", b)
-		t.Errorf("a*b = %s\n", ab)
-		t.Errorf("I   = %s\n", I)
-	}
-}
-
-func TestHilbert(t *testing.T) {
-	doHilbert(t, 10)
-}
-
-func BenchmarkHilbert(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		doHilbert(nil, 10)
-	}
-}
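For reference, a minimal sketch (not part of the deleted sources) of the exact big.Rat arithmetic that lets the Hilbert-matrix test above verify a*b == I with no rounding error.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Sum 1/1 + 1/2 + ... + 1/5 exactly.
	sum := new(big.Rat)
	for k := int64(1); k <= 5; k++ {
		sum.Add(sum, big.NewRat(1, k))
	}
	fmt.Println(sum)                // 137/60
	fmt.Println(sum.FloatString(6)) // 2.283333
}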
diff --git a/pkg/bootstrap/src/bootstrap/math/big/int.go b/pkg/bootstrap/src/bootstrap/math/big/int.go
deleted file mode 100644
index a04aec1..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/int.go
+++ /dev/null
@@ -1,943 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/int.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/int.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements signed multi-precision integers.
-
-package big
-
-import (
-	"fmt"
-	"io"
-	"math/rand"
-	"strings"
-)
-
-// An Int represents a signed multi-precision integer.
-// The zero value for an Int represents the value 0.
-type Int struct {
-	neg bool // sign
-	abs nat  // absolute value of the integer
-}
-
-var intOne = &Int{false, natOne}
-
-// Sign returns:
-//
-//	-1 if x <  0
-//	 0 if x == 0
-//	+1 if x >  0
-//
-func (x *Int) Sign() int {
-	if len(x.abs) == 0 {
-		return 0
-	}
-	if x.neg {
-		return -1
-	}
-	return 1
-}
-
-// SetInt64 sets z to x and returns z.
-func (z *Int) SetInt64(x int64) *Int {
-	neg := false
-	if x < 0 {
-		neg = true
-		x = -x
-	}
-	z.abs = z.abs.setUint64(uint64(x))
-	z.neg = neg
-	return z
-}
-
-// SetUint64 sets z to x and returns z.
-func (z *Int) SetUint64(x uint64) *Int {
-	z.abs = z.abs.setUint64(x)
-	z.neg = false
-	return z
-}
-
-// NewInt allocates and returns a new Int set to x.
-func NewInt(x int64) *Int {
-	return new(Int).SetInt64(x)
-}
-
-// Set sets z to x and returns z.
-func (z *Int) Set(x *Int) *Int {
-	if z != x {
-		z.abs = z.abs.set(x.abs)
-		z.neg = x.neg
-	}
-	return z
-}
-
-// Bits provides raw (unchecked but fast) access to x by returning its
-// absolute value as a little-endian Word slice. The result and x share
-// the same underlying array.
-// Bits is intended to support implementation of missing low-level Int
-// functionality outside this package; it should be avoided otherwise.
-func (x *Int) Bits() []Word {
-	return x.abs
-}
-
-// SetBits provides raw (unchecked but fast) access to z by setting its
-// value to abs, interpreted as a little-endian Word slice, and returning
-// z. The result and abs share the same underlying array.
-// SetBits is intended to support implementation of missing low-level Int
-// functionality outside this package; it should be avoided otherwise.
-func (z *Int) SetBits(abs []Word) *Int {
-	z.abs = nat(abs).norm()
-	z.neg = false
-	return z
-}
-
-// Abs sets z to |x| (the absolute value of x) and returns z.
-func (z *Int) Abs(x *Int) *Int {
-	z.Set(x)
-	z.neg = false
-	return z
-}
-
-// Neg sets z to -x and returns z.
-func (z *Int) Neg(x *Int) *Int {
-	z.Set(x)
-	z.neg = len(z.abs) > 0 && !z.neg // 0 has no sign
-	return z
-}
-
-// Add sets z to the sum x+y and returns z.
-func (z *Int) Add(x, y *Int) *Int {
-	neg := x.neg
-	if x.neg == y.neg {
-		// x + y == x + y
-		// (-x) + (-y) == -(x + y)
-		z.abs = z.abs.add(x.abs, y.abs)
-	} else {
-		// x + (-y) == x - y == -(y - x)
-		// (-x) + y == y - x == -(x - y)
-		if x.abs.cmp(y.abs) >= 0 {
-			z.abs = z.abs.sub(x.abs, y.abs)
-		} else {
-			neg = !neg
-			z.abs = z.abs.sub(y.abs, x.abs)
-		}
-	}
-	z.neg = len(z.abs) > 0 && neg // 0 has no sign
-	return z
-}
-
-// Sub sets z to the difference x-y and returns z.
-func (z *Int) Sub(x, y *Int) *Int {
-	neg := x.neg
-	if x.neg != y.neg {
-		// x - (-y) == x + y
-		// (-x) - y == -(x + y)
-		z.abs = z.abs.add(x.abs, y.abs)
-	} else {
-		// x - y == x - y == -(y - x)
-		// (-x) - (-y) == y - x == -(x - y)
-		if x.abs.cmp(y.abs) >= 0 {
-			z.abs = z.abs.sub(x.abs, y.abs)
-		} else {
-			neg = !neg
-			z.abs = z.abs.sub(y.abs, x.abs)
-		}
-	}
-	z.neg = len(z.abs) > 0 && neg // 0 has no sign
-	return z
-}
-
-// Mul sets z to the product x*y and returns z.
-func (z *Int) Mul(x, y *Int) *Int {
-	// x * y == x * y
-	// x * (-y) == -(x * y)
-	// (-x) * y == -(x * y)
-	// (-x) * (-y) == x * y
-	z.abs = z.abs.mul(x.abs, y.abs)
-	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
-	return z
-}
-
-// MulRange sets z to the product of all integers
-// in the range [a, b] inclusively and returns z.
-// If a > b (empty range), the result is 1.
-func (z *Int) MulRange(a, b int64) *Int {
-	switch {
-	case a > b:
-		return z.SetInt64(1) // empty range
-	case a <= 0 && b >= 0:
-		return z.SetInt64(0) // range includes 0
-	}
-	// a <= b && (b < 0 || a > 0)
-
-	neg := false
-	if a < 0 {
-		neg = (b-a)&1 == 0
-		a, b = -b, -a
-	}
-
-	z.abs = z.abs.mulRange(uint64(a), uint64(b))
-	z.neg = neg
-	return z
-}
-
-// Binomial sets z to the binomial coefficient of (n, k) and returns z.
-func (z *Int) Binomial(n, k int64) *Int {
-	// reduce the number of multiplications by reducing k
-	if n/2 < k && k <= n {
-		k = n - k // Binomial(n, k) == Binomial(n, n-k)
-	}
-	var a, b Int
-	a.MulRange(n-k+1, n)
-	b.MulRange(1, k)
-	return z.Quo(&a, &b)
-}
-
-// Quo sets z to the quotient x/y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Quo implements truncated division (like Go); see QuoRem for more details.
-func (z *Int) Quo(x, y *Int) *Int {
-	z.abs, _ = z.abs.div(nil, x.abs, y.abs)
-	z.neg = len(z.abs) > 0 && x.neg != y.neg // 0 has no sign
-	return z
-}
-
-// Rem sets z to the remainder x%y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Rem implements truncated modulus (like Go); see QuoRem for more details.
-func (z *Int) Rem(x, y *Int) *Int {
-	_, z.abs = nat(nil).div(z.abs, x.abs, y.abs)
-	z.neg = len(z.abs) > 0 && x.neg // 0 has no sign
-	return z
-}
-
-// QuoRem sets z to the quotient x/y and r to the remainder x%y
-// and returns the pair (z, r) for y != 0.
-// If y == 0, a division-by-zero run-time panic occurs.
-//
-// QuoRem implements T-division and modulus (like Go):
-//
-//	q = x/y      with the result truncated to zero
-//	r = x - y*q
-//
-// (See Daan Leijen, ``Division and Modulus for Computer Scientists''.)
-// See DivMod for Euclidean division and modulus (unlike Go).
-//
-func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
-	z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
-	z.neg, r.neg = len(z.abs) > 0 && x.neg != y.neg, len(r.abs) > 0 && x.neg // 0 has no sign
-	return z, r
-}
-
-// Div sets z to the quotient x/y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Div implements Euclidean division (unlike Go); see DivMod for more details.
-func (z *Int) Div(x, y *Int) *Int {
-	y_neg := y.neg // z may be an alias for y
-	var r Int
-	z.QuoRem(x, y, &r)
-	if r.neg {
-		if y_neg {
-			z.Add(z, intOne)
-		} else {
-			z.Sub(z, intOne)
-		}
-	}
-	return z
-}
-
-// Mod sets z to the modulus x%y for y != 0 and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-// Mod implements Euclidean modulus (unlike Go); see DivMod for more details.
-func (z *Int) Mod(x, y *Int) *Int {
-	y0 := y // save y
-	if z == y || alias(z.abs, y.abs) {
-		y0 = new(Int).Set(y)
-	}
-	var q Int
-	q.QuoRem(x, y, z)
-	if z.neg {
-		if y0.neg {
-			z.Sub(z, y0)
-		} else {
-			z.Add(z, y0)
-		}
-	}
-	return z
-}
-
-// DivMod sets z to the quotient x div y and m to the modulus x mod y
-// and returns the pair (z, m) for y != 0.
-// If y == 0, a division-by-zero run-time panic occurs.
-//
-// DivMod implements Euclidean division and modulus (unlike Go):
-//
-//	q = x div y  such that
-//	m = x - y*q  with 0 <= m < |y|
-//
-// (See Raymond T. Boute, ``The Euclidean definition of the functions
-// div and mod''. ACM Transactions on Programming Languages and
-// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
-// ACM press.)
-// See QuoRem for T-division and modulus (like Go).
-//
-func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
-	y0 := y // save y
-	if z == y || alias(z.abs, y.abs) {
-		y0 = new(Int).Set(y)
-	}
-	z.QuoRem(x, y, m)
-	if m.neg {
-		if y0.neg {
-			z.Add(z, intOne)
-			m.Sub(m, y0)
-		} else {
-			z.Sub(z, intOne)
-			m.Add(m, y0)
-		}
-	}
-	return z, m
-}
-
-// Cmp compares x and y and returns:
-//
-//   -1 if x <  y
-//    0 if x == y
-//   +1 if x >  y
-//
-func (x *Int) Cmp(y *Int) (r int) {
-	// x cmp y == x cmp y
-	// x cmp (-y) == x
-	// (-x) cmp y == y
-	// (-x) cmp (-y) == -(x cmp y)
-	switch {
-	case x.neg == y.neg:
-		r = x.abs.cmp(y.abs)
-		if x.neg {
-			r = -r
-		}
-	case x.neg:
-		r = -1
-	default:
-		r = 1
-	}
-	return
-}
-
-// low32 returns the least significant 32 bits of z.
-func low32(z nat) uint32 {
-	if len(z) == 0 {
-		return 0
-	}
-	return uint32(z[0])
-}
-
-// low64 returns the least significant 64 bits of z.
-func low64(z nat) uint64 {
-	if len(z) == 0 {
-		return 0
-	}
-	v := uint64(z[0])
-	if _W == 32 && len(z) > 1 {
-		v |= uint64(z[1]) << 32
-	}
-	return v
-}
-
-// Int64 returns the int64 representation of x.
-// If x cannot be represented in an int64, the result is undefined.
-func (x *Int) Int64() int64 {
-	v := int64(low64(x.abs))
-	if x.neg {
-		v = -v
-	}
-	return v
-}
-
-// Uint64 returns the uint64 representation of x.
-// If x cannot be represented in a uint64, the result is undefined.
-func (x *Int) Uint64() uint64 {
-	return low64(x.abs)
-}
-
-// SetString sets z to the value of s, interpreted in the given base,
-// and returns z and a boolean indicating success. The entire string
-// (not just a prefix) must be valid for success. If SetString fails,
-// the value of z is undefined but the returned value is nil.
-//
-// The base argument must be 0 or a value between 2 and MaxBase. If the base
-// is 0, the string prefix determines the actual conversion base. A prefix of
-// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
-// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
-//
-func (z *Int) SetString(s string, base int) (*Int, bool) {
-	r := strings.NewReader(s)
-	if _, _, err := z.scan(r, base); err != nil {
-		return nil, false
-	}
-	// entire string must have been consumed
-	if _, err := r.ReadByte(); err != io.EOF {
-		return nil, false
-	}
-	return z, true // err == io.EOF => scan consumed all of s
-}
-
-// SetBytes interprets buf as the bytes of a big-endian unsigned
-// integer, sets z to that value, and returns z.
-func (z *Int) SetBytes(buf []byte) *Int {
-	z.abs = z.abs.setBytes(buf)
-	z.neg = false
-	return z
-}
-
-// Bytes returns the absolute value of x as a big-endian byte slice.
-func (x *Int) Bytes() []byte {
-	buf := make([]byte, len(x.abs)*_S)
-	return buf[x.abs.bytes(buf):]
-}
-
-// BitLen returns the length of the absolute value of x in bits.
-// The bit length of 0 is 0.
-func (x *Int) BitLen() int {
-	return x.abs.bitLen()
-}
-
-// Exp sets z = x**y mod |m| (i.e. the sign of m is ignored), and returns z.
-// If y <= 0, the result is 1 mod |m|; if m == nil or m == 0, z = x**y.
-//
-// Modular exponentiation of inputs of a particular size is not a
-// cryptographically constant-time operation.
-func (z *Int) Exp(x, y, m *Int) *Int {
-	// See Knuth, volume 2, section 4.6.3.
-	var yWords nat
-	if !y.neg {
-		yWords = y.abs
-	}
-	// y >= 0
-
-	var mWords nat
-	if m != nil {
-		mWords = m.abs // m.abs may be nil for m == 0
-	}
-
-	z.abs = z.abs.expNN(x.abs, yWords, mWords)
-	z.neg = len(z.abs) > 0 && x.neg && len(yWords) > 0 && yWords[0]&1 == 1 // 0 has no sign
-	if z.neg && len(mWords) > 0 {
-		// make modulus result positive
-		z.abs = z.abs.sub(mWords, z.abs) // z == x**y mod |m| && 0 <= z < |m|
-		z.neg = false
-	}
-
-	return z
-}
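-
-// Editorial illustration (not part of the original source): a small modular
-// exponentiation, 2^10 mod 1000 = 1024 mod 1000 = 24.
-func exampleExpSketch() {
-	z := new(Int).Exp(NewInt(2), NewInt(10), NewInt(1000))
-	_ = z.Int64() == 24
-}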
-
-// GCD sets z to the greatest common divisor of a and b, which both must
-// be > 0, and returns z.
-// If x and y are not nil, GCD sets x and y such that z = a*x + b*y.
-// If either a or b is <= 0, GCD sets z = x = y = 0.
-func (z *Int) GCD(x, y, a, b *Int) *Int {
-	if a.Sign() <= 0 || b.Sign() <= 0 {
-		z.SetInt64(0)
-		if x != nil {
-			x.SetInt64(0)
-		}
-		if y != nil {
-			y.SetInt64(0)
-		}
-		return z
-	}
-	if x == nil && y == nil {
-		return z.binaryGCD(a, b)
-	}
-
-	A := new(Int).Set(a)
-	B := new(Int).Set(b)
-
-	X := new(Int)
-	Y := new(Int).SetInt64(1)
-
-	lastX := new(Int).SetInt64(1)
-	lastY := new(Int)
-
-	q := new(Int)
-	temp := new(Int)
-
-	r := new(Int)
-	for len(B.abs) > 0 {
-		q, r = q.QuoRem(A, B, r)
-
-		A, B, r = B, r, A
-
-		temp.Set(X)
-		X.Mul(X, q)
-		X.neg = !X.neg
-		X.Add(X, lastX)
-		lastX.Set(temp)
-
-		temp.Set(Y)
-		Y.Mul(Y, q)
-		Y.neg = !Y.neg
-		Y.Add(Y, lastY)
-		lastY.Set(temp)
-	}
-
-	if x != nil {
-		*x = *lastX
-	}
-
-	if y != nil {
-		*y = *lastY
-	}
-
-	*z = *A
-	return z
-}
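-
-// Editorial illustration (not part of the original source): extended GCD on
-// a = 24, b = 18 yields d = 6 together with Bezout coefficients x, y such
-// that 24*x + 18*y == 6.
-func exampleGCDSketch() {
-	a, b := NewInt(24), NewInt(18)
-	x, y := new(Int), new(Int)
-	d := new(Int).GCD(x, y, a, b)
-	sum := new(Int).Add(new(Int).Mul(x, a), new(Int).Mul(y, b))
-	_ = d.Int64() == 6 && sum.Cmp(d) == 0
-}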
-
-// binaryGCD sets z to the greatest common divisor of a and b, which both must
-// be > 0, and returns z.
-// See Knuth, The Art of Computer Programming, Vol. 2, Section 4.5.2, Algorithm B.
-func (z *Int) binaryGCD(a, b *Int) *Int {
-	u := z
-	v := new(Int)
-
-	// use one Euclidean iteration to ensure that u and v are approx. the same size
-	switch {
-	case len(a.abs) > len(b.abs):
-		// must set v before u since u may be alias for a or b (was issue #11284)
-		v.Rem(a, b)
-		u.Set(b)
-	case len(a.abs) < len(b.abs):
-		v.Rem(b, a)
-		u.Set(a)
-	default:
-		v.Set(b)
-		u.Set(a)
-	}
-	// a, b must not be used anymore (may be aliases with u)
-
-	// v might be 0 now
-	if len(v.abs) == 0 {
-		return u
-	}
-	// u > 0 && v > 0
-
-	// determine largest k such that u = u' << k, v = v' << k
-	k := u.abs.trailingZeroBits()
-	if vk := v.abs.trailingZeroBits(); vk < k {
-		k = vk
-	}
-	u.Rsh(u, k)
-	v.Rsh(v, k)
-
-	// determine t (we know that u > 0)
-	t := new(Int)
-	if u.abs[0]&1 != 0 {
-		// u is odd
-		t.Neg(v)
-	} else {
-		t.Set(u)
-	}
-
-	for len(t.abs) > 0 {
-		// reduce t
-		t.Rsh(t, t.abs.trailingZeroBits())
-		if t.neg {
-			v, t = t, v
-			v.neg = len(v.abs) > 0 && !v.neg // 0 has no sign
-		} else {
-			u, t = t, u
-		}
-		t.Sub(u, v)
-	}
-
-	return z.Lsh(u, k)
-}
-
-// Rand sets z to a pseudo-random number in [0, n) and returns z.
-func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
-	z.neg = false
-	if n.neg || len(n.abs) == 0 {
-		z.abs = nil
-		return z
-	}
-	z.abs = z.abs.random(rnd, n.abs, n.abs.bitLen())
-	return z
-}
-
-// ModInverse sets z to the multiplicative inverse of g in the ring ℤ/nℤ
-// and returns z. If g and n are not relatively prime, the result is undefined.
-func (z *Int) ModInverse(g, n *Int) *Int {
-	if g.neg {
-		// GCD expects parameters a and b to be > 0.
-		var g2 Int
-		g = g2.Mod(g, n)
-	}
-	var d Int
-	d.GCD(z, nil, g, n)
-	// x and y are such that g*x + n*y = d. Since g and n are
-	// relatively prime, d = 1. Taking that modulo n results in
-	// g*x = 1, therefore x is the inverse element.
-	if z.neg {
-		z.Add(z, n)
-	}
-	return z
-}
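-
-// Editorial illustration (not part of the original source): 3*4 = 12 == 1
-// (mod 11), so the inverse of 3 in the ring Z/11Z is 4.
-func exampleModInverseSketch() {
-	inv := new(Int).ModInverse(NewInt(3), NewInt(11))
-	_ = inv.Int64() == 4
-}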
-
-// Jacobi returns the Jacobi symbol (x/y), either +1, -1, or 0.
-// The y argument must be an odd integer.
-func Jacobi(x, y *Int) int {
-	if len(y.abs) == 0 || y.abs[0]&1 == 0 {
-		panic(fmt.Sprintf("big: invalid 2nd argument to Int.Jacobi: need odd integer but got %s", y))
-	}
-
-	// We use the formulation described in chapter 2, section 2.4,
-	// "The Yacas Book of Algorithms":
-	// http://yacas.sourceforge.net/Algo.book.pdf
-
-	var a, b, c Int
-	a.Set(x)
-	b.Set(y)
-	j := 1
-
-	if b.neg {
-		if a.neg {
-			j = -1
-		}
-		b.neg = false
-	}
-
-	for {
-		if b.Cmp(intOne) == 0 {
-			return j
-		}
-		if len(a.abs) == 0 {
-			return 0
-		}
-		a.Mod(&a, &b)
-		if len(a.abs) == 0 {
-			return 0
-		}
-		// a > 0
-
-		// handle factors of 2 in 'a'
-		s := a.abs.trailingZeroBits()
-		if s&1 != 0 {
-			bmod8 := b.abs[0] & 7
-			if bmod8 == 3 || bmod8 == 5 {
-				j = -j
-			}
-		}
-		c.Rsh(&a, s) // a = 2^s*c
-
-		// swap numerator and denominator
-		if b.abs[0]&3 == 3 && c.abs[0]&3 == 3 {
-			j = -j
-		}
-		a.Set(&b)
-		b.Set(&c)
-	}
-}
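-
-// Editorial illustration (not part of the original source): 2 is a quadratic
-// residue mod 7 (3*3 = 9 == 2 mod 7), while 3 is not, so the Jacobi symbols
-// are +1 and -1 respectively.
-func exampleJacobiSketch() {
-	_ = Jacobi(NewInt(2), NewInt(7)) == 1
-	_ = Jacobi(NewInt(3), NewInt(7)) == -1
-}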
-
-// modSqrt3Mod4Prime uses the identity
-//      (a^((p+1)/4))^2  mod p
-//   == u^(p+1)          mod p
-//   == u^2              mod p
-// (where a = u^2 is the quadratic residue) to calculate the square root of
-// any quadratic residue mod p quickly for primes p that are 3 mod 4.
-func (z *Int) modSqrt3Mod4Prime(x, p *Int) *Int {
-	z.Set(p)         // z = p
-	z.Add(z, intOne) // z = p + 1
-	z.Rsh(z, 2)      // z = (p + 1) / 4
-	z.Exp(x, z, p)   // z = x^z mod p
-	return z
-}
-
-// modSqrtTonelliShanks uses the Tonelli-Shanks algorithm to find the square
-// root of a quadratic residue modulo any prime.
-func (z *Int) modSqrtTonelliShanks(x, p *Int) *Int {
-	// Break p-1 into s*2^e such that s is odd.
-	var s Int
-	s.Sub(p, intOne)
-	e := s.abs.trailingZeroBits()
-	s.Rsh(&s, e)
-
-	// find some non-square n
-	var n Int
-	n.SetInt64(2)
-	for Jacobi(&n, p) != -1 {
-		n.Add(&n, intOne)
-	}
-
-	// Core of the Tonelli-Shanks algorithm. Follows the description in
-	// section 6 of "Square roots from 1; 24, 51, 10 to Dan Shanks" by Ezra
-	// Brown:
-	// https://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020786.02p0470a.pdf
-	var y, b, g, t Int
-	y.Add(&s, intOne)
-	y.Rsh(&y, 1)
-	y.Exp(x, &y, p)  // y = x^((s+1)/2)
-	b.Exp(x, &s, p)  // b = x^s
-	g.Exp(&n, &s, p) // g = n^s
-	r := e
-	for {
-		// find the least m such that ord_p(b) = 2^m
-		var m uint
-		t.Set(&b)
-		for t.Cmp(intOne) != 0 {
-			t.Mul(&t, &t).Mod(&t, p)
-			m++
-		}
-
-		if m == 0 {
-			return z.Set(&y)
-		}
-
-		t.SetInt64(0).SetBit(&t, int(r-m-1), 1).Exp(&g, &t, p)
-		// t = g^(2^(r-m-1)) mod p
-		g.Mul(&t, &t).Mod(&g, p) // g = g^(2^(r-m)) mod p
-		y.Mul(&y, &t).Mod(&y, p)
-		b.Mul(&b, &g).Mod(&b, p)
-		r = m
-	}
-}
-
-// ModSqrt sets z to a square root of x mod p if such a square root exists, and
-// returns z. The modulus p must be an odd prime. If x is not a square mod p,
-// ModSqrt leaves z unchanged and returns nil. This function panics if p is
-// not an odd integer.
-func (z *Int) ModSqrt(x, p *Int) *Int {
-	switch Jacobi(x, p) {
-	case -1:
-		return nil // x is not a square mod p
-	case 0:
-		return z.SetInt64(0) // sqrt(0) mod p = 0
-	case 1:
-		break
-	}
-	if x.neg || x.Cmp(p) >= 0 { // ensure 0 <= x < p
-		x = new(Int).Mod(x, p)
-	}
-
-	// Check whether p is 3 mod 4, and if so, use the faster algorithm.
-	if len(p.abs) > 0 && p.abs[0]%4 == 3 {
-		return z.modSqrt3Mod4Prime(x, p)
-	}
-	// Otherwise, use Tonelli-Shanks.
-	return z.modSqrtTonelliShanks(x, p)
-}
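-
-// Editorial illustration (not part of the original source): mod 7, both 3 and
-// 4 square to 2, so ModSqrt(2, 7) returns one of them; 3 is a non-residue, so
-// ModSqrt(3, 7) returns nil.
-func exampleModSqrtSketch() {
-	r := new(Int).ModSqrt(NewInt(2), NewInt(7)) // r is 3 or 4
-	none := new(Int).ModSqrt(NewInt(3), NewInt(7))
-	_ = r != nil && none == nil
-}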
-
-// Lsh sets z = x << n and returns z.
-func (z *Int) Lsh(x *Int, n uint) *Int {
-	z.abs = z.abs.shl(x.abs, n)
-	z.neg = x.neg
-	return z
-}
-
-// Rsh sets z = x >> n and returns z.
-func (z *Int) Rsh(x *Int, n uint) *Int {
-	if x.neg {
-		// (-x) >> s == ^(x-1) >> s == ^((x-1) >> s) == -(((x-1) >> s) + 1)
-		t := z.abs.sub(x.abs, natOne) // no underflow because |x| > 0
-		t = t.shr(t, n)
-		z.abs = t.add(t, natOne)
-		z.neg = true // z cannot be zero if x is negative
-		return z
-	}
-
-	z.abs = z.abs.shr(x.abs, n)
-	z.neg = false
-	return z
-}
-
-// Bit returns the value of the i'th bit of x. That is, it
-// returns (x>>i)&1. The bit index i must be >= 0.
-func (x *Int) Bit(i int) uint {
-	if i == 0 {
-		// optimization for common case: odd/even test of x
-		if len(x.abs) > 0 {
-			return uint(x.abs[0] & 1) // bit 0 is same for -x
-		}
-		return 0
-	}
-	if i < 0 {
-		panic("negative bit index")
-	}
-	if x.neg {
-		t := nat(nil).sub(x.abs, natOne)
-		return t.bit(uint(i)) ^ 1
-	}
-
-	return x.abs.bit(uint(i))
-}
-
-// SetBit sets z to x, with x's i'th bit set to b (0 or 1).
-// That is, if b is 1 SetBit sets z = x | (1 << i);
-// if b is 0 SetBit sets z = x &^ (1 << i). If b is not 0 or 1,
-// SetBit will panic.
-func (z *Int) SetBit(x *Int, i int, b uint) *Int {
-	if i < 0 {
-		panic("negative bit index")
-	}
-	if x.neg {
-		t := z.abs.sub(x.abs, natOne)
-		t = t.setBit(t, uint(i), b^1)
-		z.abs = t.add(t, natOne)
-		z.neg = len(z.abs) > 0
-		return z
-	}
-	z.abs = z.abs.setBit(x.abs, uint(i), b)
-	z.neg = false
-	return z
-}
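-
-// Editorial illustration (not part of the original source): Bit and SetBit
-// follow two's-complement semantics, so every bit of -1 reads as 1, and
-// setting bit 3 of 0 yields 8.
-func exampleBitSketch() {
-	_ = NewInt(-1).Bit(100) == 1                      // all bits of -1 are set
-	_ = new(Int).SetBit(NewInt(0), 3, 1).Int64() == 8 // 1 << 3
-}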
-
-// And sets z = x & y and returns z.
-func (z *Int) And(x, y *Int) *Int {
-	if x.neg == y.neg {
-		if x.neg {
-			// (-x) & (-y) == ^(x-1) & ^(y-1) == ^((x-1) | (y-1)) == -(((x-1) | (y-1)) + 1)
-			x1 := nat(nil).sub(x.abs, natOne)
-			y1 := nat(nil).sub(y.abs, natOne)
-			z.abs = z.abs.add(z.abs.or(x1, y1), natOne)
-			z.neg = true // z cannot be zero if x and y are negative
-			return z
-		}
-
-		// x & y == x & y
-		z.abs = z.abs.and(x.abs, y.abs)
-		z.neg = false
-		return z
-	}
-
-	// x.neg != y.neg
-	if x.neg {
-		x, y = y, x // & is symmetric
-	}
-
-	// x & (-y) == x & ^(y-1) == x &^ (y-1)
-	y1 := nat(nil).sub(y.abs, natOne)
-	z.abs = z.abs.andNot(x.abs, y1)
-	z.neg = false
-	return z
-}
-
-// AndNot sets z = x &^ y and returns z.
-func (z *Int) AndNot(x, y *Int) *Int {
-	if x.neg == y.neg {
-		if x.neg {
-			// (-x) &^ (-y) == ^(x-1) &^ ^(y-1) == ^(x-1) & (y-1) == (y-1) &^ (x-1)
-			x1 := nat(nil).sub(x.abs, natOne)
-			y1 := nat(nil).sub(y.abs, natOne)
-			z.abs = z.abs.andNot(y1, x1)
-			z.neg = false
-			return z
-		}
-
-		// x &^ y == x &^ y
-		z.abs = z.abs.andNot(x.abs, y.abs)
-		z.neg = false
-		return z
-	}
-
-	if x.neg {
-		// (-x) &^ y == ^(x-1) &^ y == ^(x-1) & ^y == ^((x-1) | y) == -(((x-1) | y) + 1)
-		x1 := nat(nil).sub(x.abs, natOne)
-		z.abs = z.abs.add(z.abs.or(x1, y.abs), natOne)
-		z.neg = true // z cannot be zero if x is negative and y is positive
-		return z
-	}
-
-	// x &^ (-y) == x &^ ^(y-1) == x & (y-1)
-	y1 := nat(nil).sub(y.abs, natOne)
-	z.abs = z.abs.and(x.abs, y1)
-	z.neg = false
-	return z
-}
-
-// Or sets z = x | y and returns z.
-func (z *Int) Or(x, y *Int) *Int {
-	if x.neg == y.neg {
-		if x.neg {
-			// (-x) | (-y) == ^(x-1) | ^(y-1) == ^((x-1) & (y-1)) == -(((x-1) & (y-1)) + 1)
-			x1 := nat(nil).sub(x.abs, natOne)
-			y1 := nat(nil).sub(y.abs, natOne)
-			z.abs = z.abs.add(z.abs.and(x1, y1), natOne)
-			z.neg = true // z cannot be zero if x and y are negative
-			return z
-		}
-
-		// x | y == x | y
-		z.abs = z.abs.or(x.abs, y.abs)
-		z.neg = false
-		return z
-	}
-
-	// x.neg != y.neg
-	if x.neg {
-		x, y = y, x // | is symmetric
-	}
-
-	// x | (-y) == x | ^(y-1) == ^((y-1) &^ x) == -(^((y-1) &^ x) + 1)
-	y1 := nat(nil).sub(y.abs, natOne)
-	z.abs = z.abs.add(z.abs.andNot(y1, x.abs), natOne)
-	z.neg = true // z cannot be zero if one of x or y is negative
-	return z
-}
-
-// Xor sets z = x ^ y and returns z.
-func (z *Int) Xor(x, y *Int) *Int {
-	if x.neg == y.neg {
-		if x.neg {
-			// (-x) ^ (-y) == ^(x-1) ^ ^(y-1) == (x-1) ^ (y-1)
-			x1 := nat(nil).sub(x.abs, natOne)
-			y1 := nat(nil).sub(y.abs, natOne)
-			z.abs = z.abs.xor(x1, y1)
-			z.neg = false
-			return z
-		}
-
-		// x ^ y == x ^ y
-		z.abs = z.abs.xor(x.abs, y.abs)
-		z.neg = false
-		return z
-	}
-
-	// x.neg != y.neg
-	if x.neg {
-		x, y = y, x // ^ is symmetric
-	}
-
-	// x ^ (-y) == x ^ ^(y-1) == ^(x ^ (y-1)) == -((x ^ (y-1)) + 1)
-	y1 := nat(nil).sub(y.abs, natOne)
-	z.abs = z.abs.add(z.abs.xor(x.abs, y1), natOne)
-	z.neg = true // z cannot be zero if only one of x or y is negative
-	return z
-}
-
-// Not sets z = ^x and returns z.
-func (z *Int) Not(x *Int) *Int {
-	if x.neg {
-		// ^(-x) == ^(^(x-1)) == x-1
-		z.abs = z.abs.sub(x.abs, natOne)
-		z.neg = false
-		return z
-	}
-
-	// ^x == -x-1 == -(x+1)
-	z.abs = z.abs.add(x.abs, natOne)
-	z.neg = true // z cannot be zero if x is positive
-	return z
-}
-
-// Sqrt sets z to ⌊√x⌋, the largest integer such that z² ≤ x, and returns z.
-// It panics if x is negative.
-func (z *Int) Sqrt(x *Int) *Int {
-	if x.neg {
-		panic("square root of negative number")
-	}
-	z.neg = false
-	z.abs = z.abs.sqrt(x.abs)
-	return z
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/int_test.go b/pkg/bootstrap/src/bootstrap/math/big/int_test.go
deleted file mode 100644
index 440268f..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/int_test.go
+++ /dev/null
@@ -1,1500 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/int_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/int_test.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"encoding/hex"
-	"fmt"
-	"math/rand"
-	"strings"
-	"testing"
-	"testing/quick"
-)
-
-func isNormalized(x *Int) bool {
-	if len(x.abs) == 0 {
-		return !x.neg
-	}
-	// len(x.abs) > 0
-	return x.abs[len(x.abs)-1] != 0
-}
-
-type funZZ func(z, x, y *Int) *Int
-type argZZ struct {
-	z, x, y *Int
-}
-
-var sumZZ = []argZZ{
-	{NewInt(0), NewInt(0), NewInt(0)},
-	{NewInt(1), NewInt(1), NewInt(0)},
-	{NewInt(1111111110), NewInt(123456789), NewInt(987654321)},
-	{NewInt(-1), NewInt(-1), NewInt(0)},
-	{NewInt(864197532), NewInt(-123456789), NewInt(987654321)},
-	{NewInt(-1111111110), NewInt(-123456789), NewInt(-987654321)},
-}
-
-var prodZZ = []argZZ{
-	{NewInt(0), NewInt(0), NewInt(0)},
-	{NewInt(0), NewInt(1), NewInt(0)},
-	{NewInt(1), NewInt(1), NewInt(1)},
-	{NewInt(-991 * 991), NewInt(991), NewInt(-991)},
-	// TODO(gri) add larger products
-}
-
-func TestSignZ(t *testing.T) {
-	var zero Int
-	for _, a := range sumZZ {
-		s := a.z.Sign()
-		e := a.z.Cmp(&zero)
-		if s != e {
-			t.Errorf("got %d; want %d for z = %v", s, e, a.z)
-		}
-	}
-}
-
-func TestSetZ(t *testing.T) {
-	for _, a := range sumZZ {
-		var z Int
-		z.Set(a.z)
-		if !isNormalized(&z) {
-			t.Errorf("%v is not normalized", z)
-		}
-		if (&z).Cmp(a.z) != 0 {
-			t.Errorf("got z = %v; want %v", z, a.z)
-		}
-	}
-}
-
-func TestAbsZ(t *testing.T) {
-	var zero Int
-	for _, a := range sumZZ {
-		var z Int
-		z.Abs(a.z)
-		var e Int
-		e.Set(a.z)
-		if e.Cmp(&zero) < 0 {
-			e.Sub(&zero, &e)
-		}
-		if z.Cmp(&e) != 0 {
-			t.Errorf("got z = %v; want %v", z, e)
-		}
-	}
-}
-
-func testFunZZ(t *testing.T, msg string, f funZZ, a argZZ) {
-	var z Int
-	f(&z, a.x, a.y)
-	if !isNormalized(&z) {
-		t.Errorf("%s%v is not normalized", msg, z)
-	}
-	if (&z).Cmp(a.z) != 0 {
-		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z)
-	}
-}
-
-func TestSumZZ(t *testing.T) {
-	AddZZ := func(z, x, y *Int) *Int { return z.Add(x, y) }
-	SubZZ := func(z, x, y *Int) *Int { return z.Sub(x, y) }
-	for _, a := range sumZZ {
-		arg := a
-		testFunZZ(t, "AddZZ", AddZZ, arg)
-
-		arg = argZZ{a.z, a.y, a.x}
-		testFunZZ(t, "AddZZ symmetric", AddZZ, arg)
-
-		arg = argZZ{a.x, a.z, a.y}
-		testFunZZ(t, "SubZZ", SubZZ, arg)
-
-		arg = argZZ{a.y, a.z, a.x}
-		testFunZZ(t, "SubZZ symmetric", SubZZ, arg)
-	}
-}
-
-func TestProdZZ(t *testing.T) {
-	MulZZ := func(z, x, y *Int) *Int { return z.Mul(x, y) }
-	for _, a := range prodZZ {
-		arg := a
-		testFunZZ(t, "MulZZ", MulZZ, arg)
-
-		arg = argZZ{a.z, a.y, a.x}
-		testFunZZ(t, "MulZZ symmetric", MulZZ, arg)
-	}
-}
-
-// mulBytes returns x*y via grade school multiplication. Both inputs
-// and the result are assumed to be in big-endian representation (to
-// match the semantics of Int.Bytes and Int.SetBytes).
-func mulBytes(x, y []byte) []byte {
-	z := make([]byte, len(x)+len(y))
-
-	// multiply
-	k0 := len(z) - 1
-	for j := len(y) - 1; j >= 0; j-- {
-		d := int(y[j])
-		if d != 0 {
-			k := k0
-			carry := 0
-			for i := len(x) - 1; i >= 0; i-- {
-				t := int(z[k]) + int(x[i])*d + carry
-				z[k], carry = byte(t), t>>8
-				k--
-			}
-			z[k] = byte(carry)
-		}
-		k0--
-	}
-
-	// normalize (remove leading 0's)
-	i := 0
-	for i < len(z) && z[i] == 0 {
-		i++
-	}
-
-	return z[i:]
-}
-
-func checkMul(a, b []byte) bool {
-	var x, y, z1 Int
-	x.SetBytes(a)
-	y.SetBytes(b)
-	z1.Mul(&x, &y)
-
-	var z2 Int
-	z2.SetBytes(mulBytes(a, b))
-
-	return z1.Cmp(&z2) == 0
-}
-
-func TestMul(t *testing.T) {
-	if err := quick.Check(checkMul, nil); err != nil {
-		t.Error(err)
-	}
-}
-
-var mulRangesZ = []struct {
-	a, b int64
-	prod string
-}{
-	// entirely positive ranges are covered by mulRangesN
-	{-1, 1, "0"},
-	{-2, -1, "2"},
-	{-3, -2, "6"},
-	{-3, -1, "-6"},
-	{1, 3, "6"},
-	{-10, -10, "-10"},
-	{0, -1, "1"},                      // empty range
-	{-1, -100, "1"},                   // empty range
-	{-1, 1, "0"},                      // range includes 0
-	{-1e9, 0, "0"},                    // range includes 0
-	{-1e9, 1e9, "0"},                  // range includes 0
-	{-10, -1, "3628800"},              // 10!
-	{-20, -2, "-2432902008176640000"}, // -20!
-	{-99, -1,
-		"-933262154439441526816992388562667004907159682643816214685929" +
-			"638952175999932299156089414639761565182862536979208272237582" +
-			"511852109168640000000000000000000000", // -99!
-	},
-}
-
-func TestMulRangeZ(t *testing.T) {
-	var tmp Int
-	// test entirely positive ranges
-	for i, r := range mulRangesN {
-		prod := tmp.MulRange(int64(r.a), int64(r.b)).String()
-		if prod != r.prod {
-			t.Errorf("#%da: got %s; want %s", i, prod, r.prod)
-		}
-	}
-	// test other ranges
-	for i, r := range mulRangesZ {
-		prod := tmp.MulRange(r.a, r.b).String()
-		if prod != r.prod {
-			t.Errorf("#%db: got %s; want %s", i, prod, r.prod)
-		}
-	}
-}
-
-func TestBinomial(t *testing.T) {
-	var z Int
-	for _, test := range []struct {
-		n, k int64
-		want string
-	}{
-		{0, 0, "1"},
-		{0, 1, "0"},
-		{1, 0, "1"},
-		{1, 1, "1"},
-		{1, 10, "0"},
-		{4, 0, "1"},
-		{4, 1, "4"},
-		{4, 2, "6"},
-		{4, 3, "4"},
-		{4, 4, "1"},
-		{10, 1, "10"},
-		{10, 9, "10"},
-		{10, 5, "252"},
-		{11, 5, "462"},
-		{11, 6, "462"},
-		{100, 10, "17310309456440"},
-		{100, 90, "17310309456440"},
-		{1000, 10, "263409560461970212832400"},
-		{1000, 990, "263409560461970212832400"},
-	} {
-		if got := z.Binomial(test.n, test.k).String(); got != test.want {
-			t.Errorf("Binomial(%d, %d) = %s; want %s", test.n, test.k, got, test.want)
-		}
-	}
-}
-
-func BenchmarkBinomial(b *testing.B) {
-	var z Int
-	for i := b.N - 1; i >= 0; i-- {
-		z.Binomial(1000, 990)
-	}
-}
-
-// Examples from the Go Language Spec, section "Arithmetic operators"
-var divisionSignsTests = []struct {
-	x, y int64
-	q, r int64 // T-division
-	d, m int64 // Euclidean division
-}{
-	{5, 3, 1, 2, 1, 2},
-	{-5, 3, -1, -2, -2, 1},
-	{5, -3, -1, 2, -1, 2},
-	{-5, -3, 1, -2, 2, 1},
-	{1, 2, 0, 1, 0, 1},
-	{8, 4, 2, 0, 2, 0},
-}
-
-func TestDivisionSigns(t *testing.T) {
-	for i, test := range divisionSignsTests {
-		x := NewInt(test.x)
-		y := NewInt(test.y)
-		q := NewInt(test.q)
-		r := NewInt(test.r)
-		d := NewInt(test.d)
-		m := NewInt(test.m)
-
-		q1 := new(Int).Quo(x, y)
-		r1 := new(Int).Rem(x, y)
-		if !isNormalized(q1) {
-			t.Errorf("#%d Quo: %v is not normalized", i, *q1)
-		}
-		if !isNormalized(r1) {
-			t.Errorf("#%d Rem: %v is not normalized", i, *r1)
-		}
-		if q1.Cmp(q) != 0 || r1.Cmp(r) != 0 {
-			t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q1, r1, q, r)
-		}
-
-		q2, r2 := new(Int).QuoRem(x, y, new(Int))
-		if !isNormalized(q2) {
-			t.Errorf("#%d Quo: %v is not normalized", i, *q2)
-		}
-		if !isNormalized(r2) {
-			t.Errorf("#%d Rem: %v is not normalized", i, *r2)
-		}
-		if q2.Cmp(q) != 0 || r2.Cmp(r) != 0 {
-			t.Errorf("#%d QuoRem: got (%s, %s), want (%s, %s)", i, q2, r2, q, r)
-		}
-
-		d1 := new(Int).Div(x, y)
-		m1 := new(Int).Mod(x, y)
-		if !isNormalized(d1) {
-			t.Errorf("#%d Div: %v is not normalized", i, *d1)
-		}
-		if !isNormalized(m1) {
-			t.Errorf("#%d Mod: %v is not normalized", i, *m1)
-		}
-		if d1.Cmp(d) != 0 || m1.Cmp(m) != 0 {
-			t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d1, m1, d, m)
-		}
-
-		d2, m2 := new(Int).DivMod(x, y, new(Int))
-		if !isNormalized(d2) {
-			t.Errorf("#%d Div: %v is not normalized", i, *d2)
-		}
-		if !isNormalized(m2) {
-			t.Errorf("#%d Mod: %v is not normalized", i, *m2)
-		}
-		if d2.Cmp(d) != 0 || m2.Cmp(m) != 0 {
-			t.Errorf("#%d DivMod: got (%s, %s), want (%s, %s)", i, d2, m2, d, m)
-		}
-	}
-}
-
-func norm(x nat) nat {
-	i := len(x)
-	for i > 0 && x[i-1] == 0 {
-		i--
-	}
-	return x[:i]
-}
-
-func TestBits(t *testing.T) {
-	for _, test := range []nat{
-		nil,
-		{0},
-		{1},
-		{0, 1, 2, 3, 4},
-		{4, 3, 2, 1, 0},
-		{4, 3, 2, 1, 0, 0, 0, 0},
-	} {
-		var z Int
-		z.neg = true
-		got := z.SetBits(test)
-		want := norm(test)
-		if got.abs.cmp(want) != 0 {
-			t.Errorf("SetBits(%v) = %v; want %v", test, got.abs, want)
-		}
-
-		if got.neg {
-			t.Errorf("SetBits(%v): got negative result", test)
-		}
-
-		bits := nat(z.Bits())
-		if bits.cmp(want) != 0 {
-			t.Errorf("%v.Bits() = %v; want %v", z.abs, bits, want)
-		}
-	}
-}
-
-func checkSetBytes(b []byte) bool {
-	hex1 := hex.EncodeToString(new(Int).SetBytes(b).Bytes())
-	hex2 := hex.EncodeToString(b)
-
-	for len(hex1) < len(hex2) {
-		hex1 = "0" + hex1
-	}
-
-	for len(hex1) > len(hex2) {
-		hex2 = "0" + hex2
-	}
-
-	return hex1 == hex2
-}
-
-func TestSetBytes(t *testing.T) {
-	if err := quick.Check(checkSetBytes, nil); err != nil {
-		t.Error(err)
-	}
-}
-
-func checkBytes(b []byte) bool {
-	// trim leading zero bytes since Bytes() won't return them
-	// (was issue 12231)
-	for len(b) > 0 && b[0] == 0 {
-		b = b[1:]
-	}
-	b2 := new(Int).SetBytes(b).Bytes()
-	return bytes.Equal(b, b2)
-}
-
-func TestBytes(t *testing.T) {
-	if err := quick.Check(checkBytes, nil); err != nil {
-		t.Error(err)
-	}
-}
-
-func checkQuo(x, y []byte) bool {
-	u := new(Int).SetBytes(x)
-	v := new(Int).SetBytes(y)
-
-	if len(v.abs) == 0 {
-		return true
-	}
-
-	r := new(Int)
-	q, r := new(Int).QuoRem(u, v, r)
-
-	if r.Cmp(v) >= 0 {
-		return false
-	}
-
-	uprime := new(Int).Set(q)
-	uprime.Mul(uprime, v)
-	uprime.Add(uprime, r)
-
-	return uprime.Cmp(u) == 0
-}
-
-var quoTests = []struct {
-	x, y string
-	q, r string
-}{
-	{
-		"476217953993950760840509444250624797097991362735329973741718102894495832294430498335824897858659711275234906400899559094370964723884706254265559534144986498357",
-		"9353930466774385905609975137998169297361893554149986716853295022578535724979483772383667534691121982974895531435241089241440253066816724367338287092081996",
-		"50911",
-		"1",
-	},
-	{
-		"11510768301994997771168",
-		"1328165573307167369775",
-		"8",
-		"885443715537658812968",
-	},
-}
-
-func TestQuo(t *testing.T) {
-	if err := quick.Check(checkQuo, nil); err != nil {
-		t.Error(err)
-	}
-
-	for i, test := range quoTests {
-		x, _ := new(Int).SetString(test.x, 10)
-		y, _ := new(Int).SetString(test.y, 10)
-		expectedQ, _ := new(Int).SetString(test.q, 10)
-		expectedR, _ := new(Int).SetString(test.r, 10)
-
-		r := new(Int)
-		q, r := new(Int).QuoRem(x, y, r)
-
-		if q.Cmp(expectedQ) != 0 || r.Cmp(expectedR) != 0 {
-			t.Errorf("#%d got (%s, %s) want (%s, %s)", i, q, r, expectedQ, expectedR)
-		}
-	}
-}
-
-func TestQuoStepD6(t *testing.T) {
-	// See Knuth, Volume 2, section 4.3.1, exercise 21. This code exercises
-	// a code path that is taken only about 1 in 10^19 cases.
-
-	u := &Int{false, nat{0, 0, 1 + 1<<(_W-1), _M ^ (1 << (_W - 1))}}
-	v := &Int{false, nat{5, 2 + 1<<(_W-1), 1 << (_W - 1)}}
-
-	r := new(Int)
-	q, r := new(Int).QuoRem(u, v, r)
-	const expectedQ64 = "18446744073709551613"
-	const expectedR64 = "3138550867693340382088035895064302439801311770021610913807"
-	const expectedQ32 = "4294967293"
-	const expectedR32 = "39614081266355540837921718287"
-	if q.String() != expectedQ64 && q.String() != expectedQ32 ||
-		r.String() != expectedR64 && r.String() != expectedR32 {
-		t.Errorf("got (%s, %s) want (%s, %s) or (%s, %s)", q, r, expectedQ64, expectedR64, expectedQ32, expectedR32)
-	}
-}
-
-func BenchmarkQuoRem(b *testing.B) {
-	x, _ := new(Int).SetString("153980389784927331788354528594524332344709972855165340650588877572729725338415474372475094155672066328274535240275856844648695200875763869073572078279316458648124537905600131008790701752441155668003033945258023841165089852359980273279085783159654751552359397986180318708491098942831252291841441726305535546071", 0)
-	y, _ := new(Int).SetString("7746362281539803897849273317883545285945243323447099728551653406505888775727297253384154743724750941556720663282745352402758568446486952008757638690735720782793164586481245379056001310087907017524411556680030339452580238411650898523599802732790857831596547515523593979861803187084910989428312522918414417263055355460715745539358014631136245887418412633787074173796862711588221766398229333338511838891484974940633857861775630560092874987828057333663969469797013996401149696897591265769095952887917296740109742927689053276850469671231961384715398038978492733178835452859452433234470997285516534065058887757272972533841547437247509415567206632827453524027585684464869520087576386907357207827931645864812453790560013100879070175244115566800303394525802384116508985235998027327908578315965475155235939798618031870849109894283125229184144172630553554607112725169432413343763989564437170644270643461665184965150423819594083121075825", 0)
-	q := new(Int)
-	r := new(Int)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		q.QuoRem(y, x, r)
-	}
-}
-
-var bitLenTests = []struct {
-	in  string
-	out int
-}{
-	{"-1", 1},
-	{"0", 0},
-	{"1", 1},
-	{"2", 2},
-	{"4", 3},
-	{"0xabc", 12},
-	{"0x8000", 16},
-	{"0x80000000", 32},
-	{"0x800000000000", 48},
-	{"0x8000000000000000", 64},
-	{"0x80000000000000000000", 80},
-	{"-0x4000000000000000000000", 87},
-}
-
-func TestBitLen(t *testing.T) {
-	for i, test := range bitLenTests {
-		x, ok := new(Int).SetString(test.in, 0)
-		if !ok {
-			t.Errorf("#%d test input invalid: %s", i, test.in)
-			continue
-		}
-
-		if n := x.BitLen(); n != test.out {
-			t.Errorf("#%d got %d want %d", i, n, test.out)
-		}
-	}
-}
-
-var expTests = []struct {
-	x, y, m string
-	out     string
-}{
-	// y <= 0
-	{"0", "0", "", "1"},
-	{"1", "0", "", "1"},
-	{"-10", "0", "", "1"},
-	{"1234", "-1", "", "1"},
-
-	// m == 1
-	{"0", "0", "1", "0"},
-	{"1", "0", "1", "0"},
-	{"-10", "0", "1", "0"},
-	{"1234", "-1", "1", "0"},
-
-	// misc
-	{"5", "1", "3", "2"},
-	{"5", "-7", "", "1"},
-	{"-5", "-7", "", "1"},
-	{"5", "0", "", "1"},
-	{"-5", "0", "", "1"},
-	{"5", "1", "", "5"},
-	{"-5", "1", "", "-5"},
-	{"-5", "1", "7", "2"},
-	{"-2", "3", "2", "0"},
-	{"5", "2", "", "25"},
-	{"1", "65537", "2", "1"},
-	{"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
-	{"0x8000000000000000", "2", "6719", "4944"},
-	{"0x8000000000000000", "3", "6719", "5447"},
-	{"0x8000000000000000", "1000", "6719", "1603"},
-	{"0x8000000000000000", "1000000", "6719", "3199"},
-	{"0x8000000000000000", "-1000000", "6719", "1"},
-
-	{"0xffffffffffffffffffffffffffffffff", "0x12345678123456781234567812345678123456789", "0x01112222333344445555666677778889", "0x36168FA1DB3AAE6C8CE647E137F97A"},
-
-	{
-		"2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
-		"298472983472983471903246121093472394872319615612417471234712061",
-		"29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
-		"23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
-	},
-	// test case for issue 8822
-	{
-		"11001289118363089646017359372117963499250546375269047542777928006103246876688756735760905680604646624353196869572752623285140408755420374049317646428185270079555372763503115646054602867593662923894140940837479507194934267532831694565516466765025434902348314525627418515646588160955862839022051353653052947073136084780742729727874803457643848197499548297570026926927502505634297079527299004267769780768565695459945235586892627059178884998772989397505061206395455591503771677500931269477503508150175717121828518985901959919560700853226255420793148986854391552859459511723547532575574664944815966793196961286234040892865",
-		"0xB08FFB20760FFED58FADA86DFEF71AD72AA0FA763219618FE022C197E54708BB1191C66470250FCE8879487507CEE41381CA4D932F81C2B3F1AB20B539D50DCD",
-		"0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73",
-		"21484252197776302499639938883777710321993113097987201050501182909581359357618579566746556372589385361683610524730509041328855066514963385522570894839035884713051640171474186548713546686476761306436434146475140156284389181808675016576845833340494848283681088886584219750554408060556769486628029028720727393293111678826356480455433909233520504112074401376133077150471237549474149190242010469539006449596611576612573955754349042329130631128234637924786466585703488460540228477440853493392086251021228087076124706778899179648655221663765993962724699135217212118535057766739392069738618682722216712319320435674779146070442",
-	},
-	{
-		"-0x1BCE04427D8032319A89E5C4136456671AC620883F2C4139E57F91307C485AD2D6204F4F87A58262652DB5DBBAC72B0613E51B835E7153BEC6068F5C8D696B74DBD18FEC316AEF73985CF0475663208EB46B4F17DD9DA55367B03323E5491A70997B90C059FB34809E6EE55BCFBD5F2F52233BFE62E6AA9E4E26A1D4C2439883D14F2633D55D8AA66A1ACD5595E778AC3A280517F1157989E70C1A437B849F1877B779CC3CDDEDE2DAA6594A6C66D181A00A5F777EE60596D8773998F6E988DEAE4CCA60E4DDCF9590543C89F74F603259FCAD71660D30294FBBE6490300F78A9D63FA660DC9417B8B9DDA28BEB3977B621B988E23D4D954F322C3540541BC649ABD504C50FADFD9F0987D58A2BF689313A285E773FF02899A6EF887D1D4A0D2",
-		"0xB08FFB20760FFED58FADA86DFEF71AD72AA0FA763219618FE022C197E54708BB1191C66470250FCE8879487507CEE41381CA4D932F81C2B3F1AB20B539D50DCD",
-		"0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73",
-		"21484252197776302499639938883777710321993113097987201050501182909581359357618579566746556372589385361683610524730509041328855066514963385522570894839035884713051640171474186548713546686476761306436434146475140156284389181808675016576845833340494848283681088886584219750554408060556769486628029028720727393293111678826356480455433909233520504112074401376133077150471237549474149190242010469539006449596611576612573955754349042329130631128234637924786466585703488460540228477440853493392086251021228087076124706778899179648655221663765993962724699135217212118535057766739392069738618682722216712319320435674779146070442",
-	},
-
-	// test cases for issue 13907
-	{"0xffffffff00000001", "0xffffffff00000001", "0xffffffff00000001", "0"},
-	{"0xffffffffffffffff00000001", "0xffffffffffffffff00000001", "0xffffffffffffffff00000001", "0"},
-	{"0xffffffffffffffffffffffff00000001", "0xffffffffffffffffffffffff00000001", "0xffffffffffffffffffffffff00000001", "0"},
-	{"0xffffffffffffffffffffffffffffffff00000001", "0xffffffffffffffffffffffffffffffff00000001", "0xffffffffffffffffffffffffffffffff00000001", "0"},
-
-	{
-		"2",
-		"0xB08FFB20760FFED58FADA86DFEF71AD72AA0FA763219618FE022C197E54708BB1191C66470250FCE8879487507CEE41381CA4D932F81C2B3F1AB20B539D50DCD",
-		"0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73", // odd
-		"0x6AADD3E3E424D5B713FCAA8D8945B1E055166132038C57BBD2D51C833F0C5EA2007A2324CE514F8E8C2F008A2F36F44005A4039CB55830986F734C93DAF0EB4BAB54A6A8C7081864F44346E9BC6F0A3EB9F2C0146A00C6A05187D0C101E1F2D038CDB70CB5E9E05A2D188AB6CBB46286624D4415E7D4DBFAD3BCC6009D915C406EED38F468B940F41E6BEDC0430DD78E6F19A7DA3A27498A4181E24D738B0072D8F6ADB8C9809A5B033A09785814FD9919F6EF9F83EEA519BEC593855C4C10CBEEC582D4AE0792158823B0275E6AEC35242740468FAF3D5C60FD1E376362B6322F78B7ED0CA1C5BBCD2B49734A56C0967A1D01A100932C837B91D592CE08ABFF",
-	},
-	{
-		"2",
-		"0xB08FFB20760FFED58FADA86DFEF71AD72AA0FA763219618FE022C197E54708BB1191C66470250FCE8879487507CEE41381CA4D932F81C2B3F1AB20B539D50DCD",
-		"0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF72", // even
-		"0x7858794B5897C29F4ED0B40913416AB6C48588484E6A45F2ED3E26C941D878E923575AAC434EE2750E6439A6976F9BB4D64CEDB2A53CE8D04DD48CADCDF8E46F22747C6B81C6CEA86C0D873FBF7CEF262BAAC43A522BD7F32F3CDAC52B9337C77B3DCFB3DB3EDD80476331E82F4B1DF8EFDC1220C92656DFC9197BDC1877804E28D928A2A284B8DED506CBA304435C9D0133C246C98A7D890D1DE60CBC53A024361DA83A9B8775019083D22AC6820ED7C3C68F8E801DD4EC779EE0A05C6EB682EF9840D285B838369BA7E148FA27691D524FAEAF7C6ECE2A4B99A294B9F2C241857B5B90CC8BFFCFCF18DFA7D676131D5CD3855A5A3E8EBFA0CDFADB4D198B4A",
-	},
-}
-
-func TestExp(t *testing.T) {
-	for i, test := range expTests {
-		x, ok1 := new(Int).SetString(test.x, 0)
-		y, ok2 := new(Int).SetString(test.y, 0)
-		out, ok3 := new(Int).SetString(test.out, 0)
-
-		var ok4 bool
-		var m *Int
-
-		if len(test.m) == 0 {
-			m, ok4 = nil, true
-		} else {
-			m, ok4 = new(Int).SetString(test.m, 0)
-		}
-
-		if !ok1 || !ok2 || !ok3 || !ok4 {
-			t.Errorf("#%d: error in input", i)
-			continue
-		}
-
-		z1 := new(Int).Exp(x, y, m)
-		if !isNormalized(z1) {
-			t.Errorf("#%d: %v is not normalized", i, *z1)
-		}
-		if z1.Cmp(out) != 0 {
-			t.Errorf("#%d: got %x want %x", i, z1, out)
-		}
-
-		if m == nil {
-			// The result should be the same as for m == 0;
-			// specifically, there should be no div-zero panic.
-			m = &Int{abs: nat{}} // m != nil && len(m.abs) == 0
-			z2 := new(Int).Exp(x, y, m)
-			if z2.Cmp(z1) != 0 {
-				t.Errorf("#%d: got %x want %x", i, z2, z1)
-			}
-		}
-	}
-}
-
-func BenchmarkExp(b *testing.B) {
-	x, _ := new(Int).SetString("11001289118363089646017359372117963499250546375269047542777928006103246876688756735760905680604646624353196869572752623285140408755420374049317646428185270079555372763503115646054602867593662923894140940837479507194934267532831694565516466765025434902348314525627418515646588160955862839022051353653052947073136084780742729727874803457643848197499548297570026926927502505634297079527299004267769780768565695459945235586892627059178884998772989397505061206395455591503771677500931269477503508150175717121828518985901959919560700853226255420793148986854391552859459511723547532575574664944815966793196961286234040892865", 0)
-	y, _ := new(Int).SetString("0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF72", 0)
-	n, _ := new(Int).SetString("0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73", 0)
-	out := new(Int)
-	for i := 0; i < b.N; i++ {
-		out.Exp(x, y, n)
-	}
-}
-
-func BenchmarkExp2(b *testing.B) {
-	x, _ := new(Int).SetString("2", 0)
-	y, _ := new(Int).SetString("0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF72", 0)
-	n, _ := new(Int).SetString("0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73", 0)
-	out := new(Int)
-	for i := 0; i < b.N; i++ {
-		out.Exp(x, y, n)
-	}
-}
-
-func checkGcd(aBytes, bBytes []byte) bool {
-	x := new(Int)
-	y := new(Int)
-	a := new(Int).SetBytes(aBytes)
-	b := new(Int).SetBytes(bBytes)
-
-	d := new(Int).GCD(x, y, a, b)
-	x.Mul(x, a)
-	y.Mul(y, b)
-	x.Add(x, y)
-
-	return x.Cmp(d) == 0
-}
-
-var gcdTests = []struct {
-	d, x, y, a, b string
-}{
-	// a <= 0 || b <= 0
-	{"0", "0", "0", "0", "0"},
-	{"0", "0", "0", "0", "7"},
-	{"0", "0", "0", "11", "0"},
-	{"0", "0", "0", "-77", "35"},
-	{"0", "0", "0", "64515", "-24310"},
-	{"0", "0", "0", "-64515", "-24310"},
-
-	{"1", "-9", "47", "120", "23"},
-	{"7", "1", "-2", "77", "35"},
-	{"935", "-3", "8", "64515", "24310"},
-	{"935000000000000000", "-3", "8", "64515000000000000000", "24310000000000000000"},
-	{"1", "-221", "22059940471369027483332068679400581064239780177629666810348940098015901108344", "98920366548084643601728869055592650835572950932266967461790948584315647051443", "991"},
-
-	// test early exit (after one Euclidean iteration) in binaryGCD
-	{"1", "", "", "1", "98920366548084643601728869055592650835572950932266967461790948584315647051443"},
-}
-
-func testGcd(t *testing.T, d, x, y, a, b *Int) {
-	var X *Int
-	if x != nil {
-		X = new(Int)
-	}
-	var Y *Int
-	if y != nil {
-		Y = new(Int)
-	}
-
-	D := new(Int).GCD(X, Y, a, b)
-	if D.Cmp(d) != 0 {
-		t.Errorf("GCD(%s, %s): got d = %s, want %s", a, b, D, d)
-	}
-	if x != nil && X.Cmp(x) != 0 {
-		t.Errorf("GCD(%s, %s): got x = %s, want %s", a, b, X, x)
-	}
-	if y != nil && Y.Cmp(y) != 0 {
-		t.Errorf("GCD(%s, %s): got y = %s, want %s", a, b, Y, y)
-	}
-
-	// binaryGCD requires a > 0 && b > 0
-	if a.Sign() <= 0 || b.Sign() <= 0 {
-		return
-	}
-
-	D.binaryGCD(a, b)
-	if D.Cmp(d) != 0 {
-		t.Errorf("binaryGcd(%s, %s): got d = %s, want %s", a, b, D, d)
-	}
-
-	// check results in presence of aliasing (issue #11284)
-	a2 := new(Int).Set(a)
-	b2 := new(Int).Set(b)
-	a2.binaryGCD(a2, b2) // result is same as 1st argument
-	if a2.Cmp(d) != 0 {
-		t.Errorf("binaryGcd(%s, %s): got d = %s, want %s", a, b, a2, d)
-	}
-
-	a2 = new(Int).Set(a)
-	b2 = new(Int).Set(b)
-	b2.binaryGCD(a2, b2) // result is same as 2nd argument
-	if b2.Cmp(d) != 0 {
-		t.Errorf("binaryGcd(%s, %s): got d = %s, want %s", a, b, b2, d)
-	}
-}
-
-func TestGcd(t *testing.T) {
-	for _, test := range gcdTests {
-		d, _ := new(Int).SetString(test.d, 0)
-		x, _ := new(Int).SetString(test.x, 0)
-		y, _ := new(Int).SetString(test.y, 0)
-		a, _ := new(Int).SetString(test.a, 0)
-		b, _ := new(Int).SetString(test.b, 0)
-
-		testGcd(t, d, nil, nil, a, b)
-		testGcd(t, d, x, nil, a, b)
-		testGcd(t, d, nil, y, a, b)
-		testGcd(t, d, x, y, a, b)
-	}
-
-	if err := quick.Check(checkGcd, nil); err != nil {
-		t.Error(err)
-	}
-}
-
-type intShiftTest struct {
-	in    string
-	shift uint
-	out   string
-}
-
-var rshTests = []intShiftTest{
-	{"0", 0, "0"},
-	{"-0", 0, "0"},
-	{"0", 1, "0"},
-	{"0", 2, "0"},
-	{"1", 0, "1"},
-	{"1", 1, "0"},
-	{"1", 2, "0"},
-	{"2", 0, "2"},
-	{"2", 1, "1"},
-	{"-1", 0, "-1"},
-	{"-1", 1, "-1"},
-	{"-1", 10, "-1"},
-	{"-100", 2, "-25"},
-	{"-100", 3, "-13"},
-	{"-100", 100, "-1"},
-	{"4294967296", 0, "4294967296"},
-	{"4294967296", 1, "2147483648"},
-	{"4294967296", 2, "1073741824"},
-	{"18446744073709551616", 0, "18446744073709551616"},
-	{"18446744073709551616", 1, "9223372036854775808"},
-	{"18446744073709551616", 2, "4611686018427387904"},
-	{"18446744073709551616", 64, "1"},
-	{"340282366920938463463374607431768211456", 64, "18446744073709551616"},
-	{"340282366920938463463374607431768211456", 128, "1"},
-}
-
-func TestRsh(t *testing.T) {
-	for i, test := range rshTests {
-		in, _ := new(Int).SetString(test.in, 10)
-		expected, _ := new(Int).SetString(test.out, 10)
-		out := new(Int).Rsh(in, test.shift)
-
-		if !isNormalized(out) {
-			t.Errorf("#%d: %v is not normalized", i, *out)
-		}
-		if out.Cmp(expected) != 0 {
-			t.Errorf("#%d: got %s want %s", i, out, expected)
-		}
-	}
-}
-
-func TestRshSelf(t *testing.T) {
-	for i, test := range rshTests {
-		z, _ := new(Int).SetString(test.in, 10)
-		expected, _ := new(Int).SetString(test.out, 10)
-		z.Rsh(z, test.shift)
-
-		if !isNormalized(z) {
-			t.Errorf("#%d: %v is not normalized", i, *z)
-		}
-		if z.Cmp(expected) != 0 {
-			t.Errorf("#%d: got %s want %s", i, z, expected)
-		}
-	}
-}
-
-var lshTests = []intShiftTest{
-	{"0", 0, "0"},
-	{"0", 1, "0"},
-	{"0", 2, "0"},
-	{"1", 0, "1"},
-	{"1", 1, "2"},
-	{"1", 2, "4"},
-	{"2", 0, "2"},
-	{"2", 1, "4"},
-	{"2", 2, "8"},
-	{"-87", 1, "-174"},
-	{"4294967296", 0, "4294967296"},
-	{"4294967296", 1, "8589934592"},
-	{"4294967296", 2, "17179869184"},
-	{"18446744073709551616", 0, "18446744073709551616"},
-	{"9223372036854775808", 1, "18446744073709551616"},
-	{"4611686018427387904", 2, "18446744073709551616"},
-	{"1", 64, "18446744073709551616"},
-	{"18446744073709551616", 64, "340282366920938463463374607431768211456"},
-	{"1", 128, "340282366920938463463374607431768211456"},
-}
-
-func TestLsh(t *testing.T) {
-	for i, test := range lshTests {
-		in, _ := new(Int).SetString(test.in, 10)
-		expected, _ := new(Int).SetString(test.out, 10)
-		out := new(Int).Lsh(in, test.shift)
-
-		if !isNormalized(out) {
-			t.Errorf("#%d: %v is not normalized", i, *out)
-		}
-		if out.Cmp(expected) != 0 {
-			t.Errorf("#%d: got %s want %s", i, out, expected)
-		}
-	}
-}
-
-func TestLshSelf(t *testing.T) {
-	for i, test := range lshTests {
-		z, _ := new(Int).SetString(test.in, 10)
-		expected, _ := new(Int).SetString(test.out, 10)
-		z.Lsh(z, test.shift)
-
-		if !isNormalized(z) {
-			t.Errorf("#%d: %v is not normalized", i, *z)
-		}
-		if z.Cmp(expected) != 0 {
-			t.Errorf("#%d: got %s want %s", i, z, expected)
-		}
-	}
-}
-
-func TestLshRsh(t *testing.T) {
-	for i, test := range rshTests {
-		in, _ := new(Int).SetString(test.in, 10)
-		out := new(Int).Lsh(in, test.shift)
-		out = out.Rsh(out, test.shift)
-
-		if !isNormalized(out) {
-			t.Errorf("#%d: %v is not normalized", i, *out)
-		}
-		if in.Cmp(out) != 0 {
-			t.Errorf("#%d: got %s want %s", i, out, in)
-		}
-	}
-	for i, test := range lshTests {
-		in, _ := new(Int).SetString(test.in, 10)
-		out := new(Int).Lsh(in, test.shift)
-		out.Rsh(out, test.shift)
-
-		if !isNormalized(out) {
-			t.Errorf("#%d: %v is not normalized", i, *out)
-		}
-		if in.Cmp(out) != 0 {
-			t.Errorf("#%d: got %s want %s", i, out, in)
-		}
-	}
-}
-
-var int64Tests = []int64{
-	0,
-	1,
-	-1,
-	4294967295,
-	-4294967295,
-	4294967296,
-	-4294967296,
-	9223372036854775807,
-	-9223372036854775807,
-	-9223372036854775808,
-}
-
-func TestInt64(t *testing.T) {
-	for i, testVal := range int64Tests {
-		in := NewInt(testVal)
-		out := in.Int64()
-
-		if out != testVal {
-			t.Errorf("#%d got %d want %d", i, out, testVal)
-		}
-	}
-}
-
-var uint64Tests = []uint64{
-	0,
-	1,
-	4294967295,
-	4294967296,
-	8589934591,
-	8589934592,
-	9223372036854775807,
-	9223372036854775808,
-	18446744073709551615, // 1<<64 - 1
-}
-
-func TestUint64(t *testing.T) {
-	in := new(Int)
-	for i, testVal := range uint64Tests {
-		in.SetUint64(testVal)
-		out := in.Uint64()
-
-		if out != testVal {
-			t.Errorf("#%d got %d want %d", i, out, testVal)
-		}
-
-		str := fmt.Sprint(testVal)
-		strOut := in.String()
-		if strOut != str {
-			t.Errorf("#%d.String got %s want %s", i, strOut, str)
-		}
-	}
-}
-
-var bitwiseTests = []struct {
-	x, y                 string
-	and, or, xor, andNot string
-}{
-	{"0x00", "0x00", "0x00", "0x00", "0x00", "0x00"},
-	{"0x00", "0x01", "0x00", "0x01", "0x01", "0x00"},
-	{"0x01", "0x00", "0x00", "0x01", "0x01", "0x01"},
-	{"-0x01", "0x00", "0x00", "-0x01", "-0x01", "-0x01"},
-	{"-0xaf", "-0x50", "-0xf0", "-0x0f", "0xe1", "0x41"},
-	{"0x00", "-0x01", "0x00", "-0x01", "-0x01", "0x00"},
-	{"0x01", "0x01", "0x01", "0x01", "0x00", "0x00"},
-	{"-0x01", "-0x01", "-0x01", "-0x01", "0x00", "0x00"},
-	{"0x07", "0x08", "0x00", "0x0f", "0x0f", "0x07"},
-	{"0x05", "0x0f", "0x05", "0x0f", "0x0a", "0x00"},
-	{"0xff", "-0x0a", "0xf6", "-0x01", "-0xf7", "0x09"},
-	{"0x013ff6", "0x9a4e", "0x1a46", "0x01bffe", "0x01a5b8", "0x0125b0"},
-	{"-0x013ff6", "0x9a4e", "0x800a", "-0x0125b2", "-0x01a5bc", "-0x01c000"},
-	{"-0x013ff6", "-0x9a4e", "-0x01bffe", "-0x1a46", "0x01a5b8", "0x8008"},
-	{
-		"0x1000009dc6e3d9822cba04129bcbe3401",
-		"0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
-		"0x1000001186210100001000009048c2001",
-		"0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
-		"0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
-		"0x8c40c2d8822caa04120b8321400",
-	},
-	{
-		"0x1000009dc6e3d9822cba04129bcbe3401",
-		"-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
-		"0x8c40c2d8822caa04120b8321401",
-		"-0xb9bd7d543685789d57ca918e82229142459020483cd2014001fd",
-		"-0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fe",
-		"0x1000001186210100001000009048c2000",
-	},
-	{
-		"-0x1000009dc6e3d9822cba04129bcbe3401",
-		"-0xb9bd7d543685789d57cb918e833af352559021483cdb05cc21fd",
-		"-0xb9bd7d543685789d57cb918e8bfeff7fddb2ebe87dfbbdfe35fd",
-		"-0x1000001186210100001000009048c2001",
-		"0xb9bd7d543685789d57ca918e8ae69d6fcdb2eae87df2b97215fc",
-		"0xb9bd7d543685789d57ca918e82229142459020483cd2014001fc",
-	},
-}
-
-type bitFun func(z, x, y *Int) *Int
-
-func testBitFun(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
-	expected := new(Int)
-	expected.SetString(exp, 0)
-
-	out := f(new(Int), x, y)
-	if out.Cmp(expected) != 0 {
-		t.Errorf("%s: got %s want %s", msg, out, expected)
-	}
-}
-
-func testBitFunSelf(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
-	self := new(Int)
-	self.Set(x)
-	expected := new(Int)
-	expected.SetString(exp, 0)
-
-	self = f(self, self, y)
-	if self.Cmp(expected) != 0 {
-		t.Errorf("%s: got %s want %s", msg, self, expected)
-	}
-}
-
-func altBit(x *Int, i int) uint {
-	z := new(Int).Rsh(x, uint(i))
-	z = z.And(z, NewInt(1))
-	if z.Cmp(new(Int)) != 0 {
-		return 1
-	}
-	return 0
-}
-
-func altSetBit(z *Int, x *Int, i int, b uint) *Int {
-	one := NewInt(1)
-	m := one.Lsh(one, uint(i))
-	switch b {
-	case 1:
-		return z.Or(x, m)
-	case 0:
-		return z.AndNot(x, m)
-	}
-	panic("set bit is not 0 or 1")
-}
-
-func testBitset(t *testing.T, x *Int) {
-	n := x.BitLen()
-	z := new(Int).Set(x)
-	z1 := new(Int).Set(x)
-	for i := 0; i < n+10; i++ {
-		old := z.Bit(i)
-		old1 := altBit(z1, i)
-		if old != old1 {
-			t.Errorf("bitset: inconsistent value for Bit(%s, %d), got %v want %v", z1, i, old, old1)
-		}
-		z := new(Int).SetBit(z, i, 1)
-		z1 := altSetBit(new(Int), z1, i, 1)
-		if z.Bit(i) == 0 {
-			t.Errorf("bitset: bit %d of %s got 0 want 1", i, x)
-		}
-		if z.Cmp(z1) != 0 {
-			t.Errorf("bitset: inconsistent value after SetBit 1, got %s want %s", z, z1)
-		}
-		z.SetBit(z, i, 0)
-		altSetBit(z1, z1, i, 0)
-		if z.Bit(i) != 0 {
-			t.Errorf("bitset: bit %d of %s got 1 want 0", i, x)
-		}
-		if z.Cmp(z1) != 0 {
-			t.Errorf("bitset: inconsistent value after SetBit 0, got %s want %s", z, z1)
-		}
-		altSetBit(z1, z1, i, old)
-		z.SetBit(z, i, old)
-		if z.Cmp(z1) != 0 {
-			t.Errorf("bitset: inconsistent value after SetBit old, got %s want %s", z, z1)
-		}
-	}
-	if z.Cmp(x) != 0 {
-		t.Errorf("bitset: got %s want %s", z, x)
-	}
-}
-
-var bitsetTests = []struct {
-	x string
-	i int
-	b uint
-}{
-	{"0", 0, 0},
-	{"0", 200, 0},
-	{"1", 0, 1},
-	{"1", 1, 0},
-	{"-1", 0, 1},
-	{"-1", 200, 1},
-	{"0x2000000000000000000000000000", 108, 0},
-	{"0x2000000000000000000000000000", 109, 1},
-	{"0x2000000000000000000000000000", 110, 0},
-	{"-0x2000000000000000000000000001", 108, 1},
-	{"-0x2000000000000000000000000001", 109, 0},
-	{"-0x2000000000000000000000000001", 110, 1},
-}
-
-func TestBitSet(t *testing.T) {
-	for _, test := range bitwiseTests {
-		x := new(Int)
-		x.SetString(test.x, 0)
-		testBitset(t, x)
-		x = new(Int)
-		x.SetString(test.y, 0)
-		testBitset(t, x)
-	}
-	for i, test := range bitsetTests {
-		x := new(Int)
-		x.SetString(test.x, 0)
-		b := x.Bit(test.i)
-		if b != test.b {
-			t.Errorf("#%d got %v want %v", i, b, test.b)
-		}
-	}
-	z := NewInt(1)
-	z.SetBit(NewInt(0), 2, 1)
-	if z.Cmp(NewInt(4)) != 0 {
-		t.Errorf("destination leaked into result; got %s want 4", z)
-	}
-}
-
-func BenchmarkBitset(b *testing.B) {
-	z := new(Int)
-	z.SetBit(z, 512, 1)
-	b.ResetTimer()
-	b.StartTimer()
-	for i := b.N - 1; i >= 0; i-- {
-		z.SetBit(z, i&512, 1)
-	}
-}
-
-func BenchmarkBitsetNeg(b *testing.B) {
-	z := NewInt(-1)
-	z.SetBit(z, 512, 0)
-	b.ResetTimer()
-	b.StartTimer()
-	for i := b.N - 1; i >= 0; i-- {
-		z.SetBit(z, i&512, 0)
-	}
-}
-
-func BenchmarkBitsetOrig(b *testing.B) {
-	z := new(Int)
-	altSetBit(z, z, 512, 1)
-	b.ResetTimer()
-	b.StartTimer()
-	for i := b.N - 1; i >= 0; i-- {
-		altSetBit(z, z, i&512, 1)
-	}
-}
-
-func BenchmarkBitsetNegOrig(b *testing.B) {
-	z := NewInt(-1)
-	altSetBit(z, z, 512, 0)
-	b.ResetTimer()
-	b.StartTimer()
-	for i := b.N - 1; i >= 0; i-- {
-		altSetBit(z, z, i&512, 0)
-	}
-}
-
-// tri generates the trinomial 2**(n*2) - 2**n - 1, which is always 3 mod 4 and
-// 7 mod 8, so that 2 is always a quadratic residue.
-func tri(n uint) *Int {
-	x := NewInt(1)
-	x.Lsh(x, n)
-	x2 := new(Int).Lsh(x, n)
-	x2.Sub(x2, x)
-	x2.Sub(x2, intOne)
-	return x2
-}
-
-func BenchmarkModSqrt225_Tonelli(b *testing.B) {
-	p := tri(225)
-	x := NewInt(2)
-	for i := 0; i < b.N; i++ {
-		x.SetUint64(2)
-		x.modSqrtTonelliShanks(x, p)
-	}
-}
-
-func BenchmarkModSqrt225_3Mod4(b *testing.B) {
-	p := tri(225)
-	x := new(Int).SetUint64(2)
-	for i := 0; i < b.N; i++ {
-		x.SetUint64(2)
-		x.modSqrt3Mod4Prime(x, p)
-	}
-}
-
-func BenchmarkModSqrt5430_Tonelli(b *testing.B) {
-	if isRaceBuilder {
-		b.Skip("skipping on race builder")
-	}
-	p := tri(5430)
-	x := new(Int).SetUint64(2)
-	for i := 0; i < b.N; i++ {
-		x.SetUint64(2)
-		x.modSqrtTonelliShanks(x, p)
-	}
-}
-
-func BenchmarkModSqrt5430_3Mod4(b *testing.B) {
-	if isRaceBuilder {
-		b.Skip("skipping on race builder")
-	}
-	p := tri(5430)
-	x := new(Int).SetUint64(2)
-	for i := 0; i < b.N; i++ {
-		x.SetUint64(2)
-		x.modSqrt3Mod4Prime(x, p)
-	}
-}
-
-func TestBitwise(t *testing.T) {
-	x := new(Int)
-	y := new(Int)
-	for _, test := range bitwiseTests {
-		x.SetString(test.x, 0)
-		y.SetString(test.y, 0)
-
-		testBitFun(t, "and", (*Int).And, x, y, test.and)
-		testBitFunSelf(t, "and", (*Int).And, x, y, test.and)
-		testBitFun(t, "andNot", (*Int).AndNot, x, y, test.andNot)
-		testBitFunSelf(t, "andNot", (*Int).AndNot, x, y, test.andNot)
-		testBitFun(t, "or", (*Int).Or, x, y, test.or)
-		testBitFunSelf(t, "or", (*Int).Or, x, y, test.or)
-		testBitFun(t, "xor", (*Int).Xor, x, y, test.xor)
-		testBitFunSelf(t, "xor", (*Int).Xor, x, y, test.xor)
-	}
-}
-
-var notTests = []struct {
-	in  string
-	out string
-}{
-	{"0", "-1"},
-	{"1", "-2"},
-	{"7", "-8"},
-	{"0", "-1"},
-	{"-81910", "81909"},
-	{
-		"298472983472983471903246121093472394872319615612417471234712061",
-		"-298472983472983471903246121093472394872319615612417471234712062",
-	},
-}
-
-func TestNot(t *testing.T) {
-	in := new(Int)
-	out := new(Int)
-	expected := new(Int)
-	for i, test := range notTests {
-		in.SetString(test.in, 10)
-		expected.SetString(test.out, 10)
-		out = out.Not(in)
-		if out.Cmp(expected) != 0 {
-			t.Errorf("#%d: got %s want %s", i, out, expected)
-		}
-		out = out.Not(out)
-		if out.Cmp(in) != 0 {
-			t.Errorf("#%d: got %s want %s", i, out, in)
-		}
-	}
-}
-
-var modInverseTests = []struct {
-	element string
-	modulus string
-}{
-	{"1234567", "458948883992"},
-	{"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"},
-	{"-10", "13"}, // issue #16984
-}
-
-func TestModInverse(t *testing.T) {
-	var element, modulus, gcd, inverse Int
-	one := NewInt(1)
-	for i, test := range modInverseTests {
-		(&element).SetString(test.element, 10)
-		(&modulus).SetString(test.modulus, 10)
-		(&inverse).ModInverse(&element, &modulus)
-		(&inverse).Mul(&inverse, &element)
-		(&inverse).Mod(&inverse, &modulus)
-		if (&inverse).Cmp(one) != 0 {
-			t.Errorf("#%d: failed (e·e^(-1)=%s)", i, &inverse)
-		}
-	}
-	// exhaustive test for small values
-	for n := 2; n < 100; n++ {
-		(&modulus).SetInt64(int64(n))
-		for x := 1; x < n; x++ {
-			(&element).SetInt64(int64(x))
-			(&gcd).GCD(nil, nil, &element, &modulus)
-			if (&gcd).Cmp(one) != 0 {
-				continue
-			}
-			(&inverse).ModInverse(&element, &modulus)
-			(&inverse).Mul(&inverse, &element)
-			(&inverse).Mod(&inverse, &modulus)
-			if (&inverse).Cmp(one) != 0 {
-				t.Errorf("ModInverse(%d,%d)*%d%%%d=%d, not 1", &element, &modulus, &element, &modulus, &inverse)
-			}
-		}
-	}
-}
-
-// testModSqrt is a helper for TestModSqrt,
-// which checks that ModSqrt can compute a square-root of elt^2.
-func testModSqrt(t *testing.T, elt, mod, sq, sqrt *Int) bool {
-	var sqChk, sqrtChk, sqrtsq Int
-	sq.Mul(elt, elt)
-	sq.Mod(sq, mod)
-	z := sqrt.ModSqrt(sq, mod)
-	if z != sqrt {
-		t.Errorf("ModSqrt returned wrong value %s", z)
-	}
-
-	// test ModSqrt arguments outside the range [0,mod)
-	sqChk.Add(sq, mod)
-	z = sqrtChk.ModSqrt(&sqChk, mod)
-	if z != &sqrtChk || z.Cmp(sqrt) != 0 {
-		t.Errorf("ModSqrt returned inconsistent value %s", z)
-	}
-	sqChk.Sub(sq, mod)
-	z = sqrtChk.ModSqrt(&sqChk, mod)
-	if z != &sqrtChk || z.Cmp(sqrt) != 0 {
-		t.Errorf("ModSqrt returned inconsistent value %s", z)
-	}
-
-	// make sure we actually got a square root
-	if sqrt.Cmp(elt) == 0 {
-		return true // we found the "desired" square root
-	}
-	sqrtsq.Mul(sqrt, sqrt) // make sure we found the "other" one
-	sqrtsq.Mod(&sqrtsq, mod)
-	return sq.Cmp(&sqrtsq) == 0
-}
-
-func TestModSqrt(t *testing.T) {
-	var elt, mod, modx4, sq, sqrt Int
-	r := rand.New(rand.NewSource(9))
-	for i, s := range primes[1:] { // skip 2, use only odd primes
-		mod.SetString(s, 10)
-		modx4.Lsh(&mod, 2)
-
-		// test a few random elements per prime
-		for x := 1; x < 5; x++ {
-			elt.Rand(r, &modx4)
-			elt.Sub(&elt, &mod) // test range [-mod, 3*mod)
-			if !testModSqrt(t, &elt, &mod, &sq, &sqrt) {
-				t.Errorf("#%d: failed (sqrt(e) = %s)", i, &sqrt)
-			}
-		}
-
-		if testing.Short() && i > 2 {
-			break
-		}
-	}
-
-	if testing.Short() {
-		return
-	}
-
-	// exhaustive test for small values
-	for n := 3; n < 100; n++ {
-		mod.SetInt64(int64(n))
-		if !mod.ProbablyPrime(10) {
-			continue
-		}
-		isSquare := make([]bool, n)
-
-		// test all the squares
-		for x := 1; x < n; x++ {
-			elt.SetInt64(int64(x))
-			if !testModSqrt(t, &elt, &mod, &sq, &sqrt) {
-				t.Errorf("#%d: failed (sqrt(%d,%d) = %s)", x, &elt, &mod, &sqrt)
-			}
-			isSquare[sq.Uint64()] = true
-		}
-
-		// test all non-squares
-		for x := 1; x < n; x++ {
-			sq.SetInt64(int64(x))
-			z := sqrt.ModSqrt(&sq, &mod)
-			if !isSquare[x] && z != nil {
-				t.Errorf("#%d: failed (sqrt(%d,%d) = nil)", x, &sqrt, &mod)
-			}
-		}
-	}
-}
-
-func TestJacobi(t *testing.T) {
-	testCases := []struct {
-		x, y   int64
-		result int
-	}{
-		{0, 1, 1},
-		{0, -1, 1},
-		{1, 1, 1},
-		{1, -1, 1},
-		{0, 5, 0},
-		{1, 5, 1},
-		{2, 5, -1},
-		{-2, 5, -1},
-		{2, -5, -1},
-		{-2, -5, 1},
-		{3, 5, -1},
-		{5, 5, 0},
-		{-5, 5, 0},
-		{6, 5, 1},
-		{6, -5, 1},
-		{-6, 5, 1},
-		{-6, -5, -1},
-	}
-
-	var x, y Int
-
-	for i, test := range testCases {
-		x.SetInt64(test.x)
-		y.SetInt64(test.y)
-		expected := test.result
-		actual := Jacobi(&x, &y)
-		if actual != expected {
-			t.Errorf("#%d: Jacobi(%d, %d) = %d, but expected %d", i, test.x, test.y, actual, expected)
-		}
-	}
-}
-
-func TestJacobiPanic(t *testing.T) {
-	const failureMsg = "test failure"
-	defer func() {
-		msg := recover()
-		if msg == nil || msg == failureMsg {
-			panic(msg)
-		}
-		t.Log(msg)
-	}()
-	x := NewInt(1)
-	y := NewInt(2)
-	// Jacobi should panic when the second argument is even.
-	Jacobi(x, y)
-	panic(failureMsg)
-}
-
-func TestIssue2607(t *testing.T) {
-	// This code sequence used to hang.
-	n := NewInt(10)
-	n.Rand(rand.New(rand.NewSource(9)), n)
-}
-
-func TestSqrt(t *testing.T) {
-	root := 0
-	r := new(Int)
-	for i := 0; i < 10000; i++ {
-		if (root+1)*(root+1) <= i {
-			root++
-		}
-		n := NewInt(int64(i))
-		r.SetInt64(-2)
-		r.Sqrt(n)
-		if r.Cmp(NewInt(int64(root))) != 0 {
-			t.Errorf("Sqrt(%v) = %v, want %v", n, r, root)
-		}
-	}
-
-	for i := 0; i < 1000; i += 10 {
-		n, _ := new(Int).SetString("1"+strings.Repeat("0", i), 10)
-		r := new(Int).Sqrt(n)
-		root, _ := new(Int).SetString("1"+strings.Repeat("0", i/2), 10)
-		if r.Cmp(root) != 0 {
-			t.Errorf("Sqrt(1e%d) = %v, want 1e%d", i, r, i/2)
-		}
-	}
-
-	// Test aliasing.
-	r.SetInt64(100)
-	r.Sqrt(r)
-	if r.Int64() != 10 {
-		t.Errorf("Sqrt(100) = %v, want 10 (aliased output)", r.Int64())
-	}
-}
-
-func BenchmarkSqrt(b *testing.B) {
-	n, _ := new(Int).SetString("1"+strings.Repeat("0", 1001), 10)
-	b.ResetTimer()
-	t := new(Int)
-	for i := 0; i < b.N; i++ {
-		t.Sqrt(n)
-	}
-}
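
The deleted tests above exercise public math/big API: Int.Sqrt (new in go1.8) plus the existing Jacobi, ModInverse and ModSqrt. A minimal standalone sketch of those calls, separate from the diff itself; the concrete values are illustrative only.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Int.Sqrt computes the floor square root (added in go1.8).
	r := new(big.Int).Sqrt(big.NewInt(1 << 20))
	fmt.Println(r) // 1024

	// Jacobi symbol (x/y); y must be an odd integer.
	fmt.Println(big.Jacobi(big.NewInt(2), big.NewInt(5))) // -1

	// Modular inverse: 3·5 ≡ 1 (mod 7).
	fmt.Println(new(big.Int).ModInverse(big.NewInt(3), big.NewInt(7))) // 5
}
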
diff --git a/pkg/bootstrap/src/bootstrap/math/big/intconv.go b/pkg/bootstrap/src/bootstrap/math/big/intconv.go
deleted file mode 100644
index 16ef73e..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/intconv.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intconv.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intconv.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements int-to-string conversion functions.
-
-package big
-
-import (
-	"errors"
-	"fmt"
-	"io"
-)
-
-// TODO(gri) Should rename itoa to utoa (there's no sign). That
-// would permit the introduction of itoa which is like utoa but
-// reserves a byte for a possible sign that's passed in. That
-// would permit Int.Text to be implemented w/o the need for
-// string copy if the number is negative.
-
-// Text returns the string representation of x in the given base.
-// Base must be between 2 and 36, inclusive. The result uses the
-// lower-case letters 'a' to 'z' for digit values >= 10. No base
-// prefix (such as "0x") is added to the string.
-func (x *Int) Text(base int) string {
-	if x == nil {
-		return "<nil>"
-	}
-	return string(x.abs.itoa(x.neg, base))
-}
-
-// Append appends the string representation of x, as generated by
-// x.Text(base), to buf and returns the extended buffer.
-func (x *Int) Append(buf []byte, base int) []byte {
-	if x == nil {
-		return append(buf, "<nil>"...)
-	}
-	return append(buf, x.abs.itoa(x.neg, base)...)
-}
-
-func (x *Int) String() string {
-	return x.Text(10)
-}
-
-// write count copies of text to s
-func writeMultiple(s fmt.State, text string, count int) {
-	if len(text) > 0 {
-		b := []byte(text)
-		for ; count > 0; count-- {
-			s.Write(b)
-		}
-	}
-}
-
-var _ fmt.Formatter = intOne // *Int must implement fmt.Formatter
-
-// Format implements fmt.Formatter. It accepts the formats
-// 'b' (binary), 'o' (octal), 'd' (decimal), 'x' (lowercase
-// hexadecimal), and 'X' (uppercase hexadecimal).
-// Also supported are the full suite of package fmt's format
-// flags for integral types, including '+' and ' ' for sign
-// control, '#' for leading zero in octal and for hexadecimal,
-// a leading "0x" or "0X" for "%#x" and "%#X" respectively,
-// specification of minimum digits precision, output field
-// width, space or zero padding, and '-' for left or right
-// justification.
-//
-func (x *Int) Format(s fmt.State, ch rune) {
-	// determine base
-	var base int
-	switch ch {
-	case 'b':
-		base = 2
-	case 'o':
-		base = 8
-	case 'd', 's', 'v':
-		base = 10
-	case 'x', 'X':
-		base = 16
-	default:
-		// unknown format
-		fmt.Fprintf(s, "%%!%c(big.Int=%s)", ch, x.String())
-		return
-	}
-
-	if x == nil {
-		fmt.Fprint(s, "<nil>")
-		return
-	}
-
-	// determine sign character
-	sign := ""
-	switch {
-	case x.neg:
-		sign = "-"
-	case s.Flag('+'): // supersedes ' ' when both specified
-		sign = "+"
-	case s.Flag(' '):
-		sign = " "
-	}
-
-	// determine prefix characters for indicating output base
-	prefix := ""
-	if s.Flag('#') {
-		switch ch {
-		case 'o': // octal
-			prefix = "0"
-		case 'x': // hexadecimal
-			prefix = "0x"
-		case 'X':
-			prefix = "0X"
-		}
-	}
-
-	digits := x.abs.utoa(base)
-	if ch == 'X' {
-		// faster than bytes.ToUpper
-		for i, d := range digits {
-			if 'a' <= d && d <= 'z' {
-				digits[i] = 'A' + (d - 'a')
-			}
-		}
-	}
-
-	// number of characters for the three classes of number padding
-	var left int  // space characters to left of digits for right justification ("%8d")
-	var zeros int // zero characters (actually cs[0]) as left-most digits ("%.8d")
-	var right int // space characters to right of digits for left justification ("%-8d")
-
-	// determine number padding from precision: the least number of digits to output
-	precision, precisionSet := s.Precision()
-	if precisionSet {
-		switch {
-		case len(digits) < precision:
-			zeros = precision - len(digits) // count of zero padding
-		case len(digits) == 1 && digits[0] == '0' && precision == 0:
-			return // print nothing if zero value (x == 0) and zero precision ("." or ".0")
-		}
-	}
-
-	// determine field pad from width: the least number of characters to output
-	length := len(sign) + len(prefix) + zeros + len(digits)
-	if width, widthSet := s.Width(); widthSet && length < width { // pad as specified
-		switch d := width - length; {
-		case s.Flag('-'):
-			// pad on the right with spaces; supersedes '0' when both specified
-			right = d
-		case s.Flag('0') && !precisionSet:
-			// pad with zeros unless precision also specified
-			zeros = d
-		default:
-			// pad on the left with spaces
-			left = d
-		}
-	}
-
-	// print number as [left pad][sign][prefix][zero pad][digits][right pad]
-	writeMultiple(s, " ", left)
-	writeMultiple(s, sign, 1)
-	writeMultiple(s, prefix, 1)
-	writeMultiple(s, "0", zeros)
-	s.Write(digits)
-	writeMultiple(s, " ", right)
-}
-
-// scan sets z to the integer value corresponding to the longest possible prefix
-// read from r representing a signed integer number in a given conversion base.
-// It returns z, the actual conversion base used, and an error, if any. In the
-// error case, the value of z is undefined but the returned value is nil. The
-// syntax follows the syntax of integer literals in Go.
-//
-// The base argument must be 0 or a value from 2 through MaxBase. If the base
-// is 0, the string prefix determines the actual conversion base. A prefix of
-// ``0x'' or ``0X'' selects base 16; the ``0'' prefix selects base 8, and a
-// ``0b'' or ``0B'' prefix selects base 2. Otherwise the selected base is 10.
-//
-func (z *Int) scan(r io.ByteScanner, base int) (*Int, int, error) {
-	// determine sign
-	neg, err := scanSign(r)
-	if err != nil {
-		return nil, 0, err
-	}
-
-	// determine mantissa
-	z.abs, base, _, err = z.abs.scan(r, base, false)
-	if err != nil {
-		return nil, base, err
-	}
-	z.neg = len(z.abs) > 0 && neg // 0 has no sign
-
-	return z, base, nil
-}
-
-func scanSign(r io.ByteScanner) (neg bool, err error) {
-	var ch byte
-	if ch, err = r.ReadByte(); err != nil {
-		return false, err
-	}
-	switch ch {
-	case '-':
-		neg = true
-	case '+':
-		// nothing to do
-	default:
-		r.UnreadByte()
-	}
-	return
-}
-
-// byteReader is a local wrapper around fmt.ScanState;
-// it implements the ByteReader interface.
-type byteReader struct {
-	fmt.ScanState
-}
-
-func (r byteReader) ReadByte() (byte, error) {
-	ch, size, err := r.ReadRune()
-	if size != 1 && err == nil {
-		err = fmt.Errorf("invalid rune %#U", ch)
-	}
-	return byte(ch), err
-}
-
-func (r byteReader) UnreadByte() error {
-	return r.UnreadRune()
-}
-
-var _ fmt.Scanner = intOne // *Int must implement fmt.Scanner
-
-// Scan is a support routine for fmt.Scanner; it sets z to the value of
-// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
-// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
-func (z *Int) Scan(s fmt.ScanState, ch rune) error {
-	s.SkipSpace() // skip leading space characters
-	base := 0
-	switch ch {
-	case 'b':
-		base = 2
-	case 'o':
-		base = 8
-	case 'd':
-		base = 10
-	case 'x', 'X':
-		base = 16
-	case 's', 'v':
-		// let scan determine the base
-	default:
-		return errors.New("Int.Scan: invalid verb")
-	}
-	_, _, err := z.scan(byteReader{s}, base)
-	return err
-}
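
The Format method above is what backs the fmt verbs, flags and padding rules documented in its comment. A short standalone sketch of the observable behaviour (not part of the change); the expected outputs follow the rules described above.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewInt(1234)
	fmt.Printf("%x\n", x)      // 4d2
	fmt.Printf("%#X\n", x)     // 0X4D2
	fmt.Printf("[%8.6d]\n", x) // [  001234]  width 8, at least 6 digits
	fmt.Printf("[%-8d]\n", x)  // [1234    ]  left-justified
}
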
diff --git a/pkg/bootstrap/src/bootstrap/math/big/intconv_test.go b/pkg/bootstrap/src/bootstrap/math/big/intconv_test.go
deleted file mode 100644
index 83ff0e6..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/intconv_test.go
+++ /dev/null
@@ -1,394 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intconv_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intconv_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"fmt"
-	"testing"
-)
-
-var stringTests = []struct {
-	in   string
-	out  string
-	base int
-	val  int64
-	ok   bool
-}{
-	{in: ""},
-	{in: "a"},
-	{in: "z"},
-	{in: "+"},
-	{in: "-"},
-	{in: "0b"},
-	{in: "0x"},
-	{in: "2", base: 2},
-	{in: "0b2", base: 0},
-	{in: "08"},
-	{in: "8", base: 8},
-	{in: "0xg", base: 0},
-	{in: "g", base: 16},
-	{"0", "0", 0, 0, true},
-	{"0", "0", 10, 0, true},
-	{"0", "0", 16, 0, true},
-	{"+0", "0", 0, 0, true},
-	{"-0", "0", 0, 0, true},
-	{"10", "10", 0, 10, true},
-	{"10", "10", 10, 10, true},
-	{"10", "10", 16, 16, true},
-	{"-10", "-10", 16, -16, true},
-	{"+10", "10", 16, 16, true},
-	{"0x10", "16", 0, 16, true},
-	{in: "0x10", base: 16},
-	{"-0x10", "-16", 0, -16, true},
-	{"+0x10", "16", 0, 16, true},
-	{"00", "0", 0, 0, true},
-	{"0", "0", 8, 0, true},
-	{"07", "7", 0, 7, true},
-	{"7", "7", 8, 7, true},
-	{"023", "19", 0, 19, true},
-	{"23", "23", 8, 19, true},
-	{"cafebabe", "cafebabe", 16, 0xcafebabe, true},
-	{"0b0", "0", 0, 0, true},
-	{"-111", "-111", 2, -7, true},
-	{"-0b111", "-7", 0, -7, true},
-	{"0b1001010111", "599", 0, 0x257, true},
-	{"1001010111", "1001010111", 2, 0x257, true},
-}
-
-func TestIntText(t *testing.T) {
-	z := new(Int)
-	for _, test := range stringTests {
-		if !test.ok {
-			continue
-		}
-
-		_, ok := z.SetString(test.in, test.base)
-		if !ok {
-			t.Errorf("%v: failed to parse", test)
-			continue
-		}
-
-		base := test.base
-		if base == 0 {
-			base = 10
-		}
-
-		if got := z.Text(base); got != test.out {
-			t.Errorf("%v: got %s; want %s", test, got, test.out)
-		}
-	}
-}
-
-func TestAppendText(t *testing.T) {
-	z := new(Int)
-	var buf []byte
-	for _, test := range stringTests {
-		if !test.ok {
-			continue
-		}
-
-		_, ok := z.SetString(test.in, test.base)
-		if !ok {
-			t.Errorf("%v: failed to parse", test)
-			continue
-		}
-
-		base := test.base
-		if base == 0 {
-			base = 10
-		}
-
-		i := len(buf)
-		buf = z.Append(buf, base)
-		if got := string(buf[i:]); got != test.out {
-			t.Errorf("%v: got %s; want %s", test, got, test.out)
-		}
-	}
-}
-
-func format(base int) string {
-	switch base {
-	case 2:
-		return "%b"
-	case 8:
-		return "%o"
-	case 16:
-		return "%x"
-	}
-	return "%d"
-}
-
-func TestGetString(t *testing.T) {
-	z := new(Int)
-	for i, test := range stringTests {
-		if !test.ok {
-			continue
-		}
-		z.SetInt64(test.val)
-
-		if test.base == 10 {
-			if got := z.String(); got != test.out {
-				t.Errorf("#%da got %s; want %s", i, got, test.out)
-			}
-		}
-
-		if got := fmt.Sprintf(format(test.base), z); got != test.out {
-			t.Errorf("#%db got %s; want %s", i, got, test.out)
-		}
-	}
-}
-
-func TestSetString(t *testing.T) {
-	tmp := new(Int)
-	for i, test := range stringTests {
-		// initialize to a non-zero value so that issues with parsing
-		// 0 are detected
-		tmp.SetInt64(1234567890)
-		n1, ok1 := new(Int).SetString(test.in, test.base)
-		n2, ok2 := tmp.SetString(test.in, test.base)
-		expected := NewInt(test.val)
-		if ok1 != test.ok || ok2 != test.ok {
-			t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok)
-			continue
-		}
-		if !ok1 {
-			if n1 != nil {
-				t.Errorf("#%d (input '%s') n1 != nil", i, test.in)
-			}
-			continue
-		}
-		if !ok2 {
-			if n2 != nil {
-				t.Errorf("#%d (input '%s') n2 != nil", i, test.in)
-			}
-			continue
-		}
-
-		if ok1 && !isNormalized(n1) {
-			t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n1)
-		}
-		if ok2 && !isNormalized(n2) {
-			t.Errorf("#%d (input '%s'): %v is not normalized", i, test.in, *n2)
-		}
-
-		if n1.Cmp(expected) != 0 {
-			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val)
-		}
-		if n2.Cmp(expected) != 0 {
-			t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val)
-		}
-	}
-}
-
-var formatTests = []struct {
-	input  string
-	format string
-	output string
-}{
-	{"<nil>", "%x", "<nil>"},
-	{"<nil>", "%#x", "<nil>"},
-	{"<nil>", "%#y", "%!y(big.Int=<nil>)"},
-
-	{"10", "%b", "1010"},
-	{"10", "%o", "12"},
-	{"10", "%d", "10"},
-	{"10", "%v", "10"},
-	{"10", "%x", "a"},
-	{"10", "%X", "A"},
-	{"-10", "%X", "-A"},
-	{"10", "%y", "%!y(big.Int=10)"},
-	{"-10", "%y", "%!y(big.Int=-10)"},
-
-	{"10", "%#b", "1010"},
-	{"10", "%#o", "012"},
-	{"10", "%#d", "10"},
-	{"10", "%#v", "10"},
-	{"10", "%#x", "0xa"},
-	{"10", "%#X", "0XA"},
-	{"-10", "%#X", "-0XA"},
-	{"10", "%#y", "%!y(big.Int=10)"},
-	{"-10", "%#y", "%!y(big.Int=-10)"},
-
-	{"1234", "%d", "1234"},
-	{"1234", "%3d", "1234"},
-	{"1234", "%4d", "1234"},
-	{"-1234", "%d", "-1234"},
-	{"1234", "% 5d", " 1234"},
-	{"1234", "%+5d", "+1234"},
-	{"1234", "%-5d", "1234 "},
-	{"1234", "%x", "4d2"},
-	{"1234", "%X", "4D2"},
-	{"-1234", "%3x", "-4d2"},
-	{"-1234", "%4x", "-4d2"},
-	{"-1234", "%5x", " -4d2"},
-	{"-1234", "%-5x", "-4d2 "},
-	{"1234", "%03d", "1234"},
-	{"1234", "%04d", "1234"},
-	{"1234", "%05d", "01234"},
-	{"1234", "%06d", "001234"},
-	{"-1234", "%06d", "-01234"},
-	{"1234", "%+06d", "+01234"},
-	{"1234", "% 06d", " 01234"},
-	{"1234", "%-6d", "1234  "},
-	{"1234", "%-06d", "1234  "},
-	{"-1234", "%-06d", "-1234 "},
-
-	{"1234", "%.3d", "1234"},
-	{"1234", "%.4d", "1234"},
-	{"1234", "%.5d", "01234"},
-	{"1234", "%.6d", "001234"},
-	{"-1234", "%.3d", "-1234"},
-	{"-1234", "%.4d", "-1234"},
-	{"-1234", "%.5d", "-01234"},
-	{"-1234", "%.6d", "-001234"},
-
-	{"1234", "%8.3d", "    1234"},
-	{"1234", "%8.4d", "    1234"},
-	{"1234", "%8.5d", "   01234"},
-	{"1234", "%8.6d", "  001234"},
-	{"-1234", "%8.3d", "   -1234"},
-	{"-1234", "%8.4d", "   -1234"},
-	{"-1234", "%8.5d", "  -01234"},
-	{"-1234", "%8.6d", " -001234"},
-
-	{"1234", "%+8.3d", "   +1234"},
-	{"1234", "%+8.4d", "   +1234"},
-	{"1234", "%+8.5d", "  +01234"},
-	{"1234", "%+8.6d", " +001234"},
-	{"-1234", "%+8.3d", "   -1234"},
-	{"-1234", "%+8.4d", "   -1234"},
-	{"-1234", "%+8.5d", "  -01234"},
-	{"-1234", "%+8.6d", " -001234"},
-
-	{"1234", "% 8.3d", "    1234"},
-	{"1234", "% 8.4d", "    1234"},
-	{"1234", "% 8.5d", "   01234"},
-	{"1234", "% 8.6d", "  001234"},
-	{"-1234", "% 8.3d", "   -1234"},
-	{"-1234", "% 8.4d", "   -1234"},
-	{"-1234", "% 8.5d", "  -01234"},
-	{"-1234", "% 8.6d", " -001234"},
-
-	{"1234", "%.3x", "4d2"},
-	{"1234", "%.4x", "04d2"},
-	{"1234", "%.5x", "004d2"},
-	{"1234", "%.6x", "0004d2"},
-	{"-1234", "%.3x", "-4d2"},
-	{"-1234", "%.4x", "-04d2"},
-	{"-1234", "%.5x", "-004d2"},
-	{"-1234", "%.6x", "-0004d2"},
-
-	{"1234", "%8.3x", "     4d2"},
-	{"1234", "%8.4x", "    04d2"},
-	{"1234", "%8.5x", "   004d2"},
-	{"1234", "%8.6x", "  0004d2"},
-	{"-1234", "%8.3x", "    -4d2"},
-	{"-1234", "%8.4x", "   -04d2"},
-	{"-1234", "%8.5x", "  -004d2"},
-	{"-1234", "%8.6x", " -0004d2"},
-
-	{"1234", "%+8.3x", "    +4d2"},
-	{"1234", "%+8.4x", "   +04d2"},
-	{"1234", "%+8.5x", "  +004d2"},
-	{"1234", "%+8.6x", " +0004d2"},
-	{"-1234", "%+8.3x", "    -4d2"},
-	{"-1234", "%+8.4x", "   -04d2"},
-	{"-1234", "%+8.5x", "  -004d2"},
-	{"-1234", "%+8.6x", " -0004d2"},
-
-	{"1234", "% 8.3x", "     4d2"},
-	{"1234", "% 8.4x", "    04d2"},
-	{"1234", "% 8.5x", "   004d2"},
-	{"1234", "% 8.6x", "  0004d2"},
-	{"1234", "% 8.7x", " 00004d2"},
-	{"1234", "% 8.8x", " 000004d2"},
-	{"-1234", "% 8.3x", "    -4d2"},
-	{"-1234", "% 8.4x", "   -04d2"},
-	{"-1234", "% 8.5x", "  -004d2"},
-	{"-1234", "% 8.6x", " -0004d2"},
-	{"-1234", "% 8.7x", "-00004d2"},
-	{"-1234", "% 8.8x", "-000004d2"},
-
-	{"1234", "%-8.3d", "1234    "},
-	{"1234", "%-8.4d", "1234    "},
-	{"1234", "%-8.5d", "01234   "},
-	{"1234", "%-8.6d", "001234  "},
-	{"1234", "%-8.7d", "0001234 "},
-	{"1234", "%-8.8d", "00001234"},
-	{"-1234", "%-8.3d", "-1234   "},
-	{"-1234", "%-8.4d", "-1234   "},
-	{"-1234", "%-8.5d", "-01234  "},
-	{"-1234", "%-8.6d", "-001234 "},
-	{"-1234", "%-8.7d", "-0001234"},
-	{"-1234", "%-8.8d", "-00001234"},
-
-	{"16777215", "%b", "111111111111111111111111"}, // 2**24 - 1
-
-	{"0", "%.d", ""},
-	{"0", "%.0d", ""},
-	{"0", "%3.d", ""},
-}
-
-func TestFormat(t *testing.T) {
-	for i, test := range formatTests {
-		var x *Int
-		if test.input != "<nil>" {
-			var ok bool
-			x, ok = new(Int).SetString(test.input, 0)
-			if !ok {
-				t.Errorf("#%d failed reading input %s", i, test.input)
-			}
-		}
-		output := fmt.Sprintf(test.format, x)
-		if output != test.output {
-			t.Errorf("#%d got %q; want %q, {%q, %q, %q}", i, output, test.output, test.input, test.format, test.output)
-		}
-	}
-}
-
-var scanTests = []struct {
-	input     string
-	format    string
-	output    string
-	remaining int
-}{
-	{"1010", "%b", "10", 0},
-	{"0b1010", "%v", "10", 0},
-	{"12", "%o", "10", 0},
-	{"012", "%v", "10", 0},
-	{"10", "%d", "10", 0},
-	{"10", "%v", "10", 0},
-	{"a", "%x", "10", 0},
-	{"0xa", "%v", "10", 0},
-	{"A", "%X", "10", 0},
-	{"-A", "%X", "-10", 0},
-	{"+0b1011001", "%v", "89", 0},
-	{"0xA", "%v", "10", 0},
-	{"0 ", "%v", "0", 1},
-	{"2+3", "%v", "2", 2},
-	{"0XABC 12", "%v", "2748", 3},
-}
-
-func TestScan(t *testing.T) {
-	var buf bytes.Buffer
-	for i, test := range scanTests {
-		x := new(Int)
-		buf.Reset()
-		buf.WriteString(test.input)
-		if _, err := fmt.Fscanf(&buf, test.format, x); err != nil {
-			t.Errorf("#%d error: %s", i, err)
-		}
-		if x.String() != test.output {
-			t.Errorf("#%d got %s; want %s", i, x.String(), test.output)
-		}
-		if buf.Len() != test.remaining {
-			t.Errorf("#%d got %d bytes remaining; want %d", i, buf.Len(), test.remaining)
-		}
-	}
-}
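
The scan tests above depend on the same prefix rules as SetString with base 0: ``0x''/``0X'' selects base 16, a leading ``0'' base 8, and ``0b''/``0B'' base 2. A standalone sketch of both entry points, separate from the diff.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// fmt scanning dispatches to Int.Scan; the %v verb lets the
	// string prefix choose the base.
	x := new(big.Int)
	if _, err := fmt.Sscanf("0b1010", "%v", x); err != nil {
		panic(err)
	}
	fmt.Println(x) // 10

	// SetString with base 0 applies the same prefix rules.
	y, ok := new(big.Int).SetString("023", 0)
	fmt.Println(y, ok) // 19 true
}
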
diff --git a/pkg/bootstrap/src/bootstrap/math/big/intmarsh.go b/pkg/bootstrap/src/bootstrap/math/big/intmarsh.go
deleted file mode 100644
index e1e4cf2..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/intmarsh.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intmarsh.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intmarsh.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements encoding/decoding of Ints.
-
-package big
-
-import "fmt"
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const intGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-func (x *Int) GobEncode() ([]byte, error) {
-	if x == nil {
-		return nil, nil
-	}
-	buf := make([]byte, 1+len(x.abs)*_S) // extra byte for version and sign bit
-	i := x.abs.bytes(buf) - 1            // i >= 0
-	b := intGobVersion << 1              // make space for sign bit
-	if x.neg {
-		b |= 1
-	}
-	buf[i] = b
-	return buf[i:], nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-func (z *Int) GobDecode(buf []byte) error {
-	if len(buf) == 0 {
-		// Other side sent a nil or default value.
-		*z = Int{}
-		return nil
-	}
-	b := buf[0]
-	if b>>1 != intGobVersion {
-		return fmt.Errorf("Int.GobDecode: encoding version %d not supported", b>>1)
-	}
-	z.neg = b&1 != 0
-	z.abs = z.abs.setBytes(buf[1:])
-	return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (x *Int) MarshalText() (text []byte, err error) {
-	if x == nil {
-		return []byte("<nil>"), nil
-	}
-	return x.abs.itoa(x.neg, 10), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (z *Int) UnmarshalText(text []byte) error {
-	// TODO(gri): get rid of the []byte/string conversion
-	if _, ok := z.SetString(string(text), 0); !ok {
-		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Int", text)
-	}
-	return nil
-}
-
-// The JSON marshalers are only here for API backward compatibility
-// (programs that explicitly look for these two methods). JSON works
-// fine with the TextMarshaler only.
-
-// MarshalJSON implements the json.Marshaler interface.
-func (x *Int) MarshalJSON() ([]byte, error) {
-	return x.MarshalText()
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (z *Int) UnmarshalJSON(text []byte) error {
-	// Ignore null, like in the main JSON package.
-	if string(text) == "null" {
-		return nil
-	}
-	return z.UnmarshalText(text)
-}
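
The marshalers above let a *big.Int round-trip through encoding/json (and gob or XML) as its decimal text. A small standalone sketch, reusing one of the values from the test file deleted below.

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

func main() {
	x, _ := new(big.Int).SetString("298472983472983471903246121093472394872319615612417471234712061", 10)

	b, err := json.Marshal(x) // MarshalJSON emits the decimal text
	if err != nil {
		panic(err)
	}

	var y big.Int
	if err := json.Unmarshal(b, &y); err != nil {
		panic(err)
	}
	fmt.Println(y.Cmp(x) == 0) // true
}
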
diff --git a/pkg/bootstrap/src/bootstrap/math/big/intmarsh_test.go b/pkg/bootstrap/src/bootstrap/math/big/intmarsh_test.go
deleted file mode 100644
index 5dba8d4..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/intmarsh_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intmarsh_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/intmarsh_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"encoding/xml"
-	"testing"
-)
-
-var encodingTests = []string{
-	"0",
-	"1",
-	"2",
-	"10",
-	"1000",
-	"1234567890",
-	"298472983472983471903246121093472394872319615612417471234712061",
-}
-
-func TestIntGobEncoding(t *testing.T) {
-	var medium bytes.Buffer
-	enc := gob.NewEncoder(&medium)
-	dec := gob.NewDecoder(&medium)
-	for _, test := range encodingTests {
-		for _, sign := range []string{"", "+", "-"} {
-			x := sign + test
-			medium.Reset() // empty buffer for each test case (in case of failures)
-			var tx Int
-			tx.SetString(x, 10)
-			if err := enc.Encode(&tx); err != nil {
-				t.Errorf("encoding of %s failed: %s", &tx, err)
-				continue
-			}
-			var rx Int
-			if err := dec.Decode(&rx); err != nil {
-				t.Errorf("decoding of %s failed: %s", &tx, err)
-				continue
-			}
-			if rx.Cmp(&tx) != 0 {
-				t.Errorf("transmission of %s failed: got %s want %s", &tx, &rx, &tx)
-			}
-		}
-	}
-}
-
-// Sending a nil Int pointer (inside a slice) on a round trip through gob should yield a zero.
-// TODO: top-level nils.
-func TestGobEncodingNilIntInSlice(t *testing.T) {
-	buf := new(bytes.Buffer)
-	enc := gob.NewEncoder(buf)
-	dec := gob.NewDecoder(buf)
-
-	var in = make([]*Int, 1)
-	err := enc.Encode(&in)
-	if err != nil {
-		t.Errorf("gob encode failed: %q", err)
-	}
-	var out []*Int
-	err = dec.Decode(&out)
-	if err != nil {
-		t.Fatalf("gob decode failed: %q", err)
-	}
-	if len(out) != 1 {
-		t.Fatalf("wrong len; want 1 got %d", len(out))
-	}
-	var zero Int
-	if out[0].Cmp(&zero) != 0 {
-		t.Fatalf("transmission of (*Int)(nil) failed: got %s want 0", out)
-	}
-}
-
-func TestIntJSONEncoding(t *testing.T) {
-	for _, test := range encodingTests {
-		for _, sign := range []string{"", "+", "-"} {
-			x := sign + test
-			var tx Int
-			tx.SetString(x, 10)
-			b, err := json.Marshal(&tx)
-			if err != nil {
-				t.Errorf("marshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			var rx Int
-			if err := json.Unmarshal(b, &rx); err != nil {
-				t.Errorf("unmarshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			if rx.Cmp(&tx) != 0 {
-				t.Errorf("JSON encoding of %s failed: got %s want %s", &tx, &rx, &tx)
-			}
-		}
-	}
-}
-
-func TestIntXMLEncoding(t *testing.T) {
-	for _, test := range encodingTests {
-		for _, sign := range []string{"", "+", "-"} {
-			x := sign + test
-			var tx Int
-			tx.SetString(x, 0)
-			b, err := xml.Marshal(&tx)
-			if err != nil {
-				t.Errorf("marshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			var rx Int
-			if err := xml.Unmarshal(b, &rx); err != nil {
-				t.Errorf("unmarshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			if rx.Cmp(&tx) != 0 {
-				t.Errorf("XML encoding of %s failed: got %s want %s", &tx, &rx, &tx)
-			}
-		}
-	}
-}
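
For the gob path covered by TestIntGobEncoding above, an equivalent standalone sketch; again illustration only, not part of the change.

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"math/big"
)

func main() {
	var buf bytes.Buffer
	x := new(big.Int).Lsh(big.NewInt(1), 100) // 2^100, wider than a machine word

	if err := gob.NewEncoder(&buf).Encode(x); err != nil {
		panic(err)
	}
	var y big.Int
	if err := gob.NewDecoder(&buf).Decode(&y); err != nil {
		panic(err)
	}
	fmt.Println(y.Cmp(x) == 0) // true
}
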
diff --git a/pkg/bootstrap/src/bootstrap/math/big/nat.go b/pkg/bootstrap/src/bootstrap/math/big/nat.go
deleted file mode 100644
index 984bd3d..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/nat.go
+++ /dev/null
@@ -1,1262 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/nat.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/nat.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements unsigned multi-precision integers (natural
-// numbers). They are the building blocks for the implementation
-// of signed integers, rationals, and floating-point numbers.
-
-package big
-
-import (
-	"math/rand"
-	"sync"
-)
-
-// An unsigned integer x of the form
-//
-//   x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
-//
-// with 0 <= x[i] < _B and 0 <= i < n is stored in a slice of length n,
-// with the digits x[i] as the slice elements.
-//
-// A number is normalized if the slice contains no leading 0 digits.
-// During arithmetic operations, denormalized values may occur but are
-// always normalized before returning the final result. The normalized
-// representation of 0 is the empty or nil slice (length = 0).
-//
-type nat []Word
-
-var (
-	natOne = nat{1}
-	natTwo = nat{2}
-	natTen = nat{10}
-)
-
-func (z nat) clear() {
-	for i := range z {
-		z[i] = 0
-	}
-}
-
-func (z nat) norm() nat {
-	i := len(z)
-	for i > 0 && z[i-1] == 0 {
-		i--
-	}
-	return z[0:i]
-}
-
-func (z nat) make(n int) nat {
-	if n <= cap(z) {
-		return z[:n] // reuse z
-	}
-	// Choosing a good value for e has significant performance impact
-	// because it increases the chance that a value can be reused.
-	const e = 4 // extra capacity
-	return make(nat, n, n+e)
-}
-
-func (z nat) setWord(x Word) nat {
-	if x == 0 {
-		return z[:0]
-	}
-	z = z.make(1)
-	z[0] = x
-	return z
-}
-
-func (z nat) setUint64(x uint64) nat {
-	// single-digit values
-	if w := Word(x); uint64(w) == x {
-		return z.setWord(w)
-	}
-
-	// compute number of words n required to represent x
-	n := 0
-	for t := x; t > 0; t >>= _W {
-		n++
-	}
-
-	// split x into n words
-	z = z.make(n)
-	for i := range z {
-		z[i] = Word(x & _M)
-		x >>= _W
-	}
-
-	return z
-}
-
-func (z nat) set(x nat) nat {
-	z = z.make(len(x))
-	copy(z, x)
-	return z
-}
-
-func (z nat) add(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-
-	switch {
-	case m < n:
-		return z.add(y, x)
-	case m == 0:
-		// n == 0 because m >= n; result is 0
-		return z[:0]
-	case n == 0:
-		// result is x
-		return z.set(x)
-	}
-	// m > 0
-
-	z = z.make(m + 1)
-	c := addVV(z[0:n], x, y)
-	if m > n {
-		c = addVW(z[n:m], x[n:], c)
-	}
-	z[m] = c
-
-	return z.norm()
-}
-
-func (z nat) sub(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-
-	switch {
-	case m < n:
-		panic("underflow")
-	case m == 0:
-		// n == 0 because m >= n; result is 0
-		return z[:0]
-	case n == 0:
-		// result is x
-		return z.set(x)
-	}
-	// m > 0
-
-	z = z.make(m)
-	c := subVV(z[0:n], x, y)
-	if m > n {
-		c = subVW(z[n:], x[n:], c)
-	}
-	if c != 0 {
-		panic("underflow")
-	}
-
-	return z.norm()
-}
-
-func (x nat) cmp(y nat) (r int) {
-	m := len(x)
-	n := len(y)
-	if m != n || m == 0 {
-		switch {
-		case m < n:
-			r = -1
-		case m > n:
-			r = 1
-		}
-		return
-	}
-
-	i := m - 1
-	for i > 0 && x[i] == y[i] {
-		i--
-	}
-
-	switch {
-	case x[i] < y[i]:
-		r = -1
-	case x[i] > y[i]:
-		r = 1
-	}
-	return
-}
-
-func (z nat) mulAddWW(x nat, y, r Word) nat {
-	m := len(x)
-	if m == 0 || y == 0 {
-		return z.setWord(r) // result is r
-	}
-	// m > 0
-
-	z = z.make(m + 1)
-	z[m] = mulAddVWW(z[0:m], x, y, r)
-
-	return z.norm()
-}
-
-// basicMul multiplies x and y and leaves the result in z.
-// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
-func basicMul(z, x, y nat) {
-	z[0 : len(x)+len(y)].clear() // initialize z
-	for i, d := range y {
-		if d != 0 {
-			z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d)
-		}
-	}
-}
-
-// montgomery computes z mod m = x*y*2**(-n*_W) mod m,
-// assuming k = -1/m mod 2**_W.
-// z is used for storing the result which is returned;
-// z must not alias x, y or m.
-// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
-// https://eprint.iacr.org/2011/239.pdf
-// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
-// x and y are required to satisfy 0 <= x, y < 2**(n*_W) and then the result
-// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
-func (z nat) montgomery(x, y, m nat, k Word, n int) nat {
-	// This code assumes x, y, m are all the same length, n.
-	// (required by addMulVVW and the for loop).
-	// It also assumes that x, y are already reduced mod m,
-	// or else the result will not be properly reduced.
-	if len(x) != n || len(y) != n || len(m) != n {
-		panic("math/big: mismatched montgomery number lengths")
-	}
-	z = z.make(n)
-	z.clear()
-	var c Word
-	for i := 0; i < n; i++ {
-		d := y[i]
-		c2 := addMulVVW(z, x, d)
-		t := z[0] * k
-		c3 := addMulVVW(z, m, t)
-		copy(z, z[1:])
-		cx := c + c2
-		cy := cx + c3
-		z[n-1] = cy
-		if cx < c2 || cy < c3 {
-			c = 1
-		} else {
-			c = 0
-		}
-	}
-	if c != 0 {
-		subVV(z, z, m)
-	}
-	return z
-}
-
-// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
-// Factored out for readability - do not use outside karatsuba.
-func karatsubaAdd(z, x nat, n int) {
-	if c := addVV(z[0:n], z, x); c != 0 {
-		addVW(z[n:n+n>>1], z[n:], c)
-	}
-}
-
-// Like karatsubaAdd, but does subtract.
-func karatsubaSub(z, x nat, n int) {
-	if c := subVV(z[0:n], z, x); c != 0 {
-		subVW(z[n:n+n>>1], z[n:], c)
-	}
-}
-
-// Operands that are shorter than karatsubaThreshold are multiplied using
-// "grade school" multiplication; for longer operands the Karatsuba algorithm
-// is used.
-var karatsubaThreshold int = 40 // computed by calibrate.go
-
-// karatsuba multiplies x and y and leaves the result in z.
-// Both x and y must have the same length n and n must be a
-// power of 2. The result vector z must have len(z) >= 6*n.
-// The (non-normalized) result is placed in z[0 : 2*n].
-func karatsuba(z, x, y nat) {
-	n := len(y)
-
-	// Switch to basic multiplication if numbers are odd or small.
-	// (n is always even if karatsubaThreshold is even, but be
-	// conservative)
-	if n&1 != 0 || n < karatsubaThreshold || n < 2 {
-		basicMul(z, x, y)
-		return
-	}
-	// n&1 == 0 && n >= karatsubaThreshold && n >= 2
-
-	// Karatsuba multiplication is based on the observation that
-	// for two numbers x and y with:
-	//
-	//   x = x1*b + x0
-	//   y = y1*b + y0
-	//
-	// the product x*y can be obtained with 3 products z2, z1, z0
-	// instead of 4:
-	//
-	//   x*y = x1*y1*b*b + (x1*y0 + x0*y1)*b + x0*y0
-	//       =    z2*b*b +              z1*b +    z0
-	//
-	// with:
-	//
-	//   xd = x1 - x0
-	//   yd = y0 - y1
-	//
-	//   z1 =      xd*yd                    + z2 + z0
-	//      = (x1-x0)*(y0 - y1)             + z2 + z0
-	//      = x1*y0 - x1*y1 - x0*y0 + x0*y1 + z2 + z0
-	//      = x1*y0 -    z2 -    z0 + x0*y1 + z2 + z0
-	//      = x1*y0                 + x0*y1
-
-	// split x, y into "digits"
-	n2 := n >> 1              // n2 >= 1
-	x1, x0 := x[n2:], x[0:n2] // x = x1*b + x0
-	y1, y0 := y[n2:], y[0:n2] // y = y1*b + y0
-
-	// z is used for the result and temporary storage:
-	//
-	//   6*n     5*n     4*n     3*n     2*n     1*n     0*n
-	// z = [z2 copy|z0 copy| xd*yd | yd:xd | x1*y1 | x0*y0 ]
-	//
-	// For each recursive call of karatsuba, an unused slice of
-	// z is passed in that has (at least) half the length of the
-	// caller's z.
-
-	// compute z0 and z2 with the result "in place" in z
-	karatsuba(z, x0, y0)     // z0 = x0*y0
-	karatsuba(z[n:], x1, y1) // z2 = x1*y1
-
-	// compute xd (or the negative value if underflow occurs)
-	s := 1 // sign of product xd*yd
-	xd := z[2*n : 2*n+n2]
-	if subVV(xd, x1, x0) != 0 { // x1-x0
-		s = -s
-		subVV(xd, x0, x1) // x0-x1
-	}
-
-	// compute yd (or the negative value if underflow occurs)
-	yd := z[2*n+n2 : 3*n]
-	if subVV(yd, y0, y1) != 0 { // y0-y1
-		s = -s
-		subVV(yd, y1, y0) // y1-y0
-	}
-
-	// p = (x1-x0)*(y0-y1) == x1*y0 - x1*y1 - x0*y0 + x0*y1 for s > 0
-	// p = (x0-x1)*(y0-y1) == x0*y0 - x0*y1 - x1*y0 + x1*y1 for s < 0
-	p := z[n*3:]
-	karatsuba(p, xd, yd)
-
-	// save original z2:z0
-	// (ok to use upper half of z since we're done recursing)
-	r := z[n*4:]
-	copy(r, z[:n*2])
-
-	// add up all partial products
-	//
-	//   2*n     n     0
-	// z = [ z2  | z0  ]
-	//   +    [ z0  ]
-	//   +    [ z2  ]
-	//   +    [  p  ]
-	//
-	karatsubaAdd(z[n2:], r, n)
-	karatsubaAdd(z[n2:], r[n:], n)
-	if s > 0 {
-		karatsubaAdd(z[n2:], p, n)
-	} else {
-		karatsubaSub(z[n2:], p, n)
-	}
-}
-
-// alias reports whether x and y share the same base array.
-func alias(x, y nat) bool {
-	return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
-}
-
-// addAt implements z += x<<(_W*i); z must be long enough.
-// (we don't use nat.add because we need z to stay the same
-// slice, and we don't need to normalize z after each addition)
-func addAt(z, x nat, i int) {
-	if n := len(x); n > 0 {
-		if c := addVV(z[i:i+n], z[i:], x); c != 0 {
-			j := i + n
-			if j < len(z) {
-				addVW(z[j:], z[j:], c)
-			}
-		}
-	}
-}
-
-func max(x, y int) int {
-	if x > y {
-		return x
-	}
-	return y
-}
-
-// karatsubaLen computes an approximation to the maximum k <= n such that
-// k = p<<i for a number p <= karatsubaThreshold and an i >= 0. Thus, the
-// result is the largest number that can be divided repeatedly by 2 before
-// becoming about the value of karatsubaThreshold.
-func karatsubaLen(n int) int {
-	i := uint(0)
-	for n > karatsubaThreshold {
-		n >>= 1
-		i++
-	}
-	return n << i
-}
-
-func (z nat) mul(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-
-	switch {
-	case m < n:
-		return z.mul(y, x)
-	case m == 0 || n == 0:
-		return z[:0]
-	case n == 1:
-		return z.mulAddWW(x, y[0], 0)
-	}
-	// m >= n > 1
-
-	// determine if z can be reused
-	if alias(z, x) || alias(z, y) {
-		z = nil // z is an alias for x or y - cannot reuse
-	}
-
-	// use basic multiplication if the numbers are small
-	if n < karatsubaThreshold {
-		z = z.make(m + n)
-		basicMul(z, x, y)
-		return z.norm()
-	}
-	// m >= n && n >= karatsubaThreshold && n >= 2
-
-	// determine Karatsuba length k such that
-	//
-	//   x = xh*b + x0  (0 <= x0 < b)
-	//   y = yh*b + y0  (0 <= y0 < b)
-	//   b = 1<<(_W*k)  ("base" of digits xi, yi)
-	//
-	k := karatsubaLen(n)
-	// k <= n
-
-	// multiply x0 and y0 via Karatsuba
-	x0 := x[0:k]              // x0 is not normalized
-	y0 := y[0:k]              // y0 is not normalized
-	z = z.make(max(6*k, m+n)) // enough space for karatsuba of x0*y0 and full result of x*y
-	karatsuba(z, x0, y0)
-	z = z[0 : m+n]  // z has final length but may be incomplete
-	z[2*k:].clear() // upper portion of z is garbage (and 2*k <= m+n since k <= n <= m)
-
-	// If xh != 0 or yh != 0, add the missing terms to z. For
-	//
-	//   xh = xi*b^i + ... + x2*b^2 + x1*b (0 <= xi < b)
-	//   yh =                         y1*b (0 <= y1 < b)
-	//
-	// the missing terms are
-	//
-	//   x0*y1*b and xi*y0*b^i, xi*y1*b^(i+1) for i > 0
-	//
-	// since all the yi for i > 1 are 0 by choice of k: If any of them
-	// were > 0, then yh >= b^2 and thus y >= b^2. Then k' = k*2 would
-	// be a larger valid threshold contradicting the assumption about k.
-	//
-	if k < n || m != n {
-		var t nat
-
-		// add x0*y1*b
-		x0 := x0.norm()
-		y1 := y[k:]       // y1 is normalized because y is
-		t = t.mul(x0, y1) // update t so we don't lose t's underlying array
-		addAt(z, t, k)
-
-		// add xi*y0<<i, xi*y1*b<<(i+k)
-		y0 := y0.norm()
-		for i := k; i < len(x); i += k {
-			xi := x[i:]
-			if len(xi) > k {
-				xi = xi[:k]
-			}
-			xi = xi.norm()
-			t = t.mul(xi, y0)
-			addAt(z, t, i)
-			t = t.mul(xi, y1)
-			addAt(z, t, i+k)
-		}
-	}
-
-	return z.norm()
-}
-
-// mulRange computes the product of all the unsigned integers in the
-// range [a, b] inclusively. If a > b (empty range), the result is 1.
-func (z nat) mulRange(a, b uint64) nat {
-	switch {
-	case a == 0:
-		// cut long ranges short (optimization)
-		return z.setUint64(0)
-	case a > b:
-		return z.setUint64(1)
-	case a == b:
-		return z.setUint64(a)
-	case a+1 == b:
-		return z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b))
-	}
-	m := (a + b) / 2
-	return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
-}
-
-// q = (x-r)/y, with 0 <= r < y
-func (z nat) divW(x nat, y Word) (q nat, r Word) {
-	m := len(x)
-	switch {
-	case y == 0:
-		panic("division by zero")
-	case y == 1:
-		q = z.set(x) // result is x
-		return
-	case m == 0:
-		q = z[:0] // result is 0
-		return
-	}
-	// m > 0
-	z = z.make(m)
-	r = divWVW(z, 0, x, y)
-	q = z.norm()
-	return
-}
-
-func (z nat) div(z2, u, v nat) (q, r nat) {
-	if len(v) == 0 {
-		panic("division by zero")
-	}
-
-	if u.cmp(v) < 0 {
-		q = z[:0]
-		r = z2.set(u)
-		return
-	}
-
-	if len(v) == 1 {
-		var r2 Word
-		q, r2 = z.divW(u, v[0])
-		r = z2.setWord(r2)
-		return
-	}
-
-	q, r = z.divLarge(z2, u, v)
-	return
-}
-
-// getNat returns a *nat of len n. The contents may not be zero.
-// The pool holds *nat to avoid allocation when converting to interface{}.
-func getNat(n int) *nat {
-	var z *nat
-	if v := natPool.Get(); v != nil {
-		z = v.(*nat)
-	}
-	if z == nil {
-		z = new(nat)
-	}
-	*z = z.make(n)
-	return z
-}
-
-func putNat(x *nat) {
-	natPool.Put(x)
-}
-
-var natPool sync.Pool
-
-// q = (uIn-r)/v, with 0 <= r < y
-// Uses z as storage for q, and u as storage for r if possible.
-// See Knuth, Volume 2, section 4.3.1, Algorithm D.
-// Preconditions:
-//    len(v) >= 2
-//    len(uIn) >= len(v)
-func (z nat) divLarge(u, uIn, v nat) (q, r nat) {
-	n := len(v)
-	m := len(uIn) - n
-
-	// determine if z can be reused
-	// TODO(gri) should find a better solution - this if statement
-	//           is very costly (see e.g. time pidigits -s -n 10000)
-	if alias(z, uIn) || alias(z, v) {
-		z = nil // z is an alias for uIn or v - cannot reuse
-	}
-	q = z.make(m + 1)
-
-	qhatvp := getNat(n + 1)
-	qhatv := *qhatvp
-	if alias(u, uIn) || alias(u, v) {
-		u = nil // u is an alias for uIn or v - cannot reuse
-	}
-	u = u.make(len(uIn) + 1)
-	u.clear() // TODO(gri) no need to clear if we allocated a new u
-
-	// D1.
-	var v1p *nat
-	shift := nlz(v[n-1])
-	if shift > 0 {
-		// do not modify v, it may be used by another goroutine simultaneously
-		v1p = getNat(n)
-		v1 := *v1p
-		shlVU(v1, v, shift)
-		v = v1
-	}
-	u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift)
-
-	// D2.
-	vn1 := v[n-1]
-	for j := m; j >= 0; j-- {
-		// D3.
-		qhat := Word(_M)
-		if ujn := u[j+n]; ujn != vn1 {
-			var rhat Word
-			qhat, rhat = divWW(ujn, u[j+n-1], vn1)
-
-			// x1 | x2 = q̂v_{n-2}
-			vn2 := v[n-2]
-			x1, x2 := mulWW(qhat, vn2)
-			// test if q̂v_{n-2} > br̂ + u_{j+n-2}
-			ujn2 := u[j+n-2]
-			for greaterThan(x1, x2, rhat, ujn2) {
-				qhat--
-				prevRhat := rhat
-				rhat += vn1
-				// v[n-1] >= 0, so this tests for overflow.
-				if rhat < prevRhat {
-					break
-				}
-				x1, x2 = mulWW(qhat, vn2)
-			}
-		}
-
-		// D4.
-		qhatv[n] = mulAddVWW(qhatv[0:n], v, qhat, 0)
-
-		c := subVV(u[j:j+len(qhatv)], u[j:], qhatv)
-		if c != 0 {
-			c := addVV(u[j:j+n], u[j:], v)
-			u[j+n] += c
-			qhat--
-		}
-
-		q[j] = qhat
-	}
-	if v1p != nil {
-		putNat(v1p)
-	}
-	putNat(qhatvp)
-
-	q = q.norm()
-	shrVU(u, u, shift)
-	r = u.norm()
-
-	return q, r
-}
-
-// Length of x in bits. x must be normalized.
-func (x nat) bitLen() int {
-	if i := len(x) - 1; i >= 0 {
-		return i*_W + bitLen(x[i])
-	}
-	return 0
-}
-
-const deBruijn32 = 0x077CB531
-
-var deBruijn32Lookup = [...]byte{
-	0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
-	31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
-}
-
-const deBruijn64 = 0x03f79d71b4ca8b09
-
-var deBruijn64Lookup = [...]byte{
-	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
-	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
-	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
-	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
-}
-
-// trailingZeroBits returns the number of consecutive least significant zero
-// bits of x.
-func trailingZeroBits(x Word) uint {
-	// x & -x leaves only the right-most bit set in the word. Let k be the
-	// index of that bit. Since only a single bit is set, the value is two
-	// to the power of k. Multiplying by a power of two is equivalent to
-	// left shifting, in this case by k bits. The de Bruijn constant is
-	// such that all six bit, consecutive substrings are distinct.
-	// Therefore, if we have a left shifted version of this constant we can
-	// find by how many bits it was shifted by looking at which six bit
-	// substring ended up at the top of the word.
-	// (Knuth, volume 4, section 7.3.1)
-	switch _W {
-	case 32:
-		return uint(deBruijn32Lookup[((x&-x)*deBruijn32)>>27])
-	case 64:
-		return uint(deBruijn64Lookup[((x&-x)*(deBruijn64&_M))>>58])
-	default:
-		panic("unknown word size")
-	}
-}
-
-// trailingZeroBits returns the number of consecutive least significant zero
-// bits of x.
-func (x nat) trailingZeroBits() uint {
-	if len(x) == 0 {
-		return 0
-	}
-	var i uint
-	for x[i] == 0 {
-		i++
-	}
-	// x[i] != 0
-	return i*_W + trailingZeroBits(x[i])
-}
-
-// z = x << s
-func (z nat) shl(x nat, s uint) nat {
-	m := len(x)
-	if m == 0 {
-		return z[:0]
-	}
-	// m > 0
-
-	n := m + int(s/_W)
-	z = z.make(n + 1)
-	z[n] = shlVU(z[n-m:n], x, s%_W)
-	z[0 : n-m].clear()
-
-	return z.norm()
-}
-
-// z = x >> s
-func (z nat) shr(x nat, s uint) nat {
-	m := len(x)
-	n := m - int(s/_W)
-	if n <= 0 {
-		return z[:0]
-	}
-	// n > 0
-
-	z = z.make(n)
-	shrVU(z, x[m-n:], s%_W)
-
-	return z.norm()
-}
-
-func (z nat) setBit(x nat, i uint, b uint) nat {
-	j := int(i / _W)
-	m := Word(1) << (i % _W)
-	n := len(x)
-	switch b {
-	case 0:
-		z = z.make(n)
-		copy(z, x)
-		if j >= n {
-			// no need to grow
-			return z
-		}
-		z[j] &^= m
-		return z.norm()
-	case 1:
-		if j >= n {
-			z = z.make(j + 1)
-			z[n:].clear()
-		} else {
-			z = z.make(n)
-		}
-		copy(z, x)
-		z[j] |= m
-		// no need to normalize
-		return z
-	}
-	panic("set bit is not 0 or 1")
-}
-
-// bit returns the value of the i'th bit, with lsb == bit 0.
-func (x nat) bit(i uint) uint {
-	j := i / _W
-	if j >= uint(len(x)) {
-		return 0
-	}
-	// 0 <= j < len(x)
-	return uint(x[j] >> (i % _W) & 1)
-}
-
-// sticky returns 1 if there's a 1 bit within the
-// i least significant bits, otherwise it returns 0.
-func (x nat) sticky(i uint) uint {
-	j := i / _W
-	if j >= uint(len(x)) {
-		if len(x) == 0 {
-			return 0
-		}
-		return 1
-	}
-	// 0 <= j < len(x)
-	for _, x := range x[:j] {
-		if x != 0 {
-			return 1
-		}
-	}
-	if x[j]<<(_W-i%_W) != 0 {
-		return 1
-	}
-	return 0
-}
-
-func (z nat) and(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-	if m > n {
-		m = n
-	}
-	// m <= n
-
-	z = z.make(m)
-	for i := 0; i < m; i++ {
-		z[i] = x[i] & y[i]
-	}
-
-	return z.norm()
-}
-
-func (z nat) andNot(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-	if n > m {
-		n = m
-	}
-	// m >= n
-
-	z = z.make(m)
-	for i := 0; i < n; i++ {
-		z[i] = x[i] &^ y[i]
-	}
-	copy(z[n:m], x[n:m])
-
-	return z.norm()
-}
-
-func (z nat) or(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-	s := x
-	if m < n {
-		n, m = m, n
-		s = y
-	}
-	// m >= n
-
-	z = z.make(m)
-	for i := 0; i < n; i++ {
-		z[i] = x[i] | y[i]
-	}
-	copy(z[n:m], s[n:m])
-
-	return z.norm()
-}
-
-func (z nat) xor(x, y nat) nat {
-	m := len(x)
-	n := len(y)
-	s := x
-	if m < n {
-		n, m = m, n
-		s = y
-	}
-	// m >= n
-
-	z = z.make(m)
-	for i := 0; i < n; i++ {
-		z[i] = x[i] ^ y[i]
-	}
-	copy(z[n:m], s[n:m])
-
-	return z.norm()
-}
-
-// greaterThan reports whether (x1<<_W + x2) > (y1<<_W + y2)
-func greaterThan(x1, x2, y1, y2 Word) bool {
-	return x1 > y1 || x1 == y1 && x2 > y2
-}
-
-// modW returns x % d.
-func (x nat) modW(d Word) (r Word) {
-	// TODO(agl): we don't actually need to store the q value.
-	var q nat
-	q = q.make(len(x))
-	return divWVW(q, 0, x, d)
-}
-
-// random creates a random integer in [0..limit), using the space in z if
-// possible. n is the bit length of limit.
-func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
-	if alias(z, limit) {
-		z = nil // z is an alias for limit - cannot reuse
-	}
-	z = z.make(len(limit))
-
-	bitLengthOfMSW := uint(n % _W)
-	if bitLengthOfMSW == 0 {
-		bitLengthOfMSW = _W
-	}
-	mask := Word((1 << bitLengthOfMSW) - 1)
-
-	for {
-		switch _W {
-		case 32:
-			for i := range z {
-				z[i] = Word(rand.Uint32())
-			}
-		case 64:
-			for i := range z {
-				z[i] = Word(rand.Uint32()) | Word(rand.Uint32())<<32
-			}
-		default:
-			panic("unknown word size")
-		}
-		z[len(limit)-1] &= mask
-		if z.cmp(limit) < 0 {
-			break
-		}
-	}
-
-	return z.norm()
-}
-
-// If m != 0 (i.e., len(m) != 0), expNN sets z to x**y mod m;
-// otherwise it sets z to x**y. The result is the value of z.
-func (z nat) expNN(x, y, m nat) nat {
-	if alias(z, x) || alias(z, y) {
-		// We cannot allow in-place modification of x or y.
-		z = nil
-	}
-
-	// x**y mod 1 == 0
-	if len(m) == 1 && m[0] == 1 {
-		return z.setWord(0)
-	}
-	// m == 0 || m > 1
-
-	// x**0 == 1
-	if len(y) == 0 {
-		return z.setWord(1)
-	}
-	// y > 0
-
-	// x**1 mod m == x mod m
-	if len(y) == 1 && y[0] == 1 && len(m) != 0 {
-		_, z = z.div(z, x, m)
-		return z
-	}
-	// y > 1
-
-	if len(m) != 0 {
-		// We likely end up being as long as the modulus.
-		z = z.make(len(m))
-	}
-	z = z.set(x)
-
-	// If the base is non-trivial and the exponent is large, we use
-	// 4-bit, windowed exponentiation. This involves precomputing 14 values
-	// (x^2...x^15) but then reduces the number of multiply-reduces by a
-	// third. Even for a 32-bit exponent, this reduces the number of
-	// operations. Uses Montgomery method for odd moduli.
-	if x.cmp(natOne) > 0 && len(y) > 1 && len(m) > 0 {
-		if m[0]&1 == 1 {
-			return z.expNNMontgomery(x, y, m)
-		}
-		return z.expNNWindowed(x, y, m)
-	}
-
-	v := y[len(y)-1] // v > 0 because y is normalized and y > 0
-	shift := nlz(v) + 1
-	v <<= shift
-	var q nat
-
-	const mask = 1 << (_W - 1)
-
-	// We walk through the bits of the exponent one by one. Each time we
-	// see a bit, we square, thus doubling the power. If the bit is a one,
-	// we also multiply by x, thus adding one to the power.
-
-	w := _W - int(shift)
-	// zz and r are used to avoid allocating in mul and div as
-	// otherwise the arguments would alias.
-	var zz, r nat
-	for j := 0; j < w; j++ {
-		zz = zz.mul(z, z)
-		zz, z = z, zz
-
-		if v&mask != 0 {
-			zz = zz.mul(z, x)
-			zz, z = z, zz
-		}
-
-		if len(m) != 0 {
-			zz, r = zz.div(r, z, m)
-			zz, r, q, z = q, z, zz, r
-		}
-
-		v <<= 1
-	}
-
-	for i := len(y) - 2; i >= 0; i-- {
-		v = y[i]
-
-		for j := 0; j < _W; j++ {
-			zz = zz.mul(z, z)
-			zz, z = z, zz
-
-			if v&mask != 0 {
-				zz = zz.mul(z, x)
-				zz, z = z, zz
-			}
-
-			if len(m) != 0 {
-				zz, r = zz.div(r, z, m)
-				zz, r, q, z = q, z, zz, r
-			}
-
-			v <<= 1
-		}
-	}
-
-	return z.norm()
-}
-
-// expNNWindowed calculates x**y mod m using a fixed, 4-bit window.
-func (z nat) expNNWindowed(x, y, m nat) nat {
-	// zz and r are used to avoid allocating in mul and div as otherwise
-	// the arguments would alias.
-	var zz, r nat
-
-	const n = 4
-	// powers[i] contains x^i.
-	var powers [1 << n]nat
-	powers[0] = natOne
-	powers[1] = x
-	for i := 2; i < 1<<n; i += 2 {
-		p2, p, p1 := &powers[i/2], &powers[i], &powers[i+1]
-		*p = p.mul(*p2, *p2)
-		zz, r = zz.div(r, *p, m)
-		*p, r = r, *p
-		*p1 = p1.mul(*p, x)
-		zz, r = zz.div(r, *p1, m)
-		*p1, r = r, *p1
-	}
-
-	z = z.setWord(1)
-
-	for i := len(y) - 1; i >= 0; i-- {
-		yi := y[i]
-		for j := 0; j < _W; j += n {
-			if i != len(y)-1 || j != 0 {
-				// Unrolled loop for significant performance
-				// gain. Use go test -bench=".*" in crypto/rsa
-				// to check performance before making changes.
-				zz = zz.mul(z, z)
-				zz, z = z, zz
-				zz, r = zz.div(r, z, m)
-				z, r = r, z
-
-				zz = zz.mul(z, z)
-				zz, z = z, zz
-				zz, r = zz.div(r, z, m)
-				z, r = r, z
-
-				zz = zz.mul(z, z)
-				zz, z = z, zz
-				zz, r = zz.div(r, z, m)
-				z, r = r, z
-
-				zz = zz.mul(z, z)
-				zz, z = z, zz
-				zz, r = zz.div(r, z, m)
-				z, r = r, z
-			}
-
-			zz = zz.mul(z, powers[yi>>(_W-n)])
-			zz, z = z, zz
-			zz, r = zz.div(r, z, m)
-			z, r = r, z
-
-			yi <<= n
-		}
-	}
-
-	return z.norm()
-}
-
-// expNNMontgomery calculates x**y mod m using a fixed, 4-bit window.
-// Uses Montgomery representation.
-func (z nat) expNNMontgomery(x, y, m nat) nat {
-	numWords := len(m)
-
-	// We want the lengths of x and m to be equal.
-	// It is OK if x >= m as long as len(x) == len(m).
-	if len(x) > numWords {
-		_, x = nat(nil).div(nil, x, m)
-		// Note: now len(x) <= numWords, not guaranteed ==.
-	}
-	if len(x) < numWords {
-		rr := make(nat, numWords)
-		copy(rr, x)
-		x = rr
-	}
-
-	// Ideally the precomputations would be performed outside, and reused
-	// k0 = -m**-1 mod 2**_W. Algorithm from: Dumas, J.G. "On Newton–Raphson
-	// Iteration for Multiplicative Inverses Modulo Prime Powers".
-	k0 := 2 - m[0]
-	t := m[0] - 1
-	for i := 1; i < _W; i <<= 1 {
-		t *= t
-		k0 *= (t + 1)
-	}
-	k0 = -k0
-
-	// RR = 2**(2*_W*len(m)) mod m
-	RR := nat(nil).setWord(1)
-	zz := nat(nil).shl(RR, uint(2*numWords*_W))
-	_, RR = RR.div(RR, zz, m)
-	if len(RR) < numWords {
-		zz = zz.make(numWords)
-		copy(zz, RR)
-		RR = zz
-	}
-	// one = 1, with equal length to that of m
-	one := make(nat, numWords)
-	one[0] = 1
-
-	const n = 4
-	// powers[i] contains x^i
-	var powers [1 << n]nat
-	powers[0] = powers[0].montgomery(one, RR, m, k0, numWords)
-	powers[1] = powers[1].montgomery(x, RR, m, k0, numWords)
-	for i := 2; i < 1<<n; i++ {
-		powers[i] = powers[i].montgomery(powers[i-1], powers[1], m, k0, numWords)
-	}
-
-	// initialize z = 1 (Montgomery 1)
-	z = z.make(numWords)
-	copy(z, powers[0])
-
-	zz = zz.make(numWords)
-
-	// same windowed exponent, but with Montgomery multiplications
-	for i := len(y) - 1; i >= 0; i-- {
-		yi := y[i]
-		for j := 0; j < _W; j += n {
-			if i != len(y)-1 || j != 0 {
-				zz = zz.montgomery(z, z, m, k0, numWords)
-				z = z.montgomery(zz, zz, m, k0, numWords)
-				zz = zz.montgomery(z, z, m, k0, numWords)
-				z = z.montgomery(zz, zz, m, k0, numWords)
-			}
-			zz = zz.montgomery(z, powers[yi>>(_W-n)], m, k0, numWords)
-			z, zz = zz, z
-			yi <<= n
-		}
-	}
-	// convert to regular number
-	zz = zz.montgomery(z, one, m, k0, numWords)
-
-	// One last reduction, just in case.
-	// See golang.org/issue/13907.
-	if zz.cmp(m) >= 0 {
-		// Common case is m has high bit set; in that case,
-		// since zz is the same length as m, there can be just
-		// one multiple of m to remove. Just subtract.
-		// We think that the subtract should be sufficient in general,
-		// so do that unconditionally, but double-check,
-		// in case our beliefs are wrong.
-		// The div is not expected to be reached.
-		zz = zz.sub(zz, m)
-		if zz.cmp(m) >= 0 {
-			_, zz = nat(nil).div(nil, zz, m)
-		}
-	}
-
-	return zz.norm()
-}
-
-// bytes writes the value of z into buf using big-endian encoding.
-// len(buf) must be >= len(z)*_S. The value of z is encoded in the
-// slice buf[i:]. The number i of unused bytes at the beginning of
-// buf is returned as result.
-func (z nat) bytes(buf []byte) (i int) {
-	i = len(buf)
-	for _, d := range z {
-		for j := 0; j < _S; j++ {
-			i--
-			buf[i] = byte(d)
-			d >>= 8
-		}
-	}
-
-	for i < len(buf) && buf[i] == 0 {
-		i++
-	}
-
-	return
-}
-
-// setBytes interprets buf as the bytes of a big-endian unsigned
-// integer, sets z to that value, and returns z.
-func (z nat) setBytes(buf []byte) nat {
-	z = z.make((len(buf) + _S - 1) / _S)
-
-	k := 0
-	s := uint(0)
-	var d Word
-	for i := len(buf); i > 0; i-- {
-		d |= Word(buf[i-1]) << s
-		if s += 8; s == _S*8 {
-			z[k] = d
-			k++
-			s = 0
-			d = 0
-		}
-	}
-	if k < len(z) {
-		z[k] = d
-	}
-
-	return z.norm()
-}
-
-// sqrt sets z = ⌊√x⌋
-func (z nat) sqrt(x nat) nat {
-	if x.cmp(natOne) <= 0 {
-		return z.set(x)
-	}
-	if alias(z, x) {
-		z = nil
-	}
-
-	// Start with value known to be too large and repeat "z = ⌊(z + ⌊x/z⌋)/2⌋" until it stops getting smaller.
-	// See Brent and Zimmermann, Modern Computer Arithmetic, Algorithm 1.13 (SqrtInt).
-	// https://members.loria.fr/PZimmermann/mca/pub226.html
-	// If x is one less than a perfect square, the sequence oscillates between the correct z and z+1;
-	// otherwise it converges to the correct z and stays there.
-	var z1, z2 nat
-	z1 = z
-	z1 = z1.setUint64(1)
-	z1 = z1.shl(z1, uint(x.bitLen()/2+1)) // must be ≥ √x
-	for n := 0; ; n++ {
-		z2, _ = z2.div(nil, x, z1)
-		z2 = z2.add(z2, z1)
-		z2 = z2.shr(z2, 1)
-		if z2.cmp(z1) >= 0 {
-			// z1 is answer.
-			// Figure out whether z1 or z2 is currently aliased to z by looking at loop count.
-			if n&1 == 0 {
-				return z1
-			}
-			return z.set(z1)
-		}
-		z1, z2 = z2, z1
-	}
-}
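
nat.sqrt above is the engine behind the new Int.Sqrt. A standalone single-word (uint64) sketch of the same Brent–Zimmermann iteration, included only to make the z = ⌊(z + ⌊x/z⌋)/2⌋ recurrence concrete; it is not the multi-precision code being deleted.

package main

import "fmt"

// isqrt returns ⌊√x⌋ using the iteration from nat.sqrt: start with a
// power of two known to be ≥ √x and repeat z = (z + x/z) / 2 until the
// value stops decreasing.
func isqrt(x uint64) uint64 {
	if x <= 1 {
		return x
	}
	// compute the bit length of x
	n := 0
	for t := x; t > 0; t >>= 1 {
		n++
	}
	z1 := uint64(1) << uint(n/2+1) // initial guess, guaranteed ≥ √x
	for {
		z2 := (z1 + x/z1) / 2
		if z2 >= z1 {
			return z1 // stopped decreasing: z1 = ⌊√x⌋
		}
		z1 = z2
	}
}

func main() {
	fmt.Println(isqrt(99), isqrt(100), isqrt(101)) // 9 10 10
}
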
diff --git a/pkg/bootstrap/src/bootstrap/math/big/nat_test.go b/pkg/bootstrap/src/bootstrap/math/big/nat_test.go
deleted file mode 100644
index ddc99f1..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/nat_test.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/nat_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/nat_test.go:1
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"fmt"
-	"runtime"
-	"strings"
-	"testing"
-)
-
-var cmpTests = []struct {
-	x, y nat
-	r    int
-}{
-	{nil, nil, 0},
-	{nil, nat(nil), 0},
-	{nat(nil), nil, 0},
-	{nat(nil), nat(nil), 0},
-	{nat{0}, nat{0}, 0},
-	{nat{0}, nat{1}, -1},
-	{nat{1}, nat{0}, 1},
-	{nat{1}, nat{1}, 0},
-	{nat{0, _M}, nat{1}, 1},
-	{nat{1}, nat{0, _M}, -1},
-	{nat{1, _M}, nat{0, _M}, 1},
-	{nat{0, _M}, nat{1, _M}, -1},
-	{nat{16, 571956, 8794, 68}, nat{837, 9146, 1, 754489}, -1},
-	{nat{34986, 41, 105, 1957}, nat{56, 7458, 104, 1957}, 1},
-}
-
-func TestCmp(t *testing.T) {
-	for i, a := range cmpTests {
-		r := a.x.cmp(a.y)
-		if r != a.r {
-			t.Errorf("#%d got r = %v; want %v", i, r, a.r)
-		}
-	}
-}
-
-type funNN func(z, x, y nat) nat
-type argNN struct {
-	z, x, y nat
-}
-
-var sumNN = []argNN{
-	{},
-	{nat{1}, nil, nat{1}},
-	{nat{1111111110}, nat{123456789}, nat{987654321}},
-	{nat{0, 0, 0, 1}, nil, nat{0, 0, 0, 1}},
-	{nat{0, 0, 0, 1111111110}, nat{0, 0, 0, 123456789}, nat{0, 0, 0, 987654321}},
-	{nat{0, 0, 0, 1}, nat{0, 0, _M}, nat{0, 0, 1}},
-}
-
-var prodNN = []argNN{
-	{},
-	{nil, nil, nil},
-	{nil, nat{991}, nil},
-	{nat{991}, nat{991}, nat{1}},
-	{nat{991 * 991}, nat{991}, nat{991}},
-	{nat{0, 0, 991 * 991}, nat{0, 991}, nat{0, 991}},
-	{nat{1 * 991, 2 * 991, 3 * 991, 4 * 991}, nat{1, 2, 3, 4}, nat{991}},
-	{nat{4, 11, 20, 30, 20, 11, 4}, nat{1, 2, 3, 4}, nat{4, 3, 2, 1}},
-	// 3^100 * 3^28 = 3^128
-	{
-		natFromString("11790184577738583171520872861412518665678211592275841109096961"),
-		natFromString("515377520732011331036461129765621272702107522001"),
-		natFromString("22876792454961"),
-	},
-	// z = 111....1 (70000 digits)
-	// x = 10^(99*700) + ... + 10^1400 + 10^700 + 1
-	// y = 111....1 (700 digits, larger than Karatsuba threshold on 32-bit and 64-bit)
-	{
-		natFromString(strings.Repeat("1", 70000)),
-		natFromString("1" + strings.Repeat(strings.Repeat("0", 699)+"1", 99)),
-		natFromString(strings.Repeat("1", 700)),
-	},
-	// z = 111....1 (20000 digits)
-	// x = 10^10000 + 1
-	// y = 111....1 (10000 digits)
-	{
-		natFromString(strings.Repeat("1", 20000)),
-		natFromString("1" + strings.Repeat("0", 9999) + "1"),
-		natFromString(strings.Repeat("1", 10000)),
-	},
-}
-
-func natFromString(s string) nat {
-	x, _, _, err := nat(nil).scan(strings.NewReader(s), 0, false)
-	if err != nil {
-		panic(err)
-	}
-	return x
-}
-
-func TestSet(t *testing.T) {
-	for _, a := range sumNN {
-		z := nat(nil).set(a.z)
-		if z.cmp(a.z) != 0 {
-			t.Errorf("got z = %v; want %v", z, a.z)
-		}
-	}
-}
-
-func testFunNN(t *testing.T, msg string, f funNN, a argNN) {
-	z := f(nil, a.x, a.y)
-	if z.cmp(a.z) != 0 {
-		t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, z, a.z)
-	}
-}
-
-func TestFunNN(t *testing.T) {
-	for _, a := range sumNN {
-		arg := a
-		testFunNN(t, "add", nat.add, arg)
-
-		arg = argNN{a.z, a.y, a.x}
-		testFunNN(t, "add symmetric", nat.add, arg)
-
-		arg = argNN{a.x, a.z, a.y}
-		testFunNN(t, "sub", nat.sub, arg)
-
-		arg = argNN{a.y, a.z, a.x}
-		testFunNN(t, "sub symmetric", nat.sub, arg)
-	}
-
-	for _, a := range prodNN {
-		arg := a
-		testFunNN(t, "mul", nat.mul, arg)
-
-		arg = argNN{a.z, a.y, a.x}
-		testFunNN(t, "mul symmetric", nat.mul, arg)
-	}
-}
-
-var mulRangesN = []struct {
-	a, b uint64
-	prod string
-}{
-	{0, 0, "0"},
-	{1, 1, "1"},
-	{1, 2, "2"},
-	{1, 3, "6"},
-	{10, 10, "10"},
-	{0, 100, "0"},
-	{0, 1e9, "0"},
-	{1, 0, "1"},                    // empty range
-	{100, 1, "1"},                  // empty range
-	{1, 10, "3628800"},             // 10!
-	{1, 20, "2432902008176640000"}, // 20!
-	{1, 100,
-		"933262154439441526816992388562667004907159682643816214685929" +
-			"638952175999932299156089414639761565182862536979208272237582" +
-			"51185210916864000000000000000000000000", // 100!
-	},
-}
-
-func TestMulRangeN(t *testing.T) {
-	for i, r := range mulRangesN {
-		prod := string(nat(nil).mulRange(r.a, r.b).utoa(10))
-		if prod != r.prod {
-			t.Errorf("#%d: got %s; want %s", i, prod, r.prod)
-		}
-	}
-}
-
-// allocBytes returns the number of bytes allocated by invoking f.
-func allocBytes(f func()) uint64 {
-	var stats runtime.MemStats
-	runtime.ReadMemStats(&stats)
-	t := stats.TotalAlloc
-	f()
-	runtime.ReadMemStats(&stats)
-	return stats.TotalAlloc - t
-}
-
-// TestMulUnbalanced tests that multiplying numbers of different lengths
-// does not cause deep recursion and in turn allocate too much memory.
-// Test case for issue 3807.
-func TestMulUnbalanced(t *testing.T) {
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
-	x := rndNat(50000)
-	y := rndNat(40)
-	allocSize := allocBytes(func() {
-		nat(nil).mul(x, y)
-	})
-	inputSize := uint64(len(x)+len(y)) * _S
-	if ratio := allocSize / uint64(inputSize); ratio > 10 {
-		t.Errorf("multiplication uses too much memory (%d > %d times the size of inputs)", allocSize, ratio)
-	}
-}
-
-func rndNat(n int) nat {
-	return nat(rndV(n)).norm()
-}
-
-func BenchmarkMul(b *testing.B) {
-	mulx := rndNat(1e4)
-	muly := rndNat(1e4)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		var z nat
-		z.mul(mulx, muly)
-	}
-}
-
-func TestNLZ(t *testing.T) {
-	var x Word = _B >> 1
-	for i := 0; i <= _W; i++ {
-		if int(nlz(x)) != i {
-			t.Errorf("failed at %x: got %d want %d", x, nlz(x), i)
-		}
-		x >>= 1
-	}
-}
-
-type shiftTest struct {
-	in    nat
-	shift uint
-	out   nat
-}
-
-var leftShiftTests = []shiftTest{
-	{nil, 0, nil},
-	{nil, 1, nil},
-	{natOne, 0, natOne},
-	{natOne, 1, natTwo},
-	{nat{1 << (_W - 1)}, 1, nat{0}},
-	{nat{1 << (_W - 1), 0}, 1, nat{0, 1}},
-}
-
-func TestShiftLeft(t *testing.T) {
-	for i, test := range leftShiftTests {
-		var z nat
-		z = z.shl(test.in, test.shift)
-		for j, d := range test.out {
-			if j >= len(z) || z[j] != d {
-				t.Errorf("#%d: got: %v want: %v", i, z, test.out)
-				break
-			}
-		}
-	}
-}
-
-var rightShiftTests = []shiftTest{
-	{nil, 0, nil},
-	{nil, 1, nil},
-	{natOne, 0, natOne},
-	{natOne, 1, nil},
-	{natTwo, 1, natOne},
-	{nat{0, 1}, 1, nat{1 << (_W - 1)}},
-	{nat{2, 1, 1}, 1, nat{1<<(_W-1) + 1, 1 << (_W - 1)}},
-}
-
-func TestShiftRight(t *testing.T) {
-	for i, test := range rightShiftTests {
-		var z nat
-		z = z.shr(test.in, test.shift)
-		for j, d := range test.out {
-			if j >= len(z) || z[j] != d {
-				t.Errorf("#%d: got: %v want: %v", i, z, test.out)
-				break
-			}
-		}
-	}
-}
-
-type modWTest struct {
-	in       string
-	dividend string
-	out      string
-}
-
-var modWTests32 = []modWTest{
-	{"23492635982634928349238759823742", "252341", "220170"},
-}
-
-var modWTests64 = []modWTest{
-	{"6527895462947293856291561095690465243862946", "524326975699234", "375066989628668"},
-}
-
-func runModWTests(t *testing.T, tests []modWTest) {
-	for i, test := range tests {
-		in, _ := new(Int).SetString(test.in, 10)
-		d, _ := new(Int).SetString(test.dividend, 10)
-		out, _ := new(Int).SetString(test.out, 10)
-
-		r := in.abs.modW(d.abs[0])
-		if r != out.abs[0] {
-			t.Errorf("#%d failed: got %d want %s", i, r, out)
-		}
-	}
-}
-
-func TestModW(t *testing.T) {
-	if _W >= 32 {
-		runModWTests(t, modWTests32)
-	}
-	if _W >= 64 {
-		runModWTests(t, modWTests64)
-	}
-}
-
-func TestTrailingZeroBits(t *testing.T) {
-	// test 0 case explicitly
-	if n := trailingZeroBits(0); n != 0 {
-		t.Errorf("got trailingZeroBits(0) = %d; want 0", n)
-	}
-
-	x := Word(1)
-	for i := uint(0); i < _W; i++ {
-		n := trailingZeroBits(x)
-		if n != i {
-			t.Errorf("got trailingZeroBits(%#x) = %d; want %d", x, n, i%_W)
-		}
-		x <<= 1
-	}
-
-	// test 0 case explicitly
-	if n := nat(nil).trailingZeroBits(); n != 0 {
-		t.Errorf("got nat(nil).trailingZeroBits() = %d; want 0", n)
-	}
-
-	y := nat(nil).set(natOne)
-	for i := uint(0); i <= 3*_W; i++ {
-		n := y.trailingZeroBits()
-		if n != i {
-			t.Errorf("got 0x%s.trailingZeroBits() = %d; want %d", y.utoa(16), n, i)
-		}
-		y = y.shl(y, 1)
-	}
-}
-
-var montgomeryTests = []struct {
-	x, y, m      string
-	k0           uint64
-	out32, out64 string
-}{
-	{
-		"0xffffffffffffffffffffffffffffffffffffffffffffffffe",
-		"0xffffffffffffffffffffffffffffffffffffffffffffffffe",
-		"0xfffffffffffffffffffffffffffffffffffffffffffffffff",
-		1,
-		"0x1000000000000000000000000000000000000000000",
-		"0x10000000000000000000000000000000000",
-	},
-	{
-		"0x000000000ffffff5",
-		"0x000000000ffffff0",
-		"0x0000000010000001",
-		0xff0000000fffffff,
-		"0x000000000bfffff4",
-		"0x0000000003400001",
-	},
-	{
-		"0x0000000080000000",
-		"0x00000000ffffffff",
-		"0x1000000000000001",
-		0xfffffffffffffff,
-		"0x0800000008000001",
-		"0x0800000008000001",
-	},
-	{
-		"0x0000000080000000",
-		"0x0000000080000000",
-		"0xffffffff00000001",
-		0xfffffffeffffffff,
-		"0xbfffffff40000001",
-		"0xbfffffff40000001",
-	},
-	{
-		"0x0000000080000000",
-		"0x0000000080000000",
-		"0x00ffffff00000001",
-		0xfffffeffffffff,
-		"0xbfffff40000001",
-		"0xbfffff40000001",
-	},
-	{
-		"0x0000000080000000",
-		"0x0000000080000000",
-		"0x0000ffff00000001",
-		0xfffeffffffff,
-		"0xbfff40000001",
-		"0xbfff40000001",
-	},
-	{
-		"0x3321ffffffffffffffffffffffffffff00000000000022222623333333332bbbb888c0",
-		"0x3321ffffffffffffffffffffffffffff00000000000022222623333333332bbbb888c0",
-		"0x33377fffffffffffffffffffffffffffffffffffffffffffff0000000000022222eee1",
-		0xdecc8f1249812adf,
-		"0x04eb0e11d72329dc0915f86784820fc403275bf2f6620a20e0dd344c5cd0875e50deb5",
-		"0x0d7144739a7d8e11d72329dc0915f86784820fc403275bf2f61ed96f35dd34dbb3d6a0",
-	},
-	{
-		"0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff00000000000022222223333333333444444444",
-		"0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffff999999999999999aaabbbbbbbbcccccccccccc",
-		"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff33377fffffffffffffffffffffffffffffffffffffffffffff0000000000022222eee1",
-		0xdecc8f1249812adf,
-		"0x5c0d52f451aec609b15da8e5e5626c4eaa88723bdeac9d25ca9b961269400410ca208a16af9c2fb07d7a11c7772cba02c22f9711078d51a3797eb18e691295293284d988e349fa6deba46b25a4ecd9f715",
-		"0x92fcad4b5c0d52f451aec609b15da8e5e5626c4eaa88723bdeac9d25ca9b961269400410ca208a16af9c2fb07d799c32fe2f3cc5422f9711078d51a3797eb18e691295293284d8f5e69caf6decddfe1df6",
-	},
-}
-
-func TestMontgomery(t *testing.T) {
-	one := NewInt(1)
-	_B := new(Int).Lsh(one, _W)
-	for i, test := range montgomeryTests {
-		x := natFromString(test.x)
-		y := natFromString(test.y)
-		m := natFromString(test.m)
-		for len(x) < len(m) {
-			x = append(x, 0)
-		}
-		for len(y) < len(m) {
-			y = append(y, 0)
-		}
-
-		if x.cmp(m) > 0 {
-			_, r := nat(nil).div(nil, x, m)
-			t.Errorf("#%d: x > m (0x%s > 0x%s; use 0x%s)", i, x.utoa(16), m.utoa(16), r.utoa(16))
-		}
-		if y.cmp(m) > 0 {
-			_, r := nat(nil).div(nil, x, m)
-			t.Errorf("#%d: y > m (0x%s > 0x%s; use 0x%s)", i, y.utoa(16), m.utoa(16), r.utoa(16))
-		}
-
-		var out nat
-		if _W == 32 {
-			out = natFromString(test.out32)
-		} else {
-			out = natFromString(test.out64)
-		}
-
-		// t.Logf("#%d: len=%d\n", i, len(m))
-
-		// check output in table
-		xi := &Int{abs: x}
-		yi := &Int{abs: y}
-		mi := &Int{abs: m}
-		p := new(Int).Mod(new(Int).Mul(xi, new(Int).Mul(yi, new(Int).ModInverse(new(Int).Lsh(one, uint(len(m))*_W), mi))), mi)
-		if out.cmp(p.abs.norm()) != 0 {
-			t.Errorf("#%d: out in table=0x%s, computed=0x%s", i, out.utoa(16), p.abs.norm().utoa(16))
-		}
-
-		// check k0 in table
-		k := new(Int).Mod(&Int{abs: m}, _B)
-		k = new(Int).Sub(_B, k)
-		k = new(Int).Mod(k, _B)
-		k0 := Word(new(Int).ModInverse(k, _B).Uint64())
-		if k0 != Word(test.k0) {
-			t.Errorf("#%d: k0 in table=%#x, computed=%#x\n", i, test.k0, k0)
-		}
-
-		// check montgomery with correct k0 produces correct output
-		z := nat(nil).montgomery(x, y, m, k0, len(m))
-		z = z.norm()
-		if z.cmp(out) != 0 {
-			t.Errorf("#%d: got 0x%s want 0x%s", i, z.utoa(16), out.utoa(16))
-		}
-	}
-}
-
-var expNNTests = []struct {
-	x, y, m string
-	out     string
-}{
-	{"0", "0", "0", "1"},
-	{"0", "0", "1", "0"},
-	{"1", "1", "1", "0"},
-	{"2", "1", "1", "0"},
-	{"2", "2", "1", "0"},
-	{"10", "100000000000", "1", "0"},
-	{"0x8000000000000000", "2", "", "0x40000000000000000000000000000000"},
-	{"0x8000000000000000", "2", "6719", "4944"},
-	{"0x8000000000000000", "3", "6719", "5447"},
-	{"0x8000000000000000", "1000", "6719", "1603"},
-	{"0x8000000000000000", "1000000", "6719", "3199"},
-	{
-		"2938462938472983472983659726349017249287491026512746239764525612965293865296239471239874193284792387498274256129746192347",
-		"298472983472983471903246121093472394872319615612417471234712061",
-		"29834729834729834729347290846729561262544958723956495615629569234729836259263598127342374289365912465901365498236492183464",
-		"23537740700184054162508175125554701713153216681790245129157191391322321508055833908509185839069455749219131480588829346291",
-	},
-	{
-		"11521922904531591643048817447554701904414021819823889996244743037378330903763518501116638828335352811871131385129455853417360623007349090150042001944696604737499160174391019030572483602867266711107136838523916077674888297896995042968746762200926853379",
-		"426343618817810911523",
-		"444747819283133684179",
-		"42",
-	},
-}
-
-func TestExpNN(t *testing.T) {
-	for i, test := range expNNTests {
-		x := natFromString(test.x)
-		y := natFromString(test.y)
-		out := natFromString(test.out)
-
-		var m nat
-		if len(test.m) > 0 {
-			m = natFromString(test.m)
-		}
-
-		z := nat(nil).expNN(x, y, m)
-		if z.cmp(out) != 0 {
-			t.Errorf("#%d got %s want %s", i, z.utoa(10), out.utoa(10))
-		}
-	}
-}
-
-func BenchmarkExp3Power(b *testing.B) {
-	const x = 3
-	for _, y := range []Word{
-		0x10, 0x40, 0x100, 0x400, 0x1000, 0x4000, 0x10000, 0x40000, 0x100000, 0x400000,
-	} {
-		b.Run(fmt.Sprintf("%#x", y), func(b *testing.B) {
-			var z nat
-			for i := 0; i < b.N; i++ {
-				z.expWW(x, y)
-			}
-		})
-	}
-}
-
-func fibo(n int) nat {
-	switch n {
-	case 0:
-		return nil
-	case 1:
-		return nat{1}
-	}
-	f0 := fibo(0)
-	f1 := fibo(1)
-	var f2 nat
-	for i := 1; i < n; i++ {
-		f2 = f2.add(f0, f1)
-		f0, f1, f2 = f1, f2, f0
-	}
-	return f1
-}
-
-var fiboNums = []string{
-	"0",
-	"55",
-	"6765",
-	"832040",
-	"102334155",
-	"12586269025",
-	"1548008755920",
-	"190392490709135",
-	"23416728348467685",
-	"2880067194370816120",
-	"354224848179261915075",
-}
-
-func TestFibo(t *testing.T) {
-	for i, want := range fiboNums {
-		n := i * 10
-		got := string(fibo(n).utoa(10))
-		if got != want {
-			t.Errorf("fibo(%d) failed: got %s want %s", n, got, want)
-		}
-	}
-}
-
-func BenchmarkFibo(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		fibo(1e0)
-		fibo(1e1)
-		fibo(1e2)
-		fibo(1e3)
-		fibo(1e4)
-		fibo(1e5)
-	}
-}
-
-var bitTests = []struct {
-	x    string
-	i    uint
-	want uint
-}{
-	{"0", 0, 0},
-	{"0", 1, 0},
-	{"0", 1000, 0},
-
-	{"0x1", 0, 1},
-	{"0x10", 0, 0},
-	{"0x10", 3, 0},
-	{"0x10", 4, 1},
-	{"0x10", 5, 0},
-
-	{"0x8000000000000000", 62, 0},
-	{"0x8000000000000000", 63, 1},
-	{"0x8000000000000000", 64, 0},
-
-	{"0x3" + strings.Repeat("0", 32), 127, 0},
-	{"0x3" + strings.Repeat("0", 32), 128, 1},
-	{"0x3" + strings.Repeat("0", 32), 129, 1},
-	{"0x3" + strings.Repeat("0", 32), 130, 0},
-}
-
-func TestBit(t *testing.T) {
-	for i, test := range bitTests {
-		x := natFromString(test.x)
-		if got := x.bit(test.i); got != test.want {
-			t.Errorf("#%d: %s.bit(%d) = %v; want %v", i, test.x, test.i, got, test.want)
-		}
-	}
-}
-
-var stickyTests = []struct {
-	x    string
-	i    uint
-	want uint
-}{
-	{"0", 0, 0},
-	{"0", 1, 0},
-	{"0", 1000, 0},
-
-	{"0x1", 0, 0},
-	{"0x1", 1, 1},
-
-	{"0x1350", 0, 0},
-	{"0x1350", 4, 0},
-	{"0x1350", 5, 1},
-
-	{"0x8000000000000000", 63, 0},
-	{"0x8000000000000000", 64, 1},
-
-	{"0x1" + strings.Repeat("0", 100), 400, 0},
-	{"0x1" + strings.Repeat("0", 100), 401, 1},
-}
-
-func TestSticky(t *testing.T) {
-	for i, test := range stickyTests {
-		x := natFromString(test.x)
-		if got := x.sticky(test.i); got != test.want {
-			t.Errorf("#%d: %s.sticky(%d) = %v; want %v", i, test.x, test.i, got, test.want)
-		}
-		if test.want == 1 {
-			// all subsequent i's should also return 1
-			for d := uint(1); d <= 3; d++ {
-				if got := x.sticky(test.i + d); got != 1 {
-					t.Errorf("#%d: %s.sticky(%d) = %v; want %v", i, test.x, test.i+d, got, 1)
-				}
-			}
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/natconv.go b/pkg/bootstrap/src/bootstrap/math/big/natconv.go
deleted file mode 100644
index c15a11a..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/natconv.go
+++ /dev/null
@@ -1,495 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/natconv.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/natconv.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements nat-to-string conversion functions.
-
-package big
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"sync"
-)
-
-const digits = "0123456789abcdefghijklmnopqrstuvwxyz"
-
-// Note: MaxBase = len(digits), but it must remain a rune constant
-//       for API compatibility.
-
-// MaxBase is the largest number base accepted for string conversions.
-const MaxBase = 'z' - 'a' + 10 + 1
-
-// maxPow returns (b**n, n) such that b**n is the largest power b**n <= _M.
-// For instance maxPow(10) == (1e19, 19) for 19 decimal digits in a 64bit Word.
-// In other words, at most n digits in base b fit into a Word.
-// TODO(gri) replace this with a table, generated at build time.
-func maxPow(b Word) (p Word, n int) {
-	p, n = b, 1 // assuming b <= _M
-	for max := _M / b; p <= max; {
-		// p == b**n && p <= max
-		p *= b
-		n++
-	}
-	// p == b**n && p <= _M
-	return
-}
-
-// pow returns x**n for n > 0, and 1 otherwise.
-func pow(x Word, n int) (p Word) {
-	// n == sum of bi * 2**i, for 0 <= i < imax, and bi is 0 or 1
-	// thus x**n == product of x**(2**i) for all i where bi == 1
-	// (Russian Peasant Method for exponentiation)
-	p = 1
-	for n > 0 {
-		if n&1 != 0 {
-			p *= x
-		}
-		x *= x
-		n >>= 1
-	}
-	return
-}
-
-// scan scans the number corresponding to the longest possible prefix
-// from r representing an unsigned number in a given conversion base.
-// It returns the corresponding natural number res, the actual base b,
-// a digit count, and a read or syntax error err, if any.
-//
-//	number   = [ prefix ] mantissa .
-//	prefix   = "0" [ "x" | "X" | "b" | "B" ] .
-//      mantissa = digits | digits "." [ digits ] | "." digits .
-//	digits   = digit { digit } .
-//	digit    = "0" ... "9" | "a" ... "z" | "A" ... "Z" .
-//
-// Unless fracOk is set, the base argument must be 0 or a value between
-// 2 and MaxBase. If fracOk is set, the base argument must be one of
-// 0, 2, 10, or 16. Providing an invalid base argument leads to a run-
-// time panic.
-//
-// For base 0, the number prefix determines the actual base: A prefix of
-// ``0x'' or ``0X'' selects base 16; if fracOk is not set, the ``0'' prefix
-// selects base 8, and a ``0b'' or ``0B'' prefix selects base 2. Otherwise
-// the selected base is 10 and no prefix is accepted.
-//
-// If fracOk is set, an octal prefix is ignored (a leading ``0'' simply
-// stands for a zero digit), and a period followed by a fractional part
-// is permitted. The result value is computed as if there were no period
-// present; and the count value is used to determine the fractional part.
-//
-// A result digit count > 0 corresponds to the number of (non-prefix) digits
-// parsed. A digit count <= 0 indicates the presence of a period (if fracOk
-// is set, only), and -count is the number of fractional digits found.
-// In this case, the actual value of the scanned number is res * b**count.
-//
-func (z nat) scan(r io.ByteScanner, base int, fracOk bool) (res nat, b, count int, err error) {
-	// reject illegal bases
-	baseOk := base == 0 ||
-		!fracOk && 2 <= base && base <= MaxBase ||
-		fracOk && (base == 2 || base == 10 || base == 16)
-	if !baseOk {
-		panic(fmt.Sprintf("illegal number base %d", base))
-	}
-
-	// one char look-ahead
-	ch, err := r.ReadByte()
-	if err != nil {
-		return
-	}
-
-	// determine actual base
-	b = base
-	if base == 0 {
-		// actual base is 10 unless there's a base prefix
-		b = 10
-		if ch == '0' {
-			count = 1
-			switch ch, err = r.ReadByte(); err {
-			case nil:
-				// possibly one of 0x, 0X, 0b, 0B
-				if !fracOk {
-					b = 8
-				}
-				switch ch {
-				case 'x', 'X':
-					b = 16
-				case 'b', 'B':
-					b = 2
-				}
-				switch b {
-				case 16, 2:
-					count = 0 // prefix is not counted
-					if ch, err = r.ReadByte(); err != nil {
-						// io.EOF is also an error in this case
-						return
-					}
-				case 8:
-					count = 0 // prefix is not counted
-				}
-			case io.EOF:
-				// input is "0"
-				res = z[:0]
-				err = nil
-				return
-			default:
-				// read error
-				return
-			}
-		}
-	}
-
-	// convert string
-	// Algorithm: Collect digits in groups of at most n digits in di
-	// and then use mulAddWW for every such group to add them to the
-	// result.
-	z = z[:0]
-	b1 := Word(b)
-	bn, n := maxPow(b1) // at most n digits in base b1 fit into Word
-	di := Word(0)       // 0 <= di < b1**i < bn
-	i := 0              // 0 <= i < n
-	dp := -1            // position of decimal point
-	for {
-		if fracOk && ch == '.' {
-			fracOk = false
-			dp = count
-			// advance
-			if ch, err = r.ReadByte(); err != nil {
-				if err == io.EOF {
-					err = nil
-					break
-				}
-				return
-			}
-		}
-
-		// convert rune into digit value d1
-		var d1 Word
-		switch {
-		case '0' <= ch && ch <= '9':
-			d1 = Word(ch - '0')
-		case 'a' <= ch && ch <= 'z':
-			d1 = Word(ch - 'a' + 10)
-		case 'A' <= ch && ch <= 'Z':
-			d1 = Word(ch - 'A' + 10)
-		default:
-			d1 = MaxBase + 1
-		}
-		if d1 >= b1 {
-			r.UnreadByte() // ch does not belong to number anymore
-			break
-		}
-		count++
-
-		// collect d1 in di
-		di = di*b1 + d1
-		i++
-
-		// if di is "full", add it to the result
-		if i == n {
-			z = z.mulAddWW(z, bn, di)
-			di = 0
-			i = 0
-		}
-
-		// advance
-		if ch, err = r.ReadByte(); err != nil {
-			if err == io.EOF {
-				err = nil
-				break
-			}
-			return
-		}
-	}
-
-	if count == 0 {
-		// no digits found
-		switch {
-		case base == 0 && b == 8:
-			// there was only the octal prefix 0 (possibly followed by digits > 7);
-			// count as one digit and return base 10, not 8
-			count = 1
-			b = 10
-		case base != 0 || b != 8:
-			// there was neither a mantissa digit nor the octal prefix 0
-			err = errors.New("syntax error scanning number")
-		}
-		return
-	}
-	// count > 0
-
-	// add remaining digits to result
-	if i > 0 {
-		z = z.mulAddWW(z, pow(b1, i), di)
-	}
-	res = z.norm()
-
-	// adjust for fraction, if any
-	if dp >= 0 {
-		// 0 <= dp <= count > 0
-		count = dp - count
-	}
-
-	return
-}
-
-// utoa converts x to an ASCII representation in the given base;
-// base must be between 2 and MaxBase, inclusive.
-func (x nat) utoa(base int) []byte {
-	return x.itoa(false, base)
-}
-
-// itoa is like utoa but it prepends a '-' if neg && x != 0.
-func (x nat) itoa(neg bool, base int) []byte {
-	if base < 2 || base > MaxBase {
-		panic("invalid base")
-	}
-
-	// x == 0
-	if len(x) == 0 {
-		return []byte("0")
-	}
-	// len(x) > 0
-
-	// allocate buffer for conversion
-	i := int(float64(x.bitLen())/math.Log2(float64(base))) + 1 // off by 1 at most
-	if neg {
-		i++
-	}
-	s := make([]byte, i)
-
-	// convert power of two and non power of two bases separately
-	if b := Word(base); b == b&-b {
-		// shift is base b digit size in bits
-		shift := trailingZeroBits(b) // shift > 0 because b >= 2
-		mask := Word(1<<shift - 1)
-		w := x[0]         // current word
-		nbits := uint(_W) // number of unprocessed bits in w
-
-		// convert less-significant words (include leading zeros)
-		for k := 1; k < len(x); k++ {
-			// convert full digits
-			for nbits >= shift {
-				i--
-				s[i] = digits[w&mask]
-				w >>= shift
-				nbits -= shift
-			}
-
-			// convert any partial leading digit and advance to next word
-			if nbits == 0 {
-				// no partial digit remaining, just advance
-				w = x[k]
-				nbits = _W
-			} else {
-				// partial digit in current word w (== x[k-1]) and next word x[k]
-				w |= x[k] << nbits
-				i--
-				s[i] = digits[w&mask]
-
-				// advance
-				w = x[k] >> (shift - nbits)
-				nbits = _W - (shift - nbits)
-			}
-		}
-
-		// convert digits of most-significant word w (omit leading zeros)
-		for w != 0 {
-			i--
-			s[i] = digits[w&mask]
-			w >>= shift
-		}
-
-	} else {
-		bb, ndigits := maxPow(b)
-
-		// construct table of successive squares of bb*leafSize to use in subdivisions
-		// result (table != nil) <=> (len(x) > leafSize > 0)
-		table := divisors(len(x), b, ndigits, bb)
-
-		// preserve x, create local copy for use by convertWords
-		q := nat(nil).set(x)
-
-		// convert q to string s in base b
-		q.convertWords(s, b, ndigits, bb, table)
-
-		// strip leading zeros
-		// (x != 0; thus s must contain at least one non-zero digit
-		// and the loop will terminate)
-		i = 0
-		for s[i] == '0' {
-			i++
-		}
-	}
-
-	if neg {
-		i--
-		s[i] = '-'
-	}
-
-	return s[i:]
-}
-
-// Convert words of q to base b digits in s. If q is large, it is recursively "split in half"
-// by nat/nat division using tabulated divisors. Otherwise, it is converted iteratively using
-// repeated nat/Word division.
-//
-// The iterative method processes n Words by n divW() calls, each of which visits every Word in the
-// incrementally shortened q for a total of n + (n-1) + (n-2) ... + 2 + 1, or n(n+1)/2 divW()'s.
-// Recursive conversion divides q by its approximate square root, yielding two parts, each half
-// the size of q. Using the iterative method on both halves means 2 * (n/2)(n/2 + 1)/2 divW()'s
-// plus the expensive long div(). Asymptotically, the ratio is favorable at 1/2 the divW()'s, and
-// is made better by splitting the subblocks recursively. Best is to split blocks until one more
-// split would take longer (because of the nat/nat div()) than the twice as many divW()'s of the
-// iterative approach. This threshold is represented by leafSize. Benchmarking of leafSize in the
-// range 2..64 shows that values of 8 and 16 work well, with a 4x speedup at medium lengths and
-// ~30x for 20000 digits. Use nat_test.go's BenchmarkLeafSize tests to optimize leafSize for
-// specific hardware.
-//
-func (q nat) convertWords(s []byte, b Word, ndigits int, bb Word, table []divisor) {
-	// split larger blocks recursively
-	if table != nil {
-		// len(q) > leafSize > 0
-		var r nat
-		index := len(table) - 1
-		for len(q) > leafSize {
-			// find divisor close to sqrt(q) if possible, but in any case < q
-			maxLength := q.bitLen()     // ~= log2 q, or at of least largest possible q of this bit length
-			minLength := maxLength >> 1 // ~= log2 sqrt(q)
-			for index > 0 && table[index-1].nbits > minLength {
-				index-- // desired
-			}
-			if table[index].nbits >= maxLength && table[index].bbb.cmp(q) >= 0 {
-				index--
-				if index < 0 {
-					panic("internal inconsistency")
-				}
-			}
-
-			// split q into the two digit number (q'*bbb + r) to form independent subblocks
-			q, r = q.div(r, q, table[index].bbb)
-
-			// convert subblocks and collect results in s[:h] and s[h:]
-			h := len(s) - table[index].ndigits
-			r.convertWords(s[h:], b, ndigits, bb, table[0:index])
-			s = s[:h] // == q.convertWords(s, b, ndigits, bb, table[0:index+1])
-		}
-	}
-
-	// having split any large blocks now process the remaining (small) block iteratively
-	i := len(s)
-	var r Word
-	if b == 10 {
-		// hard-coding for 10 here speeds this up by 1.25x (allows for / and % by constants)
-		for len(q) > 0 {
-			// extract least significant, base bb "digit"
-			q, r = q.divW(q, bb)
-			for j := 0; j < ndigits && i > 0; j++ {
-				i--
-				// avoid % computation since r%10 == r - int(r/10)*10;
-				// this appears to be faster for BenchmarkString10000Base10
-				// and smaller strings (but a bit slower for larger ones)
-				t := r / 10
-				s[i] = '0' + byte(r-t*10)
-				r = t
-			}
-		}
-	} else {
-		for len(q) > 0 {
-			// extract least significant, base bb "digit"
-			q, r = q.divW(q, bb)
-			for j := 0; j < ndigits && i > 0; j++ {
-				i--
-				s[i] = digits[r%b]
-				r /= b
-			}
-		}
-	}
-
-	// prepend high-order zeros
-	for i > 0 { // while need more leading zeros
-		i--
-		s[i] = '0'
-	}
-}
-
-// Split blocks greater than leafSize Words (or set to 0 to disable recursive conversion)
-// Benchmark and configure leafSize using: go test -bench="Leaf"
-//   8 and 16 effective on 3.0 GHz Xeon "Clovertown" CPU (128 byte cache lines)
-//   8 and 16 effective on 2.66 GHz Core 2 Duo "Penryn" CPU
-var leafSize int = 8 // number of Word-size binary values treat as a monolithic block
-
-type divisor struct {
-	bbb     nat // divisor
-	nbits   int // bit length of divisor (discounting leading zeros) ~= log2(bbb)
-	ndigits int // digit length of divisor in terms of output base digits
-}
-
-var cacheBase10 struct {
-	sync.Mutex
-	table [64]divisor // cached divisors for base 10
-}
-
-// expWW computes x**y
-func (z nat) expWW(x, y Word) nat {
-	return z.expNN(nat(nil).setWord(x), nat(nil).setWord(y), nil)
-}
-
-// construct table of powers of bb*leafSize to use in subdivisions
-func divisors(m int, b Word, ndigits int, bb Word) []divisor {
-	// only compute table when recursive conversion is enabled and x is large
-	if leafSize == 0 || m <= leafSize {
-		return nil
-	}
-
-	// determine k where (bb**leafSize)**(2**k) >= sqrt(x)
-	k := 1
-	for words := leafSize; words < m>>1 && k < len(cacheBase10.table); words <<= 1 {
-		k++
-	}
-
-	// reuse and extend existing table of divisors or create new table as appropriate
-	var table []divisor // for b == 10, table overlaps with cacheBase10.table
-	if b == 10 {
-		cacheBase10.Lock()
-		table = cacheBase10.table[0:k] // reuse old table for this conversion
-	} else {
-		table = make([]divisor, k) // create new table for this conversion
-	}
-
-	// extend table
-	if table[k-1].ndigits == 0 {
-		// add new entries as needed
-		var larger nat
-		for i := 0; i < k; i++ {
-			if table[i].ndigits == 0 {
-				if i == 0 {
-					table[0].bbb = nat(nil).expWW(bb, Word(leafSize))
-					table[0].ndigits = ndigits * leafSize
-				} else {
-					table[i].bbb = nat(nil).mul(table[i-1].bbb, table[i-1].bbb)
-					table[i].ndigits = 2 * table[i-1].ndigits
-				}
-
-				// optimization: exploit aggregated extra bits in macro blocks
-				larger = nat(nil).set(table[i].bbb)
-				for mulAddVWW(larger, larger, b, 0) == 0 {
-					table[i].bbb = table[i].bbb.set(larger)
-					table[i].ndigits++
-				}
-
-				table[i].nbits = table[i].bbb.bitLen()
-			}
-		}
-	}
-
-	if b == 10 {
-		cacheBase10.Unlock()
-	}
-
-	return table
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/natconv_test.go b/pkg/bootstrap/src/bootstrap/math/big/natconv_test.go
deleted file mode 100644
index 7dd2444..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/natconv_test.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/natconv_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/natconv_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-	"testing"
-)
-
-func itoa(x nat, base int) []byte {
-	// special cases
-	switch {
-	case base < 2:
-		panic("illegal base")
-	case len(x) == 0:
-		return []byte("0")
-	}
-
-	// allocate buffer for conversion
-	i := x.bitLen()/log2(Word(base)) + 1 // +1: round up
-	s := make([]byte, i)
-
-	// don't destroy x
-	q := nat(nil).set(x)
-
-	// convert
-	for len(q) > 0 {
-		i--
-		var r Word
-		q, r = q.divW(q, Word(base))
-		s[i] = digits[r]
-	}
-
-	return s[i:]
-}
-
-var strTests = []struct {
-	x nat    // nat value to be converted
-	b int    // conversion base
-	s string // expected result
-}{
-	{nil, 2, "0"},
-	{nat{1}, 2, "1"},
-	{nat{0xc5}, 2, "11000101"},
-	{nat{03271}, 8, "3271"},
-	{nat{10}, 10, "10"},
-	{nat{1234567890}, 10, "1234567890"},
-	{nat{0xdeadbeef}, 16, "deadbeef"},
-	{nat{0x229be7}, 17, "1a2b3c"},
-	{nat{0x309663e6}, 32, "o9cov6"},
-}
-
-func TestString(t *testing.T) {
-	// test invalid base explicitly
-	var panicStr string
-	func() {
-		defer func() {
-			panicStr = recover().(string)
-		}()
-		natOne.utoa(1)
-	}()
-	if panicStr != "invalid base" {
-		t.Errorf("expected panic for invalid base")
-	}
-
-	for _, a := range strTests {
-		s := string(a.x.utoa(a.b))
-		if s != a.s {
-			t.Errorf("string%+v\n\tgot s = %s; want %s", a, s, a.s)
-		}
-
-		x, b, _, err := nat(nil).scan(strings.NewReader(a.s), a.b, false)
-		if x.cmp(a.x) != 0 {
-			t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
-		}
-		if b != a.b {
-			t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.b)
-		}
-		if err != nil {
-			t.Errorf("scan%+v\n\tgot error = %s", a, err)
-		}
-	}
-}
-
-var natScanTests = []struct {
-	s     string // string to be scanned
-	base  int    // input base
-	frac  bool   // fraction ok
-	x     nat    // expected nat
-	b     int    // expected base
-	count int    // expected digit count
-	ok    bool   // expected success
-	next  rune   // next character (or 0, if at EOF)
-}{
-	// error: no mantissa
-	{},
-	{s: "?"},
-	{base: 10},
-	{base: 36},
-	{s: "?", base: 10},
-	{s: "0x"},
-	{s: "345", base: 2},
-
-	// error: incorrect use of decimal point
-	{s: ".0"},
-	{s: ".0", base: 10},
-	{s: ".", base: 0},
-	{s: "0x.0"},
-
-	// no errors
-	{"0", 0, false, nil, 10, 1, true, 0},
-	{"0", 10, false, nil, 10, 1, true, 0},
-	{"0", 36, false, nil, 36, 1, true, 0},
-	{"1", 0, false, nat{1}, 10, 1, true, 0},
-	{"1", 10, false, nat{1}, 10, 1, true, 0},
-	{"0 ", 0, false, nil, 10, 1, true, ' '},
-	{"08", 0, false, nil, 10, 1, true, '8'},
-	{"08", 10, false, nat{8}, 10, 2, true, 0},
-	{"018", 0, false, nat{1}, 8, 1, true, '8'},
-	{"0b1", 0, false, nat{1}, 2, 1, true, 0},
-	{"0b11000101", 0, false, nat{0xc5}, 2, 8, true, 0},
-	{"03271", 0, false, nat{03271}, 8, 4, true, 0},
-	{"10ab", 0, false, nat{10}, 10, 2, true, 'a'},
-	{"1234567890", 0, false, nat{1234567890}, 10, 10, true, 0},
-	{"xyz", 36, false, nat{(33*36+34)*36 + 35}, 36, 3, true, 0},
-	{"xyz?", 36, false, nat{(33*36+34)*36 + 35}, 36, 3, true, '?'},
-	{"0x", 16, false, nil, 16, 1, true, 'x'},
-	{"0xdeadbeef", 0, false, nat{0xdeadbeef}, 16, 8, true, 0},
-	{"0XDEADBEEF", 0, false, nat{0xdeadbeef}, 16, 8, true, 0},
-
-	// no errors, decimal point
-	{"0.", 0, false, nil, 10, 1, true, '.'},
-	{"0.", 10, true, nil, 10, 0, true, 0},
-	{"0.1.2", 10, true, nat{1}, 10, -1, true, '.'},
-	{".000", 10, true, nil, 10, -3, true, 0},
-	{"12.3", 10, true, nat{123}, 10, -1, true, 0},
-	{"012.345", 10, true, nat{12345}, 10, -3, true, 0},
-}
-
-func TestScanBase(t *testing.T) {
-	for _, a := range natScanTests {
-		r := strings.NewReader(a.s)
-		x, b, count, err := nat(nil).scan(r, a.base, a.frac)
-		if err == nil && !a.ok {
-			t.Errorf("scan%+v\n\texpected error", a)
-		}
-		if err != nil {
-			if a.ok {
-				t.Errorf("scan%+v\n\tgot error = %s", a, err)
-			}
-			continue
-		}
-		if x.cmp(a.x) != 0 {
-			t.Errorf("scan%+v\n\tgot z = %v; want %v", a, x, a.x)
-		}
-		if b != a.b {
-			t.Errorf("scan%+v\n\tgot b = %d; want %d", a, b, a.base)
-		}
-		if count != a.count {
-			t.Errorf("scan%+v\n\tgot count = %d; want %d", a, count, a.count)
-		}
-		next, _, err := r.ReadRune()
-		if err == io.EOF {
-			next = 0
-			err = nil
-		}
-		if err == nil && next != a.next {
-			t.Errorf("scan%+v\n\tgot next = %q; want %q", a, next, a.next)
-		}
-	}
-}
-
-var pi = "3" +
-	"14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651" +
-	"32823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461" +
-	"28475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920" +
-	"96282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179" +
-	"31051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798" +
-	"60943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901" +
-	"22495343014654958537105079227968925892354201995611212902196086403441815981362977477130996051870721134999999837" +
-	"29780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083" +
-	"81420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909" +
-	"21642019893809525720106548586327886593615338182796823030195203530185296899577362259941389124972177528347913151" +
-	"55748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035" +
-	"63707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104" +
-	"75216205696602405803815019351125338243003558764024749647326391419927260426992279678235478163600934172164121992" +
-	"45863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818" +
-	"34797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548" +
-	"16136115735255213347574184946843852332390739414333454776241686251898356948556209921922218427255025425688767179" +
-	"04946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886" +
-	"26945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645" +
-	"99581339047802759009946576407895126946839835259570982582262052248940772671947826848260147699090264013639443745" +
-	"53050682034962524517493996514314298091906592509372216964615157098583874105978859597729754989301617539284681382" +
-	"68683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244" +
-	"13654976278079771569143599770012961608944169486855584840635342207222582848864815845602850601684273945226746767" +
-	"88952521385225499546667278239864565961163548862305774564980355936345681743241125150760694794510965960940252288" +
-	"79710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821" +
-	"68299894872265880485756401427047755513237964145152374623436454285844479526586782105114135473573952311342716610" +
-	"21359695362314429524849371871101457654035902799344037420073105785390621983874478084784896833214457138687519435" +
-	"06430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675" +
-	"14269123974894090718649423196156794520809514655022523160388193014209376213785595663893778708303906979207734672" +
-	"21825625996615014215030680384477345492026054146659252014974428507325186660021324340881907104863317346496514539" +
-	"05796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007" +
-	"23055876317635942187312514712053292819182618612586732157919841484882916447060957527069572209175671167229109816" +
-	"90915280173506712748583222871835209353965725121083579151369882091444210067510334671103141267111369908658516398" +
-	"31501970165151168517143765761835155650884909989859982387345528331635507647918535893226185489632132933089857064" +
-	"20467525907091548141654985946163718027098199430992448895757128289059232332609729971208443357326548938239119325" +
-	"97463667305836041428138830320382490375898524374417029132765618093773444030707469211201913020330380197621101100" +
-	"44929321516084244485963766983895228684783123552658213144957685726243344189303968642624341077322697802807318915" +
-	"44110104468232527162010526522721116603966655730925471105578537634668206531098965269186205647693125705863566201" +
-	"85581007293606598764861179104533488503461136576867532494416680396265797877185560845529654126654085306143444318" +
-	"58676975145661406800700237877659134401712749470420562230538994561314071127000407854733269939081454664645880797" +
-	"27082668306343285878569830523580893306575740679545716377525420211495576158140025012622859413021647155097925923" +
-	"09907965473761255176567513575178296664547791745011299614890304639947132962107340437518957359614589019389713111" +
-	"79042978285647503203198691514028708085990480109412147221317947647772622414254854540332157185306142288137585043" +
-	"06332175182979866223717215916077166925474873898665494945011465406284336639379003976926567214638530673609657120" +
-	"91807638327166416274888800786925602902284721040317211860820419000422966171196377921337575114959501566049631862" +
-	"94726547364252308177036751590673502350728354056704038674351362222477158915049530984448933309634087807693259939" +
-	"78054193414473774418426312986080998886874132604721569516239658645730216315981931951673538129741677294786724229" +
-	"24654366800980676928238280689964004824354037014163149658979409243237896907069779422362508221688957383798623001" +
-	"59377647165122893578601588161755782973523344604281512627203734314653197777416031990665541876397929334419521541" +
-	"34189948544473456738316249934191318148092777710386387734317720754565453220777092120190516609628049092636019759" +
-	"88281613323166636528619326686336062735676303544776280350450777235547105859548702790814356240145171806246436267" +
-	"94561275318134078330336254232783944975382437205835311477119926063813346776879695970309833913077109870408591337"
-
-// Test case for BenchmarkScanPi.
-func TestScanPi(t *testing.T) {
-	var x nat
-	z, _, _, err := x.scan(strings.NewReader(pi), 10, false)
-	if err != nil {
-		t.Errorf("scanning pi: %s", err)
-	}
-	if s := string(z.utoa(10)); s != pi {
-		t.Errorf("scanning pi: got %s", s)
-	}
-}
-
-func TestScanPiParallel(t *testing.T) {
-	const n = 2
-	c := make(chan int)
-	for i := 0; i < n; i++ {
-		go func() {
-			TestScanPi(t)
-			c <- 0
-		}()
-	}
-	for i := 0; i < n; i++ {
-		<-c
-	}
-}
-
-func BenchmarkScanPi(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		var x nat
-		x.scan(strings.NewReader(pi), 10, false)
-	}
-}
-
-func BenchmarkStringPiParallel(b *testing.B) {
-	var x nat
-	x, _, _, _ = x.scan(strings.NewReader(pi), 0, false)
-	if string(x.utoa(10)) != pi {
-		panic("benchmark incorrect: conversion failed")
-	}
-	b.RunParallel(func(pb *testing.PB) {
-		for pb.Next() {
-			x.utoa(10)
-		}
-	})
-}
-
-func BenchmarkScan(b *testing.B) {
-	const x = 10
-	for _, base := range []int{2, 8, 10, 16} {
-		for _, y := range []Word{10, 100, 1000, 10000, 100000} {
-			if isRaceBuilder && y > 1000 {
-				continue
-			}
-			b.Run(fmt.Sprintf("%d/Base%d", y, base), func(b *testing.B) {
-				b.StopTimer()
-				var z nat
-				z = z.expWW(x, y)
-
-				s := z.utoa(base)
-				if t := itoa(z, base); !bytes.Equal(s, t) {
-					b.Fatalf("scanning: got %s; want %s", s, t)
-				}
-				b.StartTimer()
-
-				for i := 0; i < b.N; i++ {
-					z.scan(bytes.NewReader(s), base, false)
-				}
-			})
-		}
-	}
-}
-
-func BenchmarkString(b *testing.B) {
-	const x = 10
-	for _, base := range []int{2, 8, 10, 16} {
-		for _, y := range []Word{10, 100, 1000, 10000, 100000} {
-			if isRaceBuilder && y > 1000 {
-				continue
-			}
-			b.Run(fmt.Sprintf("%d/Base%d", y, base), func(b *testing.B) {
-				b.StopTimer()
-				var z nat
-				z = z.expWW(x, y)
-				z.utoa(base) // warm divisor cache
-				b.StartTimer()
-
-				for i := 0; i < b.N; i++ {
-					_ = z.utoa(base)
-				}
-			})
-		}
-	}
-}
-
-func BenchmarkLeafSize(b *testing.B) {
-	for n := 0; n <= 16; n++ {
-		b.Run(fmt.Sprint(n), func(b *testing.B) { LeafSizeHelper(b, 10, n) })
-	}
-	// Try some large lengths
-	for _, n := range []int{32, 64} {
-		b.Run(fmt.Sprint(n), func(b *testing.B) { LeafSizeHelper(b, 10, n) })
-	}
-}
-
-func LeafSizeHelper(b *testing.B, base, size int) {
-	b.StopTimer()
-	originalLeafSize := leafSize
-	resetTable(cacheBase10.table[:])
-	leafSize = size
-	b.StartTimer()
-
-	for d := 1; d <= 10000; d *= 10 {
-		b.StopTimer()
-		var z nat
-		z = z.expWW(Word(base), Word(d)) // build target number
-		_ = z.utoa(base)                 // warm divisor cache
-		b.StartTimer()
-
-		for i := 0; i < b.N; i++ {
-			_ = z.utoa(base)
-		}
-	}
-
-	b.StopTimer()
-	resetTable(cacheBase10.table[:])
-	leafSize = originalLeafSize
-	b.StartTimer()
-}
-
-func resetTable(table []divisor) {
-	if table != nil && table[0].bbb != nil {
-		for i := 0; i < len(table); i++ {
-			table[i].bbb = nil
-			table[i].nbits = 0
-			table[i].ndigits = 0
-		}
-	}
-}
-
-func TestStringPowers(t *testing.T) {
-	var p Word
-	for b := 2; b <= 16; b++ {
-		for p = 0; p <= 512; p++ {
-			x := nat(nil).expWW(Word(b), p)
-			xs := x.utoa(b)
-			xs2 := itoa(x, b)
-			if !bytes.Equal(xs, xs2) {
-				t.Errorf("failed at %d ** %d in base %d: %s != %s", b, p, b, xs, xs2)
-			}
-		}
-		if b >= 3 && testing.Short() {
-			break
-		}
-	}
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/prime.go b/pkg/bootstrap/src/bootstrap/math/big/prime.go
deleted file mode 100644
index 5dbb8bf..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/prime.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/prime.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/prime.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import "math/rand"
-
-// ProbablyPrime reports whether x is probably prime,
-// applying the Miller-Rabin test with n pseudorandomly chosen bases
-// as well as a Baillie-PSW test.
-//
-// If x is prime, ProbablyPrime returns true.
-// If x is chosen randomly and not prime, ProbablyPrime probably returns false.
-// The probability of returning true for a randomly chosen non-prime is at most ¼ⁿ.
-//
-// ProbablyPrime is 100% accurate for inputs less than 2⁶⁴.
-// See Menezes et al., Handbook of Applied Cryptography, 1997, pp. 145-149,
-// and FIPS 186-4 Appendix F for further discussion of the error probabilities.
-//
-// ProbablyPrime is not suitable for judging primes that an adversary may
-// have crafted to fool the test.
-//
-// As of Go 1.8, ProbablyPrime(0) is allowed and applies only a Baillie-PSW test.
-// Before Go 1.8, ProbablyPrime applied only the Miller-Rabin tests, and ProbablyPrime(0) panicked.
-func (x *Int) ProbablyPrime(n int) bool {
-	// Note regarding the doc comment above:
-	// It would be more precise to say that the Baillie-PSW test uses the
-	// extra strong Lucas test as its Lucas test, but since no one knows
-	// how to tell any of the Lucas tests apart inside a Baillie-PSW test
-	// (they all work equally well empirically), that detail need not be
-	// documented or implicitly guaranteed.
-	// The comment does avoid saying "the" Baillie-PSW test
-	// because of this general ambiguity.
-
-	if n < 0 {
-		panic("negative n for ProbablyPrime")
-	}
-	if x.neg || len(x.abs) == 0 {
-		return false
-	}
-
-	// primeBitMask records the primes < 64.
-	const primeBitMask uint64 = 1<<2 | 1<<3 | 1<<5 | 1<<7 |
-		1<<11 | 1<<13 | 1<<17 | 1<<19 | 1<<23 | 1<<29 | 1<<31 |
-		1<<37 | 1<<41 | 1<<43 | 1<<47 | 1<<53 | 1<<59 | 1<<61
-
-	w := x.abs[0]
-	if len(x.abs) == 1 && w < 64 {
-		return primeBitMask&(1<<w) != 0
-	}
-
-	if w&1 == 0 {
-		return false // n is even
-	}
-
-	const primesA = 3 * 5 * 7 * 11 * 13 * 17 * 19 * 23 * 37
-	const primesB = 29 * 31 * 41 * 43 * 47 * 53
-
-	var rA, rB uint32
-	switch _W {
-	case 32:
-		rA = uint32(x.abs.modW(primesA))
-		rB = uint32(x.abs.modW(primesB))
-	case 64:
-		r := x.abs.modW((primesA * primesB) & _M)
-		rA = uint32(r % primesA)
-		rB = uint32(r % primesB)
-	default:
-		panic("math/big: invalid word size")
-	}
-
-	if rA%3 == 0 || rA%5 == 0 || rA%7 == 0 || rA%11 == 0 || rA%13 == 0 || rA%17 == 0 || rA%19 == 0 || rA%23 == 0 || rA%37 == 0 ||
-		rB%29 == 0 || rB%31 == 0 || rB%41 == 0 || rB%43 == 0 || rB%47 == 0 || rB%53 == 0 {
-		return false
-	}
-
-	return x.abs.probablyPrimeMillerRabin(n+1, true) && x.abs.probablyPrimeLucas()
-}
-
-// probablyPrimeMillerRabin reports whether n passes reps rounds of the
-// Miller-Rabin primality test, using pseudo-randomly chosen bases.
-// If force2 is true, one of the rounds is forced to use base 2.
-// See Handbook of Applied Cryptography, p. 139, Algorithm 4.24.
-// The number n is known to be non-zero.
-func (n nat) probablyPrimeMillerRabin(reps int, force2 bool) bool {
-	nm1 := nat(nil).sub(n, natOne)
-	// determine q, k such that nm1 = q << k
-	k := nm1.trailingZeroBits()
-	q := nat(nil).shr(nm1, k)
-
-	nm3 := nat(nil).sub(nm1, natTwo)
-	rand := rand.New(rand.NewSource(int64(n[0])))
-
-	var x, y, quotient nat
-	nm3Len := nm3.bitLen()
-
-NextRandom:
-	for i := 0; i < reps; i++ {
-		if i == reps-1 && force2 {
-			x = x.set(natTwo)
-		} else {
-			x = x.random(rand, nm3, nm3Len)
-			x = x.add(x, natTwo)
-		}
-		y = y.expNN(x, q, n)
-		if y.cmp(natOne) == 0 || y.cmp(nm1) == 0 {
-			continue
-		}
-		for j := uint(1); j < k; j++ {
-			y = y.mul(y, y)
-			quotient, y = quotient.div(y, y, n)
-			if y.cmp(nm1) == 0 {
-				continue NextRandom
-			}
-			if y.cmp(natOne) == 0 {
-				return false
-			}
-		}
-		return false
-	}
-
-	return true
-}
-
-// probablyPrimeLucas reports whether n passes the "almost extra strong" Lucas probable prime test,
-// using Baillie-OEIS parameter selection. This corresponds to "AESLPSP" on Jacobsen's tables (link below).
-// The combination of this test and a Miller-Rabin/Fermat test with base 2 gives a Baillie-PSW test.
-//
-// References:
-//
-// Baillie and Wagstaff, "Lucas Pseudoprimes", Mathematics of Computation 35(152),
-// October 1980, pp. 1391-1417, especially page 1401.
-// http://www.ams.org/journals/mcom/1980-35-152/S0025-5718-1980-0583518-6/S0025-5718-1980-0583518-6.pdf
-//
-// Grantham, "Frobenius Pseudoprimes", Mathematics of Computation 70(234),
-// March 2000, pp. 873-891.
-// http://www.ams.org/journals/mcom/2001-70-234/S0025-5718-00-01197-2/S0025-5718-00-01197-2.pdf
-//
-// Baillie, "Extra strong Lucas pseudoprimes", OEIS A217719, https://oeis.org/A217719.
-//
-// Jacobsen, "Pseudoprime Statistics, Tables, and Data", http://ntheory.org/pseudoprimes.html.
-//
-// Nicely, "The Baillie-PSW Primality Test", http://www.trnicely.net/misc/bpsw.html.
-// (Note that Nicely's definition of the "extra strong" test gives the wrong Jacobi condition,
-// as pointed out by Jacobsen.)
-//
-// Crandall and Pomerance, Prime Numbers: A Computational Perspective, 2nd ed.
-// Springer, 2005.
-func (n nat) probablyPrimeLucas() bool {
-	// Discard 0, 1.
-	if len(n) == 0 || n.cmp(natOne) == 0 {
-		return false
-	}
-	// Two is the only even prime.
-	// Already checked by caller, but here to allow testing in isolation.
-	if n[0]&1 == 0 {
-		return n.cmp(natTwo) == 0
-	}
-
-	// Baillie-OEIS "method C" for choosing D, P, Q,
-	// as in https://oeis.org/A217719/a217719.txt:
-	// try increasing P ≥ 3 such that D = P² - 4 (so Q = 1)
-	// until Jacobi(D, n) = -1.
-	// The search is expected to succeed for non-square n after just a few trials.
-	// After more than expected failures, check whether n is square
-	// (which would cause Jacobi(D, n) = 1 for all D not dividing n).
-	p := Word(3)
-	d := nat{1}
-	t1 := nat(nil) // temp
-	intD := &Int{abs: d}
-	intN := &Int{abs: n}
-	for ; ; p++ {
-		if p > 10000 {
-			// This is widely believed to be impossible.
-			// If we get a report, we'll want the exact number n.
-			panic("math/big: internal error: cannot find (D/n) = -1 for " + intN.String())
-		}
-		d[0] = p*p - 4
-		j := Jacobi(intD, intN)
-		if j == -1 {
-			break
-		}
-		if j == 0 {
-			// d = p²-4 = (p-2)(p+2).
-			// If (d/n) == 0 then d shares a prime factor with n.
-			// Since the loop proceeds in increasing p and starts with p-2==1,
-			// the shared prime factor must be p+2.
-			// If p+2 == n, then n is prime; otherwise p+2 is a proper factor of n.
-			return len(n) == 1 && n[0] == p+2
-		}
-		if p == 40 {
-			// We'll never find (d/n) = -1 if n is a square.
-			// If n is a non-square we expect to find a d in just a few attempts on average.
-			// After 40 attempts, take a moment to check if n is indeed a square.
-			t1 = t1.sqrt(n)
-			t1 = t1.mul(t1, t1)
-			if t1.cmp(n) == 0 {
-				return false
-			}
-		}
-	}
-
-	// Grantham definition of "extra strong Lucas pseudoprime", after Thm 2.3 on p. 876
-	// (D, P, Q above have become Δ, b, 1):
-	//
-	// Let U_n = U_n(b, 1), V_n = V_n(b, 1), and Δ = b²-4.
-	// An extra strong Lucas pseudoprime to base b is a composite n = 2^r s + Jacobi(Δ, n),
-	// where s is odd and gcd(n, 2*Δ) = 1, such that either (i) U_s ≡ 0 mod n and V_s ≡ ±2 mod n,
-	// or (ii) V_{2^t s} ≡ 0 mod n for some 0 ≤ t < r-1.
-	//
-	// We know gcd(n, Δ) = 1 or else we'd have found Jacobi(d, n) == 0 above.
-	// We know gcd(n, 2) = 1 because n is odd.
-	//
-	// Arrange s = (n - Jacobi(Δ, n)) / 2^r = (n+1) / 2^r.
-	s := nat(nil).add(n, natOne)
-	r := int(s.trailingZeroBits())
-	s = s.shr(s, uint(r))
-	nm2 := nat(nil).sub(n, natTwo) // n-2
-
-	// We apply the "almost extra strong" test, which checks the above conditions
-	// except for U_s ≡ 0 mod n, which allows us to avoid computing any U_k values.
-	// Jacobsen points out that maybe we should just do the full extra strong test:
-	// "It is also possible to recover U_n using Crandall and Pomerance equation 3.13:
-	// U_n = D^-1 (2V_{n+1} - PV_n) allowing us to run the full extra-strong test
-	// at the cost of a single modular inversion. This computation is easy and fast in GMP,
-	// so we can get the full extra-strong test at essentially the same performance as the
-	// almost extra strong test."
-
-	// Compute Lucas sequence V_s(b, 1), where:
-	//
-	//	V(0) = 2
-	//	V(1) = P
-	//	V(k) = P V(k-1) - Q V(k-2).
-	//
-	// (Remember that due to method C above, P = b, Q = 1.)
-	//
-	// In general V(k) = α^k + β^k, where α and β are roots of x² - Px + Q.
-	// Crandall and Pomerance (p.147) observe that for 0 ≤ j ≤ k,
-	//
-	//	V(j+k) = V(j)V(k) - V(k-j).
-	//
-	// So in particular, to quickly double the subscript:
-	//
-	//	V(2k) = V(k)² - 2
-	//	V(2k+1) = V(k) V(k+1) - P
-	//
-	// We can therefore start with k=0 and build up to k=s in log₂(s) steps.
-	natP := nat(nil).setWord(p)
-	vk := nat(nil).setWord(2)
-	vk1 := nat(nil).setWord(p)
-	t2 := nat(nil) // temp
-	for i := int(s.bitLen()); i >= 0; i-- {
-		if s.bit(uint(i)) != 0 {
-			// k' = 2k+1
-			// V(k') = V(2k+1) = V(k) V(k+1) - P.
-			t1 = t1.mul(vk, vk1)
-			t1 = t1.add(t1, n)
-			t1 = t1.sub(t1, natP)
-			t2, vk = t2.div(vk, t1, n)
-			// V(k'+1) = V(2k+2) = V(k+1)² - 2.
-			t1 = t1.mul(vk1, vk1)
-			t1 = t1.add(t1, nm2)
-			t2, vk1 = t2.div(vk1, t1, n)
-		} else {
-			// k' = 2k
-			// V(k'+1) = V(2k+1) = V(k) V(k+1) - P.
-			t1 = t1.mul(vk, vk1)
-			t1 = t1.add(t1, n)
-			t1 = t1.sub(t1, natP)
-			t2, vk1 = t2.div(vk1, t1, n)
-			// V(k') = V(2k) = V(k)² - 2
-			t1 = t1.mul(vk, vk)
-			t1 = t1.add(t1, nm2)
-			t2, vk = t2.div(vk, t1, n)
-		}
-	}
-
-	// Now k=s, so vk = V(s). Check V(s) ≡ ±2 (mod n).
-	if vk.cmp(natTwo) == 0 || vk.cmp(nm2) == 0 {
-		// Check U(s) ≡ 0.
-		// As suggested by Jacobsen, apply Crandall and Pomerance equation 3.13:
-		//
-		//	U(k) = D⁻¹ (2 V(k+1) - P V(k))
-		//
-		// Since we are checking for U(k) == 0 it suffices to check 2 V(k+1) == P V(k) mod n,
-		// or P V(k) - 2 V(k+1) == 0 mod n.
-		t1 := t1.mul(vk, natP)
-		t2 := t2.shl(vk1, 1)
-		if t1.cmp(t2) < 0 {
-			t1, t2 = t2, t1
-		}
-		t1 = t1.sub(t1, t2)
-		t3 := vk1 // steal vk1, no longer needed below
-		vk1 = nil
-		_ = vk1
-		t2, t3 = t2.div(t3, t1, n)
-		if len(t3) == 0 {
-			return true
-		}
-	}
-
-	// Check V(2^t s) ≡ 0 mod n for some 0 ≤ t < r-1.
-	for t := 0; t < r-1; t++ {
-		if len(vk) == 0 { // vk == 0
-			return true
-		}
-		// Optimization: V(k) = 2 is a fixed point for V(k') = V(k)² - 2,
-		// so if V(k) = 2, we can stop: we will never find a future V(k) == 0.
-		if len(vk) == 1 && vk[0] == 2 { // vk == 2
-			return false
-		}
-		// k' = 2k
-		// V(k') = V(2k) = V(k)² - 2
-		t1 = t1.mul(vk, vk)
-		t1 = t1.sub(t1, natTwo)
-		t2, vk = t2.div(vk, t1, n)
-	}
-	return false
-}
diff --git a/pkg/bootstrap/src/bootstrap/math/big/prime_test.go b/pkg/bootstrap/src/bootstrap/math/big/prime_test.go
deleted file mode 100644
index 9fca97e..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/prime_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/prime_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/prime_test.go:1
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"fmt"
-	"strings"
-	"testing"
-	"unicode"
-)
-
-var primes = []string{
-	"2",
-	"3",
-	"5",
-	"7",
-	"11",
-
-	"13756265695458089029",
-	"13496181268022124907",
-	"10953742525620032441",
-	"17908251027575790097",
-
-	// https://golang.org/issue/638
-	"18699199384836356663",
-
-	"98920366548084643601728869055592650835572950932266967461790948584315647051443",
-	"94560208308847015747498523884063394671606671904944666360068158221458669711639",
-
-	// http://primes.utm.edu/lists/small/small3.html
-	"449417999055441493994709297093108513015373787049558499205492347871729927573118262811508386655998299074566974373711472560655026288668094291699357843464363003144674940345912431129144354948751003607115263071543163",
-	"230975859993204150666423538988557839555560243929065415434980904258310530753006723857139742334640122533598517597674807096648905501653461687601339782814316124971547968912893214002992086353183070342498989426570593",
-	"5521712099665906221540423207019333379125265462121169655563495403888449493493629943498064604536961775110765377745550377067893607246020694972959780839151452457728855382113555867743022746090187341871655890805971735385789993",
-	"203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123",
-
-	// ECC primes: http://tools.ietf.org/html/draft-ladd-safecurves-02
-	"3618502788666131106986593281521497120414687020801267626233049500247285301239",                                                                                  // Curve1174: 2^251-9
-	"57896044618658097711785492504343953926634992332820282019728792003956564819949",                                                                                 // Curve25519: 2^255-19
-	"9850501549098619803069760025035903451269934817616361666987073351061430442874302652853566563721228910201656997576599",                                           // E-382: 2^382-105
-	"42307582002575910332922579714097346549017899709713998034217522897561970639123926132812109468141778230245837569601494931472367",                                 // Curve41417: 2^414-17
-	"6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151", // E-521: 2^521-1
-}
-
-var composites = []string{
-	"0",
-	"1",
-	"21284175091214687912771199898307297748211672914763848041968395774954376176754",
-	"6084766654921918907427900243509372380954290099172559290432744450051395395951",
-	"84594350493221918389213352992032324280367711247940675652888030554255915464401",
-	"82793403787388584738507275144194252681",
-
-	// Arnault, "Rabin-Miller Primality Test: Composite Numbers Which Pass It",
-	// Mathematics of Computation, 64(209) (January 1995), pp. 335-361.
-	"1195068768795265792518361315725116351898245581", // strong pseudoprime to prime bases 2 through 29
-	// strong pseudoprime to all prime bases up to 200
-	`
-     80383745745363949125707961434194210813883768828755814583748891752229
-      74273765333652186502336163960045457915042023603208766569966760987284
-       0439654082329287387918508691668573282677617710293896977394701670823
-        0428687109997439976544144845341155872450633409279022275296229414984
-         2306881685404326457534018329786111298960644845216191652872597534901`,
-
-	// Extra-strong Lucas pseudoprimes. https://oeis.org/A217719
-	"989",
-	"3239",
-	"5777",
-	"10877",
-	"27971",
-	"29681",
-	"30739",
-	"31631",
-	"39059",
-	"72389",
-	"73919",
-	"75077",
-	"100127",
-	"113573",
-	"125249",
-	"137549",
-	"137801",
-	"153931",
-	"155819",
-	"161027",
-	"162133",
-	"189419",
-	"218321",
-	"231703",
-	"249331",
-	"370229",
-	"429479",
-	"430127",
-	"459191",
-	"473891",
-	"480689",
-	"600059",
-	"621781",
-	"632249",
-	"635627",
-
-	"3673744903",
-	"3281593591",
-	"2385076987",
-	"2738053141",
-	"2009621503",
-	"1502682721",
-	"255866131",
-	"117987841",
-	"587861",
-
-	"6368689",
-	"8725753",
-	"80579735209",
-	"105919633",
-}
-
-func cutSpace(r rune) rune {
-	if unicode.IsSpace(r) {
-		return -1
-	}
-	return r
-}
-
-func TestProbablyPrime(t *testing.T) {
-	nreps := 20
-	if testing.Short() {
-		nreps = 3
-	}
-	for i, s := range primes {
-		p, _ := new(Int).SetString(s, 10)
-		if !p.ProbablyPrime(nreps) || !p.ProbablyPrime(1) || !p.ProbablyPrime(0) {
-			t.Errorf("#%d prime found to be non-prime (%s)", i, s)
-		}
-	}
-
-	for i, s := range composites {
-		s = strings.Map(cutSpace, s)
-		c, _ := new(Int).SetString(s, 10)
-		if c.ProbablyPrime(nreps) || c.ProbablyPrime(1) || c.ProbablyPrime(0) {
-			t.Errorf("#%d composite found to be prime (%s)", i, s)
-		}
-	}
-
-	// check that ProbablyPrime panics if n < 0 (n == 0 and n == 1 are valid)
-	c := NewInt(11) // a prime
-	for _, n := range []int{-1, 0, 1} {
-		func() {
-			defer func() {
-				if n < 0 && recover() == nil {
-					t.Fatalf("expected panic from ProbablyPrime(%d)", n)
-				}
-			}()
-			if !c.ProbablyPrime(n) {
-				t.Fatalf("%v should be a prime", c)
-			}
-		}()
-	}
-}
-
-func BenchmarkProbablyPrime(b *testing.B) {
-	p, _ := new(Int).SetString("203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123", 10)
-	for _, n := range []int{0, 1, 5, 10, 20} {
-		b.Run(fmt.Sprintf("n=%d", n), func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
-				p.ProbablyPrime(n)
-			}
-		})
-	}
-
-	b.Run("Lucas", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			p.abs.probablyPrimeLucas()
-		}
-	})
-	b.Run("MillerRabinBase2", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			p.abs.probablyPrimeMillerRabin(1, true)
-		}
-	})
-}
-
-func TestMillerRabinPseudoprimes(t *testing.T) {
-	testPseudoprimes(t, "probablyPrimeMillerRabin",
-		func(n nat) bool { return n.probablyPrimeMillerRabin(1, true) && !n.probablyPrimeLucas() },
-		// https://oeis.org/A001262
-		[]int{2047, 3277, 4033, 4681, 8321, 15841, 29341, 42799, 49141, 52633, 65281, 74665, 80581, 85489, 88357, 90751})
-}
-
-func TestLucasPseudoprimes(t *testing.T) {
-	testPseudoprimes(t, "probablyPrimeLucas",
-		func(n nat) bool { return n.probablyPrimeLucas() && !n.probablyPrimeMillerRabin(1, true) },
-		// https://oeis.org/A217719
-		[]int{989, 3239, 5777, 10877, 27971, 29681, 30739, 31631, 39059, 72389, 73919, 75077})
-}
-
-func testPseudoprimes(t *testing.T, name string, cond func(nat) bool, want []int) {
-	n := nat{1}
-	for i := 3; i < 100000; i += 2 {
-		n[0] = Word(i)
-		pseudo := cond(n)
-		if pseudo && (len(want) == 0 || i != want[0]) {
-			t.Errorf("%s(%v, base=2) = true, want false", name, i)
-		} else if !pseudo && len(want) >= 1 && i == want[0] {
-			t.Errorf("%s(%v, base=2) = false, want true", name, i)
-		}
-		if len(want) > 0 && i == want[0] {
-			want = want[1:]
-		}
-	}
-	if len(want) > 0 {
-		t.Fatalf("forgot to test %v", want)
-	}
-}
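
The pseudoprime tests above check that the Miller-Rabin (base 2) and extra-strong Lucas failure sets do not overlap, which is what makes the combined Baillie-PSW test effective. A small sketch against the exported API, assuming the ProbablyPrime(0) Baillie-PSW behavior this Go 1.8 prebuilt provides:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 2047 = 23*89 is a strong pseudoprime to base 2 (OEIS A001262);
	// 5777 = 53*109 is an extra-strong Lucas pseudoprime (OEIS A217719).
	// Each fools one half of Baillie-PSW, but ProbablyPrime(0) runs both halves.
	for _, n := range []int64{2047, 5777} {
		fmt.Println(n, big.NewInt(n).ProbablyPrime(0)) // prints: 2047 false, then 5777 false
	}
}
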
diff --git a/pkg/bootstrap/src/bootstrap/math/big/rat.go b/pkg/bootstrap/src/bootstrap/math/big/rat.go
deleted file mode 100644
index a8a4d13..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/rat.go
+++ /dev/null
@@ -1,513 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/rat.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/rat.go:1
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements multi-precision rational numbers.
-
-package big
-
-import (
-	"fmt"
-	"math"
-)
-
-// A Rat represents a quotient a/b of arbitrary precision.
-// The zero value for a Rat represents the value 0.
-type Rat struct {
-	// To make zero values for Rat work w/o initialization,
-	// a zero value of b (len(b) == 0) acts like b == 1.
-	// a.neg determines the sign of the Rat, b.neg is ignored.
-	a, b Int
-}
-
-// NewRat creates a new Rat with numerator a and denominator b.
-func NewRat(a, b int64) *Rat {
-	return new(Rat).SetFrac64(a, b)
-}
-
-// SetFloat64 sets z to exactly f and returns z.
-// If f is not finite, SetFloat returns nil.
-func (z *Rat) SetFloat64(f float64) *Rat {
-	const expMask = 1<<11 - 1
-	bits := math.Float64bits(f)
-	mantissa := bits & (1<<52 - 1)
-	exp := int((bits >> 52) & expMask)
-	switch exp {
-	case expMask: // non-finite
-		return nil
-	case 0: // denormal
-		exp -= 1022
-	default: // normal
-		mantissa |= 1 << 52
-		exp -= 1023
-	}
-
-	shift := 52 - exp
-
-	// Optimization (?): partially pre-normalise.
-	for mantissa&1 == 0 && shift > 0 {
-		mantissa >>= 1
-		shift--
-	}
-
-	z.a.SetUint64(mantissa)
-	z.a.neg = f < 0
-	z.b.Set(intOne)
-	if shift > 0 {
-		z.b.Lsh(&z.b, uint(shift))
-	} else {
-		z.a.Lsh(&z.a, uint(-shift))
-	}
-	return z.norm()
-}
-
-// quotToFloat32 returns the non-negative float32 value
-// nearest to the quotient a/b, using round-to-even in
-// halfway cases. It does not mutate its arguments.
-// Preconditions: b is non-zero; a and b have no common factors.
-func quotToFloat32(a, b nat) (f float32, exact bool) {
-	const (
-		// float size in bits
-		Fsize = 32
-
-		// mantissa
-		Msize  = 23
-		Msize1 = Msize + 1 // incl. implicit 1
-		Msize2 = Msize1 + 1
-
-		// exponent
-		Esize = Fsize - Msize1
-		Ebias = 1<<(Esize-1) - 1
-		Emin  = 1 - Ebias
-		Emax  = Ebias
-	)
-
-	// TODO(adonovan): specialize common degenerate cases: 1.0, integers.
-	alen := a.bitLen()
-	if alen == 0 {
-		return 0, true
-	}
-	blen := b.bitLen()
-	if blen == 0 {
-		panic("division by zero")
-	}
-
-	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
-	// (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
-	// This is 2 or 3 more than the float32 mantissa field width of Msize:
-	// - the optional extra bit is shifted away in step 3 below.
-	// - the high-order 1 is omitted in "normal" representation;
-	// - the low-order 1 will be used during rounding then discarded.
-	exp := alen - blen
-	var a2, b2 nat
-	a2 = a2.set(a)
-	b2 = b2.set(b)
-	if shift := Msize2 - exp; shift > 0 {
-		a2 = a2.shl(a2, uint(shift))
-	} else if shift < 0 {
-		b2 = b2.shl(b2, uint(-shift))
-	}
-
-	// 2. Compute quotient and remainder (q, r).  NB: due to the
-	// extra shift, the low-order bit of q is logically the
-	// high-order bit of r.
-	var q nat
-	q, r := q.div(a2, a2, b2) // (recycle a2)
-	mantissa := low32(q)
-	haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
-
-	// 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
-	// (in effect---we accomplish this incrementally).
-	if mantissa>>Msize2 == 1 {
-		if mantissa&1 == 1 {
-			haveRem = true
-		}
-		mantissa >>= 1
-		exp++
-	}
-	if mantissa>>Msize1 != 1 {
-		panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
-	}
-
-	// 4. Rounding.
-	if Emin-Msize <= exp && exp <= Emin {
-		// Denormal case; lose 'shift' bits of precision.
-		shift := uint(Emin - (exp - 1)) // [1..Esize1)
-		lostbits := mantissa & (1<<shift - 1)
-		haveRem = haveRem || lostbits != 0
-		mantissa >>= shift
-		exp = 2 - Ebias // == exp + shift
-	}
-	// Round q using round-half-to-even.
-	exact = !haveRem
-	if mantissa&1 != 0 {
-		exact = false
-		if haveRem || mantissa&2 != 0 {
-			if mantissa++; mantissa >= 1<<Msize2 {
-				// Complete rollover 11...1 => 100...0, so shift is safe
-				mantissa >>= 1
-				exp++
-			}
-		}
-	}
-	mantissa >>= 1 // discard rounding bit.  Mantissa now scaled by 1<<Msize1.
-
-	f = float32(math.Ldexp(float64(mantissa), exp-Msize1))
-	if math.IsInf(float64(f), 0) {
-		exact = false
-	}
-	return
-}
-
-// quotToFloat64 returns the non-negative float64 value
-// nearest to the quotient a/b, using round-to-even in
-// halfway cases. It does not mutate its arguments.
-// Preconditions: b is non-zero; a and b have no common factors.
-func quotToFloat64(a, b nat) (f float64, exact bool) {
-	const (
-		// float size in bits
-		Fsize = 64
-
-		// mantissa
-		Msize  = 52
-		Msize1 = Msize + 1 // incl. implicit 1
-		Msize2 = Msize1 + 1
-
-		// exponent
-		Esize = Fsize - Msize1
-		Ebias = 1<<(Esize-1) - 1
-		Emin  = 1 - Ebias
-		Emax  = Ebias
-	)
-
-	// TODO(adonovan): specialize common degenerate cases: 1.0, integers.
-	alen := a.bitLen()
-	if alen == 0 {
-		return 0, true
-	}
-	blen := b.bitLen()
-	if blen == 0 {
-		panic("division by zero")
-	}
-
-	// 1. Left-shift A or B such that quotient A/B is in [1<<Msize1, 1<<(Msize2+1))
-	// (Msize2 bits if A < B when they are left-aligned, Msize2+1 bits if A >= B).
-	// This is 2 or 3 more than the float64 mantissa field width of Msize:
-	// - the optional extra bit is shifted away in step 3 below.
-	// - the high-order 1 is omitted in "normal" representation;
-	// - the low-order 1 will be used during rounding then discarded.
-	exp := alen - blen
-	var a2, b2 nat
-	a2 = a2.set(a)
-	b2 = b2.set(b)
-	if shift := Msize2 - exp; shift > 0 {
-		a2 = a2.shl(a2, uint(shift))
-	} else if shift < 0 {
-		b2 = b2.shl(b2, uint(-shift))
-	}
-
-	// 2. Compute quotient and remainder (q, r).  NB: due to the
-	// extra shift, the low-order bit of q is logically the
-	// high-order bit of r.
-	var q nat
-	q, r := q.div(a2, a2, b2) // (recycle a2)
-	mantissa := low64(q)
-	haveRem := len(r) > 0 // mantissa&1 && !haveRem => remainder is exactly half
-
-	// 3. If quotient didn't fit in Msize2 bits, redo division by b2<<1
-	// (in effect---we accomplish this incrementally).
-	if mantissa>>Msize2 == 1 {
-		if mantissa&1 == 1 {
-			haveRem = true
-		}
-		mantissa >>= 1
-		exp++
-	}
-	if mantissa>>Msize1 != 1 {
-		panic(fmt.Sprintf("expected exactly %d bits of result", Msize2))
-	}
-
-	// 4. Rounding.
-	if Emin-Msize <= exp && exp <= Emin {
-		// Denormal case; lose 'shift' bits of precision.
-		shift := uint(Emin - (exp - 1)) // [1..Esize1)
-		lostbits := mantissa & (1<<shift - 1)
-		haveRem = haveRem || lostbits != 0
-		mantissa >>= shift
-		exp = 2 - Ebias // == exp + shift
-	}
-	// Round q using round-half-to-even.
-	exact = !haveRem
-	if mantissa&1 != 0 {
-		exact = false
-		if haveRem || mantissa&2 != 0 {
-			if mantissa++; mantissa >= 1<<Msize2 {
-				// Complete rollover 11...1 => 100...0, so shift is safe
-				mantissa >>= 1
-				exp++
-			}
-		}
-	}
-	mantissa >>= 1 // discard rounding bit.  Mantissa now scaled by 1<<Msize1.
-
-	f = math.Ldexp(float64(mantissa), exp-Msize1)
-	if math.IsInf(f, 0) {
-		exact = false
-	}
-	return
-}
-
-// Float32 returns the nearest float32 value for x and a bool indicating
-// whether f represents x exactly. If the magnitude of x is too large to
-// be represented by a float32, f is an infinity and exact is false.
-// The sign of f always matches the sign of x, even if f == 0.
-func (x *Rat) Float32() (f float32, exact bool) {
-	b := x.b.abs
-	if len(b) == 0 {
-		b = b.set(natOne) // materialize denominator
-	}
-	f, exact = quotToFloat32(x.a.abs, b)
-	if x.a.neg {
-		f = -f
-	}
-	return
-}
-
-// Float64 returns the nearest float64 value for x and a bool indicating
-// whether f represents x exactly. If the magnitude of x is too large to
-// be represented by a float64, f is an infinity and exact is false.
-// The sign of f always matches the sign of x, even if f == 0.
-func (x *Rat) Float64() (f float64, exact bool) {
-	b := x.b.abs
-	if len(b) == 0 {
-		b = b.set(natOne) // materialize denominator
-	}
-	f, exact = quotToFloat64(x.a.abs, b)
-	if x.a.neg {
-		f = -f
-	}
-	return
-}
-
-// SetFrac sets z to a/b and returns z.
-func (z *Rat) SetFrac(a, b *Int) *Rat {
-	z.a.neg = a.neg != b.neg
-	babs := b.abs
-	if len(babs) == 0 {
-		panic("division by zero")
-	}
-	if &z.a == b || alias(z.a.abs, babs) {
-		babs = nat(nil).set(babs) // make a copy
-	}
-	z.a.abs = z.a.abs.set(a.abs)
-	z.b.abs = z.b.abs.set(babs)
-	return z.norm()
-}
-
-// SetFrac64 sets z to a/b and returns z.
-func (z *Rat) SetFrac64(a, b int64) *Rat {
-	z.a.SetInt64(a)
-	if b == 0 {
-		panic("division by zero")
-	}
-	if b < 0 {
-		b = -b
-		z.a.neg = !z.a.neg
-	}
-	z.b.abs = z.b.abs.setUint64(uint64(b))
-	return z.norm()
-}
-
-// SetInt sets z to x (by making a copy of x) and returns z.
-func (z *Rat) SetInt(x *Int) *Rat {
-	z.a.Set(x)
-	z.b.abs = z.b.abs[:0]
-	return z
-}
-
-// SetInt64 sets z to x and returns z.
-func (z *Rat) SetInt64(x int64) *Rat {
-	z.a.SetInt64(x)
-	z.b.abs = z.b.abs[:0]
-	return z
-}
-
-// Set sets z to x (by making a copy of x) and returns z.
-func (z *Rat) Set(x *Rat) *Rat {
-	if z != x {
-		z.a.Set(&x.a)
-		z.b.Set(&x.b)
-	}
-	return z
-}
-
-// Abs sets z to |x| (the absolute value of x) and returns z.
-func (z *Rat) Abs(x *Rat) *Rat {
-	z.Set(x)
-	z.a.neg = false
-	return z
-}
-
-// Neg sets z to -x and returns z.
-func (z *Rat) Neg(x *Rat) *Rat {
-	z.Set(x)
-	z.a.neg = len(z.a.abs) > 0 && !z.a.neg // 0 has no sign
-	return z
-}
-
-// Inv sets z to 1/x and returns z.
-func (z *Rat) Inv(x *Rat) *Rat {
-	if len(x.a.abs) == 0 {
-		panic("division by zero")
-	}
-	z.Set(x)
-	a := z.b.abs
-	if len(a) == 0 {
-		a = a.set(natOne) // materialize numerator
-	}
-	b := z.a.abs
-	if b.cmp(natOne) == 0 {
-		b = b[:0] // normalize denominator
-	}
-	z.a.abs, z.b.abs = a, b // sign doesn't change
-	return z
-}
-
-// Sign returns:
-//
-//	-1 if x <  0
-//	 0 if x == 0
-//	+1 if x >  0
-//
-func (x *Rat) Sign() int {
-	return x.a.Sign()
-}
-
-// IsInt reports whether the denominator of x is 1.
-func (x *Rat) IsInt() bool {
-	return len(x.b.abs) == 0 || x.b.abs.cmp(natOne) == 0
-}
-
-// Num returns the numerator of x; it may be <= 0.
-// The result is a reference to x's numerator; it
-// may change if a new value is assigned to x, and vice versa.
-// The sign of the numerator corresponds to the sign of x.
-func (x *Rat) Num() *Int {
-	return &x.a
-}
-
-// Denom returns the denominator of x; it is always > 0.
-// The result is a reference to x's denominator; it
-// may change if a new value is assigned to x, and vice versa.
-func (x *Rat) Denom() *Int {
-	x.b.neg = false // the result is always >= 0
-	if len(x.b.abs) == 0 {
-		x.b.abs = x.b.abs.set(natOne) // materialize denominator
-	}
-	return &x.b
-}
-
-func (z *Rat) norm() *Rat {
-	switch {
-	case len(z.a.abs) == 0:
-		// z == 0 - normalize sign and denominator
-		z.a.neg = false
-		z.b.abs = z.b.abs[:0]
-	case len(z.b.abs) == 0:
-		// z is normalized int - nothing to do
-	case z.b.abs.cmp(natOne) == 0:
-		// z is int - normalize denominator
-		z.b.abs = z.b.abs[:0]
-	default:
-		neg := z.a.neg
-		z.a.neg = false
-		z.b.neg = false
-		if f := NewInt(0).binaryGCD(&z.a, &z.b); f.Cmp(intOne) != 0 {
-			z.a.abs, _ = z.a.abs.div(nil, z.a.abs, f.abs)
-			z.b.abs, _ = z.b.abs.div(nil, z.b.abs, f.abs)
-			if z.b.abs.cmp(natOne) == 0 {
-				// z is int - normalize denominator
-				z.b.abs = z.b.abs[:0]
-			}
-		}
-		z.a.neg = neg
-	}
-	return z
-}
-
-// mulDenom sets z to the denominator product x*y (by taking into
-// account that 0 values for x or y must be interpreted as 1) and
-// returns z.
-func mulDenom(z, x, y nat) nat {
-	switch {
-	case len(x) == 0:
-		return z.set(y)
-	case len(y) == 0:
-		return z.set(x)
-	}
-	return z.mul(x, y)
-}
-
-// scaleDenom computes x*f.
-// If f == 0 (zero value of denominator), the result is (a copy of) x.
-func scaleDenom(x *Int, f nat) *Int {
-	var z Int
-	if len(f) == 0 {
-		return z.Set(x)
-	}
-	z.abs = z.abs.mul(x.abs, f)
-	z.neg = x.neg
-	return &z
-}
-
-// Cmp compares x and y and returns:
-//
-//   -1 if x <  y
-//    0 if x == y
-//   +1 if x >  y
-//
-func (x *Rat) Cmp(y *Rat) int {
-	return scaleDenom(&x.a, y.b.abs).Cmp(scaleDenom(&y.a, x.b.abs))
-}
-
-// Add sets z to the sum x+y and returns z.
-func (z *Rat) Add(x, y *Rat) *Rat {
-	a1 := scaleDenom(&x.a, y.b.abs)
-	a2 := scaleDenom(&y.a, x.b.abs)
-	z.a.Add(a1, a2)
-	z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
-	return z.norm()
-}
-
-// Sub sets z to the difference x-y and returns z.
-func (z *Rat) Sub(x, y *Rat) *Rat {
-	a1 := scaleDenom(&x.a, y.b.abs)
-	a2 := scaleDenom(&y.a, x.b.abs)
-	z.a.Sub(a1, a2)
-	z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
-	return z.norm()
-}
-
-// Mul sets z to the product x*y and returns z.
-func (z *Rat) Mul(x, y *Rat) *Rat {
-	z.a.Mul(&x.a, &y.a)
-	z.b.abs = mulDenom(z.b.abs, x.b.abs, y.b.abs)
-	return z.norm()
-}
-
-// Quo sets z to the quotient x/y and returns z.
-// If y == 0, a division-by-zero run-time panic occurs.
-func (z *Rat) Quo(x, y *Rat) *Rat {
-	if len(y.a.abs) == 0 {
-		panic("division by zero")
-	}
-	a := scaleDenom(&x.a, y.b.abs)
-	b := scaleDenom(&y.a, x.b.abs)
-	z.a.abs = a.abs
-	z.b.abs = b.abs
-	z.a.neg = a.neg != b.neg
-	return z.norm()
-}
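
Float32 and Float64 above return an exact flag alongside the nearest float. A minimal sketch of that behavior using the exported math/big API:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 3/4 is a dyadic rational, so the float64 conversion is exact.
	f, exact := big.NewRat(3, 4).Float64()
	fmt.Println(f, exact) // 0.75 true

	// 1/3 has no finite binary expansion; the nearest float64 is returned
	// and exact is false.
	f, exact = big.NewRat(1, 3).Float64()
	fmt.Println(f, exact) // 0.3333333333333333 false
}
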
diff --git a/pkg/bootstrap/src/bootstrap/math/big/rat_test.go b/pkg/bootstrap/src/bootstrap/math/big/rat_test.go
deleted file mode 100644
index 7830bbe..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/rat_test.go
+++ /dev/null
@@ -1,625 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/rat_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/rat_test.go:1
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"math"
-	"testing"
-)
-
-func TestZeroRat(t *testing.T) {
-	var x, y, z Rat
-	y.SetFrac64(0, 42)
-
-	if x.Cmp(&y) != 0 {
-		t.Errorf("x and y should be both equal and zero")
-	}
-
-	if s := x.String(); s != "0/1" {
-		t.Errorf("got x = %s, want 0/1", s)
-	}
-
-	if s := x.RatString(); s != "0" {
-		t.Errorf("got x = %s, want 0", s)
-	}
-
-	z.Add(&x, &y)
-	if s := z.RatString(); s != "0" {
-		t.Errorf("got x+y = %s, want 0", s)
-	}
-
-	z.Sub(&x, &y)
-	if s := z.RatString(); s != "0" {
-		t.Errorf("got x-y = %s, want 0", s)
-	}
-
-	z.Mul(&x, &y)
-	if s := z.RatString(); s != "0" {
-		t.Errorf("got x*y = %s, want 0", s)
-	}
-
-	// check for division by zero
-	defer func() {
-		if s := recover(); s == nil || s.(string) != "division by zero" {
-			panic(s)
-		}
-	}()
-	z.Quo(&x, &y)
-}
-
-func TestRatSign(t *testing.T) {
-	zero := NewRat(0, 1)
-	for _, a := range setStringTests {
-		x, ok := new(Rat).SetString(a.in)
-		if !ok {
-			continue
-		}
-		s := x.Sign()
-		e := x.Cmp(zero)
-		if s != e {
-			t.Errorf("got %d; want %d for z = %v", s, e, &x)
-		}
-	}
-}
-
-var ratCmpTests = []struct {
-	rat1, rat2 string
-	out        int
-}{
-	{"0", "0/1", 0},
-	{"1/1", "1", 0},
-	{"-1", "-2/2", 0},
-	{"1", "0", 1},
-	{"0/1", "1/1", -1},
-	{"-5/1434770811533343057144", "-5/1434770811533343057145", -1},
-	{"49832350382626108453/8964749413", "49832350382626108454/8964749413", -1},
-	{"-37414950961700930/7204075375675961", "37414950961700930/7204075375675961", -1},
-	{"37414950961700930/7204075375675961", "74829901923401860/14408150751351922", 0},
-}
-
-func TestRatCmp(t *testing.T) {
-	for i, test := range ratCmpTests {
-		x, _ := new(Rat).SetString(test.rat1)
-		y, _ := new(Rat).SetString(test.rat2)
-
-		out := x.Cmp(y)
-		if out != test.out {
-			t.Errorf("#%d got out = %v; want %v", i, out, test.out)
-		}
-	}
-}
-
-func TestIsInt(t *testing.T) {
-	one := NewInt(1)
-	for _, a := range setStringTests {
-		x, ok := new(Rat).SetString(a.in)
-		if !ok {
-			continue
-		}
-		i := x.IsInt()
-		e := x.Denom().Cmp(one) == 0
-		if i != e {
-			t.Errorf("got IsInt(%v) == %v; want %v", x, i, e)
-		}
-	}
-}
-
-func TestRatAbs(t *testing.T) {
-	zero := new(Rat)
-	for _, a := range setStringTests {
-		x, ok := new(Rat).SetString(a.in)
-		if !ok {
-			continue
-		}
-		e := new(Rat).Set(x)
-		if e.Cmp(zero) < 0 {
-			e.Sub(zero, e)
-		}
-		z := new(Rat).Abs(x)
-		if z.Cmp(e) != 0 {
-			t.Errorf("got Abs(%v) = %v; want %v", x, z, e)
-		}
-	}
-}
-
-func TestRatNeg(t *testing.T) {
-	zero := new(Rat)
-	for _, a := range setStringTests {
-		x, ok := new(Rat).SetString(a.in)
-		if !ok {
-			continue
-		}
-		e := new(Rat).Sub(zero, x)
-		z := new(Rat).Neg(x)
-		if z.Cmp(e) != 0 {
-			t.Errorf("got Neg(%v) = %v; want %v", x, z, e)
-		}
-	}
-}
-
-func TestRatInv(t *testing.T) {
-	zero := new(Rat)
-	for _, a := range setStringTests {
-		x, ok := new(Rat).SetString(a.in)
-		if !ok {
-			continue
-		}
-		if x.Cmp(zero) == 0 {
-			continue // avoid division by zero
-		}
-		e := new(Rat).SetFrac(x.Denom(), x.Num())
-		z := new(Rat).Inv(x)
-		if z.Cmp(e) != 0 {
-			t.Errorf("got Inv(%v) = %v; want %v", x, z, e)
-		}
-	}
-}
-
-type ratBinFun func(z, x, y *Rat) *Rat
-type ratBinArg struct {
-	x, y, z string
-}
-
-func testRatBin(t *testing.T, i int, name string, f ratBinFun, a ratBinArg) {
-	x, _ := new(Rat).SetString(a.x)
-	y, _ := new(Rat).SetString(a.y)
-	z, _ := new(Rat).SetString(a.z)
-	out := f(new(Rat), x, y)
-
-	if out.Cmp(z) != 0 {
-		t.Errorf("%s #%d got %s want %s", name, i, out, z)
-	}
-}
-
-var ratBinTests = []struct {
-	x, y      string
-	sum, prod string
-}{
-	{"0", "0", "0", "0"},
-	{"0", "1", "1", "0"},
-	{"-1", "0", "-1", "0"},
-	{"-1", "1", "0", "-1"},
-	{"1", "1", "2", "1"},
-	{"1/2", "1/2", "1", "1/4"},
-	{"1/4", "1/3", "7/12", "1/12"},
-	{"2/5", "-14/3", "-64/15", "-28/15"},
-	{"4707/49292519774798173060", "-3367/70976135186689855734", "84058377121001851123459/1749296273614329067191168098769082663020", "-1760941/388732505247628681598037355282018369560"},
-	{"-61204110018146728334/3", "-31052192278051565633/2", "-215564796870448153567/6", "950260896245257153059642991192710872711/3"},
-	{"-854857841473707320655/4237645934602118692642972629634714039", "-18/31750379913563777419", "-27/133467566250814981", "15387441146526731771790/134546868362786310073779084329032722548987800600710485341"},
-	{"618575745270541348005638912139/19198433543745179392300736", "-19948846211000086/637313996471", "27674141753240653/30123979153216", "-6169936206128396568797607742807090270137721977/6117715203873571641674006593837351328"},
-	{"-3/26206484091896184128", "5/2848423294177090248", "15310893822118706237/9330894968229805033368778458685147968", "-5/24882386581946146755650075889827061248"},
-	{"26946729/330400702820", "41563965/225583428284", "1238218672302860271/4658307703098666660055", "224002580204097/14906584649915733312176"},
-	{"-8259900599013409474/7", "-84829337473700364773/56707961321161574960", "-468402123685491748914621885145127724451/396955729248131024720", "350340947706464153265156004876107029701/198477864624065512360"},
-	{"575775209696864/1320203974639986246357", "29/712593081308", "410331716733912717985762465/940768218243776489278275419794956", "808/45524274987585732633"},
-	{"1786597389946320496771/2066653520653241", "6269770/1992362624741777", "3559549865190272133656109052308126637/4117523232840525481453983149257", "8967230/3296219033"},
-	{"-36459180403360509753/32150500941194292113930", "9381566963714/9633539", "301622077145533298008420642898530153/309723104686531919656937098270", "-3784609207827/3426986245"},
-}
-
-func TestRatBin(t *testing.T) {
-	for i, test := range ratBinTests {
-		arg := ratBinArg{test.x, test.y, test.sum}
-		testRatBin(t, i, "Add", (*Rat).Add, arg)
-
-		arg = ratBinArg{test.y, test.x, test.sum}
-		testRatBin(t, i, "Add symmetric", (*Rat).Add, arg)
-
-		arg = ratBinArg{test.sum, test.x, test.y}
-		testRatBin(t, i, "Sub", (*Rat).Sub, arg)
-
-		arg = ratBinArg{test.sum, test.y, test.x}
-		testRatBin(t, i, "Sub symmetric", (*Rat).Sub, arg)
-
-		arg = ratBinArg{test.x, test.y, test.prod}
-		testRatBin(t, i, "Mul", (*Rat).Mul, arg)
-
-		arg = ratBinArg{test.y, test.x, test.prod}
-		testRatBin(t, i, "Mul symmetric", (*Rat).Mul, arg)
-
-		if test.x != "0" {
-			arg = ratBinArg{test.prod, test.x, test.y}
-			testRatBin(t, i, "Quo", (*Rat).Quo, arg)
-		}
-
-		if test.y != "0" {
-			arg = ratBinArg{test.prod, test.y, test.x}
-			testRatBin(t, i, "Quo symmetric", (*Rat).Quo, arg)
-		}
-	}
-}
-
-func TestIssue820(t *testing.T) {
-	x := NewRat(3, 1)
-	y := NewRat(2, 1)
-	z := y.Quo(x, y)
-	q := NewRat(3, 2)
-	if z.Cmp(q) != 0 {
-		t.Errorf("got %s want %s", z, q)
-	}
-
-	y = NewRat(3, 1)
-	x = NewRat(2, 1)
-	z = y.Quo(x, y)
-	q = NewRat(2, 3)
-	if z.Cmp(q) != 0 {
-		t.Errorf("got %s want %s", z, q)
-	}
-
-	x = NewRat(3, 1)
-	z = x.Quo(x, x)
-	q = NewRat(3, 3)
-	if z.Cmp(q) != 0 {
-		t.Errorf("got %s want %s", z, q)
-	}
-}
-
-var setFrac64Tests = []struct {
-	a, b int64
-	out  string
-}{
-	{0, 1, "0"},
-	{0, -1, "0"},
-	{1, 1, "1"},
-	{-1, 1, "-1"},
-	{1, -1, "-1"},
-	{-1, -1, "1"},
-	{-9223372036854775808, -9223372036854775808, "1"},
-}
-
-func TestRatSetFrac64Rat(t *testing.T) {
-	for i, test := range setFrac64Tests {
-		x := new(Rat).SetFrac64(test.a, test.b)
-		if x.RatString() != test.out {
-			t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
-		}
-	}
-}
-
-func TestIssue2379(t *testing.T) {
-	// 1) no aliasing
-	q := NewRat(3, 2)
-	x := new(Rat)
-	x.SetFrac(NewInt(3), NewInt(2))
-	if x.Cmp(q) != 0 {
-		t.Errorf("1) got %s want %s", x, q)
-	}
-
-	// 2) aliasing of numerator
-	x = NewRat(2, 3)
-	x.SetFrac(NewInt(3), x.Num())
-	if x.Cmp(q) != 0 {
-		t.Errorf("2) got %s want %s", x, q)
-	}
-
-	// 3) aliasing of denominator
-	x = NewRat(2, 3)
-	x.SetFrac(x.Denom(), NewInt(2))
-	if x.Cmp(q) != 0 {
-		t.Errorf("3) got %s want %s", x, q)
-	}
-
-	// 4) aliasing of numerator and denominator
-	x = NewRat(2, 3)
-	x.SetFrac(x.Denom(), x.Num())
-	if x.Cmp(q) != 0 {
-		t.Errorf("4) got %s want %s", x, q)
-	}
-
-	// 5) numerator and denominator are the same
-	q = NewRat(1, 1)
-	x = new(Rat)
-	n := NewInt(7)
-	x.SetFrac(n, n)
-	if x.Cmp(q) != 0 {
-		t.Errorf("5) got %s want %s", x, q)
-	}
-}
-
-func TestIssue3521(t *testing.T) {
-	a := new(Int)
-	b := new(Int)
-	a.SetString("64375784358435883458348587", 0)
-	b.SetString("4789759874531", 0)
-
-	// 0) a raw zero value has 1 as denominator
-	zero := new(Rat)
-	one := NewInt(1)
-	if zero.Denom().Cmp(one) != 0 {
-		t.Errorf("0) got %s want %s", zero.Denom(), one)
-	}
-
-	// 1a) a zero value remains zero independent of denominator
-	x := new(Rat)
-	x.Denom().Set(new(Int).Neg(b))
-	if x.Cmp(zero) != 0 {
-		t.Errorf("1a) got %s want %s", x, zero)
-	}
-
-	// 1b) a zero value may have a denominator != 0 and != 1
-	x.Num().Set(a)
-	qab := new(Rat).SetFrac(a, b)
-	if x.Cmp(qab) != 0 {
-		t.Errorf("1b) got %s want %s", x, qab)
-	}
-
-	// 2a) an integral value becomes a fraction depending on denominator
-	x.SetFrac64(10, 2)
-	x.Denom().SetInt64(3)
-	q53 := NewRat(5, 3)
-	if x.Cmp(q53) != 0 {
-		t.Errorf("2a) got %s want %s", x, q53)
-	}
-
-	// 2b) an integral value becomes a fraction depending on denominator
-	x = NewRat(10, 2)
-	x.Denom().SetInt64(3)
-	if x.Cmp(q53) != 0 {
-		t.Errorf("2b) got %s want %s", x, q53)
-	}
-
-	// 3) changing the numerator/denominator of a Rat changes the Rat
-	x.SetFrac(a, b)
-	a = x.Num()
-	b = x.Denom()
-	a.SetInt64(5)
-	b.SetInt64(3)
-	if x.Cmp(q53) != 0 {
-		t.Errorf("3) got %s want %s", x, q53)
-	}
-}
-
-func TestFloat32Distribution(t *testing.T) {
-	// Generate a distribution of (sign, mantissa, exp) values
-	// broader than the float32 range, and check Rat.Float32()
-	// always picks the closest float32 approximation.
-	var add = []int64{
-		0,
-		1,
-		3,
-		5,
-		7,
-		9,
-		11,
-	}
-	var winc, einc = uint64(5), 15 // quick test (~60ms on x86-64)
-	if *long {
-		winc, einc = uint64(1), 1 // soak test (~1.5s on x86-64)
-	}
-
-	for _, sign := range "+-" {
-		for _, a := range add {
-			for wid := uint64(0); wid < 30; wid += winc {
-				b := 1<<wid + a
-				if sign == '-' {
-					b = -b
-				}
-				for exp := -150; exp < 150; exp += einc {
-					num, den := NewInt(b), NewInt(1)
-					if exp > 0 {
-						num.Lsh(num, uint(exp))
-					} else {
-						den.Lsh(den, uint(-exp))
-					}
-					r := new(Rat).SetFrac(num, den)
-					f, _ := r.Float32()
-
-					if !checkIsBestApprox32(t, f, r) {
-						// Append context information.
-						t.Errorf("(input was mantissa %#x, exp %d; f = %g (%b); f ~ %g; r = %v)",
-							b, exp, f, f, math.Ldexp(float64(b), exp), r)
-					}
-
-					checkNonLossyRoundtrip32(t, f)
-				}
-			}
-		}
-	}
-}
-
-func TestFloat64Distribution(t *testing.T) {
-	// Generate a distribution of (sign, mantissa, exp) values
-	// broader than the float64 range, and check Rat.Float64()
-	// always picks the closest float64 approximation.
-	var add = []int64{
-		0,
-		1,
-		3,
-		5,
-		7,
-		9,
-		11,
-	}
-	var winc, einc = uint64(10), 500 // quick test (~12ms on x86-64)
-	if *long {
-		winc, einc = uint64(1), 1 // soak test (~75s on x86-64)
-	}
-
-	for _, sign := range "+-" {
-		for _, a := range add {
-			for wid := uint64(0); wid < 60; wid += winc {
-				b := 1<<wid + a
-				if sign == '-' {
-					b = -b
-				}
-				for exp := -1100; exp < 1100; exp += einc {
-					num, den := NewInt(b), NewInt(1)
-					if exp > 0 {
-						num.Lsh(num, uint(exp))
-					} else {
-						den.Lsh(den, uint(-exp))
-					}
-					r := new(Rat).SetFrac(num, den)
-					f, _ := r.Float64()
-
-					if !checkIsBestApprox64(t, f, r) {
-						// Append context information.
-						t.Errorf("(input was mantissa %#x, exp %d; f = %g (%b); f ~ %g; r = %v)",
-							b, exp, f, f, math.Ldexp(float64(b), exp), r)
-					}
-
-					checkNonLossyRoundtrip64(t, f)
-				}
-			}
-		}
-	}
-}
-
-// TestSetFloat64NonFinite checks that SetFloat64 of a non-finite value
-// returns nil.
-func TestSetFloat64NonFinite(t *testing.T) {
-	for _, f := range []float64{math.NaN(), math.Inf(+1), math.Inf(-1)} {
-		var r Rat
-		if r2 := r.SetFloat64(f); r2 != nil {
-			t.Errorf("SetFloat64(%g) was %v, want nil", f, r2)
-		}
-	}
-}
-
-// checkNonLossyRoundtrip32 checks that a float->Rat->float roundtrip is
-// non-lossy for finite f.
-func checkNonLossyRoundtrip32(t *testing.T, f float32) {
-	if !isFinite(float64(f)) {
-		return
-	}
-	r := new(Rat).SetFloat64(float64(f))
-	if r == nil {
-		t.Errorf("Rat.SetFloat64(float64(%g) (%b)) == nil", f, f)
-		return
-	}
-	f2, exact := r.Float32()
-	if f != f2 || !exact {
-		t.Errorf("Rat.SetFloat64(float64(%g)).Float32() = %g (%b), %v, want %g (%b), %v; delta = %b",
-			f, f2, f2, exact, f, f, true, f2-f)
-	}
-}
-
-// checkNonLossyRoundtrip64 checks that a float->Rat->float roundtrip is
-// non-lossy for finite f.
-func checkNonLossyRoundtrip64(t *testing.T, f float64) {
-	if !isFinite(f) {
-		return
-	}
-	r := new(Rat).SetFloat64(f)
-	if r == nil {
-		t.Errorf("Rat.SetFloat64(%g (%b)) == nil", f, f)
-		return
-	}
-	f2, exact := r.Float64()
-	if f != f2 || !exact {
-		t.Errorf("Rat.SetFloat64(%g).Float64() = %g (%b), %v, want %g (%b), %v; delta = %b",
-			f, f2, f2, exact, f, f, true, f2-f)
-	}
-}
-
-// delta returns the absolute difference between r and f.
-func delta(r *Rat, f float64) *Rat {
-	d := new(Rat).Sub(r, new(Rat).SetFloat64(f))
-	return d.Abs(d)
-}
-
-// checkIsBestApprox32 checks that f is the best possible float32
-// approximation of r.
-// Returns true on success.
-func checkIsBestApprox32(t *testing.T, f float32, r *Rat) bool {
-	if math.Abs(float64(f)) >= math.MaxFloat32 {
-		// Cannot check +Inf, -Inf, nor the float next to them (MaxFloat32).
-		// But we have tests for these special cases.
-		return true
-	}
-
-	// r must be strictly between f0 and f1, the floats bracketing f.
-	f0 := math.Nextafter32(f, float32(math.Inf(-1)))
-	f1 := math.Nextafter32(f, float32(math.Inf(+1)))
-
-	// For f to be correct, r must be closer to f than to f0 or f1.
-	df := delta(r, float64(f))
-	df0 := delta(r, float64(f0))
-	df1 := delta(r, float64(f1))
-	if df.Cmp(df0) > 0 {
-		t.Errorf("Rat(%v).Float32() = %g (%b), but previous float32 %g (%b) is closer", r, f, f, f0, f0)
-		return false
-	}
-	if df.Cmp(df1) > 0 {
-		t.Errorf("Rat(%v).Float32() = %g (%b), but next float32 %g (%b) is closer", r, f, f, f1, f1)
-		return false
-	}
-	if df.Cmp(df0) == 0 && !isEven32(f) {
-		t.Errorf("Rat(%v).Float32() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0)
-		return false
-	}
-	if df.Cmp(df1) == 0 && !isEven32(f) {
-		t.Errorf("Rat(%v).Float32() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1)
-		return false
-	}
-	return true
-}
-
-// checkIsBestApprox64 checks that f is the best possible float64
-// approximation of r.
-// Returns true on success.
-func checkIsBestApprox64(t *testing.T, f float64, r *Rat) bool {
-	if math.Abs(f) >= math.MaxFloat64 {
-		// Cannot check +Inf, -Inf, nor the float next to them (MaxFloat64).
-		// But we have tests for these special cases.
-		return true
-	}
-
-	// r must be strictly between f0 and f1, the floats bracketing f.
-	f0 := math.Nextafter(f, math.Inf(-1))
-	f1 := math.Nextafter(f, math.Inf(+1))
-
-	// For f to be correct, r must be closer to f than to f0 or f1.
-	df := delta(r, f)
-	df0 := delta(r, f0)
-	df1 := delta(r, f1)
-	if df.Cmp(df0) > 0 {
-		t.Errorf("Rat(%v).Float64() = %g (%b), but previous float64 %g (%b) is closer", r, f, f, f0, f0)
-		return false
-	}
-	if df.Cmp(df1) > 0 {
-		t.Errorf("Rat(%v).Float64() = %g (%b), but next float64 %g (%b) is closer", r, f, f, f1, f1)
-		return false
-	}
-	if df.Cmp(df0) == 0 && !isEven64(f) {
-		t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0)
-		return false
-	}
-	if df.Cmp(df1) == 0 && !isEven64(f) {
-		t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1)
-		return false
-	}
-	return true
-}
-
-func isEven32(f float32) bool { return math.Float32bits(f)&1 == 0 }
-func isEven64(f float64) bool { return math.Float64bits(f)&1 == 0 }
-
-func TestIsFinite(t *testing.T) {
-	finites := []float64{
-		1.0 / 3,
-		4891559871276714924261e+222,
-		math.MaxFloat64,
-		math.SmallestNonzeroFloat64,
-		-math.MaxFloat64,
-		-math.SmallestNonzeroFloat64,
-	}
-	for _, f := range finites {
-		if !isFinite(f) {
-			t.Errorf("!IsFinite(%g (%b))", f, f)
-		}
-	}
-	nonfinites := []float64{
-		math.NaN(),
-		math.Inf(-1),
-		math.Inf(+1),
-	}
-	for _, f := range nonfinites {
-		if isFinite(f) {
-			t.Errorf("IsFinite(%g, (%b))", f, f)
-		}
-	}
-}
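
checkIsBestApprox32/64 above encode the contract that the returned float is at least as close to r as either neighboring float, with ties broken toward the even mantissa. A rough sketch of the same check for a single value, using only the exported API and math.Nextafter:

package main

import (
	"fmt"
	"math"
	"math/big"
)

// absDelta returns |r - f| as a Rat (f is assumed finite).
func absDelta(r *big.Rat, f float64) *big.Rat {
	d := new(big.Rat).Sub(r, new(big.Rat).SetFloat64(f))
	return d.Abs(d)
}

func main() {
	r := big.NewRat(1, 3)
	f, _ := r.Float64()
	lo := math.Nextafter(f, math.Inf(-1))
	hi := math.Nextafter(f, math.Inf(+1))
	// f is the best approximation: neither neighbor is closer to r.
	fmt.Println(absDelta(r, f).Cmp(absDelta(r, lo)) <= 0) // true
	fmt.Println(absDelta(r, f).Cmp(absDelta(r, hi)) <= 0) // true
}
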
diff --git a/pkg/bootstrap/src/bootstrap/math/big/ratconv.go b/pkg/bootstrap/src/bootstrap/math/big/ratconv.go
deleted file mode 100644
index 82ca458..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/ratconv.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratconv.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratconv.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements rat-to-string conversion functions.
-
-package big
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-)
-
-func ratTok(ch rune) bool {
-	return strings.ContainsRune("+-/0123456789.eE", ch)
-}
-
-var ratZero Rat
-var _ fmt.Scanner = &ratZero // *Rat must implement fmt.Scanner
-
-// Scan is a support routine for fmt.Scanner. It accepts the formats
-// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
-func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
-	tok, err := s.Token(true, ratTok)
-	if err != nil {
-		return err
-	}
-	if !strings.ContainsRune("efgEFGv", ch) {
-		return errors.New("Rat.Scan: invalid verb")
-	}
-	if _, ok := z.SetString(string(tok)); !ok {
-		return errors.New("Rat.Scan: invalid syntax")
-	}
-	return nil
-}
-
-// SetString sets z to the value of s and returns z and a boolean indicating
-// success. s can be given as a fraction "a/b" or as a floating-point number
-// optionally followed by an exponent. The entire string (not just a prefix)
-// must be valid for success. If the operation failed, the value of z is un-
-// defined but the returned value is nil.
-func (z *Rat) SetString(s string) (*Rat, bool) {
-	if len(s) == 0 {
-		return nil, false
-	}
-	// len(s) > 0
-
-	// parse fraction a/b, if any
-	if sep := strings.Index(s, "/"); sep >= 0 {
-		if _, ok := z.a.SetString(s[:sep], 0); !ok {
-			return nil, false
-		}
-		r := strings.NewReader(s[sep+1:])
-		var err error
-		if z.b.abs, _, _, err = z.b.abs.scan(r, 0, false); err != nil {
-			return nil, false
-		}
-		// entire string must have been consumed
-		if _, err = r.ReadByte(); err != io.EOF {
-			return nil, false
-		}
-		if len(z.b.abs) == 0 {
-			return nil, false
-		}
-		return z.norm(), true
-	}
-
-	// parse floating-point number
-	r := strings.NewReader(s)
-
-	// sign
-	neg, err := scanSign(r)
-	if err != nil {
-		return nil, false
-	}
-
-	// mantissa
-	var ecorr int
-	z.a.abs, _, ecorr, err = z.a.abs.scan(r, 10, true)
-	if err != nil {
-		return nil, false
-	}
-
-	// exponent
-	var exp int64
-	exp, _, err = scanExponent(r, false)
-	if err != nil {
-		return nil, false
-	}
-
-	// there should be no unread characters left
-	if _, err = r.ReadByte(); err != io.EOF {
-		return nil, false
-	}
-
-	// special-case 0 (see also issue #16176)
-	if len(z.a.abs) == 0 {
-		return z, true
-	}
-	// len(z.a.abs) > 0
-
-	// correct exponent
-	if ecorr < 0 {
-		exp += int64(ecorr)
-	}
-
-	// compute exponent power
-	expabs := exp
-	if expabs < 0 {
-		expabs = -expabs
-	}
-	powTen := nat(nil).expNN(natTen, nat(nil).setWord(Word(expabs)), nil)
-
-	// complete fraction
-	if exp < 0 {
-		z.b.abs = powTen
-		z.norm()
-	} else {
-		z.a.abs = z.a.abs.mul(z.a.abs, powTen)
-		z.b.abs = z.b.abs[:0]
-	}
-
-	z.a.neg = neg && len(z.a.abs) > 0 // 0 has no sign
-
-	return z, true
-}
-
-// scanExponent scans the longest possible prefix of r representing a decimal
-// ('e', 'E') or binary ('p') exponent, if any. It returns the exponent, the
-// exponent base (10 or 2), and a read or syntax error, if any.
-//
-//	exponent = ( "E" | "e" | "p" ) [ sign ] digits .
-//	sign     = "+" | "-" .
-//	digits   = digit { digit } .
-//	digit    = "0" ... "9" .
-//
-// A binary exponent is only permitted if binExpOk is set.
-func scanExponent(r io.ByteScanner, binExpOk bool) (exp int64, base int, err error) {
-	base = 10
-
-	var ch byte
-	if ch, err = r.ReadByte(); err != nil {
-		if err == io.EOF {
-			err = nil // no exponent; same as e0
-		}
-		return
-	}
-
-	switch ch {
-	case 'e', 'E':
-		// ok
-	case 'p':
-		if binExpOk {
-			base = 2
-			break // ok
-		}
-		fallthrough // binary exponent not permitted
-	default:
-		r.UnreadByte()
-		return // no exponent; same as e0
-	}
-
-	var neg bool
-	if neg, err = scanSign(r); err != nil {
-		return
-	}
-
-	var digits []byte
-	if neg {
-		digits = append(digits, '-')
-	}
-
-	// no need to use nat.scan for exponent digits
-	// since we only care about int64 values - the
-	// from-scratch scan is easy enough and faster
-	for i := 0; ; i++ {
-		if ch, err = r.ReadByte(); err != nil {
-			if err != io.EOF || i == 0 {
-				return
-			}
-			err = nil
-			break // i > 0
-		}
-		if ch < '0' || '9' < ch {
-			if i == 0 {
-				r.UnreadByte()
-				err = fmt.Errorf("invalid exponent (missing digits)")
-				return
-			}
-			break // i > 0
-		}
-		digits = append(digits, ch)
-	}
-	// i > 0 => we have at least one digit
-
-	exp, err = strconv.ParseInt(string(digits), 10, 64)
-	return
-}
-
-// String returns a string representation of x in the form "a/b" (even if b == 1).
-func (x *Rat) String() string {
-	var buf []byte
-	buf = x.a.Append(buf, 10)
-	buf = append(buf, '/')
-	if len(x.b.abs) != 0 {
-		buf = x.b.Append(buf, 10)
-	} else {
-		buf = append(buf, '1')
-	}
-	return string(buf)
-}
-
-// RatString returns a string representation of x in the form "a/b" if b != 1,
-// and in the form "a" if b == 1.
-func (x *Rat) RatString() string {
-	if x.IsInt() {
-		return x.a.String()
-	}
-	return x.String()
-}
-
-// FloatString returns a string representation of x in decimal form with prec
-// digits of precision after the decimal point. The last digit is rounded to
-// nearest, with halves rounded away from zero.
-func (x *Rat) FloatString(prec int) string {
-	var buf []byte
-
-	if x.IsInt() {
-		buf = x.a.Append(buf, 10)
-		if prec > 0 {
-			buf = append(buf, '.')
-			for i := prec; i > 0; i-- {
-				buf = append(buf, '0')
-			}
-		}
-		return string(buf)
-	}
-	// x.b.abs != 0
-
-	q, r := nat(nil).div(nat(nil), x.a.abs, x.b.abs)
-
-	p := natOne
-	if prec > 0 {
-		p = nat(nil).expNN(natTen, nat(nil).setUint64(uint64(prec)), nil)
-	}
-
-	r = r.mul(r, p)
-	r, r2 := r.div(nat(nil), r, x.b.abs)
-
-	// see if we need to round up
-	r2 = r2.add(r2, r2)
-	if x.b.abs.cmp(r2) <= 0 {
-		r = r.add(r, natOne)
-		if r.cmp(p) >= 0 {
-			q = nat(nil).add(q, natOne)
-			r = nat(nil).sub(r, p)
-		}
-	}
-
-	if x.a.neg {
-		buf = append(buf, '-')
-	}
-	buf = append(buf, q.utoa(10)...) // itoa ignores sign if q == 0
-
-	if prec > 0 {
-		buf = append(buf, '.')
-		rs := r.utoa(10)
-		for i := prec - len(rs); i > 0; i-- {
-			buf = append(buf, '0')
-		}
-		buf = append(buf, rs...)
-	}
-
-	return string(buf)
-}
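
SetString above accepts both "a/b" fractions and floating-point forms with an optional exponent, and FloatString rounds the last digit to nearest with halves away from zero. A short sketch, with expected outputs taken from the test tables that follow:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// SetString accepts floating-point input with an exponent.
	r, ok := new(big.Rat).SetString("8129567.7690E14")
	fmt.Println(r.RatString(), ok) // 812956776900000000000 true

	// FloatString rounds the last digit to nearest, halves away from zero.
	x, _ := new(big.Rat).SetString("-2/3")
	fmt.Println(x.FloatString(4)) // -0.6667
}
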
diff --git a/pkg/bootstrap/src/bootstrap/math/big/ratconv_test.go b/pkg/bootstrap/src/bootstrap/math/big/ratconv_test.go
deleted file mode 100644
index a753517..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/ratconv_test.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratconv_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratconv_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"fmt"
-	"math"
-	"strconv"
-	"strings"
-	"testing"
-)
-
-type StringTest struct {
-	in, out string
-	ok      bool
-}
-
-var setStringTests = []StringTest{
-	{"0", "0", true},
-	{"-0", "0", true},
-	{"1", "1", true},
-	{"-1", "-1", true},
-	{"1.", "1", true},
-	{"1e0", "1", true},
-	{"1.e1", "10", true},
-	{in: "1e"},
-	{in: "1.e"},
-	{in: "1e+14e-5"},
-	{in: "1e4.5"},
-	{in: "r"},
-	{in: "a/b"},
-	{in: "a.b"},
-	{"-0.1", "-1/10", true},
-	{"-.1", "-1/10", true},
-	{"2/4", "1/2", true},
-	{".25", "1/4", true},
-	{"-1/5", "-1/5", true},
-	{"8129567.7690E14", "812956776900000000000", true},
-	{"78189e+4", "781890000", true},
-	{"553019.8935e+8", "55301989350000", true},
-	{"98765432109876543210987654321e-10", "98765432109876543210987654321/10000000000", true},
-	{"9877861857500000E-7", "3951144743/4", true},
-	{"2169378.417e-3", "2169378417/1000000", true},
-	{"884243222337379604041632732738665534", "884243222337379604041632732738665534", true},
-	{"53/70893980658822810696", "53/70893980658822810696", true},
-	{"106/141787961317645621392", "53/70893980658822810696", true},
-	{"204211327800791583.81095", "4084226556015831676219/20000", true},
-	{"0e9999999999", "0", true}, // issue #16176
-	{in: "1/0"},
-	{in: "4/3/2"}, // issue 17001
-	{in: "4/3/"},
-	{in: "4/3."},
-	{in: "4/"},
-}
-
-// These are not supported by fmt.Fscanf.
-var setStringTests2 = []StringTest{
-	{"0x10", "16", true},
-	{"-010/1", "-8", true}, // TODO(gri) should we even permit octal here?
-	{"-010.", "-10", true},
-	{"0x10/0x20", "1/2", true},
-	{"0b1000/3", "8/3", true},
-	{in: "4/3x"},
-	// TODO(gri) add more tests
-}
-
-func TestRatSetString(t *testing.T) {
-	var tests []StringTest
-	tests = append(tests, setStringTests...)
-	tests = append(tests, setStringTests2...)
-
-	for i, test := range tests {
-		x, ok := new(Rat).SetString(test.in)
-
-		if ok {
-			if !test.ok {
-				t.Errorf("#%d SetString(%q) expected failure", i, test.in)
-			} else if x.RatString() != test.out {
-				t.Errorf("#%d SetString(%q) got %s want %s", i, test.in, x.RatString(), test.out)
-			}
-		} else if x != nil {
-			t.Errorf("#%d SetString(%q) got %p want nil", i, test.in, x)
-		}
-	}
-}
-
-func TestRatScan(t *testing.T) {
-	var buf bytes.Buffer
-	for i, test := range setStringTests {
-		x := new(Rat)
-		buf.Reset()
-		buf.WriteString(test.in)
-
-		_, err := fmt.Fscanf(&buf, "%v", x)
-		if err == nil != test.ok {
-			if test.ok {
-				t.Errorf("#%d (%s) error: %s", i, test.in, err)
-			} else {
-				t.Errorf("#%d (%s) expected error", i, test.in)
-			}
-			continue
-		}
-		if err == nil && x.RatString() != test.out {
-			t.Errorf("#%d got %s want %s", i, x.RatString(), test.out)
-		}
-	}
-}
-
-var floatStringTests = []struct {
-	in   string
-	prec int
-	out  string
-}{
-	{"0", 0, "0"},
-	{"0", 4, "0.0000"},
-	{"1", 0, "1"},
-	{"1", 2, "1.00"},
-	{"-1", 0, "-1"},
-	{"0.05", 1, "0.1"},
-	{"-0.05", 1, "-0.1"},
-	{".25", 2, "0.25"},
-	{".25", 1, "0.3"},
-	{".25", 3, "0.250"},
-	{"-1/3", 3, "-0.333"},
-	{"-2/3", 4, "-0.6667"},
-	{"0.96", 1, "1.0"},
-	{"0.999", 2, "1.00"},
-	{"0.9", 0, "1"},
-	{".25", -1, "0"},
-	{".55", -1, "1"},
-}
-
-func TestFloatString(t *testing.T) {
-	for i, test := range floatStringTests {
-		x, _ := new(Rat).SetString(test.in)
-
-		if x.FloatString(test.prec) != test.out {
-			t.Errorf("#%d got %s want %s", i, x.FloatString(test.prec), test.out)
-		}
-	}
-}
-
-// Test inputs to Rat.SetString. The prefix "long:" causes the test
-// to be skipped except in -long mode.  (The threshold is about 500us.)
-var float64inputs = []string{
-	// Constants plundered from strconv/testfp.txt.
-
-	// Table 1: Stress Inputs for Conversion to 53-bit Binary, < 1/2 ULP
-	"5e+125",
-	"69e+267",
-	"999e-026",
-	"7861e-034",
-	"75569e-254",
-	"928609e-261",
-	"9210917e+080",
-	"84863171e+114",
-	"653777767e+273",
-	"5232604057e-298",
-	"27235667517e-109",
-	"653532977297e-123",
-	"3142213164987e-294",
-	"46202199371337e-072",
-	"231010996856685e-073",
-	"9324754620109615e+212",
-	"78459735791271921e+049",
-	"272104041512242479e+200",
-	"6802601037806061975e+198",
-	"20505426358836677347e-221",
-	"836168422905420598437e-234",
-	"4891559871276714924261e+222",
-
-	// Table 2: Stress Inputs for Conversion to 53-bit Binary, > 1/2 ULP
-	"9e-265",
-	"85e-037",
-	"623e+100",
-	"3571e+263",
-	"81661e+153",
-	"920657e-023",
-	"4603285e-024",
-	"87575437e-309",
-	"245540327e+122",
-	"6138508175e+120",
-	"83356057653e+193",
-	"619534293513e+124",
-	"2335141086879e+218",
-	"36167929443327e-159",
-	"609610927149051e-255",
-	"3743626360493413e-165",
-	"94080055902682397e-242",
-	"899810892172646163e+283",
-	"7120190517612959703e+120",
-	"25188282901709339043e-252",
-	"308984926168550152811e-052",
-	"6372891218502368041059e+064",
-
-	// Table 14: Stress Inputs for Conversion to 24-bit Binary, <1/2 ULP
-	"5e-20",
-	"67e+14",
-	"985e+15",
-	"7693e-42",
-	"55895e-16",
-	"996622e-44",
-	"7038531e-32",
-	"60419369e-46",
-	"702990899e-20",
-	"6930161142e-48",
-	"25933168707e+13",
-	"596428896559e+20",
-
-	// Table 15: Stress Inputs for Conversion to 24-bit Binary, >1/2 ULP
-	"3e-23",
-	"57e+18",
-	"789e-35",
-	"2539e-18",
-	"76173e+28",
-	"887745e-11",
-	"5382571e-37",
-	"82381273e-35",
-	"750486563e-38",
-	"3752432815e-39",
-	"75224575729e-45",
-	"459926601011e+15",
-
-	// Constants plundered from strconv/atof_test.go.
-
-	"0",
-	"1",
-	"+1",
-	"1e23",
-	"1E23",
-	"100000000000000000000000",
-	"1e-100",
-	"123456700",
-	"99999999999999974834176",
-	"100000000000000000000001",
-	"100000000000000008388608",
-	"100000000000000016777215",
-	"100000000000000016777216",
-	"-1",
-	"-0.1",
-	"-0", // NB: exception made for this input
-	"1e-20",
-	"625e-3",
-
-	// largest float64
-	"1.7976931348623157e308",
-	"-1.7976931348623157e308",
-	// next float64 - too large
-	"1.7976931348623159e308",
-	"-1.7976931348623159e308",
-	// the border is ...158079
-	// borderline - okay
-	"1.7976931348623158e308",
-	"-1.7976931348623158e308",
-	// borderline - too large
-	"1.797693134862315808e308",
-	"-1.797693134862315808e308",
-
-	// a little too large
-	"1e308",
-	"2e308",
-	"1e309",
-
-	// way too large
-	"1e310",
-	"-1e310",
-	"1e400",
-	"-1e400",
-	"long:1e400000",
-	"long:-1e400000",
-
-	// denormalized
-	"1e-305",
-	"1e-306",
-	"1e-307",
-	"1e-308",
-	"1e-309",
-	"1e-310",
-	"1e-322",
-	// smallest denormal
-	"5e-324",
-	"4e-324",
-	"3e-324",
-	// too small
-	"2e-324",
-	// way too small
-	"1e-350",
-	"long:1e-400000",
-	// way too small, negative
-	"-1e-350",
-	"long:-1e-400000",
-
-	// try to overflow exponent
-	// [Disabled: too slow and memory-hungry with rationals.]
-	// "1e-4294967296",
-	// "1e+4294967296",
-	// "1e-18446744073709551616",
-	// "1e+18446744073709551616",
-
-	// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
-	"2.2250738585072012e-308",
-	// http://www.exploringbinary.com/php-hangs-on-numeric-value-2-2250738585072011e-308/
-	"2.2250738585072011e-308",
-
-	// A very large number (initially wrongly parsed by the fast algorithm).
-	"4.630813248087435e+307",
-
-	// A different kind of very large number.
-	"22.222222222222222",
-	"long:2." + strings.Repeat("2", 4000) + "e+1",
-
-	// Exactly halfway between 1 and math.Nextafter(1, 2).
-	// Round to even (down).
-	"1.00000000000000011102230246251565404236316680908203125",
-	// Slightly lower; still round down.
-	"1.00000000000000011102230246251565404236316680908203124",
-	// Slightly higher; round up.
-	"1.00000000000000011102230246251565404236316680908203126",
-	// Slightly higher, but you have to read all the way to the end.
-	"long:1.00000000000000011102230246251565404236316680908203125" + strings.Repeat("0", 10000) + "1",
-
-	// Smallest denormal, 2^(-1022-52)
-	"4.940656458412465441765687928682213723651e-324",
-	// Half of smallest denormal, 2^(-1022-53)
-	"2.470328229206232720882843964341106861825e-324",
-	// A little more than the exact half of smallest denormal
-	// 2^-1075 + 2^-1100.  (Rounds to 1p-1074.)
-	"2.470328302827751011111470718709768633275e-324",
-	// The exact halfway between smallest normal and largest denormal:
-	// 2^-1022 - 2^-1075.  (Rounds to 2^-1022.)
-	"2.225073858507201136057409796709131975935e-308",
-
-	"1152921504606846975",  //   1<<60 - 1
-	"-1152921504606846975", // -(1<<60 - 1)
-	"1152921504606846977",  //   1<<60 + 1
-	"-1152921504606846977", // -(1<<60 + 1)
-
-	"1/3",
-}
-
-// isFinite reports whether f represents a finite rational value.
-// It is equivalent to !math.IsNaN(f) && !math.IsInf(f, 0).
-func isFinite(f float64) bool {
-	return math.Abs(f) <= math.MaxFloat64
-}
-
-func TestFloat32SpecialCases(t *testing.T) {
-	for _, input := range float64inputs {
-		if strings.HasPrefix(input, "long:") {
-			if !*long {
-				continue
-			}
-			input = input[len("long:"):]
-		}
-
-		r, ok := new(Rat).SetString(input)
-		if !ok {
-			t.Errorf("Rat.SetString(%q) failed", input)
-			continue
-		}
-		f, exact := r.Float32()
-
-		// 1. Check string -> Rat -> float32 conversions are
-		// consistent with strconv.ParseFloat.
-		// Skip this check if the input uses "a/b" rational syntax.
-		if !strings.Contains(input, "/") {
-			e64, _ := strconv.ParseFloat(input, 32)
-			e := float32(e64)
-
-			// Careful: negative Rats too small for
-			// float64 become -0, but Rat obviously cannot
-			// preserve the sign from SetString("-0").
-			switch {
-			case math.Float32bits(e) == math.Float32bits(f):
-				// Ok: bitwise equal.
-			case f == 0 && r.Num().BitLen() == 0:
-				// Ok: Rat(0) is equivalent to both +/- float64(0).
-			default:
-				t.Errorf("strconv.ParseFloat(%q) = %g (%b), want %g (%b); delta = %g", input, e, e, f, f, f-e)
-			}
-		}
-
-		if !isFinite(float64(f)) {
-			continue
-		}
-
-		// 2. Check f is best approximation to r.
-		if !checkIsBestApprox32(t, f, r) {
-			// Append context information.
-			t.Errorf("(input was %q)", input)
-		}
-
-		// 3. Check f->R->f roundtrip is non-lossy.
-		checkNonLossyRoundtrip32(t, f)
-
-		// 4. Check exactness using slow algorithm.
-		if wasExact := new(Rat).SetFloat64(float64(f)).Cmp(r) == 0; wasExact != exact {
-			t.Errorf("Rat.SetString(%q).Float32().exact = %t, want %t", input, exact, wasExact)
-		}
-	}
-}
-
-func TestFloat64SpecialCases(t *testing.T) {
-	for _, input := range float64inputs {
-		if strings.HasPrefix(input, "long:") {
-			if !*long {
-				continue
-			}
-			input = input[len("long:"):]
-		}
-
-		r, ok := new(Rat).SetString(input)
-		if !ok {
-			t.Errorf("Rat.SetString(%q) failed", input)
-			continue
-		}
-		f, exact := r.Float64()
-
-		// 1. Check string -> Rat -> float64 conversions are
-		// consistent with strconv.ParseFloat.
-		// Skip this check if the input uses "a/b" rational syntax.
-		if !strings.Contains(input, "/") {
-			e, _ := strconv.ParseFloat(input, 64)
-
-			// Careful: negative Rats too small for
-			// float64 become -0, but Rat obviously cannot
-			// preserve the sign from SetString("-0").
-			switch {
-			case math.Float64bits(e) == math.Float64bits(f):
-				// Ok: bitwise equal.
-			case f == 0 && r.Num().BitLen() == 0:
-				// Ok: Rat(0) is equivalent to both +/- float64(0).
-			default:
-				t.Errorf("strconv.ParseFloat(%q) = %g (%b), want %g (%b); delta = %g", input, e, e, f, f, f-e)
-			}
-		}
-
-		if !isFinite(f) {
-			continue
-		}
-
-		// 2. Check f is best approximation to r.
-		if !checkIsBestApprox64(t, f, r) {
-			// Append context information.
-			t.Errorf("(input was %q)", input)
-		}
-
-		// 3. Check f->R->f roundtrip is non-lossy.
-		checkNonLossyRoundtrip64(t, f)
-
-		// 4. Check exactness using slow algorithm.
-		if wasExact := new(Rat).SetFloat64(f).Cmp(r) == 0; wasExact != exact {
-			t.Errorf("Rat.SetString(%q).Float64().exact = %t, want %t", input, exact, wasExact)
-		}
-	}
-}
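As an illustrative sketch of the Rat.Float64 contract these checks exercise (nearest value plus an exactness flag; public math/big API only, not part of this prebuilt update):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 1/3 has no finite binary representation: f is the nearest float64, exact is false.
	f, exact := new(big.Rat).SetFrac64(1, 3).Float64()
	fmt.Println(f, exact) // 0.3333333333333333 false

	// 1/4 has a power-of-two denominator: the conversion is exact.
	g, exact2 := new(big.Rat).SetFrac64(1, 4).Float64()
	fmt.Println(g, exact2) // 0.25 true
}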
diff --git a/pkg/bootstrap/src/bootstrap/math/big/ratmarsh.go b/pkg/bootstrap/src/bootstrap/math/big/ratmarsh.go
deleted file mode 100644
index 1355ea4..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/ratmarsh.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratmarsh.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratmarsh.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements encoding/decoding of Rats.
-
-package big
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-)
-
-// Gob codec version. Permits backward-compatible changes to the encoding.
-const ratGobVersion byte = 1
-
-// GobEncode implements the gob.GobEncoder interface.
-func (x *Rat) GobEncode() ([]byte, error) {
-	if x == nil {
-		return nil, nil
-	}
-	buf := make([]byte, 1+4+(len(x.a.abs)+len(x.b.abs))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
-	i := x.b.abs.bytes(buf)
-	j := x.a.abs.bytes(buf[:i])
-	n := i - j
-	if int(uint32(n)) != n {
-		// this should never happen
-		return nil, errors.New("Rat.GobEncode: numerator too large")
-	}
-	binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
-	j -= 1 + 4
-	b := ratGobVersion << 1 // make space for sign bit
-	if x.a.neg {
-		b |= 1
-	}
-	buf[j] = b
-	return buf[j:], nil
-}
-
-// GobDecode implements the gob.GobDecoder interface.
-func (z *Rat) GobDecode(buf []byte) error {
-	if len(buf) == 0 {
-		// Other side sent a nil or default value.
-		*z = Rat{}
-		return nil
-	}
-	b := buf[0]
-	if b>>1 != ratGobVersion {
-		return fmt.Errorf("Rat.GobDecode: encoding version %d not supported", b>>1)
-	}
-	const j = 1 + 4
-	i := j + binary.BigEndian.Uint32(buf[j-4:j])
-	z.a.neg = b&1 != 0
-	z.a.abs = z.a.abs.setBytes(buf[j:i])
-	z.b.abs = z.b.abs.setBytes(buf[i:])
-	return nil
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (x *Rat) MarshalText() (text []byte, err error) {
-	// TODO(gri): get rid of the []byte/string conversion
-	return []byte(x.RatString()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (z *Rat) UnmarshalText(text []byte) error {
-	// TODO(gri): get rid of the []byte/string conversion
-	if _, ok := z.SetString(string(text)); !ok {
-		return fmt.Errorf("math/big: cannot unmarshal %q into a *big.Rat", text)
-	}
-	return nil
-}
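A minimal sketch of the round trip the encoder above supports (one version+sign byte, a 4-byte numerator length, then the two magnitudes), using only the public gob and math/big APIs and not part of this change:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"math/big"
)

func main() {
	var buf bytes.Buffer
	in := big.NewRat(-3, 7)

	// gob uses Rat's GobEncode/GobDecode methods for the wire format
	// described in the deleted source above.
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}

	out := new(big.Rat)
	if err := gob.NewDecoder(&buf).Decode(out); err != nil {
		panic(err)
	}
	fmt.Println(out.Cmp(in) == 0) // true
}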
diff --git a/pkg/bootstrap/src/bootstrap/math/big/ratmarsh_test.go b/pkg/bootstrap/src/bootstrap/math/big/ratmarsh_test.go
deleted file mode 100644
index 5f88a0d..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/ratmarsh_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratmarsh_test.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/ratmarsh_test.go:1
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package big
-
-import (
-	"bytes"
-	"encoding/gob"
-	"encoding/json"
-	"encoding/xml"
-	"testing"
-)
-
-func TestRatGobEncoding(t *testing.T) {
-	var medium bytes.Buffer
-	enc := gob.NewEncoder(&medium)
-	dec := gob.NewDecoder(&medium)
-	for _, test := range encodingTests {
-		medium.Reset() // empty buffer for each test case (in case of failures)
-		var tx Rat
-		tx.SetString(test + ".14159265")
-		if err := enc.Encode(&tx); err != nil {
-			t.Errorf("encoding of %s failed: %s", &tx, err)
-			continue
-		}
-		var rx Rat
-		if err := dec.Decode(&rx); err != nil {
-			t.Errorf("decoding of %s failed: %s", &tx, err)
-			continue
-		}
-		if rx.Cmp(&tx) != 0 {
-			t.Errorf("transmission of %s failed: got %s want %s", &tx, &rx, &tx)
-		}
-	}
-}
-
-// Sending a nil Rat pointer (inside a slice) on a round trip through gob should yield a zero.
-// TODO: top-level nils.
-func TestGobEncodingNilRatInSlice(t *testing.T) {
-	buf := new(bytes.Buffer)
-	enc := gob.NewEncoder(buf)
-	dec := gob.NewDecoder(buf)
-
-	var in = make([]*Rat, 1)
-	err := enc.Encode(&in)
-	if err != nil {
-		t.Errorf("gob encode failed: %q", err)
-	}
-	var out []*Rat
-	err = dec.Decode(&out)
-	if err != nil {
-		t.Fatalf("gob decode failed: %q", err)
-	}
-	if len(out) != 1 {
-		t.Fatalf("wrong len; want 1 got %d", len(out))
-	}
-	var zero Rat
-	if out[0].Cmp(&zero) != 0 {
-		t.Fatalf("transmission of (*Int)(nil) failed: got %s want 0", out)
-	}
-}
-
-var ratNums = []string{
-	"-141592653589793238462643383279502884197169399375105820974944592307816406286",
-	"-1415926535897932384626433832795028841971",
-	"-141592653589793",
-	"-1",
-	"0",
-	"1",
-	"141592653589793",
-	"1415926535897932384626433832795028841971",
-	"141592653589793238462643383279502884197169399375105820974944592307816406286",
-}
-
-var ratDenoms = []string{
-	"1",
-	"718281828459045",
-	"7182818284590452353602874713526624977572",
-	"718281828459045235360287471352662497757247093699959574966967627724076630353",
-}
-
-func TestRatJSONEncoding(t *testing.T) {
-	for _, num := range ratNums {
-		for _, denom := range ratDenoms {
-			var tx Rat
-			tx.SetString(num + "/" + denom)
-			b, err := json.Marshal(&tx)
-			if err != nil {
-				t.Errorf("marshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			var rx Rat
-			if err := json.Unmarshal(b, &rx); err != nil {
-				t.Errorf("unmarshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			if rx.Cmp(&tx) != 0 {
-				t.Errorf("JSON encoding of %s failed: got %s want %s", &tx, &rx, &tx)
-			}
-		}
-	}
-}
-
-func TestRatXMLEncoding(t *testing.T) {
-	for _, num := range ratNums {
-		for _, denom := range ratDenoms {
-			var tx Rat
-			tx.SetString(num + "/" + denom)
-			b, err := xml.Marshal(&tx)
-			if err != nil {
-				t.Errorf("marshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			var rx Rat
-			if err := xml.Unmarshal(b, &rx); err != nil {
-				t.Errorf("unmarshaling of %s failed: %s", &tx, err)
-				continue
-			}
-			if rx.Cmp(&tx) != 0 {
-				t.Errorf("XML encoding of %s failed: got %s want %s", &tx, &rx, &tx)
-			}
-		}
-	}
-}
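Because Rat implements TextMarshaler/TextUnmarshaler, the JSON and XML round trips tested above reduce to the "a/b" text form; a minimal JSON sketch (illustrative only, not part of this change):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

func main() {
	// Marshals via MarshalText, so the JSON value is the quoted rational string.
	b, err := json.Marshal(big.NewRat(-3, 7))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "-3/7"

	r := new(big.Rat)
	if err := json.Unmarshal(b, r); err != nil {
		panic(err)
	}
	fmt.Println(r) // -3/7
}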
diff --git a/pkg/bootstrap/src/bootstrap/math/big/roundingmode_string.go b/pkg/bootstrap/src/bootstrap/math/big/roundingmode_string.go
deleted file mode 100644
index 90bb3e9..0000000
--- a/pkg/bootstrap/src/bootstrap/math/big/roundingmode_string.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Do not edit. Bootstrap copy of /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/roundingmode_string.go
-
-//line /usr/local/google/buildbot/src/android/build-tools/out/obj/go/src/math/big/roundingmode_string.go:1
-// generated by stringer -type=RoundingMode; DO NOT EDIT
-
-package big
-
-import "fmt"
-
-const _RoundingMode_name = "ToNearestEvenToNearestAwayToZeroAwayFromZeroToNegativeInfToPositiveInf"
-
-var _RoundingMode_index = [...]uint8{0, 13, 26, 32, 44, 57, 70}
-
-func (i RoundingMode) String() string {
-	if i+1 >= RoundingMode(len(_RoundingMode_index)) {
-		return fmt.Sprintf("RoundingMode(%d)", i)
-	}
-	return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]]
-}
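The generated method above slices one packed name string by the index table, so each RoundingMode value prints its identifier; a small sketch of it in use (not part of this change):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Each RoundingMode satisfies fmt.Stringer via the generated String method.
	modes := []big.RoundingMode{big.ToNearestEven, big.ToZero, big.AwayFromZero}
	for _, m := range modes {
		fmt.Println(m) // ToNearestEven, ToZero, AwayFromZero
	}
}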
diff --git a/pkg/linux_amd64/archive/tar.a b/pkg/linux_amd64/archive/tar.a
index eba24fc..727033a 100644
--- a/pkg/linux_amd64/archive/tar.a
+++ b/pkg/linux_amd64/archive/tar.a
Binary files differ
diff --git a/pkg/linux_amd64/archive/zip.a b/pkg/linux_amd64/archive/zip.a
index 5e71810..d35da79 100644
--- a/pkg/linux_amd64/archive/zip.a
+++ b/pkg/linux_amd64/archive/zip.a
Binary files differ
diff --git a/pkg/linux_amd64/bufio.a b/pkg/linux_amd64/bufio.a
index ded8e7c..ac0cf3b 100644
--- a/pkg/linux_amd64/bufio.a
+++ b/pkg/linux_amd64/bufio.a
Binary files differ
diff --git a/pkg/linux_amd64/bytes.a b/pkg/linux_amd64/bytes.a
index 8ddaff8..09da595 100644
--- a/pkg/linux_amd64/bytes.a
+++ b/pkg/linux_amd64/bytes.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/asm/internal/arch.a b/pkg/linux_amd64/cmd/asm/internal/arch.a
index 779af49..901e56b 100644
--- a/pkg/linux_amd64/cmd/asm/internal/arch.a
+++ b/pkg/linux_amd64/cmd/asm/internal/arch.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/asm/internal/asm.a b/pkg/linux_amd64/cmd/asm/internal/asm.a
index 4082164..14dfeca 100644
--- a/pkg/linux_amd64/cmd/asm/internal/asm.a
+++ b/pkg/linux_amd64/cmd/asm/internal/asm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/asm/internal/flags.a b/pkg/linux_amd64/cmd/asm/internal/flags.a
index b16dca7..65874d2 100644
--- a/pkg/linux_amd64/cmd/asm/internal/flags.a
+++ b/pkg/linux_amd64/cmd/asm/internal/flags.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/asm/internal/lex.a b/pkg/linux_amd64/cmd/asm/internal/lex.a
index 63cdb47..f940f7b 100644
--- a/pkg/linux_amd64/cmd/asm/internal/lex.a
+++ b/pkg/linux_amd64/cmd/asm/internal/lex.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/amd64.a b/pkg/linux_amd64/cmd/compile/internal/amd64.a
index eb521da..2dc7fb3 100644
--- a/pkg/linux_amd64/cmd/compile/internal/amd64.a
+++ b/pkg/linux_amd64/cmd/compile/internal/amd64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/arm.a b/pkg/linux_amd64/cmd/compile/internal/arm.a
index 798c4d7..36514ca 100644
--- a/pkg/linux_amd64/cmd/compile/internal/arm.a
+++ b/pkg/linux_amd64/cmd/compile/internal/arm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/arm64.a b/pkg/linux_amd64/cmd/compile/internal/arm64.a
index efd6503..50399b1 100644
--- a/pkg/linux_amd64/cmd/compile/internal/arm64.a
+++ b/pkg/linux_amd64/cmd/compile/internal/arm64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/gc.a b/pkg/linux_amd64/cmd/compile/internal/gc.a
index d824232..8ab1f5c 100644
--- a/pkg/linux_amd64/cmd/compile/internal/gc.a
+++ b/pkg/linux_amd64/cmd/compile/internal/gc.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/mips.a b/pkg/linux_amd64/cmd/compile/internal/mips.a
index 46b7a33..526fc13 100644
--- a/pkg/linux_amd64/cmd/compile/internal/mips.a
+++ b/pkg/linux_amd64/cmd/compile/internal/mips.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/mips64.a b/pkg/linux_amd64/cmd/compile/internal/mips64.a
index c39daa7..925fad5 100644
--- a/pkg/linux_amd64/cmd/compile/internal/mips64.a
+++ b/pkg/linux_amd64/cmd/compile/internal/mips64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/ppc64.a b/pkg/linux_amd64/cmd/compile/internal/ppc64.a
index ac73b0d..32a2b38 100644
--- a/pkg/linux_amd64/cmd/compile/internal/ppc64.a
+++ b/pkg/linux_amd64/cmd/compile/internal/ppc64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/s390x.a b/pkg/linux_amd64/cmd/compile/internal/s390x.a
index 00f4748..fc3beb2 100644
--- a/pkg/linux_amd64/cmd/compile/internal/s390x.a
+++ b/pkg/linux_amd64/cmd/compile/internal/s390x.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/ssa.a b/pkg/linux_amd64/cmd/compile/internal/ssa.a
index 02b49fd..61a6fe6 100644
--- a/pkg/linux_amd64/cmd/compile/internal/ssa.a
+++ b/pkg/linux_amd64/cmd/compile/internal/ssa.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/syntax.a b/pkg/linux_amd64/cmd/compile/internal/syntax.a
index 4477582..81701b0 100644
--- a/pkg/linux_amd64/cmd/compile/internal/syntax.a
+++ b/pkg/linux_amd64/cmd/compile/internal/syntax.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/test.a b/pkg/linux_amd64/cmd/compile/internal/test.a
index 00ae811..770240d 100644
--- a/pkg/linux_amd64/cmd/compile/internal/test.a
+++ b/pkg/linux_amd64/cmd/compile/internal/test.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/compile/internal/x86.a b/pkg/linux_amd64/cmd/compile/internal/x86.a
index 2cbf460..f13c6ef 100644
--- a/pkg/linux_amd64/cmd/compile/internal/x86.a
+++ b/pkg/linux_amd64/cmd/compile/internal/x86.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/bio.a b/pkg/linux_amd64/cmd/internal/bio.a
index ccf215b..198b644 100644
--- a/pkg/linux_amd64/cmd/internal/bio.a
+++ b/pkg/linux_amd64/cmd/internal/bio.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/browser.a b/pkg/linux_amd64/cmd/internal/browser.a
index b179463..9a79097 100644
--- a/pkg/linux_amd64/cmd/internal/browser.a
+++ b/pkg/linux_amd64/cmd/internal/browser.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/dwarf.a b/pkg/linux_amd64/cmd/internal/dwarf.a
index 6f562c3..f538f01 100644
--- a/pkg/linux_amd64/cmd/internal/dwarf.a
+++ b/pkg/linux_amd64/cmd/internal/dwarf.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/gcprog.a b/pkg/linux_amd64/cmd/internal/gcprog.a
index be6dbab..ddf11e9 100644
--- a/pkg/linux_amd64/cmd/internal/gcprog.a
+++ b/pkg/linux_amd64/cmd/internal/gcprog.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/goobj.a b/pkg/linux_amd64/cmd/internal/goobj.a
index b4e7393..e20c881 100644
--- a/pkg/linux_amd64/cmd/internal/goobj.a
+++ b/pkg/linux_amd64/cmd/internal/goobj.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj.a b/pkg/linux_amd64/cmd/internal/obj.a
index f9baf86..46832be 100644
--- a/pkg/linux_amd64/cmd/internal/obj.a
+++ b/pkg/linux_amd64/cmd/internal/obj.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj/arm.a b/pkg/linux_amd64/cmd/internal/obj/arm.a
index 48105ba..56a827f 100644
--- a/pkg/linux_amd64/cmd/internal/obj/arm.a
+++ b/pkg/linux_amd64/cmd/internal/obj/arm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj/arm64.a b/pkg/linux_amd64/cmd/internal/obj/arm64.a
index 8596ccc..987ed74 100644
--- a/pkg/linux_amd64/cmd/internal/obj/arm64.a
+++ b/pkg/linux_amd64/cmd/internal/obj/arm64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj/mips.a b/pkg/linux_amd64/cmd/internal/obj/mips.a
index 485d72f..ea31e6d 100644
--- a/pkg/linux_amd64/cmd/internal/obj/mips.a
+++ b/pkg/linux_amd64/cmd/internal/obj/mips.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj/ppc64.a b/pkg/linux_amd64/cmd/internal/obj/ppc64.a
index c97c46c..b9b0f6a 100644
--- a/pkg/linux_amd64/cmd/internal/obj/ppc64.a
+++ b/pkg/linux_amd64/cmd/internal/obj/ppc64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj/s390x.a b/pkg/linux_amd64/cmd/internal/obj/s390x.a
index 15c6dbc..0fda8d0 100644
--- a/pkg/linux_amd64/cmd/internal/obj/s390x.a
+++ b/pkg/linux_amd64/cmd/internal/obj/s390x.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/obj/x86.a b/pkg/linux_amd64/cmd/internal/obj/x86.a
index e28fc2f..a031ba3 100644
--- a/pkg/linux_amd64/cmd/internal/obj/x86.a
+++ b/pkg/linux_amd64/cmd/internal/obj/x86.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/objfile.a b/pkg/linux_amd64/cmd/internal/objfile.a
index 0e79ee3..2ceb030 100644
--- a/pkg/linux_amd64/cmd/internal/objfile.a
+++ b/pkg/linux_amd64/cmd/internal/objfile.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/internal/sys.a b/pkg/linux_amd64/cmd/internal/sys.a
index bdcfc57..9472fff 100644
--- a/pkg/linux_amd64/cmd/internal/sys.a
+++ b/pkg/linux_amd64/cmd/internal/sys.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/amd64.a b/pkg/linux_amd64/cmd/link/internal/amd64.a
index ab58fe8..ade5b96 100644
--- a/pkg/linux_amd64/cmd/link/internal/amd64.a
+++ b/pkg/linux_amd64/cmd/link/internal/amd64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/arm.a b/pkg/linux_amd64/cmd/link/internal/arm.a
index 112d4d9..6d8f3c5 100644
--- a/pkg/linux_amd64/cmd/link/internal/arm.a
+++ b/pkg/linux_amd64/cmd/link/internal/arm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/arm64.a b/pkg/linux_amd64/cmd/link/internal/arm64.a
index 181ed4a..f435009 100644
--- a/pkg/linux_amd64/cmd/link/internal/arm64.a
+++ b/pkg/linux_amd64/cmd/link/internal/arm64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/ld.a b/pkg/linux_amd64/cmd/link/internal/ld.a
index 34c7245..3df1320 100644
--- a/pkg/linux_amd64/cmd/link/internal/ld.a
+++ b/pkg/linux_amd64/cmd/link/internal/ld.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/mips.a b/pkg/linux_amd64/cmd/link/internal/mips.a
index bd25bd9..7336b0d 100644
--- a/pkg/linux_amd64/cmd/link/internal/mips.a
+++ b/pkg/linux_amd64/cmd/link/internal/mips.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/mips64.a b/pkg/linux_amd64/cmd/link/internal/mips64.a
index bffe6a1..90f49aa 100644
--- a/pkg/linux_amd64/cmd/link/internal/mips64.a
+++ b/pkg/linux_amd64/cmd/link/internal/mips64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/ppc64.a b/pkg/linux_amd64/cmd/link/internal/ppc64.a
index 8f72d1b..6969f28 100644
--- a/pkg/linux_amd64/cmd/link/internal/ppc64.a
+++ b/pkg/linux_amd64/cmd/link/internal/ppc64.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/s390x.a b/pkg/linux_amd64/cmd/link/internal/s390x.a
index 80571d1..0b913fb 100644
--- a/pkg/linux_amd64/cmd/link/internal/s390x.a
+++ b/pkg/linux_amd64/cmd/link/internal/s390x.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/link/internal/x86.a b/pkg/linux_amd64/cmd/link/internal/x86.a
index 0860da8..05609c8 100644
--- a/pkg/linux_amd64/cmd/link/internal/x86.a
+++ b/pkg/linux_amd64/cmd/link/internal/x86.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/commands.a b/pkg/linux_amd64/cmd/pprof/internal/commands.a
index 45d3dc3..eda68a2 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/commands.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/commands.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/driver.a b/pkg/linux_amd64/cmd/pprof/internal/driver.a
index bff639d..5f832b9 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/driver.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/driver.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/fetch.a b/pkg/linux_amd64/cmd/pprof/internal/fetch.a
index 3996a74..3c99deb 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/fetch.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/fetch.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/plugin.a b/pkg/linux_amd64/cmd/pprof/internal/plugin.a
index ecf586f..035a05a 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/plugin.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/plugin.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/report.a b/pkg/linux_amd64/cmd/pprof/internal/report.a
index f788cf2..d807e60 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/report.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/report.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/svg.a b/pkg/linux_amd64/cmd/pprof/internal/svg.a
index d753f1c..76bed89 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/svg.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/svg.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/symbolizer.a b/pkg/linux_amd64/cmd/pprof/internal/symbolizer.a
index 0dfccd5..58d8248 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/symbolizer.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/symbolizer.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/symbolz.a b/pkg/linux_amd64/cmd/pprof/internal/symbolz.a
index 7b088f0..225ab62 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/symbolz.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/symbolz.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/pprof/internal/tempfile.a b/pkg/linux_amd64/cmd/pprof/internal/tempfile.a
index de30c5a..cf1ca1a 100644
--- a/pkg/linux_amd64/cmd/pprof/internal/tempfile.a
+++ b/pkg/linux_amd64/cmd/pprof/internal/tempfile.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/arm/armasm.a b/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/arm/armasm.a
index fd10dc4..438d8be 100644
--- a/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/arm/armasm.a
+++ b/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/arm/armasm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm.a b/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm.a
index 9951164..dbab11b 100644
--- a/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm.a
+++ b/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/x86/x86asm.a b/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/x86/x86asm.a
index 0b1b5cb..1c4e55d 100644
--- a/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/x86/x86asm.a
+++ b/pkg/linux_amd64/cmd/vendor/golang.org/x/arch/x86/x86asm.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/vet/internal/cfg.a b/pkg/linux_amd64/cmd/vet/internal/cfg.a
index d0433d3..279acd6 100644
--- a/pkg/linux_amd64/cmd/vet/internal/cfg.a
+++ b/pkg/linux_amd64/cmd/vet/internal/cfg.a
Binary files differ
diff --git a/pkg/linux_amd64/cmd/vet/internal/whitelist.a b/pkg/linux_amd64/cmd/vet/internal/whitelist.a
index 4179075..17286f6 100644
--- a/pkg/linux_amd64/cmd/vet/internal/whitelist.a
+++ b/pkg/linux_amd64/cmd/vet/internal/whitelist.a
Binary files differ
diff --git a/pkg/linux_amd64/compress/bzip2.a b/pkg/linux_amd64/compress/bzip2.a
index 6e273fd..82da97d 100644
--- a/pkg/linux_amd64/compress/bzip2.a
+++ b/pkg/linux_amd64/compress/bzip2.a
Binary files differ
diff --git a/pkg/linux_amd64/compress/flate.a b/pkg/linux_amd64/compress/flate.a
index 09aa2de..853879c 100644
--- a/pkg/linux_amd64/compress/flate.a
+++ b/pkg/linux_amd64/compress/flate.a
Binary files differ
diff --git a/pkg/linux_amd64/compress/gzip.a b/pkg/linux_amd64/compress/gzip.a
index a507c5a..237aac8 100644
--- a/pkg/linux_amd64/compress/gzip.a
+++ b/pkg/linux_amd64/compress/gzip.a
Binary files differ
diff --git a/pkg/linux_amd64/compress/lzw.a b/pkg/linux_amd64/compress/lzw.a
index e1a2db7..3095647 100644
--- a/pkg/linux_amd64/compress/lzw.a
+++ b/pkg/linux_amd64/compress/lzw.a
Binary files differ
diff --git a/pkg/linux_amd64/compress/zlib.a b/pkg/linux_amd64/compress/zlib.a
index e2a19e5..50eaf18 100644
--- a/pkg/linux_amd64/compress/zlib.a
+++ b/pkg/linux_amd64/compress/zlib.a
Binary files differ
diff --git a/pkg/linux_amd64/container/heap.a b/pkg/linux_amd64/container/heap.a
index 83409a1..8f66743 100644
--- a/pkg/linux_amd64/container/heap.a
+++ b/pkg/linux_amd64/container/heap.a
Binary files differ
diff --git a/pkg/linux_amd64/container/list.a b/pkg/linux_amd64/container/list.a
index 6f0ef9d..6729db0 100644
--- a/pkg/linux_amd64/container/list.a
+++ b/pkg/linux_amd64/container/list.a
Binary files differ
diff --git a/pkg/linux_amd64/container/ring.a b/pkg/linux_amd64/container/ring.a
index c045d7b..75199ae 100644
--- a/pkg/linux_amd64/container/ring.a
+++ b/pkg/linux_amd64/container/ring.a
Binary files differ
diff --git a/pkg/linux_amd64/context.a b/pkg/linux_amd64/context.a
index 83184f8..d369ca4 100644
--- a/pkg/linux_amd64/context.a
+++ b/pkg/linux_amd64/context.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto.a b/pkg/linux_amd64/crypto.a
index da5ed3e..3eab237 100644
--- a/pkg/linux_amd64/crypto.a
+++ b/pkg/linux_amd64/crypto.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/aes.a b/pkg/linux_amd64/crypto/aes.a
index dec9809..7a193ae 100644
--- a/pkg/linux_amd64/crypto/aes.a
+++ b/pkg/linux_amd64/crypto/aes.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/cipher.a b/pkg/linux_amd64/crypto/cipher.a
index 5d532aa..db16ad0 100644
--- a/pkg/linux_amd64/crypto/cipher.a
+++ b/pkg/linux_amd64/crypto/cipher.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/des.a b/pkg/linux_amd64/crypto/des.a
index a4f7dab..be5b398 100644
--- a/pkg/linux_amd64/crypto/des.a
+++ b/pkg/linux_amd64/crypto/des.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/dsa.a b/pkg/linux_amd64/crypto/dsa.a
index 22ec386..10d17c9 100644
--- a/pkg/linux_amd64/crypto/dsa.a
+++ b/pkg/linux_amd64/crypto/dsa.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/ecdsa.a b/pkg/linux_amd64/crypto/ecdsa.a
index 0bd7a64..2a54f0b 100644
--- a/pkg/linux_amd64/crypto/ecdsa.a
+++ b/pkg/linux_amd64/crypto/ecdsa.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/elliptic.a b/pkg/linux_amd64/crypto/elliptic.a
index aa8ac48..086a0c9 100644
--- a/pkg/linux_amd64/crypto/elliptic.a
+++ b/pkg/linux_amd64/crypto/elliptic.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/hmac.a b/pkg/linux_amd64/crypto/hmac.a
index b8dcca9..0f7ffd5 100644
--- a/pkg/linux_amd64/crypto/hmac.a
+++ b/pkg/linux_amd64/crypto/hmac.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/internal/cipherhw.a b/pkg/linux_amd64/crypto/internal/cipherhw.a
index 4e28d49..e43dcdd 100644
--- a/pkg/linux_amd64/crypto/internal/cipherhw.a
+++ b/pkg/linux_amd64/crypto/internal/cipherhw.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/md5.a b/pkg/linux_amd64/crypto/md5.a
index 526752d..02dd8aa 100644
--- a/pkg/linux_amd64/crypto/md5.a
+++ b/pkg/linux_amd64/crypto/md5.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/rand.a b/pkg/linux_amd64/crypto/rand.a
index 6079841..89be589 100644
--- a/pkg/linux_amd64/crypto/rand.a
+++ b/pkg/linux_amd64/crypto/rand.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/rc4.a b/pkg/linux_amd64/crypto/rc4.a
index 228c607..699c677 100644
--- a/pkg/linux_amd64/crypto/rc4.a
+++ b/pkg/linux_amd64/crypto/rc4.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/rsa.a b/pkg/linux_amd64/crypto/rsa.a
index 5292eed..aaff161 100644
--- a/pkg/linux_amd64/crypto/rsa.a
+++ b/pkg/linux_amd64/crypto/rsa.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/sha1.a b/pkg/linux_amd64/crypto/sha1.a
index c6ee1cf..5ea0c74 100644
--- a/pkg/linux_amd64/crypto/sha1.a
+++ b/pkg/linux_amd64/crypto/sha1.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/sha256.a b/pkg/linux_amd64/crypto/sha256.a
index c8345f6..3907754 100644
--- a/pkg/linux_amd64/crypto/sha256.a
+++ b/pkg/linux_amd64/crypto/sha256.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/sha512.a b/pkg/linux_amd64/crypto/sha512.a
index 97ce0c3..a0e09c0 100644
--- a/pkg/linux_amd64/crypto/sha512.a
+++ b/pkg/linux_amd64/crypto/sha512.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/subtle.a b/pkg/linux_amd64/crypto/subtle.a
index 1df7f4e..59f1006 100644
--- a/pkg/linux_amd64/crypto/subtle.a
+++ b/pkg/linux_amd64/crypto/subtle.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/tls.a b/pkg/linux_amd64/crypto/tls.a
index 27cc016..dab4b5d 100644
--- a/pkg/linux_amd64/crypto/tls.a
+++ b/pkg/linux_amd64/crypto/tls.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/x509.a b/pkg/linux_amd64/crypto/x509.a
index fc4a711..52e4304 100644
--- a/pkg/linux_amd64/crypto/x509.a
+++ b/pkg/linux_amd64/crypto/x509.a
Binary files differ
diff --git a/pkg/linux_amd64/crypto/x509/pkix.a b/pkg/linux_amd64/crypto/x509/pkix.a
index 7a34b4d..9836428 100644
--- a/pkg/linux_amd64/crypto/x509/pkix.a
+++ b/pkg/linux_amd64/crypto/x509/pkix.a
Binary files differ
diff --git a/pkg/linux_amd64/database/sql.a b/pkg/linux_amd64/database/sql.a
index 2d19c8b..63afa06 100644
--- a/pkg/linux_amd64/database/sql.a
+++ b/pkg/linux_amd64/database/sql.a
Binary files differ
diff --git a/pkg/linux_amd64/database/sql/driver.a b/pkg/linux_amd64/database/sql/driver.a
index 85814a6..6d715e9 100644
--- a/pkg/linux_amd64/database/sql/driver.a
+++ b/pkg/linux_amd64/database/sql/driver.a
Binary files differ
diff --git a/pkg/linux_amd64/debug/dwarf.a b/pkg/linux_amd64/debug/dwarf.a
index c9aae31..4eaeeab 100644
--- a/pkg/linux_amd64/debug/dwarf.a
+++ b/pkg/linux_amd64/debug/dwarf.a
Binary files differ
diff --git a/pkg/linux_amd64/debug/elf.a b/pkg/linux_amd64/debug/elf.a
index c0e563a..50a4361 100644
--- a/pkg/linux_amd64/debug/elf.a
+++ b/pkg/linux_amd64/debug/elf.a
Binary files differ
diff --git a/pkg/linux_amd64/debug/gosym.a b/pkg/linux_amd64/debug/gosym.a
index af0999d..40810f7 100644
--- a/pkg/linux_amd64/debug/gosym.a
+++ b/pkg/linux_amd64/debug/gosym.a
Binary files differ
diff --git a/pkg/linux_amd64/debug/macho.a b/pkg/linux_amd64/debug/macho.a
index ad83ff1..fda4fb5 100644
--- a/pkg/linux_amd64/debug/macho.a
+++ b/pkg/linux_amd64/debug/macho.a
Binary files differ
diff --git a/pkg/linux_amd64/debug/pe.a b/pkg/linux_amd64/debug/pe.a
index 7fe9a43..67b2265 100644
--- a/pkg/linux_amd64/debug/pe.a
+++ b/pkg/linux_amd64/debug/pe.a
Binary files differ
diff --git a/pkg/linux_amd64/debug/plan9obj.a b/pkg/linux_amd64/debug/plan9obj.a
index b45c432..30bf5dd 100644
--- a/pkg/linux_amd64/debug/plan9obj.a
+++ b/pkg/linux_amd64/debug/plan9obj.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding.a b/pkg/linux_amd64/encoding.a
index 30efc42..0cd8df2 100644
--- a/pkg/linux_amd64/encoding.a
+++ b/pkg/linux_amd64/encoding.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/ascii85.a b/pkg/linux_amd64/encoding/ascii85.a
index 84bddbb..fb57155 100644
--- a/pkg/linux_amd64/encoding/ascii85.a
+++ b/pkg/linux_amd64/encoding/ascii85.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/asn1.a b/pkg/linux_amd64/encoding/asn1.a
index a4ed025..d137a5c 100644
--- a/pkg/linux_amd64/encoding/asn1.a
+++ b/pkg/linux_amd64/encoding/asn1.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/base32.a b/pkg/linux_amd64/encoding/base32.a
index cb5d1d8..8a63858 100644
--- a/pkg/linux_amd64/encoding/base32.a
+++ b/pkg/linux_amd64/encoding/base32.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/base64.a b/pkg/linux_amd64/encoding/base64.a
index d96183c..286c7c6 100644
--- a/pkg/linux_amd64/encoding/base64.a
+++ b/pkg/linux_amd64/encoding/base64.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/binary.a b/pkg/linux_amd64/encoding/binary.a
index b02c5f5..49f6caf 100644
--- a/pkg/linux_amd64/encoding/binary.a
+++ b/pkg/linux_amd64/encoding/binary.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/csv.a b/pkg/linux_amd64/encoding/csv.a
index 22ca5f3..5ba2dfb 100644
--- a/pkg/linux_amd64/encoding/csv.a
+++ b/pkg/linux_amd64/encoding/csv.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/gob.a b/pkg/linux_amd64/encoding/gob.a
index 4cceaa4..ac938c0 100644
--- a/pkg/linux_amd64/encoding/gob.a
+++ b/pkg/linux_amd64/encoding/gob.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/hex.a b/pkg/linux_amd64/encoding/hex.a
index f312681..eab4f57 100644
--- a/pkg/linux_amd64/encoding/hex.a
+++ b/pkg/linux_amd64/encoding/hex.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/json.a b/pkg/linux_amd64/encoding/json.a
index a1d5133..e1f9f37 100644
--- a/pkg/linux_amd64/encoding/json.a
+++ b/pkg/linux_amd64/encoding/json.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/pem.a b/pkg/linux_amd64/encoding/pem.a
index adea958..c764fe9 100644
--- a/pkg/linux_amd64/encoding/pem.a
+++ b/pkg/linux_amd64/encoding/pem.a
Binary files differ
diff --git a/pkg/linux_amd64/encoding/xml.a b/pkg/linux_amd64/encoding/xml.a
index f49b46b..b9575f0 100644
--- a/pkg/linux_amd64/encoding/xml.a
+++ b/pkg/linux_amd64/encoding/xml.a
Binary files differ
diff --git a/pkg/linux_amd64/errors.a b/pkg/linux_amd64/errors.a
index 4e211b9..9a37d4e 100644
--- a/pkg/linux_amd64/errors.a
+++ b/pkg/linux_amd64/errors.a
Binary files differ
diff --git a/pkg/linux_amd64/expvar.a b/pkg/linux_amd64/expvar.a
index c2053f0..485714f 100644
--- a/pkg/linux_amd64/expvar.a
+++ b/pkg/linux_amd64/expvar.a
Binary files differ
diff --git a/pkg/linux_amd64/flag.a b/pkg/linux_amd64/flag.a
index 6d7b6ac..1faf549 100644
--- a/pkg/linux_amd64/flag.a
+++ b/pkg/linux_amd64/flag.a
Binary files differ
diff --git a/pkg/linux_amd64/fmt.a b/pkg/linux_amd64/fmt.a
index 78a1289..df60e2a 100644
--- a/pkg/linux_amd64/fmt.a
+++ b/pkg/linux_amd64/fmt.a
Binary files differ
diff --git a/pkg/linux_amd64/go/ast.a b/pkg/linux_amd64/go/ast.a
index 2925965..2b2aa9f 100644
--- a/pkg/linux_amd64/go/ast.a
+++ b/pkg/linux_amd64/go/ast.a
Binary files differ
diff --git a/pkg/linux_amd64/go/build.a b/pkg/linux_amd64/go/build.a
index a8b6729..6fe747e 100644
--- a/pkg/linux_amd64/go/build.a
+++ b/pkg/linux_amd64/go/build.a
Binary files differ
diff --git a/pkg/linux_amd64/go/constant.a b/pkg/linux_amd64/go/constant.a
index a9944e3..3929daa 100644
--- a/pkg/linux_amd64/go/constant.a
+++ b/pkg/linux_amd64/go/constant.a
Binary files differ
diff --git a/pkg/linux_amd64/go/doc.a b/pkg/linux_amd64/go/doc.a
index c56f22c..a25c061 100644
--- a/pkg/linux_amd64/go/doc.a
+++ b/pkg/linux_amd64/go/doc.a
Binary files differ
diff --git a/pkg/linux_amd64/go/format.a b/pkg/linux_amd64/go/format.a
index 71b271f..c3e0963 100644
--- a/pkg/linux_amd64/go/format.a
+++ b/pkg/linux_amd64/go/format.a
Binary files differ
diff --git a/pkg/linux_amd64/go/importer.a b/pkg/linux_amd64/go/importer.a
index 59fba67..447b216 100644
--- a/pkg/linux_amd64/go/importer.a
+++ b/pkg/linux_amd64/go/importer.a
Binary files differ
diff --git a/pkg/linux_amd64/go/internal/gccgoimporter.a b/pkg/linux_amd64/go/internal/gccgoimporter.a
index 698859d..2a509cc 100644
--- a/pkg/linux_amd64/go/internal/gccgoimporter.a
+++ b/pkg/linux_amd64/go/internal/gccgoimporter.a
Binary files differ
diff --git a/pkg/linux_amd64/go/internal/gcimporter.a b/pkg/linux_amd64/go/internal/gcimporter.a
index eca1d21..05201c5 100644
--- a/pkg/linux_amd64/go/internal/gcimporter.a
+++ b/pkg/linux_amd64/go/internal/gcimporter.a
Binary files differ
diff --git a/pkg/linux_amd64/go/parser.a b/pkg/linux_amd64/go/parser.a
index 8e5a263..2958d3a 100644
--- a/pkg/linux_amd64/go/parser.a
+++ b/pkg/linux_amd64/go/parser.a
Binary files differ
diff --git a/pkg/linux_amd64/go/printer.a b/pkg/linux_amd64/go/printer.a
index de76605..dd6df35 100644
--- a/pkg/linux_amd64/go/printer.a
+++ b/pkg/linux_amd64/go/printer.a
Binary files differ
diff --git a/pkg/linux_amd64/go/scanner.a b/pkg/linux_amd64/go/scanner.a
index b5f73c0..d6ca8fe 100644
--- a/pkg/linux_amd64/go/scanner.a
+++ b/pkg/linux_amd64/go/scanner.a
Binary files differ
diff --git a/pkg/linux_amd64/go/token.a b/pkg/linux_amd64/go/token.a
index 074e74c..7a38936 100644
--- a/pkg/linux_amd64/go/token.a
+++ b/pkg/linux_amd64/go/token.a
Binary files differ
diff --git a/pkg/linux_amd64/go/types.a b/pkg/linux_amd64/go/types.a
index 0ffb440..f27e11c 100644
--- a/pkg/linux_amd64/go/types.a
+++ b/pkg/linux_amd64/go/types.a
Binary files differ
diff --git a/pkg/linux_amd64/hash.a b/pkg/linux_amd64/hash.a
index c58de73..2fea342 100644
--- a/pkg/linux_amd64/hash.a
+++ b/pkg/linux_amd64/hash.a
Binary files differ
diff --git a/pkg/linux_amd64/hash/adler32.a b/pkg/linux_amd64/hash/adler32.a
index 15c68bb..bd74b44 100644
--- a/pkg/linux_amd64/hash/adler32.a
+++ b/pkg/linux_amd64/hash/adler32.a
Binary files differ
diff --git a/pkg/linux_amd64/hash/crc32.a b/pkg/linux_amd64/hash/crc32.a
index 9bf1ff1..a2be6de 100644
--- a/pkg/linux_amd64/hash/crc32.a
+++ b/pkg/linux_amd64/hash/crc32.a
Binary files differ
diff --git a/pkg/linux_amd64/hash/crc64.a b/pkg/linux_amd64/hash/crc64.a
index 41b9ddd..89fd653 100644
--- a/pkg/linux_amd64/hash/crc64.a
+++ b/pkg/linux_amd64/hash/crc64.a
Binary files differ
diff --git a/pkg/linux_amd64/hash/fnv.a b/pkg/linux_amd64/hash/fnv.a
index 46f99f1..d3defb5 100644
--- a/pkg/linux_amd64/hash/fnv.a
+++ b/pkg/linux_amd64/hash/fnv.a
Binary files differ
diff --git a/pkg/linux_amd64/html.a b/pkg/linux_amd64/html.a
index c07341b..977bcc0 100644
--- a/pkg/linux_amd64/html.a
+++ b/pkg/linux_amd64/html.a
Binary files differ
diff --git a/pkg/linux_amd64/html/template.a b/pkg/linux_amd64/html/template.a
index 7018403..85f0ffb 100644
--- a/pkg/linux_amd64/html/template.a
+++ b/pkg/linux_amd64/html/template.a
Binary files differ
diff --git a/pkg/linux_amd64/image.a b/pkg/linux_amd64/image.a
index 90f0038..eb7d5df 100644
--- a/pkg/linux_amd64/image.a
+++ b/pkg/linux_amd64/image.a
Binary files differ
diff --git a/pkg/linux_amd64/image/color.a b/pkg/linux_amd64/image/color.a
index 2e7ff23..7376c1c 100644
--- a/pkg/linux_amd64/image/color.a
+++ b/pkg/linux_amd64/image/color.a
Binary files differ
diff --git a/pkg/linux_amd64/image/color/palette.a b/pkg/linux_amd64/image/color/palette.a
index 2e11504..20891ef 100644
--- a/pkg/linux_amd64/image/color/palette.a
+++ b/pkg/linux_amd64/image/color/palette.a
Binary files differ
diff --git a/pkg/linux_amd64/image/draw.a b/pkg/linux_amd64/image/draw.a
index 1ddb1c8..c01cccb 100644
--- a/pkg/linux_amd64/image/draw.a
+++ b/pkg/linux_amd64/image/draw.a
Binary files differ
diff --git a/pkg/linux_amd64/image/gif.a b/pkg/linux_amd64/image/gif.a
index 4942984..b45e238 100644
--- a/pkg/linux_amd64/image/gif.a
+++ b/pkg/linux_amd64/image/gif.a
Binary files differ
diff --git a/pkg/linux_amd64/image/internal/imageutil.a b/pkg/linux_amd64/image/internal/imageutil.a
index 0f5b1bd..c9aa0c6 100644
--- a/pkg/linux_amd64/image/internal/imageutil.a
+++ b/pkg/linux_amd64/image/internal/imageutil.a
Binary files differ
diff --git a/pkg/linux_amd64/image/jpeg.a b/pkg/linux_amd64/image/jpeg.a
index c398d29..b80f11a 100644
--- a/pkg/linux_amd64/image/jpeg.a
+++ b/pkg/linux_amd64/image/jpeg.a
Binary files differ
diff --git a/pkg/linux_amd64/image/png.a b/pkg/linux_amd64/image/png.a
index 0fbb978..4301b8c 100644
--- a/pkg/linux_amd64/image/png.a
+++ b/pkg/linux_amd64/image/png.a
Binary files differ
diff --git a/pkg/linux_amd64/index/suffixarray.a b/pkg/linux_amd64/index/suffixarray.a
index 2f00042..21af9fd 100644
--- a/pkg/linux_amd64/index/suffixarray.a
+++ b/pkg/linux_amd64/index/suffixarray.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/nettrace.a b/pkg/linux_amd64/internal/nettrace.a
index f290441..ab8d291 100644
--- a/pkg/linux_amd64/internal/nettrace.a
+++ b/pkg/linux_amd64/internal/nettrace.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/pprof/profile.a b/pkg/linux_amd64/internal/pprof/profile.a
index 5d6c49b..b246772 100644
--- a/pkg/linux_amd64/internal/pprof/profile.a
+++ b/pkg/linux_amd64/internal/pprof/profile.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/race.a b/pkg/linux_amd64/internal/race.a
index 7ad88f7..1b5629a 100644
--- a/pkg/linux_amd64/internal/race.a
+++ b/pkg/linux_amd64/internal/race.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/singleflight.a b/pkg/linux_amd64/internal/singleflight.a
index 9880f6a..15ecc9f 100644
--- a/pkg/linux_amd64/internal/singleflight.a
+++ b/pkg/linux_amd64/internal/singleflight.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/syscall/unix.a b/pkg/linux_amd64/internal/syscall/unix.a
index ce364b9..ed1ecff 100644
--- a/pkg/linux_amd64/internal/syscall/unix.a
+++ b/pkg/linux_amd64/internal/syscall/unix.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/syscall/windows.a b/pkg/linux_amd64/internal/syscall/windows.a
index d2f483f..8ceb915 100644
--- a/pkg/linux_amd64/internal/syscall/windows.a
+++ b/pkg/linux_amd64/internal/syscall/windows.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/syscall/windows/registry.a b/pkg/linux_amd64/internal/syscall/windows/registry.a
index 6db7563..c4d79e6 100644
--- a/pkg/linux_amd64/internal/syscall/windows/registry.a
+++ b/pkg/linux_amd64/internal/syscall/windows/registry.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/syscall/windows/sysdll.a b/pkg/linux_amd64/internal/syscall/windows/sysdll.a
index 6bbec6b..2ec4176 100644
--- a/pkg/linux_amd64/internal/syscall/windows/sysdll.a
+++ b/pkg/linux_amd64/internal/syscall/windows/sysdll.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/testenv.a b/pkg/linux_amd64/internal/testenv.a
index 26c7233..ed9d171 100644
--- a/pkg/linux_amd64/internal/testenv.a
+++ b/pkg/linux_amd64/internal/testenv.a
Binary files differ
diff --git a/pkg/linux_amd64/internal/trace.a b/pkg/linux_amd64/internal/trace.a
index 2f90c7e..94e61b7 100644
--- a/pkg/linux_amd64/internal/trace.a
+++ b/pkg/linux_amd64/internal/trace.a
Binary files differ
diff --git a/pkg/linux_amd64/io.a b/pkg/linux_amd64/io.a
index 74db223..6a4dee1 100644
--- a/pkg/linux_amd64/io.a
+++ b/pkg/linux_amd64/io.a
Binary files differ
diff --git a/pkg/linux_amd64/io/ioutil.a b/pkg/linux_amd64/io/ioutil.a
index 203343b..3f2eb42 100644
--- a/pkg/linux_amd64/io/ioutil.a
+++ b/pkg/linux_amd64/io/ioutil.a
Binary files differ
diff --git a/pkg/linux_amd64/log.a b/pkg/linux_amd64/log.a
index 9d7b24d..3048e09 100644
--- a/pkg/linux_amd64/log.a
+++ b/pkg/linux_amd64/log.a
Binary files differ
diff --git a/pkg/linux_amd64/log/syslog.a b/pkg/linux_amd64/log/syslog.a
index e1fc5fc..bc033c9 100644
--- a/pkg/linux_amd64/log/syslog.a
+++ b/pkg/linux_amd64/log/syslog.a
Binary files differ
diff --git a/pkg/linux_amd64/math.a b/pkg/linux_amd64/math.a
index b8edfa6..84ec044 100644
--- a/pkg/linux_amd64/math.a
+++ b/pkg/linux_amd64/math.a
Binary files differ
diff --git a/pkg/linux_amd64/math/big.a b/pkg/linux_amd64/math/big.a
index dae3ef4..95bc85d 100644
--- a/pkg/linux_amd64/math/big.a
+++ b/pkg/linux_amd64/math/big.a
Binary files differ
diff --git a/pkg/linux_amd64/math/cmplx.a b/pkg/linux_amd64/math/cmplx.a
index e9bf361..ba90880 100644
--- a/pkg/linux_amd64/math/cmplx.a
+++ b/pkg/linux_amd64/math/cmplx.a
Binary files differ
diff --git a/pkg/linux_amd64/math/rand.a b/pkg/linux_amd64/math/rand.a
index 557a613..63610ed 100644
--- a/pkg/linux_amd64/math/rand.a
+++ b/pkg/linux_amd64/math/rand.a
Binary files differ
diff --git a/pkg/linux_amd64/mime.a b/pkg/linux_amd64/mime.a
index e337e56..fcc10cc 100644
--- a/pkg/linux_amd64/mime.a
+++ b/pkg/linux_amd64/mime.a
Binary files differ
diff --git a/pkg/linux_amd64/mime/multipart.a b/pkg/linux_amd64/mime/multipart.a
index 52fdc08..05bfe90 100644
--- a/pkg/linux_amd64/mime/multipart.a
+++ b/pkg/linux_amd64/mime/multipart.a
Binary files differ
diff --git a/pkg/linux_amd64/mime/quotedprintable.a b/pkg/linux_amd64/mime/quotedprintable.a
index 5512cbc..5f735b6 100644
--- a/pkg/linux_amd64/mime/quotedprintable.a
+++ b/pkg/linux_amd64/mime/quotedprintable.a
Binary files differ
diff --git a/pkg/linux_amd64/net.a b/pkg/linux_amd64/net.a
index b300293..aef2c64 100644
--- a/pkg/linux_amd64/net.a
+++ b/pkg/linux_amd64/net.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http.a b/pkg/linux_amd64/net/http.a
index 55a4d6b..5f3976c 100644
--- a/pkg/linux_amd64/net/http.a
+++ b/pkg/linux_amd64/net/http.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/cgi.a b/pkg/linux_amd64/net/http/cgi.a
index 00de147..cb07c8e 100644
--- a/pkg/linux_amd64/net/http/cgi.a
+++ b/pkg/linux_amd64/net/http/cgi.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/cookiejar.a b/pkg/linux_amd64/net/http/cookiejar.a
index b09a682..4a61953 100644
--- a/pkg/linux_amd64/net/http/cookiejar.a
+++ b/pkg/linux_amd64/net/http/cookiejar.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/fcgi.a b/pkg/linux_amd64/net/http/fcgi.a
index f88a0a6..c28c65a 100644
--- a/pkg/linux_amd64/net/http/fcgi.a
+++ b/pkg/linux_amd64/net/http/fcgi.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/httptest.a b/pkg/linux_amd64/net/http/httptest.a
index 51a54b8..c720582 100644
--- a/pkg/linux_amd64/net/http/httptest.a
+++ b/pkg/linux_amd64/net/http/httptest.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/httptrace.a b/pkg/linux_amd64/net/http/httptrace.a
index 53bc9f3..9fdddb4 100644
--- a/pkg/linux_amd64/net/http/httptrace.a
+++ b/pkg/linux_amd64/net/http/httptrace.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/httputil.a b/pkg/linux_amd64/net/http/httputil.a
index b7b9886..92f77fa 100644
--- a/pkg/linux_amd64/net/http/httputil.a
+++ b/pkg/linux_amd64/net/http/httputil.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/internal.a b/pkg/linux_amd64/net/http/internal.a
index af4c4b1..c59f34f 100644
--- a/pkg/linux_amd64/net/http/internal.a
+++ b/pkg/linux_amd64/net/http/internal.a
Binary files differ
diff --git a/pkg/linux_amd64/net/http/pprof.a b/pkg/linux_amd64/net/http/pprof.a
index 188a8dd..a012d9b 100644
--- a/pkg/linux_amd64/net/http/pprof.a
+++ b/pkg/linux_amd64/net/http/pprof.a
Binary files differ
diff --git a/pkg/linux_amd64/net/internal/socktest.a b/pkg/linux_amd64/net/internal/socktest.a
index 6f1c04f..01fd938 100644
--- a/pkg/linux_amd64/net/internal/socktest.a
+++ b/pkg/linux_amd64/net/internal/socktest.a
Binary files differ
diff --git a/pkg/linux_amd64/net/mail.a b/pkg/linux_amd64/net/mail.a
index dff5a57..e292ebb 100644
--- a/pkg/linux_amd64/net/mail.a
+++ b/pkg/linux_amd64/net/mail.a
Binary files differ
diff --git a/pkg/linux_amd64/net/rpc.a b/pkg/linux_amd64/net/rpc.a
index 0eacba6..4d433de 100644
--- a/pkg/linux_amd64/net/rpc.a
+++ b/pkg/linux_amd64/net/rpc.a
Binary files differ
diff --git a/pkg/linux_amd64/net/rpc/jsonrpc.a b/pkg/linux_amd64/net/rpc/jsonrpc.a
index 169ece7..d415caf 100644
--- a/pkg/linux_amd64/net/rpc/jsonrpc.a
+++ b/pkg/linux_amd64/net/rpc/jsonrpc.a
Binary files differ
diff --git a/pkg/linux_amd64/net/smtp.a b/pkg/linux_amd64/net/smtp.a
index 55c8162..b8a664c 100644
--- a/pkg/linux_amd64/net/smtp.a
+++ b/pkg/linux_amd64/net/smtp.a
Binary files differ
diff --git a/pkg/linux_amd64/net/textproto.a b/pkg/linux_amd64/net/textproto.a
index 0828842..6168ab9 100644
--- a/pkg/linux_amd64/net/textproto.a
+++ b/pkg/linux_amd64/net/textproto.a
Binary files differ
diff --git a/pkg/linux_amd64/net/url.a b/pkg/linux_amd64/net/url.a
index 37e9dd8..828a56b 100644
--- a/pkg/linux_amd64/net/url.a
+++ b/pkg/linux_amd64/net/url.a
Binary files differ
diff --git a/pkg/linux_amd64/os.a b/pkg/linux_amd64/os.a
index 0cd15be..4a25372 100644
--- a/pkg/linux_amd64/os.a
+++ b/pkg/linux_amd64/os.a
Binary files differ
diff --git a/pkg/linux_amd64/os/exec.a b/pkg/linux_amd64/os/exec.a
index 355c456..4a38193 100644
--- a/pkg/linux_amd64/os/exec.a
+++ b/pkg/linux_amd64/os/exec.a
Binary files differ
diff --git a/pkg/linux_amd64/os/signal.a b/pkg/linux_amd64/os/signal.a
index 327c8cd..89ea43d 100644
--- a/pkg/linux_amd64/os/signal.a
+++ b/pkg/linux_amd64/os/signal.a
Binary files differ
diff --git a/pkg/linux_amd64/os/user.a b/pkg/linux_amd64/os/user.a
index 040eaec..c971d36 100644
--- a/pkg/linux_amd64/os/user.a
+++ b/pkg/linux_amd64/os/user.a
Binary files differ
diff --git a/pkg/linux_amd64/path.a b/pkg/linux_amd64/path.a
index 16f3fe2..4756b52 100644
--- a/pkg/linux_amd64/path.a
+++ b/pkg/linux_amd64/path.a
Binary files differ
diff --git a/pkg/linux_amd64/path/filepath.a b/pkg/linux_amd64/path/filepath.a
index 806e1b6..3c07daf 100644
--- a/pkg/linux_amd64/path/filepath.a
+++ b/pkg/linux_amd64/path/filepath.a
Binary files differ
diff --git a/pkg/linux_amd64/plugin.a b/pkg/linux_amd64/plugin.a
index 5385bdb..a1c7cd0 100644
--- a/pkg/linux_amd64/plugin.a
+++ b/pkg/linux_amd64/plugin.a
Binary files differ
diff --git a/pkg/linux_amd64/reflect.a b/pkg/linux_amd64/reflect.a
index 9b4cd9c..dd96bf0 100644
--- a/pkg/linux_amd64/reflect.a
+++ b/pkg/linux_amd64/reflect.a
Binary files differ
diff --git a/pkg/linux_amd64/regexp.a b/pkg/linux_amd64/regexp.a
index 6d8ed97..c27974f 100644
--- a/pkg/linux_amd64/regexp.a
+++ b/pkg/linux_amd64/regexp.a
Binary files differ
diff --git a/pkg/linux_amd64/regexp/syntax.a b/pkg/linux_amd64/regexp/syntax.a
index 19c2b4a..87dc5f9 100644
--- a/pkg/linux_amd64/regexp/syntax.a
+++ b/pkg/linux_amd64/regexp/syntax.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime.a b/pkg/linux_amd64/runtime.a
index 68feaa3..31b24b6 100644
--- a/pkg/linux_amd64/runtime.a
+++ b/pkg/linux_amd64/runtime.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/cgo.a b/pkg/linux_amd64/runtime/cgo.a
index a1ac37e..31242de 100644
--- a/pkg/linux_amd64/runtime/cgo.a
+++ b/pkg/linux_amd64/runtime/cgo.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/debug.a b/pkg/linux_amd64/runtime/debug.a
index 27bdde9..9c44e05 100644
--- a/pkg/linux_amd64/runtime/debug.a
+++ b/pkg/linux_amd64/runtime/debug.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/internal/atomic.a b/pkg/linux_amd64/runtime/internal/atomic.a
index 8cba27e..096dbe4 100644
--- a/pkg/linux_amd64/runtime/internal/atomic.a
+++ b/pkg/linux_amd64/runtime/internal/atomic.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/internal/sys.a b/pkg/linux_amd64/runtime/internal/sys.a
index 4d5dc63..e36d04f 100644
--- a/pkg/linux_amd64/runtime/internal/sys.a
+++ b/pkg/linux_amd64/runtime/internal/sys.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/pprof.a b/pkg/linux_amd64/runtime/pprof.a
index 94cbff9..739439f 100644
--- a/pkg/linux_amd64/runtime/pprof.a
+++ b/pkg/linux_amd64/runtime/pprof.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/pprof/internal/protopprof.a b/pkg/linux_amd64/runtime/pprof/internal/protopprof.a
index ecf844d..9a0d001 100644
--- a/pkg/linux_amd64/runtime/pprof/internal/protopprof.a
+++ b/pkg/linux_amd64/runtime/pprof/internal/protopprof.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/race.a b/pkg/linux_amd64/runtime/race.a
index 7f3816c..9e1d2f8 100644
--- a/pkg/linux_amd64/runtime/race.a
+++ b/pkg/linux_amd64/runtime/race.a
Binary files differ
diff --git a/pkg/linux_amd64/runtime/trace.a b/pkg/linux_amd64/runtime/trace.a
index bad5bbe..670e90d 100644
--- a/pkg/linux_amd64/runtime/trace.a
+++ b/pkg/linux_amd64/runtime/trace.a
Binary files differ
diff --git a/pkg/linux_amd64/sort.a b/pkg/linux_amd64/sort.a
index c2250b6..5be21db 100644
--- a/pkg/linux_amd64/sort.a
+++ b/pkg/linux_amd64/sort.a
Binary files differ
diff --git a/pkg/linux_amd64/strconv.a b/pkg/linux_amd64/strconv.a
index 1dab99e..ee384e5 100644
--- a/pkg/linux_amd64/strconv.a
+++ b/pkg/linux_amd64/strconv.a
Binary files differ
diff --git a/pkg/linux_amd64/strings.a b/pkg/linux_amd64/strings.a
index 2b9e13f..a3cc7d3 100644
--- a/pkg/linux_amd64/strings.a
+++ b/pkg/linux_amd64/strings.a
Binary files differ
diff --git a/pkg/linux_amd64/sync.a b/pkg/linux_amd64/sync.a
index 19d9219..fe1d081 100644
--- a/pkg/linux_amd64/sync.a
+++ b/pkg/linux_amd64/sync.a
Binary files differ
diff --git a/pkg/linux_amd64/sync/atomic.a b/pkg/linux_amd64/sync/atomic.a
index 08a4d70..4a7940b 100644
--- a/pkg/linux_amd64/sync/atomic.a
+++ b/pkg/linux_amd64/sync/atomic.a
Binary files differ
diff --git a/pkg/linux_amd64/syscall.a b/pkg/linux_amd64/syscall.a
index 34716a8..46ed8da 100644
--- a/pkg/linux_amd64/syscall.a
+++ b/pkg/linux_amd64/syscall.a
Binary files differ
diff --git a/pkg/linux_amd64/testing.a b/pkg/linux_amd64/testing.a
index 2c92e94..8dbbb6e 100644
--- a/pkg/linux_amd64/testing.a
+++ b/pkg/linux_amd64/testing.a
Binary files differ
diff --git a/pkg/linux_amd64/testing/internal/testdeps.a b/pkg/linux_amd64/testing/internal/testdeps.a
index 738c8c9..cfe39e5 100644
--- a/pkg/linux_amd64/testing/internal/testdeps.a
+++ b/pkg/linux_amd64/testing/internal/testdeps.a
Binary files differ
diff --git a/pkg/linux_amd64/testing/iotest.a b/pkg/linux_amd64/testing/iotest.a
index cb57926..fb1f36e 100644
--- a/pkg/linux_amd64/testing/iotest.a
+++ b/pkg/linux_amd64/testing/iotest.a
Binary files differ
diff --git a/pkg/linux_amd64/testing/quick.a b/pkg/linux_amd64/testing/quick.a
index 21d7a22..dfcedbb 100644
--- a/pkg/linux_amd64/testing/quick.a
+++ b/pkg/linux_amd64/testing/quick.a
Binary files differ
diff --git a/pkg/linux_amd64/text/scanner.a b/pkg/linux_amd64/text/scanner.a
index 1ada823..ac43411 100644
--- a/pkg/linux_amd64/text/scanner.a
+++ b/pkg/linux_amd64/text/scanner.a
Binary files differ
diff --git a/pkg/linux_amd64/text/tabwriter.a b/pkg/linux_amd64/text/tabwriter.a
index 225ec84..08b1941 100644
--- a/pkg/linux_amd64/text/tabwriter.a
+++ b/pkg/linux_amd64/text/tabwriter.a
Binary files differ
diff --git a/pkg/linux_amd64/text/template.a b/pkg/linux_amd64/text/template.a
index bdb2a52..96e9d61 100644
--- a/pkg/linux_amd64/text/template.a
+++ b/pkg/linux_amd64/text/template.a
Binary files differ
diff --git a/pkg/linux_amd64/text/template/parse.a b/pkg/linux_amd64/text/template/parse.a
index af1550f..a9256f4 100644
--- a/pkg/linux_amd64/text/template/parse.a
+++ b/pkg/linux_amd64/text/template/parse.a
Binary files differ
diff --git a/pkg/linux_amd64/time.a b/pkg/linux_amd64/time.a
index f390d67..e891b33 100644
--- a/pkg/linux_amd64/time.a
+++ b/pkg/linux_amd64/time.a
Binary files differ
diff --git a/pkg/linux_amd64/unicode.a b/pkg/linux_amd64/unicode.a
index a5ee04f..3c30761 100644
--- a/pkg/linux_amd64/unicode.a
+++ b/pkg/linux_amd64/unicode.a
Binary files differ
diff --git a/pkg/linux_amd64/unicode/utf16.a b/pkg/linux_amd64/unicode/utf16.a
index b3484ff..d0ea3c1 100644
--- a/pkg/linux_amd64/unicode/utf16.a
+++ b/pkg/linux_amd64/unicode/utf16.a
Binary files differ
diff --git a/pkg/linux_amd64/unicode/utf8.a b/pkg/linux_amd64/unicode/utf8.a
index e0a2708..04dc16a 100644
--- a/pkg/linux_amd64/unicode/utf8.a
+++ b/pkg/linux_amd64/unicode/utf8.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305.a b/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305.a
index 9093b4d..5e37e09 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a b/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a
index 5674561..52c0227 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/crypto/curve25519.a b/pkg/linux_amd64/vendor/golang_org/x/crypto/curve25519.a
index 12d7dc1..ad0b4c8 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/crypto/curve25519.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/crypto/curve25519.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/crypto/poly1305.a b/pkg/linux_amd64/vendor/golang_org/x/crypto/poly1305.a
index 044e864..0d5f213 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/crypto/poly1305.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/crypto/poly1305.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/net/http2/hpack.a b/pkg/linux_amd64/vendor/golang_org/x/net/http2/hpack.a
index 2017671..f6e03fd 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/net/http2/hpack.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/net/http2/hpack.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/net/idna.a b/pkg/linux_amd64/vendor/golang_org/x/net/idna.a
index b6b2c92..941e383 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/net/idna.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/net/idna.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/net/lex/httplex.a b/pkg/linux_amd64/vendor/golang_org/x/net/lex/httplex.a
index 4a81b77..9f075d6 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/net/lex/httplex.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/net/lex/httplex.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/text/transform.a b/pkg/linux_amd64/vendor/golang_org/x/text/transform.a
index 3b27e11..2771408 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/text/transform.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/text/transform.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/text/unicode/norm.a b/pkg/linux_amd64/vendor/golang_org/x/text/unicode/norm.a
index 9ebbcf8..7ad2b83 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/text/unicode/norm.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/text/unicode/norm.a
Binary files differ
diff --git a/pkg/linux_amd64/vendor/golang_org/x/text/width.a b/pkg/linux_amd64/vendor/golang_org/x/text/width.a
index 5a11c7a..2e56f8c 100644
--- a/pkg/linux_amd64/vendor/golang_org/x/text/width.a
+++ b/pkg/linux_amd64/vendor/golang_org/x/text/width.a
Binary files differ
diff --git a/pkg/linux_amd64_race/archive/tar.a b/pkg/linux_amd64_race/archive/tar.a
index a331ee1..618d0e9 100644
--- a/pkg/linux_amd64_race/archive/tar.a
+++ b/pkg/linux_amd64_race/archive/tar.a
Binary files differ
diff --git a/pkg/linux_amd64_race/archive/zip.a b/pkg/linux_amd64_race/archive/zip.a
index e5d66d9..c66cd50 100644
--- a/pkg/linux_amd64_race/archive/zip.a
+++ b/pkg/linux_amd64_race/archive/zip.a
Binary files differ
diff --git a/pkg/linux_amd64_race/bufio.a b/pkg/linux_amd64_race/bufio.a
index 4468762..c57816a 100644
--- a/pkg/linux_amd64_race/bufio.a
+++ b/pkg/linux_amd64_race/bufio.a
Binary files differ
diff --git a/pkg/linux_amd64_race/bytes.a b/pkg/linux_amd64_race/bytes.a
index 79c6f03..39c0c99 100644
--- a/pkg/linux_amd64_race/bytes.a
+++ b/pkg/linux_amd64_race/bytes.a
Binary files differ
diff --git a/pkg/linux_amd64_race/compress/bzip2.a b/pkg/linux_amd64_race/compress/bzip2.a
index 677f2b1..1b16f7c 100644
--- a/pkg/linux_amd64_race/compress/bzip2.a
+++ b/pkg/linux_amd64_race/compress/bzip2.a
Binary files differ
diff --git a/pkg/linux_amd64_race/compress/flate.a b/pkg/linux_amd64_race/compress/flate.a
index 98e6126..0bf6196 100644
--- a/pkg/linux_amd64_race/compress/flate.a
+++ b/pkg/linux_amd64_race/compress/flate.a
Binary files differ
diff --git a/pkg/linux_amd64_race/compress/gzip.a b/pkg/linux_amd64_race/compress/gzip.a
index cea29e2..6b89fd3 100644
--- a/pkg/linux_amd64_race/compress/gzip.a
+++ b/pkg/linux_amd64_race/compress/gzip.a
Binary files differ
diff --git a/pkg/linux_amd64_race/compress/lzw.a b/pkg/linux_amd64_race/compress/lzw.a
index 34f5a26..74da9f1 100644
--- a/pkg/linux_amd64_race/compress/lzw.a
+++ b/pkg/linux_amd64_race/compress/lzw.a
Binary files differ
diff --git a/pkg/linux_amd64_race/compress/zlib.a b/pkg/linux_amd64_race/compress/zlib.a
index a0ef9c7..e0f992e 100644
--- a/pkg/linux_amd64_race/compress/zlib.a
+++ b/pkg/linux_amd64_race/compress/zlib.a
Binary files differ
diff --git a/pkg/linux_amd64_race/container/heap.a b/pkg/linux_amd64_race/container/heap.a
index a986413..cf899f4 100644
--- a/pkg/linux_amd64_race/container/heap.a
+++ b/pkg/linux_amd64_race/container/heap.a
Binary files differ
diff --git a/pkg/linux_amd64_race/container/list.a b/pkg/linux_amd64_race/container/list.a
index ccee218..3a7ddf2 100644
--- a/pkg/linux_amd64_race/container/list.a
+++ b/pkg/linux_amd64_race/container/list.a
Binary files differ
diff --git a/pkg/linux_amd64_race/container/ring.a b/pkg/linux_amd64_race/container/ring.a
index 7d91581..5520f25 100644
--- a/pkg/linux_amd64_race/container/ring.a
+++ b/pkg/linux_amd64_race/container/ring.a
Binary files differ
diff --git a/pkg/linux_amd64_race/context.a b/pkg/linux_amd64_race/context.a
index 920b9d3..bf903c1 100644
--- a/pkg/linux_amd64_race/context.a
+++ b/pkg/linux_amd64_race/context.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto.a b/pkg/linux_amd64_race/crypto.a
index c8f82b2..2cfc64c 100644
--- a/pkg/linux_amd64_race/crypto.a
+++ b/pkg/linux_amd64_race/crypto.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/aes.a b/pkg/linux_amd64_race/crypto/aes.a
index 44f375b..dad7a83 100644
--- a/pkg/linux_amd64_race/crypto/aes.a
+++ b/pkg/linux_amd64_race/crypto/aes.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/cipher.a b/pkg/linux_amd64_race/crypto/cipher.a
index 6f48b24..aa9016e 100644
--- a/pkg/linux_amd64_race/crypto/cipher.a
+++ b/pkg/linux_amd64_race/crypto/cipher.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/des.a b/pkg/linux_amd64_race/crypto/des.a
index e97acd6..9dcc417 100644
--- a/pkg/linux_amd64_race/crypto/des.a
+++ b/pkg/linux_amd64_race/crypto/des.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/dsa.a b/pkg/linux_amd64_race/crypto/dsa.a
index ffa174c..795da56 100644
--- a/pkg/linux_amd64_race/crypto/dsa.a
+++ b/pkg/linux_amd64_race/crypto/dsa.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/ecdsa.a b/pkg/linux_amd64_race/crypto/ecdsa.a
index 96a4f60..6c6a0cf 100644
--- a/pkg/linux_amd64_race/crypto/ecdsa.a
+++ b/pkg/linux_amd64_race/crypto/ecdsa.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/elliptic.a b/pkg/linux_amd64_race/crypto/elliptic.a
index 6ac2dd4..c585cce 100644
--- a/pkg/linux_amd64_race/crypto/elliptic.a
+++ b/pkg/linux_amd64_race/crypto/elliptic.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/hmac.a b/pkg/linux_amd64_race/crypto/hmac.a
index b8051af..329c5c8 100644
--- a/pkg/linux_amd64_race/crypto/hmac.a
+++ b/pkg/linux_amd64_race/crypto/hmac.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/internal/cipherhw.a b/pkg/linux_amd64_race/crypto/internal/cipherhw.a
index b15f785..2472ebc 100644
--- a/pkg/linux_amd64_race/crypto/internal/cipherhw.a
+++ b/pkg/linux_amd64_race/crypto/internal/cipherhw.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/md5.a b/pkg/linux_amd64_race/crypto/md5.a
index cfc3e23..bef20b2 100644
--- a/pkg/linux_amd64_race/crypto/md5.a
+++ b/pkg/linux_amd64_race/crypto/md5.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/rand.a b/pkg/linux_amd64_race/crypto/rand.a
index e832bc8..133e0d3 100644
--- a/pkg/linux_amd64_race/crypto/rand.a
+++ b/pkg/linux_amd64_race/crypto/rand.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/rc4.a b/pkg/linux_amd64_race/crypto/rc4.a
index b7d85e5..d812a22 100644
--- a/pkg/linux_amd64_race/crypto/rc4.a
+++ b/pkg/linux_amd64_race/crypto/rc4.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/rsa.a b/pkg/linux_amd64_race/crypto/rsa.a
index bcb40e5..62b4087 100644
--- a/pkg/linux_amd64_race/crypto/rsa.a
+++ b/pkg/linux_amd64_race/crypto/rsa.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/sha1.a b/pkg/linux_amd64_race/crypto/sha1.a
index 2e7ad9c..a8bc4cb 100644
--- a/pkg/linux_amd64_race/crypto/sha1.a
+++ b/pkg/linux_amd64_race/crypto/sha1.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/sha256.a b/pkg/linux_amd64_race/crypto/sha256.a
index 64392b3..cc48fba 100644
--- a/pkg/linux_amd64_race/crypto/sha256.a
+++ b/pkg/linux_amd64_race/crypto/sha256.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/sha512.a b/pkg/linux_amd64_race/crypto/sha512.a
index e90c4f0..096002d 100644
--- a/pkg/linux_amd64_race/crypto/sha512.a
+++ b/pkg/linux_amd64_race/crypto/sha512.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/subtle.a b/pkg/linux_amd64_race/crypto/subtle.a
index 0f2a38a..4a9ad35 100644
--- a/pkg/linux_amd64_race/crypto/subtle.a
+++ b/pkg/linux_amd64_race/crypto/subtle.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/tls.a b/pkg/linux_amd64_race/crypto/tls.a
index e9e43d2..28fa04e 100644
--- a/pkg/linux_amd64_race/crypto/tls.a
+++ b/pkg/linux_amd64_race/crypto/tls.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/x509.a b/pkg/linux_amd64_race/crypto/x509.a
index dab067e..bc7648e 100644
--- a/pkg/linux_amd64_race/crypto/x509.a
+++ b/pkg/linux_amd64_race/crypto/x509.a
Binary files differ
diff --git a/pkg/linux_amd64_race/crypto/x509/pkix.a b/pkg/linux_amd64_race/crypto/x509/pkix.a
index 373c93d..0fafd35 100644
--- a/pkg/linux_amd64_race/crypto/x509/pkix.a
+++ b/pkg/linux_amd64_race/crypto/x509/pkix.a
Binary files differ
diff --git a/pkg/linux_amd64_race/database/sql.a b/pkg/linux_amd64_race/database/sql.a
index 0056fa3..9f644b7 100644
--- a/pkg/linux_amd64_race/database/sql.a
+++ b/pkg/linux_amd64_race/database/sql.a
Binary files differ
diff --git a/pkg/linux_amd64_race/database/sql/driver.a b/pkg/linux_amd64_race/database/sql/driver.a
index 5de76ba..93452ec 100644
--- a/pkg/linux_amd64_race/database/sql/driver.a
+++ b/pkg/linux_amd64_race/database/sql/driver.a
Binary files differ
diff --git a/pkg/linux_amd64_race/debug/dwarf.a b/pkg/linux_amd64_race/debug/dwarf.a
index 5f8e4da..a891325 100644
--- a/pkg/linux_amd64_race/debug/dwarf.a
+++ b/pkg/linux_amd64_race/debug/dwarf.a
Binary files differ
diff --git a/pkg/linux_amd64_race/debug/elf.a b/pkg/linux_amd64_race/debug/elf.a
index 1ce4117..2944fa1 100644
--- a/pkg/linux_amd64_race/debug/elf.a
+++ b/pkg/linux_amd64_race/debug/elf.a
Binary files differ
diff --git a/pkg/linux_amd64_race/debug/gosym.a b/pkg/linux_amd64_race/debug/gosym.a
index beac72a..47ab43f 100644
--- a/pkg/linux_amd64_race/debug/gosym.a
+++ b/pkg/linux_amd64_race/debug/gosym.a
Binary files differ
diff --git a/pkg/linux_amd64_race/debug/macho.a b/pkg/linux_amd64_race/debug/macho.a
index f88526f..5b6f9de 100644
--- a/pkg/linux_amd64_race/debug/macho.a
+++ b/pkg/linux_amd64_race/debug/macho.a
Binary files differ
diff --git a/pkg/linux_amd64_race/debug/pe.a b/pkg/linux_amd64_race/debug/pe.a
index a7efcb5..5f863f9 100644
--- a/pkg/linux_amd64_race/debug/pe.a
+++ b/pkg/linux_amd64_race/debug/pe.a
Binary files differ
diff --git a/pkg/linux_amd64_race/debug/plan9obj.a b/pkg/linux_amd64_race/debug/plan9obj.a
index 2078ba9..237f4b0 100644
--- a/pkg/linux_amd64_race/debug/plan9obj.a
+++ b/pkg/linux_amd64_race/debug/plan9obj.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding.a b/pkg/linux_amd64_race/encoding.a
index 6c55873..e1ddae0 100644
--- a/pkg/linux_amd64_race/encoding.a
+++ b/pkg/linux_amd64_race/encoding.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/ascii85.a b/pkg/linux_amd64_race/encoding/ascii85.a
index e73a2f5..9efead7 100644
--- a/pkg/linux_amd64_race/encoding/ascii85.a
+++ b/pkg/linux_amd64_race/encoding/ascii85.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/asn1.a b/pkg/linux_amd64_race/encoding/asn1.a
index 96c2bdb..8bf50fe 100644
--- a/pkg/linux_amd64_race/encoding/asn1.a
+++ b/pkg/linux_amd64_race/encoding/asn1.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/base32.a b/pkg/linux_amd64_race/encoding/base32.a
index af46a8c..6c15101 100644
--- a/pkg/linux_amd64_race/encoding/base32.a
+++ b/pkg/linux_amd64_race/encoding/base32.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/base64.a b/pkg/linux_amd64_race/encoding/base64.a
index ddd478d..713ae39 100644
--- a/pkg/linux_amd64_race/encoding/base64.a
+++ b/pkg/linux_amd64_race/encoding/base64.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/binary.a b/pkg/linux_amd64_race/encoding/binary.a
index ff993ef..04af01e 100644
--- a/pkg/linux_amd64_race/encoding/binary.a
+++ b/pkg/linux_amd64_race/encoding/binary.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/csv.a b/pkg/linux_amd64_race/encoding/csv.a
index bf7bb22..2f155f8 100644
--- a/pkg/linux_amd64_race/encoding/csv.a
+++ b/pkg/linux_amd64_race/encoding/csv.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/gob.a b/pkg/linux_amd64_race/encoding/gob.a
index 250835b..a11d934 100644
--- a/pkg/linux_amd64_race/encoding/gob.a
+++ b/pkg/linux_amd64_race/encoding/gob.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/hex.a b/pkg/linux_amd64_race/encoding/hex.a
index 4249c34..e2f220e 100644
--- a/pkg/linux_amd64_race/encoding/hex.a
+++ b/pkg/linux_amd64_race/encoding/hex.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/json.a b/pkg/linux_amd64_race/encoding/json.a
index 1053f3f..acb5713 100644
--- a/pkg/linux_amd64_race/encoding/json.a
+++ b/pkg/linux_amd64_race/encoding/json.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/pem.a b/pkg/linux_amd64_race/encoding/pem.a
index 8e08602..b06df14 100644
--- a/pkg/linux_amd64_race/encoding/pem.a
+++ b/pkg/linux_amd64_race/encoding/pem.a
Binary files differ
diff --git a/pkg/linux_amd64_race/encoding/xml.a b/pkg/linux_amd64_race/encoding/xml.a
index 9e7684b..a9b115e 100644
--- a/pkg/linux_amd64_race/encoding/xml.a
+++ b/pkg/linux_amd64_race/encoding/xml.a
Binary files differ
diff --git a/pkg/linux_amd64_race/errors.a b/pkg/linux_amd64_race/errors.a
index 07bf8b7..5a375bc 100644
--- a/pkg/linux_amd64_race/errors.a
+++ b/pkg/linux_amd64_race/errors.a
Binary files differ
diff --git a/pkg/linux_amd64_race/expvar.a b/pkg/linux_amd64_race/expvar.a
index d391dc6..0592fd0 100644
--- a/pkg/linux_amd64_race/expvar.a
+++ b/pkg/linux_amd64_race/expvar.a
Binary files differ
diff --git a/pkg/linux_amd64_race/flag.a b/pkg/linux_amd64_race/flag.a
index c99afa0..b1688bd 100644
--- a/pkg/linux_amd64_race/flag.a
+++ b/pkg/linux_amd64_race/flag.a
Binary files differ
diff --git a/pkg/linux_amd64_race/fmt.a b/pkg/linux_amd64_race/fmt.a
index d20eab8..36fecfe 100644
--- a/pkg/linux_amd64_race/fmt.a
+++ b/pkg/linux_amd64_race/fmt.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/ast.a b/pkg/linux_amd64_race/go/ast.a
index a4e56e7..e8ab461 100644
--- a/pkg/linux_amd64_race/go/ast.a
+++ b/pkg/linux_amd64_race/go/ast.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/build.a b/pkg/linux_amd64_race/go/build.a
index 75ed780..9e144a6 100644
--- a/pkg/linux_amd64_race/go/build.a
+++ b/pkg/linux_amd64_race/go/build.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/constant.a b/pkg/linux_amd64_race/go/constant.a
index 40130f7..9cb44cd 100644
--- a/pkg/linux_amd64_race/go/constant.a
+++ b/pkg/linux_amd64_race/go/constant.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/doc.a b/pkg/linux_amd64_race/go/doc.a
index 4dfbbd3..bdd04ad 100644
--- a/pkg/linux_amd64_race/go/doc.a
+++ b/pkg/linux_amd64_race/go/doc.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/format.a b/pkg/linux_amd64_race/go/format.a
index c932b16..c4ca152 100644
--- a/pkg/linux_amd64_race/go/format.a
+++ b/pkg/linux_amd64_race/go/format.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/importer.a b/pkg/linux_amd64_race/go/importer.a
index 294836e..f852164 100644
--- a/pkg/linux_amd64_race/go/importer.a
+++ b/pkg/linux_amd64_race/go/importer.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/internal/gccgoimporter.a b/pkg/linux_amd64_race/go/internal/gccgoimporter.a
index ec8d9c0..f4e29cf 100644
--- a/pkg/linux_amd64_race/go/internal/gccgoimporter.a
+++ b/pkg/linux_amd64_race/go/internal/gccgoimporter.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/internal/gcimporter.a b/pkg/linux_amd64_race/go/internal/gcimporter.a
index 6ab7213..f68cce0 100644
--- a/pkg/linux_amd64_race/go/internal/gcimporter.a
+++ b/pkg/linux_amd64_race/go/internal/gcimporter.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/parser.a b/pkg/linux_amd64_race/go/parser.a
index 5f43845..3bfdf97 100644
--- a/pkg/linux_amd64_race/go/parser.a
+++ b/pkg/linux_amd64_race/go/parser.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/printer.a b/pkg/linux_amd64_race/go/printer.a
index 172c198..82b2f02 100644
--- a/pkg/linux_amd64_race/go/printer.a
+++ b/pkg/linux_amd64_race/go/printer.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/scanner.a b/pkg/linux_amd64_race/go/scanner.a
index f7c7783..88a3df9 100644
--- a/pkg/linux_amd64_race/go/scanner.a
+++ b/pkg/linux_amd64_race/go/scanner.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/token.a b/pkg/linux_amd64_race/go/token.a
index 4e92ed3..ad8b656 100644
--- a/pkg/linux_amd64_race/go/token.a
+++ b/pkg/linux_amd64_race/go/token.a
Binary files differ
diff --git a/pkg/linux_amd64_race/go/types.a b/pkg/linux_amd64_race/go/types.a
index b6e4232..2085c7c 100644
--- a/pkg/linux_amd64_race/go/types.a
+++ b/pkg/linux_amd64_race/go/types.a
Binary files differ
diff --git a/pkg/linux_amd64_race/hash.a b/pkg/linux_amd64_race/hash.a
index f6536dd..b771935 100644
--- a/pkg/linux_amd64_race/hash.a
+++ b/pkg/linux_amd64_race/hash.a
Binary files differ
diff --git a/pkg/linux_amd64_race/hash/adler32.a b/pkg/linux_amd64_race/hash/adler32.a
index 47eb354..f0390d3 100644
--- a/pkg/linux_amd64_race/hash/adler32.a
+++ b/pkg/linux_amd64_race/hash/adler32.a
Binary files differ
diff --git a/pkg/linux_amd64_race/hash/crc32.a b/pkg/linux_amd64_race/hash/crc32.a
index 17a7cf6..5cfeece 100644
--- a/pkg/linux_amd64_race/hash/crc32.a
+++ b/pkg/linux_amd64_race/hash/crc32.a
Binary files differ
diff --git a/pkg/linux_amd64_race/hash/crc64.a b/pkg/linux_amd64_race/hash/crc64.a
index 924b498..b2e133c 100644
--- a/pkg/linux_amd64_race/hash/crc64.a
+++ b/pkg/linux_amd64_race/hash/crc64.a
Binary files differ
diff --git a/pkg/linux_amd64_race/hash/fnv.a b/pkg/linux_amd64_race/hash/fnv.a
index b173a2f..0ff4ba0 100644
--- a/pkg/linux_amd64_race/hash/fnv.a
+++ b/pkg/linux_amd64_race/hash/fnv.a
Binary files differ
diff --git a/pkg/linux_amd64_race/html.a b/pkg/linux_amd64_race/html.a
index 6160ebc..0595412 100644
--- a/pkg/linux_amd64_race/html.a
+++ b/pkg/linux_amd64_race/html.a
Binary files differ
diff --git a/pkg/linux_amd64_race/html/template.a b/pkg/linux_amd64_race/html/template.a
index ec0ce7d..7dcaae4 100644
--- a/pkg/linux_amd64_race/html/template.a
+++ b/pkg/linux_amd64_race/html/template.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image.a b/pkg/linux_amd64_race/image.a
index 641f056..887c5e1 100644
--- a/pkg/linux_amd64_race/image.a
+++ b/pkg/linux_amd64_race/image.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/color.a b/pkg/linux_amd64_race/image/color.a
index a55fb75..146eb60 100644
--- a/pkg/linux_amd64_race/image/color.a
+++ b/pkg/linux_amd64_race/image/color.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/color/palette.a b/pkg/linux_amd64_race/image/color/palette.a
index a4f78da..c306fe6 100644
--- a/pkg/linux_amd64_race/image/color/palette.a
+++ b/pkg/linux_amd64_race/image/color/palette.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/draw.a b/pkg/linux_amd64_race/image/draw.a
index 3a17f43..2c20424 100644
--- a/pkg/linux_amd64_race/image/draw.a
+++ b/pkg/linux_amd64_race/image/draw.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/gif.a b/pkg/linux_amd64_race/image/gif.a
index a9ab9f9..bfe0ca9 100644
--- a/pkg/linux_amd64_race/image/gif.a
+++ b/pkg/linux_amd64_race/image/gif.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/internal/imageutil.a b/pkg/linux_amd64_race/image/internal/imageutil.a
index 206c122..4c58d4d 100644
--- a/pkg/linux_amd64_race/image/internal/imageutil.a
+++ b/pkg/linux_amd64_race/image/internal/imageutil.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/jpeg.a b/pkg/linux_amd64_race/image/jpeg.a
index 1551787..2c66bea 100644
--- a/pkg/linux_amd64_race/image/jpeg.a
+++ b/pkg/linux_amd64_race/image/jpeg.a
Binary files differ
diff --git a/pkg/linux_amd64_race/image/png.a b/pkg/linux_amd64_race/image/png.a
index 0337a81..b9af763 100644
--- a/pkg/linux_amd64_race/image/png.a
+++ b/pkg/linux_amd64_race/image/png.a
Binary files differ
diff --git a/pkg/linux_amd64_race/index/suffixarray.a b/pkg/linux_amd64_race/index/suffixarray.a
index e2b3cd5..4a2c5da 100644
--- a/pkg/linux_amd64_race/index/suffixarray.a
+++ b/pkg/linux_amd64_race/index/suffixarray.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/nettrace.a b/pkg/linux_amd64_race/internal/nettrace.a
index 952d0f2..0dacf24 100644
--- a/pkg/linux_amd64_race/internal/nettrace.a
+++ b/pkg/linux_amd64_race/internal/nettrace.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/pprof/profile.a b/pkg/linux_amd64_race/internal/pprof/profile.a
index 6c1f32f..b313b96 100644
--- a/pkg/linux_amd64_race/internal/pprof/profile.a
+++ b/pkg/linux_amd64_race/internal/pprof/profile.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/race.a b/pkg/linux_amd64_race/internal/race.a
index b72e29b..597c3be 100644
--- a/pkg/linux_amd64_race/internal/race.a
+++ b/pkg/linux_amd64_race/internal/race.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/singleflight.a b/pkg/linux_amd64_race/internal/singleflight.a
index d20d90d..0764ed4 100644
--- a/pkg/linux_amd64_race/internal/singleflight.a
+++ b/pkg/linux_amd64_race/internal/singleflight.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/syscall/unix.a b/pkg/linux_amd64_race/internal/syscall/unix.a
index 128a554..f980f78 100644
--- a/pkg/linux_amd64_race/internal/syscall/unix.a
+++ b/pkg/linux_amd64_race/internal/syscall/unix.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/syscall/windows.a b/pkg/linux_amd64_race/internal/syscall/windows.a
index f54aba9..3b2a970 100644
--- a/pkg/linux_amd64_race/internal/syscall/windows.a
+++ b/pkg/linux_amd64_race/internal/syscall/windows.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/syscall/windows/registry.a b/pkg/linux_amd64_race/internal/syscall/windows/registry.a
index 0d3ef32..8148770 100644
--- a/pkg/linux_amd64_race/internal/syscall/windows/registry.a
+++ b/pkg/linux_amd64_race/internal/syscall/windows/registry.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/syscall/windows/sysdll.a b/pkg/linux_amd64_race/internal/syscall/windows/sysdll.a
index 4780a5e..1811f6b 100644
--- a/pkg/linux_amd64_race/internal/syscall/windows/sysdll.a
+++ b/pkg/linux_amd64_race/internal/syscall/windows/sysdll.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/testenv.a b/pkg/linux_amd64_race/internal/testenv.a
index ea0cea5..77c1a22 100644
--- a/pkg/linux_amd64_race/internal/testenv.a
+++ b/pkg/linux_amd64_race/internal/testenv.a
Binary files differ
diff --git a/pkg/linux_amd64_race/internal/trace.a b/pkg/linux_amd64_race/internal/trace.a
index 291539e..75c4b91 100644
--- a/pkg/linux_amd64_race/internal/trace.a
+++ b/pkg/linux_amd64_race/internal/trace.a
Binary files differ
diff --git a/pkg/linux_amd64_race/io.a b/pkg/linux_amd64_race/io.a
index 46f3281..973cb1e 100644
--- a/pkg/linux_amd64_race/io.a
+++ b/pkg/linux_amd64_race/io.a
Binary files differ
diff --git a/pkg/linux_amd64_race/io/ioutil.a b/pkg/linux_amd64_race/io/ioutil.a
index ba0e806..4037c2b 100644
--- a/pkg/linux_amd64_race/io/ioutil.a
+++ b/pkg/linux_amd64_race/io/ioutil.a
Binary files differ
diff --git a/pkg/linux_amd64_race/log.a b/pkg/linux_amd64_race/log.a
index 30b2dcd..e759472 100644
--- a/pkg/linux_amd64_race/log.a
+++ b/pkg/linux_amd64_race/log.a
Binary files differ
diff --git a/pkg/linux_amd64_race/log/syslog.a b/pkg/linux_amd64_race/log/syslog.a
index 60215f8..2c86f52 100644
--- a/pkg/linux_amd64_race/log/syslog.a
+++ b/pkg/linux_amd64_race/log/syslog.a
Binary files differ
diff --git a/pkg/linux_amd64_race/math.a b/pkg/linux_amd64_race/math.a
index b37be7c..111060d 100644
--- a/pkg/linux_amd64_race/math.a
+++ b/pkg/linux_amd64_race/math.a
Binary files differ
diff --git a/pkg/linux_amd64_race/math/big.a b/pkg/linux_amd64_race/math/big.a
index f67c41a..97c1ba6 100644
--- a/pkg/linux_amd64_race/math/big.a
+++ b/pkg/linux_amd64_race/math/big.a
Binary files differ
diff --git a/pkg/linux_amd64_race/math/cmplx.a b/pkg/linux_amd64_race/math/cmplx.a
index cbf46f9..f0e541b 100644
--- a/pkg/linux_amd64_race/math/cmplx.a
+++ b/pkg/linux_amd64_race/math/cmplx.a
Binary files differ
diff --git a/pkg/linux_amd64_race/math/rand.a b/pkg/linux_amd64_race/math/rand.a
index 9deb43a..787d45a 100644
--- a/pkg/linux_amd64_race/math/rand.a
+++ b/pkg/linux_amd64_race/math/rand.a
Binary files differ
diff --git a/pkg/linux_amd64_race/mime.a b/pkg/linux_amd64_race/mime.a
index 916c268..9d5d1c9 100644
--- a/pkg/linux_amd64_race/mime.a
+++ b/pkg/linux_amd64_race/mime.a
Binary files differ
diff --git a/pkg/linux_amd64_race/mime/multipart.a b/pkg/linux_amd64_race/mime/multipart.a
index bec9939..e3eea3e 100644
--- a/pkg/linux_amd64_race/mime/multipart.a
+++ b/pkg/linux_amd64_race/mime/multipart.a
Binary files differ
diff --git a/pkg/linux_amd64_race/mime/quotedprintable.a b/pkg/linux_amd64_race/mime/quotedprintable.a
index bba735b..0047148 100644
--- a/pkg/linux_amd64_race/mime/quotedprintable.a
+++ b/pkg/linux_amd64_race/mime/quotedprintable.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net.a b/pkg/linux_amd64_race/net.a
index 11b9669..d2e00be 100644
--- a/pkg/linux_amd64_race/net.a
+++ b/pkg/linux_amd64_race/net.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http.a b/pkg/linux_amd64_race/net/http.a
index 4d2a4ff..3182861 100644
--- a/pkg/linux_amd64_race/net/http.a
+++ b/pkg/linux_amd64_race/net/http.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/cgi.a b/pkg/linux_amd64_race/net/http/cgi.a
index 6025168..56250a0 100644
--- a/pkg/linux_amd64_race/net/http/cgi.a
+++ b/pkg/linux_amd64_race/net/http/cgi.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/cookiejar.a b/pkg/linux_amd64_race/net/http/cookiejar.a
index 96b2412..b29190e 100644
--- a/pkg/linux_amd64_race/net/http/cookiejar.a
+++ b/pkg/linux_amd64_race/net/http/cookiejar.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/fcgi.a b/pkg/linux_amd64_race/net/http/fcgi.a
index d3b3c1d..eb77fc3 100644
--- a/pkg/linux_amd64_race/net/http/fcgi.a
+++ b/pkg/linux_amd64_race/net/http/fcgi.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/httptest.a b/pkg/linux_amd64_race/net/http/httptest.a
index c5b216d..bfed2ed 100644
--- a/pkg/linux_amd64_race/net/http/httptest.a
+++ b/pkg/linux_amd64_race/net/http/httptest.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/httptrace.a b/pkg/linux_amd64_race/net/http/httptrace.a
index 8dc833f..de025ac 100644
--- a/pkg/linux_amd64_race/net/http/httptrace.a
+++ b/pkg/linux_amd64_race/net/http/httptrace.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/httputil.a b/pkg/linux_amd64_race/net/http/httputil.a
index f3d92ae..b01c962 100644
--- a/pkg/linux_amd64_race/net/http/httputil.a
+++ b/pkg/linux_amd64_race/net/http/httputil.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/internal.a b/pkg/linux_amd64_race/net/http/internal.a
index 719ed41..c8fe444 100644
--- a/pkg/linux_amd64_race/net/http/internal.a
+++ b/pkg/linux_amd64_race/net/http/internal.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/http/pprof.a b/pkg/linux_amd64_race/net/http/pprof.a
index f3ecf2a..f47e973 100644
--- a/pkg/linux_amd64_race/net/http/pprof.a
+++ b/pkg/linux_amd64_race/net/http/pprof.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/internal/socktest.a b/pkg/linux_amd64_race/net/internal/socktest.a
index 8227864..ae57212 100644
--- a/pkg/linux_amd64_race/net/internal/socktest.a
+++ b/pkg/linux_amd64_race/net/internal/socktest.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/mail.a b/pkg/linux_amd64_race/net/mail.a
index dd6315d..8b0487f 100644
--- a/pkg/linux_amd64_race/net/mail.a
+++ b/pkg/linux_amd64_race/net/mail.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/rpc.a b/pkg/linux_amd64_race/net/rpc.a
index 93662d0..64a4e8e 100644
--- a/pkg/linux_amd64_race/net/rpc.a
+++ b/pkg/linux_amd64_race/net/rpc.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/rpc/jsonrpc.a b/pkg/linux_amd64_race/net/rpc/jsonrpc.a
index 88dda76..f06377b 100644
--- a/pkg/linux_amd64_race/net/rpc/jsonrpc.a
+++ b/pkg/linux_amd64_race/net/rpc/jsonrpc.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/smtp.a b/pkg/linux_amd64_race/net/smtp.a
index 547b063..ea30d8e 100644
--- a/pkg/linux_amd64_race/net/smtp.a
+++ b/pkg/linux_amd64_race/net/smtp.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/textproto.a b/pkg/linux_amd64_race/net/textproto.a
index ec8ec13..5fe0c39 100644
--- a/pkg/linux_amd64_race/net/textproto.a
+++ b/pkg/linux_amd64_race/net/textproto.a
Binary files differ
diff --git a/pkg/linux_amd64_race/net/url.a b/pkg/linux_amd64_race/net/url.a
index 7cd1860..e967dd9 100644
--- a/pkg/linux_amd64_race/net/url.a
+++ b/pkg/linux_amd64_race/net/url.a
Binary files differ
diff --git a/pkg/linux_amd64_race/os.a b/pkg/linux_amd64_race/os.a
index ce949cb..b439ef4 100644
--- a/pkg/linux_amd64_race/os.a
+++ b/pkg/linux_amd64_race/os.a
Binary files differ
diff --git a/pkg/linux_amd64_race/os/exec.a b/pkg/linux_amd64_race/os/exec.a
index 84ba830..1235254 100644
--- a/pkg/linux_amd64_race/os/exec.a
+++ b/pkg/linux_amd64_race/os/exec.a
Binary files differ
diff --git a/pkg/linux_amd64_race/os/signal.a b/pkg/linux_amd64_race/os/signal.a
index d647522..4d132db 100644
--- a/pkg/linux_amd64_race/os/signal.a
+++ b/pkg/linux_amd64_race/os/signal.a
Binary files differ
diff --git a/pkg/linux_amd64_race/os/user.a b/pkg/linux_amd64_race/os/user.a
index 67022e0..a6c4a90 100644
--- a/pkg/linux_amd64_race/os/user.a
+++ b/pkg/linux_amd64_race/os/user.a
Binary files differ
diff --git a/pkg/linux_amd64_race/path.a b/pkg/linux_amd64_race/path.a
index b94c54e..f8994c6 100644
--- a/pkg/linux_amd64_race/path.a
+++ b/pkg/linux_amd64_race/path.a
Binary files differ
diff --git a/pkg/linux_amd64_race/path/filepath.a b/pkg/linux_amd64_race/path/filepath.a
index 38a838d..e009e08 100644
--- a/pkg/linux_amd64_race/path/filepath.a
+++ b/pkg/linux_amd64_race/path/filepath.a
Binary files differ
diff --git a/pkg/linux_amd64_race/plugin.a b/pkg/linux_amd64_race/plugin.a
index 933cc29..1a8b819 100644
--- a/pkg/linux_amd64_race/plugin.a
+++ b/pkg/linux_amd64_race/plugin.a
Binary files differ
diff --git a/pkg/linux_amd64_race/reflect.a b/pkg/linux_amd64_race/reflect.a
index 530619c..1c3a2ee 100644
--- a/pkg/linux_amd64_race/reflect.a
+++ b/pkg/linux_amd64_race/reflect.a
Binary files differ
diff --git a/pkg/linux_amd64_race/regexp.a b/pkg/linux_amd64_race/regexp.a
index cc949af..cfbfda0 100644
--- a/pkg/linux_amd64_race/regexp.a
+++ b/pkg/linux_amd64_race/regexp.a
Binary files differ
diff --git a/pkg/linux_amd64_race/regexp/syntax.a b/pkg/linux_amd64_race/regexp/syntax.a
index 8cff6ac..031ea67 100644
--- a/pkg/linux_amd64_race/regexp/syntax.a
+++ b/pkg/linux_amd64_race/regexp/syntax.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime.a b/pkg/linux_amd64_race/runtime.a
index da08a68..cc8da5f 100644
--- a/pkg/linux_amd64_race/runtime.a
+++ b/pkg/linux_amd64_race/runtime.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/cgo.a b/pkg/linux_amd64_race/runtime/cgo.a
index 9bea4c3..2902de7 100644
--- a/pkg/linux_amd64_race/runtime/cgo.a
+++ b/pkg/linux_amd64_race/runtime/cgo.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/debug.a b/pkg/linux_amd64_race/runtime/debug.a
index 5a2e8d0..a9d6bd1 100644
--- a/pkg/linux_amd64_race/runtime/debug.a
+++ b/pkg/linux_amd64_race/runtime/debug.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/internal/atomic.a b/pkg/linux_amd64_race/runtime/internal/atomic.a
index 8cba27e..096dbe4 100644
--- a/pkg/linux_amd64_race/runtime/internal/atomic.a
+++ b/pkg/linux_amd64_race/runtime/internal/atomic.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/internal/sys.a b/pkg/linux_amd64_race/runtime/internal/sys.a
index 4d5dc63..e36d04f 100644
--- a/pkg/linux_amd64_race/runtime/internal/sys.a
+++ b/pkg/linux_amd64_race/runtime/internal/sys.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/pprof.a b/pkg/linux_amd64_race/runtime/pprof.a
index 722c35b..ef1f4a8 100644
--- a/pkg/linux_amd64_race/runtime/pprof.a
+++ b/pkg/linux_amd64_race/runtime/pprof.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/pprof/internal/protopprof.a b/pkg/linux_amd64_race/runtime/pprof/internal/protopprof.a
index aaf08c3..968d017 100644
--- a/pkg/linux_amd64_race/runtime/pprof/internal/protopprof.a
+++ b/pkg/linux_amd64_race/runtime/pprof/internal/protopprof.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/race.a b/pkg/linux_amd64_race/runtime/race.a
index b39edef..8eaab2c 100644
--- a/pkg/linux_amd64_race/runtime/race.a
+++ b/pkg/linux_amd64_race/runtime/race.a
Binary files differ
diff --git a/pkg/linux_amd64_race/runtime/trace.a b/pkg/linux_amd64_race/runtime/trace.a
index d31bcde..53954bd 100644
--- a/pkg/linux_amd64_race/runtime/trace.a
+++ b/pkg/linux_amd64_race/runtime/trace.a
Binary files differ
diff --git a/pkg/linux_amd64_race/sort.a b/pkg/linux_amd64_race/sort.a
index c291d05..6eb4061 100644
--- a/pkg/linux_amd64_race/sort.a
+++ b/pkg/linux_amd64_race/sort.a
Binary files differ
diff --git a/pkg/linux_amd64_race/strconv.a b/pkg/linux_amd64_race/strconv.a
index 433f8b3..d236b07 100644
--- a/pkg/linux_amd64_race/strconv.a
+++ b/pkg/linux_amd64_race/strconv.a
Binary files differ
diff --git a/pkg/linux_amd64_race/strings.a b/pkg/linux_amd64_race/strings.a
index 7ec223c..20578eb 100644
--- a/pkg/linux_amd64_race/strings.a
+++ b/pkg/linux_amd64_race/strings.a
Binary files differ
diff --git a/pkg/linux_amd64_race/sync.a b/pkg/linux_amd64_race/sync.a
index dcaac6b..04850c8 100644
--- a/pkg/linux_amd64_race/sync.a
+++ b/pkg/linux_amd64_race/sync.a
Binary files differ
diff --git a/pkg/linux_amd64_race/sync/atomic.a b/pkg/linux_amd64_race/sync/atomic.a
index d332b5d..d0a975c 100644
--- a/pkg/linux_amd64_race/sync/atomic.a
+++ b/pkg/linux_amd64_race/sync/atomic.a
Binary files differ
diff --git a/pkg/linux_amd64_race/syscall.a b/pkg/linux_amd64_race/syscall.a
index ee98687..2824af9 100644
--- a/pkg/linux_amd64_race/syscall.a
+++ b/pkg/linux_amd64_race/syscall.a
Binary files differ
diff --git a/pkg/linux_amd64_race/testing.a b/pkg/linux_amd64_race/testing.a
index c204846..95f5365 100644
--- a/pkg/linux_amd64_race/testing.a
+++ b/pkg/linux_amd64_race/testing.a
Binary files differ
diff --git a/pkg/linux_amd64_race/testing/internal/testdeps.a b/pkg/linux_amd64_race/testing/internal/testdeps.a
index f5391c0..c25c1bc 100644
--- a/pkg/linux_amd64_race/testing/internal/testdeps.a
+++ b/pkg/linux_amd64_race/testing/internal/testdeps.a
Binary files differ
diff --git a/pkg/linux_amd64_race/testing/iotest.a b/pkg/linux_amd64_race/testing/iotest.a
index 71d5aab..a06aeff 100644
--- a/pkg/linux_amd64_race/testing/iotest.a
+++ b/pkg/linux_amd64_race/testing/iotest.a
Binary files differ
diff --git a/pkg/linux_amd64_race/testing/quick.a b/pkg/linux_amd64_race/testing/quick.a
index 18fd4b7..3a30bb7 100644
--- a/pkg/linux_amd64_race/testing/quick.a
+++ b/pkg/linux_amd64_race/testing/quick.a
Binary files differ
diff --git a/pkg/linux_amd64_race/text/scanner.a b/pkg/linux_amd64_race/text/scanner.a
index a972bd3..9843b39 100644
--- a/pkg/linux_amd64_race/text/scanner.a
+++ b/pkg/linux_amd64_race/text/scanner.a
Binary files differ
diff --git a/pkg/linux_amd64_race/text/tabwriter.a b/pkg/linux_amd64_race/text/tabwriter.a
index b20b0d6..ada4600 100644
--- a/pkg/linux_amd64_race/text/tabwriter.a
+++ b/pkg/linux_amd64_race/text/tabwriter.a
Binary files differ
diff --git a/pkg/linux_amd64_race/text/template.a b/pkg/linux_amd64_race/text/template.a
index 3af949c..3a0fd68 100644
--- a/pkg/linux_amd64_race/text/template.a
+++ b/pkg/linux_amd64_race/text/template.a
Binary files differ
diff --git a/pkg/linux_amd64_race/text/template/parse.a b/pkg/linux_amd64_race/text/template/parse.a
index fdb6a34..20784ec 100644
--- a/pkg/linux_amd64_race/text/template/parse.a
+++ b/pkg/linux_amd64_race/text/template/parse.a
Binary files differ
diff --git a/pkg/linux_amd64_race/time.a b/pkg/linux_amd64_race/time.a
index 579e86a..eb03081 100644
--- a/pkg/linux_amd64_race/time.a
+++ b/pkg/linux_amd64_race/time.a
Binary files differ
diff --git a/pkg/linux_amd64_race/unicode.a b/pkg/linux_amd64_race/unicode.a
index 98aa18e..0d9f5a0 100644
--- a/pkg/linux_amd64_race/unicode.a
+++ b/pkg/linux_amd64_race/unicode.a
Binary files differ
diff --git a/pkg/linux_amd64_race/unicode/utf16.a b/pkg/linux_amd64_race/unicode/utf16.a
index 2437467..6a029af 100644
--- a/pkg/linux_amd64_race/unicode/utf16.a
+++ b/pkg/linux_amd64_race/unicode/utf16.a
Binary files differ
diff --git a/pkg/linux_amd64_race/unicode/utf8.a b/pkg/linux_amd64_race/unicode/utf8.a
index c14deda..d7316ae 100644
--- a/pkg/linux_amd64_race/unicode/utf8.a
+++ b/pkg/linux_amd64_race/unicode/utf8.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305.a b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305.a
index 524d587..62e9429 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a
index a7acb0f..fc98d2e 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/chacha20poly1305/internal/chacha20.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/curve25519.a b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/curve25519.a
index ef0f815..5e1e67b 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/curve25519.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/curve25519.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/poly1305.a b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/poly1305.a
index 6ffcfc0..c5f2625 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/crypto/poly1305.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/crypto/poly1305.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/net/http2/hpack.a b/pkg/linux_amd64_race/vendor/golang_org/x/net/http2/hpack.a
index ecfb57e..bf21d6d 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/net/http2/hpack.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/net/http2/hpack.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/net/idna.a b/pkg/linux_amd64_race/vendor/golang_org/x/net/idna.a
index 9c2a00f..a7bc3d6 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/net/idna.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/net/idna.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/net/lex/httplex.a b/pkg/linux_amd64_race/vendor/golang_org/x/net/lex/httplex.a
index d13711f..a56bc05 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/net/lex/httplex.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/net/lex/httplex.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/text/transform.a b/pkg/linux_amd64_race/vendor/golang_org/x/text/transform.a
index 81d0bb6..672cfbb 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/text/transform.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/text/transform.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/text/unicode/norm.a b/pkg/linux_amd64_race/vendor/golang_org/x/text/unicode/norm.a
index 875c921..c239b9d 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/text/unicode/norm.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/text/unicode/norm.a
Binary files differ
diff --git a/pkg/linux_amd64_race/vendor/golang_org/x/text/width.a b/pkg/linux_amd64_race/vendor/golang_org/x/text/width.a
index db16ffd..c732066 100644
--- a/pkg/linux_amd64_race/vendor/golang_org/x/text/width.a
+++ b/pkg/linux_amd64_race/vendor/golang_org/x/text/width.a
Binary files differ
diff --git a/pkg/tool/linux_amd64/addr2line b/pkg/tool/linux_amd64/addr2line
index 97b8515..b4f8cb2 100755
--- a/pkg/tool/linux_amd64/addr2line
+++ b/pkg/tool/linux_amd64/addr2line
Binary files differ
diff --git a/pkg/tool/linux_amd64/api b/pkg/tool/linux_amd64/api
index a752f7f..2e8408f 100755
--- a/pkg/tool/linux_amd64/api
+++ b/pkg/tool/linux_amd64/api
Binary files differ
diff --git a/pkg/tool/linux_amd64/asm b/pkg/tool/linux_amd64/asm
index 6293e17..a0286e7 100755
--- a/pkg/tool/linux_amd64/asm
+++ b/pkg/tool/linux_amd64/asm
Binary files differ
diff --git a/pkg/tool/linux_amd64/cgo b/pkg/tool/linux_amd64/cgo
index 412faed..ade371d 100755
--- a/pkg/tool/linux_amd64/cgo
+++ b/pkg/tool/linux_amd64/cgo
Binary files differ
diff --git a/pkg/tool/linux_amd64/compile b/pkg/tool/linux_amd64/compile
index f51deaa..0881db0 100755
--- a/pkg/tool/linux_amd64/compile
+++ b/pkg/tool/linux_amd64/compile
Binary files differ
diff --git a/pkg/tool/linux_amd64/cover b/pkg/tool/linux_amd64/cover
index 08270d8..df9394c 100755
--- a/pkg/tool/linux_amd64/cover
+++ b/pkg/tool/linux_amd64/cover
Binary files differ
diff --git a/pkg/tool/linux_amd64/dist b/pkg/tool/linux_amd64/dist
index cde9b82..cfed181 100755
--- a/pkg/tool/linux_amd64/dist
+++ b/pkg/tool/linux_amd64/dist
Binary files differ
diff --git a/pkg/tool/linux_amd64/doc b/pkg/tool/linux_amd64/doc
index f1176ce..fec72a8 100755
--- a/pkg/tool/linux_amd64/doc
+++ b/pkg/tool/linux_amd64/doc
Binary files differ
diff --git a/pkg/tool/linux_amd64/fix b/pkg/tool/linux_amd64/fix
index 53efe54..9182750 100755
--- a/pkg/tool/linux_amd64/fix
+++ b/pkg/tool/linux_amd64/fix
Binary files differ
diff --git a/pkg/tool/linux_amd64/link b/pkg/tool/linux_amd64/link
index 28f93ca..b8e490d 100755
--- a/pkg/tool/linux_amd64/link
+++ b/pkg/tool/linux_amd64/link
Binary files differ
diff --git a/pkg/tool/linux_amd64/nm b/pkg/tool/linux_amd64/nm
index 034834a..f19902d 100755
--- a/pkg/tool/linux_amd64/nm
+++ b/pkg/tool/linux_amd64/nm
Binary files differ
diff --git a/pkg/tool/linux_amd64/objdump b/pkg/tool/linux_amd64/objdump
index fb5cdff..f089739 100755
--- a/pkg/tool/linux_amd64/objdump
+++ b/pkg/tool/linux_amd64/objdump
Binary files differ
diff --git a/pkg/tool/linux_amd64/pack b/pkg/tool/linux_amd64/pack
index 6fb5685..ab5b4ee 100755
--- a/pkg/tool/linux_amd64/pack
+++ b/pkg/tool/linux_amd64/pack
Binary files differ
diff --git a/pkg/tool/linux_amd64/pprof b/pkg/tool/linux_amd64/pprof
index 957e90f..21da25f 100755
--- a/pkg/tool/linux_amd64/pprof
+++ b/pkg/tool/linux_amd64/pprof
Binary files differ
diff --git a/pkg/tool/linux_amd64/trace b/pkg/tool/linux_amd64/trace
index ccd5d28..b540a0f 100755
--- a/pkg/tool/linux_amd64/trace
+++ b/pkg/tool/linux_amd64/trace
Binary files differ
diff --git a/pkg/tool/linux_amd64/vet b/pkg/tool/linux_amd64/vet
index d54e768..9dedb37 100755
--- a/pkg/tool/linux_amd64/vet
+++ b/pkg/tool/linux_amd64/vet
Binary files differ
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index 6b3c426..416e2b2 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -585,7 +585,7 @@
 }
 
 func (n *Node) isSimpleName() bool {
-	return n.Op == ONAME && n.Addable && n.Class != PAUTOHEAP
+	return n.Op == ONAME && n.Addable && n.Class != PAUTOHEAP && n.Class != PEXTERN
 }
 
 func litas(l *Node, r *Node, init *Nodes) {
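
The hunk above narrows isSimpleName: a node now qualifies only when it is an addressable ONAME that is neither heap-allocated (PAUTOHEAP) nor a package-level variable (PEXTERN). Excluding globals keeps the static-initialization pass from treating a package-level variable as a constant-like operand whose value is already final. A minimal sketch, not taken from this CL, of the package-level initialization-order rules that make such an assumption unsafe:

package main

import "fmt"

// Package-level variables are initialized in dependency order, not source
// order, so the value of a global is not in general known until other
// initializers have run; the compiler must not bake in a premature copy.
var a = b + 1
var b = 10

func main() {
	fmt.Println(a, b) // 11 10
}
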
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
index bb5cede..c62bd00 100644
--- a/src/cmd/compile/internal/gc/util.go
+++ b/src/cmd/compile/internal/gc/util.go
@@ -57,8 +57,13 @@
 			Fatalf("%v", err)
 		}
 		atExit(func() {
-			runtime.GC() // profile all outstanding allocations
-			if err := pprof.WriteHeapProfile(f); err != nil {
+			// Profile all outstanding allocations.
+			runtime.GC()
+			// compilebench parses the memory profile to extract memstats,
+			// which are only written in the legacy pprof format.
+			// See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
+			const writeLegacyFormat = 1
+			if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
 				Fatalf("%v", err)
 			}
 		})
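
With this change the compiler's -memprofile exit hook forces a GC and then writes the heap profile at debug level 1, the legacy text format, instead of calling pprof.WriteHeapProfile (which is shorthand for Lookup("heap").WriteTo(f, 0) and, as of go1.8, emits the compressed proto format without the MemStats block that compilebench scrapes). A hedged, standalone sketch of the same call pattern outside the compiler:

package main

import (
	"log"
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("mem.prof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Profile all outstanding allocations before writing the profile.
	runtime.GC()
	// debug=1 selects the legacy human-readable format, which appends
	// runtime.MemStats fields that downstream tools can parse.
	if err := pprof.Lookup("heap").WriteTo(f, 1); err != nil {
		log.Fatal(err)
	}
}
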
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index e2d3c28..a9dafa8 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -424,7 +424,7 @@
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = v.Args[0].Reg()
 		gc.AddAux2(&p.To, v, sc.Off())
-	case ssa.OpCopy, ssa.OpS390XMOVDconvert:
+	case ssa.OpCopy, ssa.OpS390XMOVDconvert, ssa.OpS390XMOVDreg:
 		if v.Type.IsMemory() {
 			return
 		}
@@ -433,6 +433,11 @@
 		if x != y {
 			opregreg(moveByType(v.Type), y, x)
 		}
+	case ssa.OpS390XMOVDnop:
+		if v.Reg() != v.Args[0].Reg() {
+			v.Fatalf("input[0] and output not in same register %s", v.LongString())
+		}
+		// nothing to do
 	case ssa.OpLoadReg:
 		if v.Type.IsFlags() {
 			v.Fatalf("load flags not implemented: %v", v.LongString())
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index c36b6f7..940c060 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -529,109 +529,147 @@
 // can be encoded in the instructions
 // since this rewriting takes place before stack allocation, the offset to SP is unknown,
 // so don't do it for args and locals with unaligned offset
-(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBload [off1+off2] {sym} ptr mem)
-(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+	(MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+	(MOVBUload [off1+off2] {sym} ptr mem)
 (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVHload [off1+off2] {sym} ptr mem)
 (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVHUload [off1+off2] {sym} ptr mem)
 (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVWload [off1+off2] {sym} ptr mem)
 (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVWUload [off1+off2] {sym} ptr mem)
 (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVDload [off1+off2] {sym} ptr mem)
 (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(FMOVSload [off1+off2] {sym} ptr mem)
 (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(FMOVDload [off1+off2] {sym} ptr mem)
 
-(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) -> (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) ->
+	(MOVBstore [off1+off2] {sym} ptr val mem)
 (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVHstore [off1+off2] {sym} ptr val mem)
 (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVWstore [off1+off2] {sym} ptr val mem)
 (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVDstore [off1+off2] {sym} ptr val mem)
 (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(FMOVSstore [off1+off2] {sym} ptr val mem)
 (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	&& (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(FMOVDstore [off1+off2] {sym} ptr val mem)
-(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+	(MOVBstorezero [off1+off2] {sym} ptr mem)
 (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVHstorezero [off1+off2] {sym} ptr mem)
 (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVWstorezero [off1+off2] {sym} ptr mem)
 (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	&& (off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym) ->
+	&& is32Bit(off1+off2)
+	&& ((off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)) ->
 	(MOVDstorezero [off1+off2] {sym} ptr mem)
 
-(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2) ->
 	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2) ->
 	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 
-(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2) ->
 	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
-(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2) ->
 	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+	&& is32Bit(off1+off2)
 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1)) ->
 	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index 0e0f1f9..cad753e 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -338,9 +338,9 @@
 (Geq32F x y) -> (FGreaterEqual (FCMPU x y))
 (Geq64F x y) -> (FGreaterEqual (FCMPU x y))
 
-(Geq8U x y)  -> (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
-(Geq16U x y) -> (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
-(Geq32U x y) -> (GreaterEqual (CMPU x y))
+(Geq8U x y)  -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Geq32U x y) -> (GreaterEqual (CMPWU x y))
 (Geq64U x y) -> (GreaterEqual (CMPU x y))
 
 // Absorb pseudo-ops into blocks.
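
The PPC64 lowering now compares unsigned 8-, 16- and 32-bit operands with CMPWU, the 32-bit unsigned compare, and keeps the 64-bit CMPU only for Geq64U. A full-width compare is safe only when the upper halves of both registers are known to be clean, which is not guaranteed in general for values that are only meaningful in their low 32 bits. A hedged illustration, not taken from the CL, of how stale high bits could flip an unsigned ordering:

package main

import "fmt"

func main() {
	var x, y uint32 = 0x80000000, 1
	// The 32-bit view that CMPWU implements: x >= y.
	fmt.Println(x >= y) // true
	// If the full 64-bit registers happened to carry stale high bits, a
	// 64-bit unsigned compare could order the same values the other way.
	rx, ry := uint64(0x0000000080000000), uint64(0xFFFFFFFF00000001)
	fmt.Println(rx >= ry) // false
}
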
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
index 3e0533a..c26515c 100644
--- a/src/cmd/compile/internal/ssa/gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -312,9 +312,12 @@
 
 // Lowering loads
 (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) -> (MOVWZload ptr mem)
-(Load <t> ptr mem) && is16BitInt(t) -> (MOVHZload ptr mem)
-(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBZload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) -> (MOVBZload ptr mem)
 (Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
 (Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
 
@@ -445,16 +448,20 @@
 // ***************************
 // TODO: Should the optimizations be a separate pass?
 
+// if a register move has only 1 use, just use the same register without emitting instruction
+// MOVDnop doesn't emit instruction, only for ensuring the type.
+(MOVDreg x) && x.Uses == 1 -> (MOVDnop x)
+
 // Fold sign extensions into conditional moves of constants.
 // Designed to remove the MOVBZreg inserted by the If lowering.
-(MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDLE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDGT (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDGE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDEQ (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDNE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDGTnoinv (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
-(MOVBZreg x:(MOVDGEnoinv (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> x
+(MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDLE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDGT (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDGE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDEQ (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDNE (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDGTnoinv (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
+(MOVBZreg x:(MOVDGEnoinv (MOVDconst [c]) (MOVDconst [d]) _)) && int64(uint8(c)) == c && int64(uint8(d)) == d -> (MOVDreg x)
 
 // Fold boolean tests into blocks.
 (NE (CMPWconst [0] (MOVDLT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) -> (LT cmp yes no)
@@ -572,46 +579,46 @@
 (MOVDNE x y (InvertFlags cmp)) -> (MOVDNE x y cmp)
 
 // don't extend after proper load
-(MOVBreg x:(MOVBload _ _)) -> x
-(MOVBZreg x:(MOVBZload _ _)) -> x
-(MOVHreg x:(MOVBload _ _)) -> x
-(MOVHreg x:(MOVBZload _ _)) -> x
-(MOVHreg x:(MOVHload _ _)) -> x
-(MOVHZreg x:(MOVBZload _ _)) -> x
-(MOVHZreg x:(MOVHZload _ _)) -> x
-(MOVWreg x:(MOVBload _ _)) -> x
-(MOVWreg x:(MOVBZload _ _)) -> x
-(MOVWreg x:(MOVHload _ _)) -> x
-(MOVWreg x:(MOVHZload _ _)) -> x
-(MOVWreg x:(MOVWload _ _)) -> x
-(MOVWZreg x:(MOVBZload _ _)) -> x
-(MOVWZreg x:(MOVHZload _ _)) -> x
-(MOVWZreg x:(MOVWZload _ _)) -> x
+(MOVBreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVBZreg x:(MOVBZload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBZload _ _)) -> (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) -> (MOVDreg x)
+(MOVHZreg x:(MOVBZload _ _)) -> (MOVDreg x)
+(MOVHZreg x:(MOVHZload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBZload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHZload _ _)) -> (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) -> (MOVDreg x)
+(MOVWZreg x:(MOVBZload _ _)) -> (MOVDreg x)
+(MOVWZreg x:(MOVHZload _ _)) -> (MOVDreg x)
+(MOVWZreg x:(MOVWZload _ _)) -> (MOVDreg x)
 
 // don't extend if argument is already extended
-(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
-(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
-(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
-(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
-(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
-(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x
+(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> (MOVDreg x)
+(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> (MOVDreg x)
+(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> (MOVDreg x)
+(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> (MOVDreg x)
+(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> (MOVDreg x)
+(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> (MOVDreg x)
 
 // fold double extensions
-(MOVBreg x:(MOVBreg _)) -> x
-(MOVBZreg x:(MOVBZreg _)) -> x
-(MOVHreg x:(MOVBreg _)) -> x
-(MOVHreg x:(MOVBZreg _)) -> x
-(MOVHreg x:(MOVHreg _)) -> x
-(MOVHZreg x:(MOVBZreg _)) -> x
-(MOVHZreg x:(MOVHZreg _)) -> x
-(MOVWreg x:(MOVBreg _)) -> x
-(MOVWreg x:(MOVBZreg _)) -> x
-(MOVWreg x:(MOVHreg _)) -> x
-(MOVWreg x:(MOVHreg _)) -> x
-(MOVWreg x:(MOVWreg _)) -> x
-(MOVWZreg x:(MOVBZreg _)) -> x
-(MOVWZreg x:(MOVHZreg _)) -> x
-(MOVWZreg x:(MOVWZreg _)) -> x
+(MOVBreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVBZreg x:(MOVBZreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVBZreg _)) -> (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVHZreg x:(MOVBZreg _)) -> (MOVDreg x)
+(MOVHZreg x:(MOVHZreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVBZreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) -> (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) -> (MOVDreg x)
+(MOVWZreg x:(MOVBZreg _)) -> (MOVDreg x)
+(MOVWZreg x:(MOVHZreg _)) -> (MOVDreg x)
+(MOVWZreg x:(MOVWZreg _)) -> (MOVDreg x)
 
 // fold extensions into constants
 (MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
@@ -641,10 +648,10 @@
 (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx <v.Type> [off] {sym} ptr idx mem)
 
 // replace load from same location as preceding store with copy
-(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
-(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
-(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
-(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDreg x)
+(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDreg x)
+(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDreg x)
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDreg x)
 
 // Don't extend before storing
 (MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
@@ -885,9 +892,9 @@
 (MOVDEQ y _ (FlagLT)) -> y
 (MOVDEQ y _ (FlagGT)) -> y
 
-(MOVDNE _ y (FlagEQ)) -> y
-(MOVDNE x _ (FlagLT)) -> x
-(MOVDNE x _ (FlagGT)) -> x
+(MOVDNE y _ (FlagEQ)) -> y
+(MOVDNE _ x (FlagLT)) -> x
+(MOVDNE _ x (FlagGT)) -> x
 
 (MOVDLT y _ (FlagEQ)) -> y
 (MOVDLT _ x (FlagLT)) -> x
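
The S390X load-lowering rules above now pick sign- or zero-extending loads based on the signedness of the element type, and the extension-elision rules produce a typed (MOVDreg x) instead of x itself, which collapses to MOVDnop when the argument has a single use. The sign/zero distinction itself, as a standalone Go illustration:

    package main

    import "fmt"

    func main() {
        b := byte(0xFF)

        // What a zero-extending load (MOVBZload) produces when widened to 64 bits.
        fmt.Println(int64(uint8(b))) // 255

        // What a sign-extending load (MOVBload) produces for the same byte.
        fmt.Println(int64(int8(b))) // -1
    }
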
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index 7a25c26..29383f6 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -311,6 +311,9 @@
 		{name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64
 		{name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"},    // sign extend arg0 from int32 to int64
 		{name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64
+		{name: "MOVDreg", argLength: 1, reg: gp11sp, asm: "MOVD"},                  // move from arg0
+
+		{name: "MOVDnop", argLength: 1, reg: gp11, resultInArg0: true}, // nop, return arg0 in same register
 
 		{name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
 
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index 9f58db6..0a34cd1 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -82,7 +82,7 @@
 				}
 			}
 
-			// Next, process values in the block.
+			// Next, eliminate any redundant nil checks in this block.
 			i := 0
 			for _, v := range b.Values {
 				b.Values[i] = v
@@ -105,13 +105,10 @@
 							f.Config.Warnl(v.Line, "removed nil check")
 						}
 						v.reset(OpUnknown)
+						// TODO: f.freeValue(v)
 						i--
 						continue
 					}
-					// Record the fact that we know ptr is non nil, and remember to
-					// undo that information when this dominator subtree is done.
-					nonNilValues[ptr.ID] = true
-					work = append(work, bp{op: ClearPtr, ptr: ptr})
 				}
 			}
 			for j := i; j < len(b.Values); j++ {
@@ -119,6 +116,21 @@
 			}
 			b.Values = b.Values[:i]
 
+			// Finally, find redundant nil checks for subsequent blocks.
+			// Note that we can't add these until the loop above is done, as the
+			// values in the block are not ordered in any way when this pass runs.
+			// This was the cause of issue #18725.
+			for _, v := range b.Values {
+				if v.Op != OpNilCheck {
+					continue
+				}
+				ptr := v.Args[0]
+				// Record the fact that we know ptr is non nil, and remember to
+				// undo that information when this dominator subtree is done.
+				nonNilValues[ptr.ID] = true
+				work = append(work, bp{op: ClearPtr, ptr: ptr})
+			}
+
 			// Add all dominated blocks to the work list.
 			for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
 				work = append(work, bp{op: Work, block: w})
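
The nilcheck change splits one loop into two passes: redundant checks are removed first, and only afterwards are this block's own nil checks recorded as facts for dominated blocks, because values within a block are not ordered when the pass runs (issue #18725). A simplified, hypothetical sketch of that two-pass shape, with invented types:

    package main

    import "fmt"

    // Facts learned from a block's own nil checks must not be consulted while
    // filtering that same block, since its values are unordered at this point.
    type value struct {
        isNilCheck bool
        ptrID      int
    }

    func processBlock(values []value, nonNil map[int]bool) []value {
        // Pass 1: drop checks already proven redundant by dominating blocks.
        kept := values[:0]
        for _, v := range values {
            if v.isNilCheck && nonNil[v.ptrID] {
                continue // pointer already known non-nil
            }
            kept = append(kept, v)
        }
        // Pass 2: only now record this block's facts for the dominated subtree.
        for _, v := range kept {
            if v.isNilCheck {
                nonNil[v.ptrID] = true
            }
        }
        return kept
    }

    func main() {
        nonNil := map[int]bool{1: true} // ptr 1 proven non-nil by a dominator
        block := []value{{true, 1}, {true, 2}, {false, 0}}
        fmt.Println(processBlock(block, nonNil), nonNil)
    }
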
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index a63c5b9..9d11d03 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -1473,6 +1473,8 @@
 	OpS390XMOVHZreg
 	OpS390XMOVWreg
 	OpS390XMOVWZreg
+	OpS390XMOVDreg
+	OpS390XMOVDnop
 	OpS390XMOVDconst
 	OpS390XCFDBRA
 	OpS390XCGDBRA
@@ -18571,6 +18573,32 @@
 		},
 	},
 	{
+		name:   "MOVDreg",
+		argLen: 1,
+		asm:    s390x.AMOVD,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 54271}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP
+			},
+			outputs: []outputInfo{
+				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+			},
+		},
+	},
+	{
+		name:         "MOVDnop",
+		argLen:       1,
+		resultInArg0: true,
+		reg: regInfo{
+			inputs: []inputInfo{
+				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+			},
+			outputs: []outputInfo{
+				{0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+			},
+		},
+	},
+	{
 		name:              "MOVDconst",
 		auxType:           auxInt64,
 		argLen:            0,
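
The regInfo masks in the new opGen.go entries (21503 and 54271) are register bitmasks; the inline comments list the registers they permit. A quick decoder, assuming bit i maps to the i'th name in the s390x register list (an assumption; only the bits these masks actually set affect the output):

    package main

    import "fmt"

    func main() {
        names := []string{"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
            "R8", "R9", "R10", "R11", "R12", "R13", "R14", "SP"}
        for _, mask := range []uint32{21503, 54271} {
            var regs []string
            for i, name := range names {
                if mask&(1<<uint(i)) != 0 {
                    regs = append(regs, name)
                }
            }
            fmt.Println(mask, regs) // matches the inline comments above
        }
    }
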
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index dd5aa28..862c645 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -2625,7 +2625,7 @@
 	b := v.Block
 	_ = b
 	// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (FMOVDload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2637,7 +2637,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVDload)
@@ -2648,7 +2648,7 @@
 		return true
 	}
 	// match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2661,7 +2661,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64FMOVDload)
@@ -2677,7 +2677,7 @@
 	b := v.Block
 	_ = b
 	// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (FMOVDstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2690,7 +2690,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVDstore)
@@ -2702,7 +2702,7 @@
 		return true
 	}
 	// match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2716,7 +2716,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64FMOVDstore)
@@ -2733,7 +2733,7 @@
 	b := v.Block
 	_ = b
 	// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (FMOVSload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2745,7 +2745,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVSload)
@@ -2756,7 +2756,7 @@
 		return true
 	}
 	// match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -2769,7 +2769,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64FMOVSload)
@@ -2785,7 +2785,7 @@
 	b := v.Block
 	_ = b
 	// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2798,7 +2798,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64FMOVSstore)
@@ -2810,7 +2810,7 @@
 		return true
 	}
 	// match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -2824,7 +2824,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64FMOVSstore)
@@ -3511,7 +3511,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
+	// cond: is32Bit(off1+off2)
 	// result: (MOVBUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3523,6 +3523,9 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
 		v.reset(OpARM64MOVBUload)
 		v.AuxInt = off1 + off2
 		v.Aux = sym
@@ -3531,7 +3534,7 @@
 		return true
 	}
 	// match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2)
 	// result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3544,7 +3547,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
 			break
 		}
 		v.reset(OpARM64MOVBUload)
@@ -3623,7 +3626,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
+	// cond: is32Bit(off1+off2)
 	// result: (MOVBload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3635,6 +3638,9 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
 		v.reset(OpARM64MOVBload)
 		v.AuxInt = off1 + off2
 		v.Aux = sym
@@ -3643,7 +3649,7 @@
 		return true
 	}
 	// match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2)
 	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3656,7 +3662,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
 			break
 		}
 		v.reset(OpARM64MOVBload)
@@ -3735,7 +3741,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond:
+	// cond: is32Bit(off1+off2)
 	// result: (MOVBstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -3748,6 +3754,9 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
 		v.reset(OpARM64MOVBstore)
 		v.AuxInt = off1 + off2
 		v.Aux = sym
@@ -3757,7 +3766,7 @@
 		return true
 	}
 	// match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2)
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2)
 	// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -3771,7 +3780,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2)) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
 			break
 		}
 		v.reset(OpARM64MOVBstore)
@@ -3936,7 +3945,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond:
+	// cond: is32Bit(off1+off2)
 	// result: (MOVBstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3948,6 +3957,9 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
+		if !(is32Bit(off1 + off2)) {
+			break
+		}
 		v.reset(OpARM64MOVBstorezero)
 		v.AuxInt = off1 + off2
 		v.Aux = sym
@@ -3956,7 +3968,7 @@
 		return true
 	}
 	// match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2)
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2)
 	// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3969,7 +3981,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2)) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
 			break
 		}
 		v.reset(OpARM64MOVBstorezero)
@@ -3985,7 +3997,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVDload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -3997,7 +4009,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVDload)
@@ -4008,7 +4020,7 @@
 		return true
 	}
 	// match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4021,7 +4033,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVDload)
@@ -4088,7 +4100,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVDstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4101,7 +4113,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
@@ -4113,7 +4125,7 @@
 		return true
 	}
 	// match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4127,7 +4139,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVDstore)
@@ -4166,7 +4178,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==8 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVDstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4178,7 +4190,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%2 == 8 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
@@ -4189,7 +4201,7 @@
 		return true
 	}
 	// match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%8==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4202,7 +4214,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%8 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVDstorezero)
@@ -4218,7 +4230,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVHUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4230,7 +4242,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHUload)
@@ -4241,7 +4253,7 @@
 		return true
 	}
 	// match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4254,7 +4266,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVHUload)
@@ -4357,7 +4369,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVHload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4369,7 +4381,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHload)
@@ -4380,7 +4392,7 @@
 		return true
 	}
 	// match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4393,7 +4405,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVHload)
@@ -4520,7 +4532,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVHstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4533,7 +4545,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
@@ -4545,7 +4557,7 @@
 		return true
 	}
 	// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -4559,7 +4571,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVHstore)
@@ -4682,7 +4694,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVHstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4694,7 +4706,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
@@ -4705,7 +4717,7 @@
 		return true
 	}
 	// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%2==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4718,7 +4730,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%2 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVHstorezero)
@@ -4734,7 +4746,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVWUload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4746,7 +4758,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWUload)
@@ -4757,7 +4769,7 @@
 		return true
 	}
 	// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4770,7 +4782,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVWUload)
@@ -4897,7 +4909,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVWload [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4909,7 +4921,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWload)
@@ -4920,7 +4932,7 @@
 		return true
 	}
 	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -4933,7 +4945,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVWload)
@@ -5108,7 +5120,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -5121,7 +5133,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
@@ -5133,7 +5145,7 @@
 		return true
 	}
 	// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
 	for {
 		off1 := v.AuxInt
@@ -5147,7 +5159,7 @@
 		ptr := v_0.Args[0]
 		val := v.Args[1]
 		mem := v.Args[2]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVWstore)
@@ -5228,7 +5240,7 @@
 	b := v.Block
 	_ = b
 	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
-	// cond: (off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym)
+	// cond: is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym) && !isAuto(sym))
 	// result: (MOVWstorezero [off1+off2] {sym} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -5240,7 +5252,7 @@
 		off2 := v_0.AuxInt
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym)) {
+		if !(is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym) && !isAuto(sym))) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
@@ -5251,7 +5263,7 @@
 		return true
 	}
 	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
-	// cond: canMergeSym(sym1,sym2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
+	// cond: canMergeSym(sym1,sym2) 	&& is32Bit(off1+off2) 	&& ((off1+off2)%4==0 || off1+off2<256 && off1+off2>-256 && !isArg(sym1) && !isAuto(sym1))
 	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
 	for {
 		off1 := v.AuxInt
@@ -5264,7 +5276,7 @@
 		sym2 := v_0.Aux
 		ptr := v_0.Args[0]
 		mem := v.Args[1]
-		if !(canMergeSym(sym1, sym2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
+		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && ((off1+off2)%4 == 0 || off1+off2 < 256 && off1+off2 > -256 && !isArg(sym1) && !isAuto(sym1))) {
 			break
 		}
 		v.reset(OpARM64MOVWstorezero)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index 8c8373b..031459c 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -1543,12 +1543,12 @@
 	_ = b
 	// match: (Geq16U x y)
 	// cond:
-	// result: (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+	// result: (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
 		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
 		v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32())
 		v1.AddArg(x)
 		v0.AddArg(v1)
@@ -1598,12 +1598,12 @@
 	_ = b
 	// match: (Geq32U x y)
 	// cond:
-	// result: (GreaterEqual (CMPU x y))
+	// result: (GreaterEqual (CMPWU x y))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
 		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
 		v0.AddArg(x)
 		v0.AddArg(y)
 		v.AddArg(v0)
@@ -1687,12 +1687,12 @@
 	_ = b
 	// match: (Geq8U x y)
 	// cond:
-	// result: (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+	// result: (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
 	for {
 		x := v.Args[0]
 		y := v.Args[1]
 		v.reset(OpPPC64GreaterEqual)
-		v0 := b.NewValue0(v.Line, OpPPC64CMPU, TypeFlags)
+		v0 := b.NewValue0(v.Line, OpPPC64CMPWU, TypeFlags)
 		v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32())
 		v1.AddArg(x)
 		v0.AddArg(v1)
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index 7d023bc..0425ced 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -524,6 +524,8 @@
 		return rewriteValueS390X_OpS390XMOVDload(v, config)
 	case OpS390XMOVDloadidx:
 		return rewriteValueS390X_OpS390XMOVDloadidx(v, config)
+	case OpS390XMOVDreg:
+		return rewriteValueS390X_OpS390XMOVDreg(v, config)
 	case OpS390XMOVDstore:
 		return rewriteValueS390X_OpS390XMOVDstore(v, config)
 	case OpS390XMOVDstoreconst:
@@ -3236,13 +3238,28 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: is32BitInt(t)
+	// cond: is32BitInt(t) && isSigned(t)
+	// result: (MOVWload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is32BitInt(t) && isSigned(t)) {
+			break
+		}
+		v.reset(OpS390XMOVWload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: is32BitInt(t) && !isSigned(t)
 	// result: (MOVWZload ptr mem)
 	for {
 		t := v.Type
 		ptr := v.Args[0]
 		mem := v.Args[1]
-		if !(is32BitInt(t)) {
+		if !(is32BitInt(t) && !isSigned(t)) {
 			break
 		}
 		v.reset(OpS390XMOVWZload)
@@ -3251,13 +3268,28 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: is16BitInt(t)
+	// cond: is16BitInt(t) && isSigned(t)
+	// result: (MOVHload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is16BitInt(t) && isSigned(t)) {
+			break
+		}
+		v.reset(OpS390XMOVHload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: is16BitInt(t) && !isSigned(t)
 	// result: (MOVHZload ptr mem)
 	for {
 		t := v.Type
 		ptr := v.Args[0]
 		mem := v.Args[1]
-		if !(is16BitInt(t)) {
+		if !(is16BitInt(t) && !isSigned(t)) {
 			break
 		}
 		v.reset(OpS390XMOVHZload)
@@ -3266,13 +3298,28 @@
 		return true
 	}
 	// match: (Load <t> ptr mem)
-	// cond: (t.IsBoolean() || is8BitInt(t))
+	// cond: is8BitInt(t) && isSigned(t)
+	// result: (MOVBload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is8BitInt(t) && isSigned(t)) {
+			break
+		}
+		v.reset(OpS390XMOVBload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: (t.IsBoolean() || (is8BitInt(t) && !isSigned(t)))
 	// result: (MOVBZload ptr mem)
 	for {
 		t := v.Type
 		ptr := v.Args[0]
 		mem := v.Args[1]
-		if !(t.IsBoolean() || is8BitInt(t)) {
+		if !(t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) {
 			break
 		}
 		v.reset(OpS390XMOVBZload)
@@ -7802,7 +7849,7 @@
 	_ = b
 	// match: (MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		off := v.AuxInt
 		sym := v.Aux
@@ -7818,8 +7865,7 @@
 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -7976,7 +8022,7 @@
 	_ = b
 	// match: (MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDLT {
@@ -7995,14 +8041,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDLE (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDLE {
@@ -8021,14 +8066,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDGT (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDGT {
@@ -8047,14 +8091,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDGE (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDGE {
@@ -8073,14 +8116,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDEQ (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDEQ {
@@ -8099,14 +8141,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDNE (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDNE {
@@ -8125,14 +8166,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDGTnoinv (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDGTnoinv {
@@ -8151,14 +8191,13 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVDGEnoinv (MOVDconst [c]) (MOVDconst [d]) _))
 	// cond: int64(uint8(c)) == c && int64(uint8(d)) == d
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVDGEnoinv {
@@ -8177,27 +8216,25 @@
 		if !(int64(uint8(c)) == c && int64(uint8(d)) == d) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVBZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(Arg <t>))
 	// cond: is8BitInt(t) && !isSigned(t)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpArg {
@@ -8207,21 +8244,19 @@
 		if !(is8BitInt(t) && !isSigned(t)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBZreg x:(MOVBZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -8349,20 +8384,19 @@
 	_ = b
 	// match: (MOVBreg x:(MOVBload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBreg x:(Arg <t>))
 	// cond: is8BitInt(t) && isSigned(t)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpArg {
@@ -8372,21 +8406,19 @@
 		if !(is8BitInt(t) && isSigned(t)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVBreg x:(MOVBreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -9847,11 +9879,11 @@
 		v.AddArg(cmp)
 		return true
 	}
-	// match: (MOVDNE _ y (FlagEQ))
+	// match: (MOVDNE y _ (FlagEQ))
 	// cond:
 	// result: y
 	for {
-		y := v.Args[1]
+		y := v.Args[0]
 		v_2 := v.Args[2]
 		if v_2.Op != OpS390XFlagEQ {
 			break
@@ -9861,11 +9893,11 @@
 		v.AddArg(y)
 		return true
 	}
-	// match: (MOVDNE x _ (FlagLT))
+	// match: (MOVDNE _ x (FlagLT))
 	// cond:
 	// result: x
 	for {
-		x := v.Args[0]
+		x := v.Args[1]
 		v_2 := v.Args[2]
 		if v_2.Op != OpS390XFlagLT {
 			break
@@ -9875,11 +9907,11 @@
 		v.AddArg(x)
 		return true
 	}
-	// match: (MOVDNE x _ (FlagGT))
+	// match: (MOVDNE _ x (FlagGT))
 	// cond:
 	// result: x
 	for {
-		x := v.Args[0]
+		x := v.Args[1]
 		v_2 := v.Args[2]
 		if v_2.Op != OpS390XFlagGT {
 			break
@@ -9995,7 +10027,7 @@
 	_ = b
 	// match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		off := v.AuxInt
 		sym := v.Aux
@@ -10011,8 +10043,7 @@
 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -10164,6 +10195,23 @@
 	}
 	return false
 }
+func rewriteValueS390X_OpS390XMOVDreg(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (MOVDreg x)
+	// cond: x.Uses == 1
+	// result: (MOVDnop x)
+	for {
+		x := v.Args[0]
+		if !(x.Uses == 1) {
+			break
+		}
+		v.reset(OpS390XMOVDnop)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
 func rewriteValueS390X_OpS390XMOVDstore(v *Value, config *Config) bool {
 	b := v.Block
 	_ = b
@@ -10912,7 +10960,7 @@
 	_ = b
 	// match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		off := v.AuxInt
 		sym := v.Aux
@@ -10928,8 +10976,7 @@
 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -11086,33 +11133,31 @@
 	_ = b
 	// match: (MOVHZreg x:(MOVBZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHZreg x:(MOVHZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHZreg x:(Arg <t>))
 	// cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpArg {
@@ -11122,34 +11167,31 @@
 		if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHZreg x:(MOVBZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHZreg x:(MOVHZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -11277,46 +11319,43 @@
 	_ = b
 	// match: (MOVHreg x:(MOVBload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHreg x:(MOVBZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHreg x:(MOVHload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHreg x:(Arg <t>))
 	// cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpArg {
@@ -11326,47 +11365,43 @@
 		if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHreg x:(MOVBreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHreg x:(MOVBZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVHreg x:(MOVHreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -12310,7 +12345,7 @@
 	_ = b
 	// match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
 	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		off := v.AuxInt
 		sym := v.Aux
@@ -12326,8 +12361,7 @@
 		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -12484,46 +12518,43 @@
 	_ = b
 	// match: (MOVWZreg x:(MOVBZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWZreg x:(MOVHZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWZreg x:(MOVWZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVWZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWZreg x:(Arg <t>))
 	// cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpArg {
@@ -12533,47 +12564,43 @@
 		if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWZreg x:(MOVBZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWZreg x:(MOVHZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWZreg x:(MOVWZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVWZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
@@ -12701,72 +12728,67 @@
 	_ = b
 	// match: (MOVWreg x:(MOVBload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVBZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVHload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVHZload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHZload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVWload _ _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVWload {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(Arg <t>))
 	// cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpArg {
@@ -12776,73 +12798,67 @@
 		if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVBreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVBZreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVBZreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVHreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVHreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVHreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
 	// match: (MOVWreg x:(MOVWreg _))
 	// cond:
-	// result: x
+	// result: (MOVDreg x)
 	for {
 		x := v.Args[0]
 		if x.Op != OpS390XMOVWreg {
 			break
 		}
-		v.reset(OpCopy)
-		v.Type = x.Type
+		v.reset(OpS390XMOVDreg)
 		v.AddArg(x)
 		return true
 	}
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index 121dfb7..e45ca05 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -1634,6 +1634,8 @@
 	return body
 }
 
+var dummyCond = &Name{Value: "false"}
+
 func (p *parser) header(forStmt bool) (init SimpleStmt, cond Expr, post SimpleStmt) {
 	if p.tok == _Lbrace {
 		return
@@ -1680,7 +1682,8 @@
 	case *ExprStmt:
 		cond = s.X
 	default:
-		p.error("invalid condition, tag, or type switch guard")
+		p.syntax_error(fmt.Sprintf("%s used as value", String(s)))
+		cond = dummyCond // avoid follow-up error for if statements
 	}
 
 	p.xnest = outer
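
The parser change above reports the misuse once and then substitutes dummyCond, a placeholder "false" condition, so an if statement with a bad header does not also produce a second error about its condition. A toy, hedged sketch of that report-once-and-substitute recovery pattern (all names below are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// errCondMisuse stands in for the syntax error reported by the real parser.
var errCondMisuse = errors.New("x := 1 used as value")

// parseCond reports the problem once and hands back a harmless placeholder
// condition ("false"), so the caller can keep going without producing a
// second, follow-up error for the same construct.
func parseCond(tok string) (cond string, err error) {
	if tok == "x := 1" {
		return "false", errCondMisuse
	}
	return tok, nil
}

func main() {
	cond, err := parseCond("x := 1")
	fmt.Println(cond, err) // false x := 1 used as value
}
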
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 7d5f79f..c51dcea 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -15,7 +15,6 @@
 	"os/exec"
 	"path/filepath"
 	"regexp"
-	"runtime"
 	"strconv"
 	"strings"
 	"sync"
@@ -354,7 +353,7 @@
 
 	// This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests.
 	// See issue 18153.
-	if runtime.GOOS == "linux" {
+	if t.goos == "linux" {
 		t.tests = append(t.tests, distTest{
 			name:    "cmd_go_test_terminal",
 			heading: "cmd/go terminal test",
@@ -568,7 +567,7 @@
 		if t.gohostos == "linux" && t.goarch == "amd64" {
 			t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", "main.go")
 		}
-		if t.gohostos == "linux" && t.goarch == "amd64" {
+		if t.goos == "linux" && t.goarch == "amd64" {
 			t.registerTest("testsanitizers", "../misc/cgo/testsanitizers", "./test.bash")
 		}
 		if t.hasBash() && t.goos != "android" && !t.iOS() && t.gohostos != "windows" {
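
The two hunks above switch the registration checks from runtime.GOOS and t.gohostos (the machine cmd/dist runs on) to t.goos (the platform being targeted), so the terminal and sanitizer tests are keyed to the build target rather than the build host. A standalone sketch of that distinction, using only the standard library (illustrative, not the dist code):

package main

import (
	"fmt"
	"os"
	"runtime"
)

func main() {
	// runtime.GOOS is baked in at compile time: for a tool like cmd/dist it
	// describes the host the tool runs on.
	host := runtime.GOOS

	// The GOOS environment variable, when set, names the target being built
	// or tested; it can differ from the host when cross-compiling.
	target := os.Getenv("GOOS")
	if target == "" {
		target = host
	}

	fmt.Printf("host GOOS = %s, target GOOS = %s\n", host, target)
}
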
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index e93fd6e..3d5dd2b 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -17,7 +17,7 @@
 // 	clean       remove object files
 // 	doc         show documentation for package or symbol
 // 	env         print Go environment information
-// 	bug         print information for bug reports
+// 	bug         start a bug report
 // 	fix         run go tool fix on packages
 // 	fmt         run gofmt on package sources
 // 	generate    generate Go files by processing source
@@ -324,15 +324,14 @@
 // each named variable on its own line.
 //
 //
-// Print information for bug reports
+// Start a bug report
 //
 // Usage:
 //
 // 	go bug
 //
-// Bug prints information that helps file effective bug reports.
-//
-// Bugs may be reported at https://golang.org/issue/new.
+// Bug opens the default browser and starts a new bug report.
+// The report includes useful system information.
 //
 //
 // Run go tool fix on packages
diff --git a/src/cmd/go/get.go b/src/cmd/go/get.go
index 1d7677c..6fb4235 100644
--- a/src/cmd/go/get.go
+++ b/src/cmd/go/get.go
@@ -428,7 +428,7 @@
 			return fmt.Errorf("cannot download, $GOPATH not set. For more details see: 'go help gopath'")
 		}
 		// Guard against people setting GOPATH=$GOROOT.
-		if list[0] == goroot {
+		if filepath.Clean(list[0]) == filepath.Clean(goroot) {
 			return fmt.Errorf("cannot download, $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
 		}
 		if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
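
With the change above, the GOPATH=GOROOT guard compares the two paths after filepath.Clean, so a stray trailing slash on either side no longer defeats the check. A minimal sketch of that comparison:

package main

import (
	"fmt"
	"path/filepath"
)

// samePath reports whether two paths are equal once lexically cleaned,
// which makes the comparison insensitive to trailing slashes and "." elements.
func samePath(a, b string) bool {
	return filepath.Clean(a) == filepath.Clean(b)
}

func main() {
	fmt.Println(samePath("/home/gopher/go/", "/home/gopher/go")) // true
	fmt.Println(samePath("/home/gopher/go", "/usr/local/go"))    // false
}
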
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 5727eb0..56de65c 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -1683,173 +1683,111 @@
 	}
 }
 
-// Test go env missing GOPATH shows default.
-func TestMissingGOPATHEnvShowsDefault(t *testing.T) {
+func TestDefaultGOPATH(t *testing.T) {
 	tg := testgo(t)
 	defer tg.cleanup()
 	tg.parallel()
-	tg.setenv("GOPATH", "")
+	tg.tempDir("home/go")
+	tg.setenv(homeEnvName(), tg.path("home"))
+
 	tg.run("env", "GOPATH")
+	tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go")
 
-	want := filepath.Join(os.Getenv(homeEnvName()), "go")
-	got := strings.TrimSpace(tg.getStdout())
-	if got != want {
-		t.Errorf("got %q; want %q", got, want)
-	}
+	tg.setenv("GOROOT", tg.path("home/go"))
+	tg.run("env", "GOPATH")
+	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go")
+
+	tg.setenv("GOROOT", tg.path("home/go")+"/")
+	tg.run("env", "GOPATH")
+	tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/")
 }
 
-// Test go get missing GOPATH causes go get to warn if directory doesn't exist.
-func TestMissingGOPATHGetWarnsIfNotExists(t *testing.T) {
+func TestDefaultGOPATHGet(t *testing.T) {
 	testenv.MustHaveExternalNetwork(t)
 
-	if _, err := exec.LookPath("git"); err != nil {
-		t.Skip("skipping because git binary not found")
-	}
-
 	tg := testgo(t)
 	defer tg.cleanup()
-
-	// setenv variables for test and defer deleting temporary home directory.
 	tg.setenv("GOPATH", "")
-	tmp, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("could not create tmp home: %v", err)
-	}
-	defer os.RemoveAll(tmp)
-	tg.setenv(homeEnvName(), tmp)
+	tg.tempDir("home")
+	tg.setenv(homeEnvName(), tg.path("home"))
 
+	// warn for creating directory
 	tg.run("get", "-v", "github.com/golang/example/hello")
+	tg.grepStderr("created GOPATH="+regexp.QuoteMeta(tg.path("home/go"))+"; see 'go help gopath'", "did not create GOPATH")
 
-	want := fmt.Sprintf("created GOPATH=%s; see 'go help gopath'", filepath.Join(tmp, "go"))
-	got := strings.TrimSpace(tg.getStderr())
-	if !strings.Contains(got, want) {
-		t.Errorf("got %q; want %q", got, want)
-	}
-}
-
-// Test go get missing GOPATH causes no warning if directory exists.
-func TestMissingGOPATHGetDoesntWarnIfExists(t *testing.T) {
-	testenv.MustHaveExternalNetwork(t)
-
-	if _, err := exec.LookPath("git"); err != nil {
-		t.Skip("skipping because git binary not found")
-	}
-
-	tg := testgo(t)
-	defer tg.cleanup()
-
-	// setenv variables for test and defer resetting them.
-	tg.setenv("GOPATH", "")
-	tmp, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("could not create tmp home: %v", err)
-	}
-	defer os.RemoveAll(tmp)
-	if err := os.Mkdir(filepath.Join(tmp, "go"), 0777); err != nil {
-		t.Fatalf("could not create $HOME/go: %v", err)
-	}
-
-	tg.setenv(homeEnvName(), tmp)
-
+	// no warning if directory already exists
+	tg.must(os.RemoveAll(tg.path("home/go")))
+	tg.tempDir("home/go")
 	tg.run("get", "github.com/golang/example/hello")
+	tg.grepStderrNot(".", "expected no output on standard error")
 
-	got := strings.TrimSpace(tg.getStderr())
-	if got != "" {
-		t.Errorf("got %q; wants empty", got)
-	}
+	// error if $HOME/go is a file
+	tg.must(os.RemoveAll(tg.path("home/go")))
+	tg.tempFile("home/go", "")
+	tg.runFail("get", "github.com/golang/example/hello")
+	tg.grepStderr(`mkdir .*[/\\]go: .*(not a directory|cannot find the path)`, "expected error because $HOME/go is a file")
 }
 
-// Test go get missing GOPATH fails if pointed file is not a directory.
-func TestMissingGOPATHGetFailsIfItsNotDirectory(t *testing.T) {
-	testenv.MustHaveExternalNetwork(t)
-
+func TestDefaultGOPATHPrintedSearchList(t *testing.T) {
 	tg := testgo(t)
 	defer tg.cleanup()
-
-	// setenv variables for test and defer resetting them.
 	tg.setenv("GOPATH", "")
-	tmp, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("could not create tmp home: %v", err)
-	}
-	defer os.RemoveAll(tmp)
+	tg.tempDir("home")
+	tg.setenv(homeEnvName(), tg.path("home"))
 
-	path := filepath.Join(tmp, "go")
-	if err := ioutil.WriteFile(path, nil, 0777); err != nil {
-		t.Fatalf("could not create GOPATH at %s: %v", path, err)
-	}
-	tg.setenv(homeEnvName(), tmp)
-
-	const pkg = "github.com/golang/example/hello"
-	tg.runFail("get", pkg)
-
-	msg := "not a directory"
-	if runtime.GOOS == "windows" {
-		msg = "The system cannot find the path specified."
-	}
-	want := fmt.Sprintf("package %s: mkdir %s: %s", pkg, filepath.Join(tmp, "go"), msg)
-	got := strings.TrimSpace(tg.getStderr())
-	if got != want {
-		t.Errorf("got %q; wants %q", got, want)
-	}
-}
-
-// Test go install of missing package when missing GOPATH fails and shows default GOPATH.
-func TestMissingGOPATHInstallMissingPackageFailsAndShowsDefault(t *testing.T) {
-	tg := testgo(t)
-	defer tg.cleanup()
-
-	// setenv variables for test and defer resetting them.
-	tg.setenv("GOPATH", "")
-	tmp, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("could not create tmp home: %v", err)
-	}
-	defer os.RemoveAll(tmp)
-	if err := os.Mkdir(filepath.Join(tmp, "go"), 0777); err != nil {
-		t.Fatalf("could not create $HOME/go: %v", err)
-	}
-	tg.setenv(homeEnvName(), tmp)
-
-	const pkg = "github.com/golang/example/hello"
-	tg.runFail("install", pkg)
-
-	pkgPath := filepath.Join(strings.Split(pkg, "/")...)
-	want := fmt.Sprintf("can't load package: package %s: cannot find package \"%s\" in any of:", pkg, pkg) +
-		fmt.Sprintf("\n\t%s (from $GOROOT)", filepath.Join(runtime.GOROOT(), "src", pkgPath)) +
-		fmt.Sprintf("\n\t%s (from $GOPATH)", filepath.Join(tmp, "go", "src", pkgPath))
-
-	got := strings.TrimSpace(tg.getStderr())
-	if got != want {
-		t.Errorf("got %q; wants %q", got, want)
-	}
+	tg.runFail("install", "github.com/golang/example/hello")
+	tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH")
 }
 
 // Issue 4186.  go get cannot be used to download packages to $GOROOT.
 // Test that without GOPATH set, go get should fail.
-func TestWithoutGOPATHGoGetFails(t *testing.T) {
+func TestGoGetIntoGOROOT(t *testing.T) {
 	testenv.MustHaveExternalNetwork(t)
 
 	tg := testgo(t)
 	defer tg.cleanup()
 	tg.parallel()
 	tg.tempDir("src")
-	tg.setenv("GOPATH", "")
-	tg.setenv("GOROOT", tg.path("."))
-	tg.runFail("get", "-d", "golang.org/x/codereview/cmd/hgpatch")
-}
 
-// Test that with GOPATH=$GOROOT, go get should fail.
-func TestWithGOPATHEqualsGOROOTGoGetFails(t *testing.T) {
-	testenv.MustHaveExternalNetwork(t)
-
-	tg := testgo(t)
-	defer tg.cleanup()
-	tg.parallel()
-	tg.tempDir("src")
+	// Fails because GOROOT=GOPATH
 	tg.setenv("GOPATH", tg.path("."))
 	tg.setenv("GOROOT", tg.path("."))
-	tg.runFail("get", "-d", "golang.org/x/codereview/cmd/hgpatch")
+	tg.runFail("get", "-d", "github.com/golang/example/hello")
+	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
+	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
+
+	// Fails because GOROOT=GOPATH after cleaning.
+	tg.setenv("GOPATH", tg.path(".")+"/")
+	tg.setenv("GOROOT", tg.path("."))
+	tg.runFail("get", "-d", "github.com/golang/example/hello")
+	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
+	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
+
+	tg.setenv("GOPATH", tg.path("."))
+	tg.setenv("GOROOT", tg.path(".")+"/")
+	tg.runFail("get", "-d", "github.com/golang/example/hello")
+	tg.grepStderr("warning: GOPATH set to GOROOT", "go should detect GOPATH=GOROOT")
+	tg.grepStderr(`\$GOPATH must not be set to \$GOROOT`, "go should detect GOPATH=GOROOT")
+
+	// Fails because GOROOT=$HOME/go so default GOPATH unset.
+	tg.tempDir("home/go")
+	tg.setenv(homeEnvName(), tg.path("home"))
+	tg.setenv("GOPATH", "")
+	tg.setenv("GOROOT", tg.path("home/go"))
+	tg.runFail("get", "-d", "github.com/golang/example/hello")
+	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
+
+	tg.setenv(homeEnvName(), tg.path("home")+"/")
+	tg.setenv("GOPATH", "")
+	tg.setenv("GOROOT", tg.path("home/go"))
+	tg.runFail("get", "-d", "github.com/golang/example/hello")
+	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
+
+	tg.setenv(homeEnvName(), tg.path("home"))
+	tg.setenv("GOPATH", "")
+	tg.setenv("GOROOT", tg.path("home/go")+"/")
+	tg.runFail("get", "-d", "github.com/golang/example/hello")
+	tg.grepStderr(`\$GOPATH not set`, "expected GOPATH not set")
 }
 
 func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) {
@@ -3744,6 +3682,13 @@
 	tg.grepBoth(okPattern, "go test did not say ok")
 }
 
+// Issue 18845
+func TestBenchTimeout(t *testing.T) {
+	tg := testgo(t)
+	defer tg.cleanup()
+	tg.run("test", "-bench", ".", "-timeout", "750ms", "testdata/timeoutbench_test.go")
+}
+
 func TestLinkXImportPathEscape(t *testing.T) {
 	// golang.org/issue/16710
 	tg := testgo(t)
@@ -3787,3 +3732,26 @@
 	tg.setenv("GOPATH", tg.path("go"))
 	tg.run("build", "p")
 }
+
+// Issue 18778.
+func TestDotDotDotOutsideGOPATH(t *testing.T) {
+	tg := testgo(t)
+	defer tg.cleanup()
+
+	tg.tempFile("pkgs/a.go", `package x`)
+	tg.tempFile("pkgs/a_test.go", `package x_test
+import "testing"
+func TestX(t *testing.T) {}`)
+
+	tg.tempFile("pkgs/a/a.go", `package a`)
+	tg.tempFile("pkgs/a/a_test.go", `package a_test
+import "testing"
+func TestA(t *testing.T) {}`)
+
+	tg.cd(tg.path("pkgs"))
+	tg.run("build", "./...")
+	tg.run("test", "./...")
+	tg.run("list", "./...")
+	tg.grepStdout("pkgs$", "expected package not listed")
+	tg.grepStdout("pkgs/a", "expected package not listed")
+}
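
TestDefaultGOPATH above encodes the Go 1.8 default-GOPATH rule: when GOPATH is unset it defaults to $HOME/go, except when that directory is the GOROOT itself, in which case it stays unset. A hedged sketch of that rule (the real logic lives in go/build; this standalone version only illustrates the behavior the test checks):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// defaultGOPATH mirrors the behavior exercised by TestDefaultGOPATH:
// $HOME/go, unless that path (after cleaning) is GOROOT, in which case
// there is no usable default.
func defaultGOPATH(home string) string {
	if home == "" {
		return ""
	}
	def := filepath.Join(home, "go")
	if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
		return "" // a GOPATH equal to GOROOT would have no effect
	}
	return def
}

func main() {
	fmt.Println(defaultGOPATH(os.Getenv("HOME")))
}
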
diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go
index 07fc4e2..d80ff2d 100644
--- a/src/cmd/go/main.go
+++ b/src/cmd/go/main.go
@@ -136,7 +136,7 @@
 	// Diagnose common mistake: GOPATH==GOROOT.
 	// This setting is equivalent to not setting GOPATH at all,
 	// which is not what most people want when they do it.
-	if gopath := buildContext.GOPATH; gopath == runtime.GOROOT() {
+	if gopath := buildContext.GOPATH; filepath.Clean(gopath) == filepath.Clean(runtime.GOROOT()) {
 		fmt.Fprintf(os.Stderr, "warning: GOPATH set to GOROOT (%s) has no effect\n", gopath)
 	} else {
 		for _, p := range filepath.SplitList(gopath) {
diff --git a/src/cmd/go/pkg.go b/src/cmd/go/pkg.go
index d69fa51..e40f942 100644
--- a/src/cmd/go/pkg.go
+++ b/src/cmd/go/pkg.go
@@ -429,7 +429,7 @@
 func cleanImport(path string) string {
 	orig := path
 	path = pathpkg.Clean(path)
-	if strings.HasPrefix(orig, "./") && path != ".." && path != "." && !strings.HasPrefix(path, "../") {
+	if strings.HasPrefix(orig, "./") && path != ".." && !strings.HasPrefix(path, "../") {
 		path = "./" + path
 	}
 	return path
diff --git a/src/cmd/go/testdata/timeoutbench_test.go b/src/cmd/go/testdata/timeoutbench_test.go
new file mode 100644
index 0000000..57a8888
--- /dev/null
+++ b/src/cmd/go/testdata/timeoutbench_test.go
@@ -0,0 +1,10 @@
+package timeoutbench_test
+
+import (
+	"testing"
+	"time"
+)
+
+func BenchmarkSleep1s(b *testing.B) {
+	time.Sleep(1 * time.Second)
+}
diff --git a/src/cmd/internal/obj/zbootstrap.go b/src/cmd/internal/obj/zbootstrap.go
index 90b60aa..467e9d2 100644
--- a/src/cmd/internal/obj/zbootstrap.go
+++ b/src/cmd/internal/obj/zbootstrap.go
@@ -10,6 +10,6 @@
 const defaultGOOS = runtime.GOOS
 const defaultGOARCH = runtime.GOARCH
 const defaultGO_EXTLINK_ENABLED = ``
-const version = `go1.8rc2`
+const version = `go1.8`
 const stackGuardMultiplier = 1
 const goexperiment = ``
diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go
index 7d00ff1..2656c24 100644
--- a/src/cmd/link/internal/ld/config.go
+++ b/src/cmd/link/internal/ld/config.go
@@ -238,6 +238,8 @@
 				Linkmode = LinkExternal
 			} else if iscgo && externalobj {
 				Linkmode = LinkExternal
+			} else if Buildmode == BuildmodePIE {
+				Linkmode = LinkExternal // https://golang.org/issue/18968
 			} else {
 				Linkmode = LinkInternal
 			}
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 61d3e4f..22d2c54 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -1080,7 +1080,7 @@
 		epcs = s
 
 		dsym := ctxt.Syms.Lookup(dwarf.InfoPrefix+s.Name, int(s.Version))
-		dsym.Attr |= AttrHidden
+		dsym.Attr |= AttrHidden | AttrReachable
 		dsym.Type = obj.SDWARFINFO
 		for _, r := range dsym.R {
 			if r.Type == obj.R_DWARFREF && r.Sym.Size == 0 {
diff --git a/src/crypto/x509/root_linux.go b/src/crypto/x509/root_linux.go
index 38dd72d..aa1785e 100644
--- a/src/crypto/x509/root_linux.go
+++ b/src/crypto/x509/root_linux.go
@@ -7,8 +7,8 @@
 // Possible certificate files; stop after finding one.
 var certFiles = []string{
 	"/etc/ssl/certs/ca-certificates.crt",                // Debian/Ubuntu/Gentoo etc.
-	"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
 	"/etc/pki/tls/certs/ca-bundle.crt",                  // Fedora/RHEL 6
 	"/etc/ssl/ca-bundle.pem",                            // OpenSUSE
 	"/etc/pki/tls/cacert.pem",                           // OpenELEC
+	"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
 }
diff --git a/src/database/sql/ctxutil.go b/src/database/sql/ctxutil.go
index 1071446..bd652b5 100644
--- a/src/database/sql/ctxutil.go
+++ b/src/database/sql/ctxutil.go
@@ -35,15 +35,12 @@
 		return nil, err
 	}
 
-	resi, err := execer.Exec(query, dargs)
-	if err == nil {
-		select {
-		default:
-		case <-ctx.Done():
-			return resi, ctx.Err()
-		}
+	select {
+	default:
+	case <-ctx.Done():
+		return nil, ctx.Err()
 	}
-	return resi, err
+	return execer.Exec(query, dargs)
 }
 
 func ctxDriverQuery(ctx context.Context, queryer driver.Queryer, query string, nvdargs []driver.NamedValue) (driver.Rows, error) {
@@ -56,16 +53,12 @@
 		return nil, err
 	}
 
-	rowsi, err := queryer.Query(query, dargs)
-	if err == nil {
-		select {
-		default:
-		case <-ctx.Done():
-			rowsi.Close()
-			return nil, ctx.Err()
-		}
+	select {
+	default:
+	case <-ctx.Done():
+		return nil, ctx.Err()
 	}
-	return rowsi, err
+	return queryer.Query(query, dargs)
 }
 
 func ctxDriverStmtExec(ctx context.Context, si driver.Stmt, nvdargs []driver.NamedValue) (driver.Result, error) {
@@ -77,15 +70,12 @@
 		return nil, err
 	}
 
-	resi, err := si.Exec(dargs)
-	if err == nil {
-		select {
-		default:
-		case <-ctx.Done():
-			return resi, ctx.Err()
-		}
+	select {
+	default:
+	case <-ctx.Done():
+		return nil, ctx.Err()
 	}
-	return resi, err
+	return si.Exec(dargs)
 }
 
 func ctxDriverStmtQuery(ctx context.Context, si driver.Stmt, nvdargs []driver.NamedValue) (driver.Rows, error) {
@@ -97,16 +87,12 @@
 		return nil, err
 	}
 
-	rowsi, err := si.Query(dargs)
-	if err == nil {
-		select {
-		default:
-		case <-ctx.Done():
-			rowsi.Close()
-			return nil, ctx.Err()
-		}
+	select {
+	default:
+	case <-ctx.Done():
+		return nil, ctx.Err()
 	}
-	return rowsi, err
+	return si.Query(dargs)
 }
 
 var errLevelNotSupported = errors.New("sql: selected isolation level is not supported")
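
The rewritten ctxDriver* helpers above now do a non-blocking check of ctx.Done() before handing the query to the driver, instead of running the query and inspecting the context afterwards. A minimal standalone sketch of that select-with-default pattern:

package main

import (
	"context"
	"fmt"
	"time"
)

// doWork bails out immediately if ctx is already done, mirroring the
// non-blocking check used by the ctxDriver* helpers.
func doWork(ctx context.Context) error {
	select {
	default:
	case <-ctx.Done():
		return ctx.Err()
	}
	// ... the actual driver call would go here ...
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // let the deadline pass
	fmt.Println(doWork(ctx))         // context deadline exceeded
}
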
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index 0fa7c34..c016681 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -305,8 +305,9 @@
 
 	mu           sync.Mutex // protects following fields
 	freeConn     []*driverConn
-	connRequests []chan connRequest
-	numOpen      int // number of opened and pending open connections
+	connRequests map[uint64]chan connRequest
+	nextRequest  uint64 // Next key to use in connRequests.
+	numOpen      int    // number of opened and pending open connections
 	// Used to signal the need for new connections
 	// a goroutine running connectionOpener() reads on this chan and
 	// maybeOpenNewConnections sends on the chan (one send per needed connection)
@@ -572,10 +573,11 @@
 		return nil, fmt.Errorf("sql: unknown driver %q (forgotten import?)", driverName)
 	}
 	db := &DB{
-		driver:   driveri,
-		dsn:      dataSourceName,
-		openerCh: make(chan struct{}, connectionRequestQueueSize),
-		lastPut:  make(map[*driverConn]string),
+		driver:       driveri,
+		dsn:          dataSourceName,
+		openerCh:     make(chan struct{}, connectionRequestQueueSize),
+		lastPut:      make(map[*driverConn]string),
+		connRequests: make(map[uint64]chan connRequest),
 	}
 	go db.connectionOpener()
 	return db, nil
@@ -881,6 +883,14 @@
 
 var errDBClosed = errors.New("sql: database is closed")
 
+// nextRequestKeyLocked returns the next connection request key.
+// It is assumed that nextRequest will not overflow.
+func (db *DB) nextRequestKeyLocked() uint64 {
+	next := db.nextRequest
+	db.nextRequest++
+	return next
+}
+
 // conn returns a newly-opened or cached *driverConn.
 func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn, error) {
 	db.mu.Lock()
@@ -918,12 +928,25 @@
 		// Make the connRequest channel. It's buffered so that the
 		// connectionOpener doesn't block while waiting for the req to be read.
 		req := make(chan connRequest, 1)
-		db.connRequests = append(db.connRequests, req)
+		reqKey := db.nextRequestKeyLocked()
+		db.connRequests[reqKey] = req
 		db.mu.Unlock()
 
 		// Timeout the connection request with the context.
 		select {
 		case <-ctx.Done():
+			// Remove the connection request and ensure no value has been sent
+			// on it after removing.
+			db.mu.Lock()
+			delete(db.connRequests, reqKey)
+			db.mu.Unlock()
+			select {
+			default:
+			case ret, ok := <-req:
+				if ok {
+					db.putConn(ret.conn, ret.err)
+				}
+			}
 			return nil, ctx.Err()
 		case ret, ok := <-req:
 			if !ok {
@@ -1044,12 +1067,12 @@
 		return false
 	}
 	if c := len(db.connRequests); c > 0 {
-		req := db.connRequests[0]
-		// This copy is O(n) but in practice faster than a linked list.
-		// TODO: consider compacting it down less often and
-		// moving the base instead?
-		copy(db.connRequests, db.connRequests[1:])
-		db.connRequests = db.connRequests[:c-1]
+		var req chan connRequest
+		var reqKey uint64
+		for reqKey, req = range db.connRequests {
+			break
+		}
+		delete(db.connRequests, reqKey) // Remove from pending requests.
 		if err == nil {
 			dc.inUse = true
 		}
@@ -1357,16 +1380,7 @@
 		cancel: cancel,
 		ctx:    ctx,
 	}
-	go func(tx *Tx) {
-		select {
-		case <-tx.ctx.Done():
-			if !tx.isDone() {
-				// Discard and close the connection used to ensure the transaction
-				// is closed and the resources are released.
-				tx.rollback(true)
-			}
-		}
-	}(tx)
+	go tx.awaitDone()
 	return tx, nil
 }
 
@@ -1388,6 +1402,11 @@
 type Tx struct {
 	db *DB
 
+	// closemu prevents the transaction from closing while there
+	// is an active query. It is held for read during queries
+	// and exclusively during close.
+	closemu sync.RWMutex
+
 	// dc is owned exclusively until Commit or Rollback, at which point
 	// it's returned with putConn.
 	dc  *driverConn
@@ -1413,6 +1432,20 @@
 	ctx context.Context
 }
 
+// awaitDone blocks until the context in Tx is canceled and rolls back
+// the transaction if it's not already done.
+func (tx *Tx) awaitDone() {
+	// Wait for either the transaction to be committed or rolled
+	// back, or for the associated context to be closed.
+	<-tx.ctx.Done()
+
+	// Discard and close the connection used to ensure the
+	// transaction is closed and the resources are released.  This
+	// rollback does nothing if the transaction has already been
+	// committed or rolled back.
+	tx.rollback(true)
+}
+
 func (tx *Tx) isDone() bool {
 	return atomic.LoadInt32(&tx.done) != 0
 }
@@ -1424,16 +1457,31 @@
 // close returns the connection to the pool and
 // must only be called by Tx.rollback or Tx.Commit.
 func (tx *Tx) close(err error) {
+	tx.closemu.Lock()
+	defer tx.closemu.Unlock()
+
 	tx.db.putConn(tx.dc, err)
 	tx.cancel()
 	tx.dc = nil
 	tx.txi = nil
 }
 
+// hookTxGrabConn specifies an optional hook to be called on
+// a successful call to (*Tx).grabConn. For tests.
+var hookTxGrabConn func()
+
 func (tx *Tx) grabConn(ctx context.Context) (*driverConn, error) {
+	select {
+	default:
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
 	if tx.isDone() {
 		return nil, ErrTxDone
 	}
+	if hookTxGrabConn != nil { // test hook
+		hookTxGrabConn()
+	}
 	return tx.dc, nil
 }
 
@@ -1503,6 +1551,9 @@
 // for the execution of the returned statement. The returned statement
 // will run in the transaction context.
 func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) {
+	tx.closemu.RLock()
+	defer tx.closemu.RUnlock()
+
 	// TODO(bradfitz): We could be more efficient here and either
 	// provide a method to take an existing Stmt (created on
 	// perhaps a different Conn), and re-create it on this Conn if
@@ -1567,6 +1618,9 @@
 // The returned statement operates within the transaction and will be closed
 // when the transaction has been committed or rolled back.
 func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt {
+	tx.closemu.RLock()
+	defer tx.closemu.RUnlock()
+
 	// TODO(bradfitz): optimize this. Currently this re-prepares
 	// each time. This is fine for now to illustrate the API but
 	// we should really cache already-prepared statements
@@ -1618,6 +1672,9 @@
 // ExecContext executes a query that doesn't return rows.
 // For example: an INSERT and UPDATE.
 func (tx *Tx) ExecContext(ctx context.Context, query string, args ...interface{}) (Result, error) {
+	tx.closemu.RLock()
+	defer tx.closemu.RUnlock()
+
 	dc, err := tx.grabConn(ctx)
 	if err != nil {
 		return nil, err
@@ -1661,6 +1718,9 @@
 
 // QueryContext executes a query that returns rows, typically a SELECT.
 func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+	tx.closemu.RLock()
+	defer tx.closemu.RUnlock()
+
 	dc, err := tx.grabConn(ctx)
 	if err != nil {
 		return nil, err
@@ -2034,29 +2094,32 @@
 	dc          *driverConn // owned; must call releaseConn when closed to release
 	releaseConn func(error)
 	rowsi       driver.Rows
+	cancel      func()      // called when Rows is closed, may be nil.
+	closeStmt   *driverStmt // if non-nil, statement to Close on close
 
-	// closed value is 1 when the Rows is closed.
-	// Use atomic operations on value when checking value.
-	closed    int32
-	ctxClose  chan struct{} // closed when Rows is closed, may be null.
-	lastcols  []driver.Value
-	lasterr   error       // non-nil only if closed is true
-	closeStmt *driverStmt // if non-nil, statement to Close on close
+	// closemu prevents Rows from closing while there
+	// is an active streaming result. It is held for read during non-close operations
+	// and exclusively during close.
+	//
+	// closemu guards lasterr and closed.
+	closemu sync.RWMutex
+	closed  bool
+	lasterr error // non-nil only if closed is true
+
+	// lastcols is only used in Scan, Next, and NextResultSet which are expected
+	// not to be called concurrently.
+	lastcols []driver.Value
 }
 
 func (rs *Rows) initContextClose(ctx context.Context) {
-	if ctx.Done() == context.Background().Done() {
-		return
-	}
+	ctx, rs.cancel = context.WithCancel(ctx)
+	go rs.awaitDone(ctx)
+}
 
-	rs.ctxClose = make(chan struct{})
-	go func() {
-		select {
-		case <-ctx.Done():
-			rs.Close()
-		case <-rs.ctxClose:
-		}
-	}()
+// awaitDone blocks until the rows are closed or the context canceled.
+func (rs *Rows) awaitDone(ctx context.Context) {
+	<-ctx.Done()
+	rs.close(ctx.Err())
 }
 
 // Next prepares the next result row for reading with the Scan method. It
@@ -2066,8 +2129,19 @@
 //
 // Every call to Scan, even the first one, must be preceded by a call to Next.
 func (rs *Rows) Next() bool {
-	if rs.isClosed() {
-		return false
+	var doClose, ok bool
+	withLock(rs.closemu.RLocker(), func() {
+		doClose, ok = rs.nextLocked()
+	})
+	if doClose {
+		rs.Close()
+	}
+	return ok
+}
+
+func (rs *Rows) nextLocked() (doClose, ok bool) {
+	if rs.closed {
+		return false, false
 	}
 	if rs.lastcols == nil {
 		rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns()))
@@ -2076,23 +2150,21 @@
 	if rs.lasterr != nil {
 		// Close the connection if there is a driver error.
 		if rs.lasterr != io.EOF {
-			rs.Close()
-			return false
+			return true, false
 		}
 		nextResultSet, ok := rs.rowsi.(driver.RowsNextResultSet)
 		if !ok {
-			rs.Close()
-			return false
+			return true, false
 		}
 		// The driver is at the end of the current result set.
 		// Test to see if there is another result set after the current one.
 		// Only close Rows if there is no further result sets to read.
 		if !nextResultSet.HasNextResultSet() {
-			rs.Close()
+			doClose = true
 		}
-		return false
+		return doClose, false
 	}
-	return true
+	return false, true
 }
 
 // NextResultSet prepares the next result set for reading. It returns true if
@@ -2104,18 +2176,28 @@
 // scanning. If there are further result sets they may not have rows in the result
 // set.
 func (rs *Rows) NextResultSet() bool {
-	if rs.isClosed() {
+	var doClose bool
+	defer func() {
+		if doClose {
+			rs.Close()
+		}
+	}()
+	rs.closemu.RLock()
+	defer rs.closemu.RUnlock()
+
+	if rs.closed {
 		return false
 	}
+
 	rs.lastcols = nil
 	nextResultSet, ok := rs.rowsi.(driver.RowsNextResultSet)
 	if !ok {
-		rs.Close()
+		doClose = true
 		return false
 	}
 	rs.lasterr = nextResultSet.NextResultSet()
 	if rs.lasterr != nil {
-		rs.Close()
+		doClose = true
 		return false
 	}
 	return true
@@ -2124,6 +2206,8 @@
 // Err returns the error, if any, that was encountered during iteration.
 // Err may be called after an explicit or implicit Close.
 func (rs *Rows) Err() error {
+	rs.closemu.RLock()
+	defer rs.closemu.RUnlock()
 	if rs.lasterr == io.EOF {
 		return nil
 	}
@@ -2134,7 +2218,9 @@
 // Columns returns an error if the rows are closed, or if the rows
 // are from QueryRow and there was a deferred error.
 func (rs *Rows) Columns() ([]string, error) {
-	if rs.isClosed() {
+	rs.closemu.RLock()
+	defer rs.closemu.RUnlock()
+	if rs.closed {
 		return nil, errors.New("sql: Rows are closed")
 	}
 	if rs.rowsi == nil {
@@ -2146,7 +2232,9 @@
 // ColumnTypes returns column information such as column type, length,
 // and nullable. Some information may not be available from some drivers.
 func (rs *Rows) ColumnTypes() ([]*ColumnType, error) {
-	if rs.isClosed() {
+	rs.closemu.RLock()
+	defer rs.closemu.RUnlock()
+	if rs.closed {
 		return nil, errors.New("sql: Rows are closed")
 	}
 	if rs.rowsi == nil {
@@ -2296,9 +2384,13 @@
 // For scanning into *bool, the source may be true, false, 1, 0, or
 // string inputs parseable by strconv.ParseBool.
 func (rs *Rows) Scan(dest ...interface{}) error {
-	if rs.isClosed() {
+	rs.closemu.RLock()
+	if rs.closed {
+		rs.closemu.RUnlock()
 		return errors.New("sql: Rows are closed")
 	}
+	rs.closemu.RUnlock()
+
 	if rs.lastcols == nil {
 		return errors.New("sql: Scan called without calling Next")
 	}
@@ -2314,27 +2406,39 @@
 	return nil
 }
 
-var rowsCloseHook func(*Rows, *error)
-
-func (rs *Rows) isClosed() bool {
-	return atomic.LoadInt32(&rs.closed) != 0
-}
+// rowsCloseHook returns a function so tests may install the
+// hook through a test-only mutex.
+var rowsCloseHook = func() func(*Rows, *error) { return nil }
 
 // Close closes the Rows, preventing further enumeration. If Next is called
 // and returns false and there are no further result sets,
 // the Rows are closed automatically and it will suffice to check the
 // result of Err. Close is idempotent and does not affect the result of Err.
 func (rs *Rows) Close() error {
-	if !atomic.CompareAndSwapInt32(&rs.closed, 0, 1) {
+	return rs.close(nil)
+}
+
+func (rs *Rows) close(err error) error {
+	rs.closemu.Lock()
+	defer rs.closemu.Unlock()
+
+	if rs.closed {
 		return nil
 	}
-	if rs.ctxClose != nil {
-		close(rs.ctxClose)
+	rs.closed = true
+
+	if rs.lasterr == nil {
+		rs.lasterr = err
 	}
-	err := rs.rowsi.Close()
-	if fn := rowsCloseHook; fn != nil {
+
+	err = rs.rowsi.Close()
+	if fn := rowsCloseHook(); fn != nil {
 		fn(rs, &err)
 	}
+	if rs.cancel != nil {
+		rs.cancel()
+	}
+
 	if rs.closeStmt != nil {
 		rs.closeStmt.Close()
 	}
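
The sql.go changes above replace the connRequests slice with a map keyed by a monotonically increasing uint64, so a waiter whose context is canceled can delete its own pending request in O(1) and drain any connection that was already sent to it. A simplified, hypothetical sketch of that shape (not the database/sql implementation):

package main

import (
	"context"
	"fmt"
	"sync"
)

// pool is a toy stand-in for DB's pending-request bookkeeping.
type pool struct {
	mu       sync.Mutex
	next     uint64
	requests map[uint64]chan int
}

// wait registers a request under a fresh key and either receives a value or,
// on cancellation, removes its own entry so a later fulfiller does not send
// to a waiter that has already given up.
func (p *pool) wait(ctx context.Context) (int, error) {
	p.mu.Lock()
	req := make(chan int, 1) // buffered so the fulfiller never blocks
	key := p.next
	p.next++
	p.requests[key] = req
	p.mu.Unlock()

	select {
	case <-ctx.Done():
		p.mu.Lock()
		delete(p.requests, key)
		p.mu.Unlock()
		// Drain a value that may have been sent just before removal.
		select {
		case v := <-req:
			_ = v // a real pool would return this resource
		default:
		}
		return 0, ctx.Err()
	case v := <-req:
		return v, nil
	}
}

func main() {
	p := &pool{requests: make(map[uint64]chan int)}
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := p.wait(ctx)
	fmt.Println(err) // context canceled
}
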
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 63e1292..450e5f1 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -14,6 +14,7 @@
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
 )
@@ -152,8 +153,13 @@
 	if err != nil {
 		t.Fatalf("error closing DB: %v", err)
 	}
-	if count := db.numOpenConns(); count != 0 {
-		t.Fatalf("%d connections still open after closing DB", count)
+
+	var numOpen int
+	if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
+		numOpen = db.numOpenConns()
+		return numOpen == 0
+	}) {
+		t.Fatalf("%d connections still open after closing DB", numOpen)
 	}
 }
 
@@ -275,6 +281,7 @@
 	}
 }
 
+// TestQueryContext tests canceling the context while scanning the rows.
 func TestQueryContext(t *testing.T) {
 	db := newTestDB(t, "people")
 	defer closeDB(t, db)
@@ -296,7 +303,7 @@
 	for rows.Next() {
 		if index == 2 {
 			cancel()
-			time.Sleep(10 * time.Millisecond)
+			waitForRowsClose(t, rows, 5*time.Second)
 		}
 		var r row
 		err = rows.Scan(&r.age, &r.name)
@@ -312,9 +319,13 @@
 		got = append(got, r)
 		index++
 	}
-	err = rows.Err()
-	if err != nil {
-		t.Fatalf("Err: %v", err)
+	select {
+	case <-ctx.Done():
+		if err := ctx.Err(); err != context.Canceled {
+			t.Fatalf("context err = %v; want context.Canceled")
+		}
+	default:
+		t.Fatalf("context err = nil; want context.Canceled")
 	}
 	want := []row{
 		{age: 1, name: "Alice"},
@@ -326,9 +337,8 @@
 
 	// And verify that the final rows.Next() call, which hit EOF,
 	// also closed the rows connection.
-	if n := db.numFreeConns(); n != 1 {
-		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
-	}
+	waitForRowsClose(t, rows, 5*time.Second)
+	waitForFree(t, db, 5*time.Second, 1)
 	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
 		t.Errorf("executed %d Prepare statements; want 1", prepares)
 	}
@@ -345,12 +355,39 @@
 	return false
 }
 
+// waitForFree checks db.numFreeConns until either it equals want or
+// the maxWait time elapses.
+func waitForFree(t *testing.T, db *DB, maxWait time.Duration, want int) {
+	var numFree int
+	if !waitCondition(maxWait, 5*time.Millisecond, func() bool {
+		numFree = db.numFreeConns()
+		return numFree == want
+	}) {
+		t.Fatalf("free conns after hitting EOF = %d; want %d", numFree, want)
+	}
+}
+
+func waitForRowsClose(t *testing.T, rows *Rows, maxWait time.Duration) {
+	if !waitCondition(maxWait, 5*time.Millisecond, func() bool {
+		rows.closemu.RLock()
+		defer rows.closemu.RUnlock()
+		return rows.closed
+	}) {
+		t.Fatal("failed to close rows")
+	}
+}
+
+// TestQueryContextWait ensures that rows and all internal statements are closed when
+// a query context is closed during execution.
 func TestQueryContextWait(t *testing.T) {
 	db := newTestDB(t, "people")
 	defer closeDB(t, db)
 	prepares0 := numPrepares(t, db)
 
-	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*15)
+	// TODO(kardianos): convert this from using a timeout to using an explicit
+	// cancel when the query signals that it is "executing" the query.
+	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
+	defer cancel()
 
 	// This will trigger the *fakeConn.Prepare method which will take time
 	// performing the query. The ctxDriverPrepare func will check the context
@@ -361,14 +398,17 @@
 	}
 
 	// Verify closed rows connection after error condition.
-	if n := db.numFreeConns(); n != 1 {
-		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
-	}
+	waitForFree(t, db, 5*time.Second, 1)
 	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
-		t.Errorf("executed %d Prepare statements; want 1", prepares)
+		// TODO(kardianos): if the context times out before the db.QueryContext
+		// call executes, this check may fail. After adjusting how the context
+		// is canceled above, revert this back to a Fatal error.
+		t.Logf("executed %d Prepare statements; want 1", prepares)
 	}
 }
 
+// TestTxContextWait tests the transaction behavior when the tx context is canceled
+// during execution of the query.
 func TestTxContextWait(t *testing.T) {
 	db := newTestDB(t, "people")
 	defer closeDB(t, db)
@@ -377,6 +417,10 @@
 
 	tx, err := db.BeginTx(ctx, nil)
 	if err != nil {
+		// Guard against the context being canceled before BeginTx completes.
+		if err == context.DeadlineExceeded {
+			t.Skip("tx context canceled prior to first use")
+		}
 		t.Fatal(err)
 	}
 
@@ -388,19 +432,7 @@
 		t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
 	}
 
-	var numFree int
-	if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
-		numFree = db.numFreeConns()
-		return numFree == 0
-	}) {
-		t.Fatalf("free conns after hitting EOF = %d; want 0", numFree)
-	}
-
-	// Ensure the dropped connection allows more connections to be made.
-	// Checked on DB Close.
-	waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
-		return db.numOpenConns() == 0
-	})
+	waitForFree(t, db, 5*time.Second, 0)
 }
 
 func TestMultiResultSetQuery(t *testing.T) {
@@ -471,9 +503,7 @@
 
 	// And verify that the final rows.Next() call, which hit EOF,
 	// also closed the rows connection.
-	if n := db.numFreeConns(); n != 1 {
-		t.Fatalf("free conns after query hitting EOF = %d; want 1", n)
-	}
+	waitForFree(t, db, 5*time.Second, 1)
 	if prepares := numPrepares(t, db) - prepares0; prepares != 1 {
 		t.Errorf("executed %d Prepare statements; want 1", prepares)
 	}
@@ -526,6 +556,63 @@
 	}
 }
 
+func TestPoolExhaustOnCancel(t *testing.T) {
+	if testing.Short() {
+		t.Skip("long test")
+	}
+	db := newTestDB(t, "people")
+	defer closeDB(t, db)
+
+	max := 3
+
+	db.SetMaxOpenConns(max)
+
+	// First saturate the connection pool.
+	// Then start new requests for a connection that is cancelled after it is requested.
+
+	var saturate, saturateDone sync.WaitGroup
+	saturate.Add(max)
+	saturateDone.Add(max)
+
+	for i := 0; i < max; i++ {
+		go func() {
+			saturate.Done()
+			rows, err := db.Query("WAIT|500ms|SELECT|people|name,photo|")
+			if err != nil {
+				t.Fatalf("Query: %v", err)
+			}
+			rows.Close()
+			saturateDone.Done()
+		}()
+	}
+
+	saturate.Wait()
+
+	// Now cancel the request while it is waiting.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
+	defer cancel()
+
+	for i := 0; i < max; i++ {
+		ctxReq, cancelReq := context.WithCancel(ctx)
+		go func() {
+			time.Sleep(time.Millisecond * 100)
+			cancelReq()
+		}()
+		err := db.PingContext(ctxReq)
+		if err != context.Canceled {
+			t.Fatalf("PingContext (Exhaust): %v", err)
+		}
+	}
+
+	saturateDone.Wait()
+
+	// Now try to open a normal connection.
+	err := db.PingContext(ctx)
+	if err != nil {
+		t.Fatalf("PingContext (Normal): %v", err)
+	}
+}
+
 func TestByteOwnership(t *testing.T) {
 	db := newTestDB(t, "people")
 	defer closeDB(t, db)
@@ -1135,6 +1222,24 @@
 	}
 }
 
+var atomicRowsCloseHook atomic.Value // of func(*Rows, *error)
+
+func init() {
+	rowsCloseHook = func() func(*Rows, *error) {
+		fn, _ := atomicRowsCloseHook.Load().(func(*Rows, *error))
+		return fn
+	}
+}
+
+func setRowsCloseHook(fn func(*Rows, *error)) {
+	if fn == nil {
+		// Can't change an atomic.Value back to nil, so set it to this
+		// no-op func instead.
+		fn = func(*Rows, *error) {}
+	}
+	atomicRowsCloseHook.Store(fn)
+}
+
 // Test issue 6651
 func TestIssue6651(t *testing.T) {
 	db := newTestDB(t, "people")
@@ -1147,6 +1252,7 @@
 		return fmt.Errorf(want)
 	}
 	defer func() { rowsCursorNextHook = nil }()
+
 	err := db.QueryRow("SELECT|people|name|").Scan(&v)
 	if err == nil || err.Error() != want {
 		t.Errorf("error = %q; want %q", err, want)
@@ -1154,10 +1260,10 @@
 	rowsCursorNextHook = nil
 
 	want = "error in rows.Close"
-	rowsCloseHook = func(rows *Rows, err *error) {
+	setRowsCloseHook(func(rows *Rows, err *error) {
 		*err = fmt.Errorf(want)
-	}
-	defer func() { rowsCloseHook = nil }()
+	})
+	defer setRowsCloseHook(nil)
 	err = db.QueryRow("SELECT|people|name|").Scan(&v)
 	if err == nil || err.Error() != want {
 		t.Errorf("error = %q; want %q", err, want)
@@ -1830,7 +1936,9 @@
 		db.dumpDeps(t)
 	}
 
-	if len(stmt.css) > nquery {
+	if !waitCondition(5*time.Second, 5*time.Millisecond, func() bool {
+		return len(stmt.css) <= nquery
+	}) {
 		t.Errorf("len(stmt.css) = %d; want <= %d", len(stmt.css), nquery)
 	}
 
@@ -2576,10 +2684,10 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	rowsCloseHook = func(rows *Rows, err *error) {
+	setRowsCloseHook(func(rows *Rows, err *error) {
 		*err = driver.ErrBadConn
-	}
-	defer func() { rowsCloseHook = nil }()
+	})
+	defer setRowsCloseHook(nil)
 	for i := 0; i < 10; i++ {
 		rows, err := stmt.Query()
 		if err != nil {
@@ -2642,7 +2750,10 @@
 			if err != nil {
 				return
 			}
-			rows, err := tx.QueryContext(ctx, "WAIT|"+qwait+"|SELECT|people|name|")
+			// This is expected to give a cancel error most, but not all, of the time.
+			// A test failure will surface as a panic or as a reported race
+			// condition.
+			rows, _ := tx.QueryContext(ctx, "WAIT|"+qwait+"|SELECT|people|name|")
 			if rows != nil {
 				rows.Close()
 			}
@@ -2652,7 +2763,50 @@
 		}()
 	}
 	wg.Wait()
-	time.Sleep(milliWait * 3 * time.Millisecond)
+}
+
+// TestIssue18719 cancels the context right before use. The sql.driverConn
+// will nil out the ci on close under a lock, but if another goroutine uses it right
+// after, it will panic on the nil ref.
+//
+// See https://golang.org/cl/35550 .
+func TestIssue18719(t *testing.T) {
+	db := newTestDB(t, "people")
+	defer closeDB(t, db)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	tx, err := db.BeginTx(ctx, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	hookTxGrabConn = func() {
+		cancel()
+
+		// Wait for the context to cancel and tx to rollback.
+		for tx.isDone() == false {
+			time.Sleep(time.Millisecond * 3)
+		}
+	}
+	defer func() { hookTxGrabConn = nil }()
+
+	// This call will grab the connection and cancel the context
+	// after it has done so. Code after must deal with the canceled state.
+	rows, err := tx.QueryContext(ctx, "SELECT|people|name|")
+	if err != nil {
+		rows.Close()
+		t.Fatalf("expected error %v but got %v", nil, err)
+	}
+
+	// Rows may be ignored because it will be closed when the context is canceled.
+
+	// Do not explicitly rollback. The rollback will happen from the
+	// canceled context.
+
+	cancel()
+	waitForRowsClose(t, rows, 5*time.Second)
 }
 
 func TestConcurrency(t *testing.T) {
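
The helpers waitForFree and waitForRowsClose above build on waitCondition, which polls rather than sleeping for a fixed interval and so keeps these tests robust on slow builders. The body of waitCondition is only partially visible in this hunk; a sketch consistent with how the tests call it:

package main

import (
	"fmt"
	"time"
)

// waitCondition polls fn every checkEvery until it returns true or waitFor
// elapses, reporting whether the condition was eventually met.
func waitCondition(waitFor, checkEvery time.Duration, fn func() bool) bool {
	deadline := time.Now().Add(waitFor)
	for time.Now().Before(deadline) {
		if fn() {
			return true
		}
		time.Sleep(checkEvery)
	}
	return false
}

func main() {
	start := time.Now()
	ok := waitCondition(time.Second, 5*time.Millisecond, func() bool {
		return time.Since(start) > 50*time.Millisecond
	})
	fmt.Println(ok) // true
}
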
diff --git a/src/encoding/xml/marshal.go b/src/encoding/xml/marshal.go
index 1176f5d..4c6ba8c 100644
--- a/src/encoding/xml/marshal.go
+++ b/src/encoding/xml/marshal.go
@@ -775,6 +775,20 @@
 
 var ddBytes = []byte("--")
 
+// indirect drills into interfaces and pointers, returning the pointed-at value.
+// If it encounters a nil interface or pointer, indirect returns that nil value.
+// This can turn into an infinite loop given a cyclic chain,
+// but it matches the Go 1 behavior.
+func indirect(vf reflect.Value) reflect.Value {
+	for vf.Kind() == reflect.Interface || vf.Kind() == reflect.Ptr {
+		if vf.IsNil() {
+			return vf
+		}
+		vf = vf.Elem()
+	}
+	return vf
+}
+
 func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
 	s := parentStack{p: p}
 	for i := range tinfo.fields {
@@ -816,17 +830,9 @@
 					continue
 				}
 			}
-			// Drill into interfaces and pointers.
-			// This can turn into an infinite loop given a cyclic chain,
-			// but it matches the Go 1 behavior.
-			for vf.Kind() == reflect.Interface || vf.Kind() == reflect.Ptr {
-				if vf.IsNil() {
-					return nil
-				}
-				vf = vf.Elem()
-			}
 
 			var scratch [64]byte
+			vf = indirect(vf)
 			switch vf.Kind() {
 			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 				if err := emit(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)); err != nil {
@@ -861,6 +867,7 @@
 			if err := s.trim(finfo.parents); err != nil {
 				return err
 			}
+			vf = indirect(vf)
 			k := vf.Kind()
 			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
 				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
@@ -901,6 +908,7 @@
 			continue
 
 		case fInnerXml:
+			vf = indirect(vf)
 			iface := vf.Interface()
 			switch raw := iface.(type) {
 			case []byte:
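
The new indirect helper above centralizes the pointer/interface unwrapping that marshalStruct previously did inline, and it is now applied to chardata/CDATA, comment, and innerxml fields alike. A standalone sketch of the same helper outside the xml package:

package main

import (
	"fmt"
	"reflect"
)

// indirect follows interfaces and pointers until it reaches a concrete,
// non-pointer value, stopping early at a nil so callers can detect it.
func indirect(v reflect.Value) reflect.Value {
	for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return v
		}
		v = v.Elem()
	}
	return v
}

func main() {
	s := "hi"
	var boxed interface{} = &s
	fmt.Println(indirect(reflect.ValueOf(boxed)).String()) // hi

	var nilPtr *string
	fmt.Println(indirect(reflect.ValueOf(nilPtr)).IsNil()) // true
}
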
diff --git a/src/encoding/xml/marshal_test.go b/src/encoding/xml/marshal_test.go
index d79b99a..5ec7ece 100644
--- a/src/encoding/xml/marshal_test.go
+++ b/src/encoding/xml/marshal_test.go
@@ -386,6 +386,140 @@
 	return &x
 }
 
+func stringptr(x string) *string {
+	return &x
+}
+
+type T1 struct{}
+type T2 struct{}
+type T3 struct{}
+
+type IndirComment struct {
+	T1      T1
+	Comment *string `xml:",comment"`
+	T2      T2
+}
+
+type DirectComment struct {
+	T1      T1
+	Comment string `xml:",comment"`
+	T2      T2
+}
+
+type IfaceComment struct {
+	T1      T1
+	Comment interface{} `xml:",comment"`
+	T2      T2
+}
+
+type IndirChardata struct {
+	T1       T1
+	Chardata *string `xml:",chardata"`
+	T2       T2
+}
+
+type DirectChardata struct {
+	T1       T1
+	Chardata string `xml:",chardata"`
+	T2       T2
+}
+
+type IfaceChardata struct {
+	T1       T1
+	Chardata interface{} `xml:",chardata"`
+	T2       T2
+}
+
+type IndirCDATA struct {
+	T1    T1
+	CDATA *string `xml:",cdata"`
+	T2    T2
+}
+
+type DirectCDATA struct {
+	T1    T1
+	CDATA string `xml:",cdata"`
+	T2    T2
+}
+
+type IfaceCDATA struct {
+	T1    T1
+	CDATA interface{} `xml:",cdata"`
+	T2    T2
+}
+
+type IndirInnerXML struct {
+	T1       T1
+	InnerXML *string `xml:",innerxml"`
+	T2       T2
+}
+
+type DirectInnerXML struct {
+	T1       T1
+	InnerXML string `xml:",innerxml"`
+	T2       T2
+}
+
+type IfaceInnerXML struct {
+	T1       T1
+	InnerXML interface{} `xml:",innerxml"`
+	T2       T2
+}
+
+type IndirElement struct {
+	T1      T1
+	Element *string
+	T2      T2
+}
+
+type DirectElement struct {
+	T1      T1
+	Element string
+	T2      T2
+}
+
+type IfaceElement struct {
+	T1      T1
+	Element interface{}
+	T2      T2
+}
+
+type IndirOmitEmpty struct {
+	T1        T1
+	OmitEmpty *string `xml:",omitempty"`
+	T2        T2
+}
+
+type DirectOmitEmpty struct {
+	T1        T1
+	OmitEmpty string `xml:",omitempty"`
+	T2        T2
+}
+
+type IfaceOmitEmpty struct {
+	T1        T1
+	OmitEmpty interface{} `xml:",omitempty"`
+	T2        T2
+}
+
+type IndirAny struct {
+	T1  T1
+	Any *string `xml:",any"`
+	T2  T2
+}
+
+type DirectAny struct {
+	T1  T1
+	Any string `xml:",any"`
+	T2  T2
+}
+
+type IfaceAny struct {
+	T1  T1
+	Any interface{} `xml:",any"`
+	T2  T2
+}
+
 var (
 	nameAttr     = "Sarah"
 	ageAttr      = uint(12)
@@ -398,10 +532,12 @@
 // please try to make them two-way as well to ensure that
 // marshaling and unmarshaling are as symmetrical as feasible.
 var marshalTests = []struct {
-	Value         interface{}
-	ExpectXML     string
-	MarshalOnly   bool
-	UnmarshalOnly bool
+	Value          interface{}
+	ExpectXML      string
+	MarshalOnly    bool
+	MarshalError   string
+	UnmarshalOnly  bool
+	UnmarshalError string
 }{
 	// Test nil marshals to nothing
 	{Value: nil, ExpectXML: ``, MarshalOnly: true},
@@ -1133,6 +1269,382 @@
 		ExpectXML: `<NestedAndCData><A><B></B><B></B></A><![CDATA[test]]></NestedAndCData>`,
 		Value:     &NestedAndCData{AB: make([]string, 2), CDATA: "test"},
 	},
+	// Test pointer indirection in various kinds of fields.
+	// https://golang.org/issue/19063
+	{
+		ExpectXML:   `<IndirComment><T1></T1><!--hi--><T2></T2></IndirComment>`,
+		Value:       &IndirComment{Comment: stringptr("hi")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:   `<IndirComment><T1></T1><T2></T2></IndirComment>`,
+		Value:       &IndirComment{Comment: stringptr("")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:    `<IndirComment><T1></T1><T2></T2></IndirComment>`,
+		Value:        &IndirComment{Comment: nil},
+		MarshalError: "xml: bad type for comment field of xml.IndirComment",
+	},
+	{
+		ExpectXML:     `<IndirComment><T1></T1><!--hi--><T2></T2></IndirComment>`,
+		Value:         &IndirComment{Comment: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:   `<IfaceComment><T1></T1><!--hi--><T2></T2></IfaceComment>`,
+		Value:       &IfaceComment{Comment: "hi"},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceComment><T1></T1><!--hi--><T2></T2></IfaceComment>`,
+		Value:         &IfaceComment{Comment: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:    `<IfaceComment><T1></T1><T2></T2></IfaceComment>`,
+		Value:        &IfaceComment{Comment: nil},
+		MarshalError: "xml: bad type for comment field of xml.IfaceComment",
+	},
+	{
+		ExpectXML:     `<IfaceComment><T1></T1><T2></T2></IfaceComment>`,
+		Value:         &IfaceComment{Comment: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<DirectComment><T1></T1><!--hi--><T2></T2></DirectComment>`,
+		Value:     &DirectComment{Comment: string("hi")},
+	},
+	{
+		ExpectXML: `<DirectComment><T1></T1><T2></T2></DirectComment>`,
+		Value:     &DirectComment{Comment: string("")},
+	},
+	{
+		ExpectXML: `<IndirChardata><T1></T1>hi<T2></T2></IndirChardata>`,
+		Value:     &IndirChardata{Chardata: stringptr("hi")},
+	},
+	{
+		ExpectXML:     `<IndirChardata><T1></T1><![CDATA[hi]]><T2></T2></IndirChardata>`,
+		Value:         &IndirChardata{Chardata: stringptr("hi")},
+		UnmarshalOnly: true, // marshals without CDATA
+	},
+	{
+		ExpectXML: `<IndirChardata><T1></T1><T2></T2></IndirChardata>`,
+		Value:     &IndirChardata{Chardata: stringptr("")},
+	},
+	{
+		ExpectXML:   `<IndirChardata><T1></T1><T2></T2></IndirChardata>`,
+		Value:       &IndirChardata{Chardata: nil},
+		MarshalOnly: true, // unmarshal leaves Chardata=stringptr("")
+	},
+	{
+		ExpectXML:      `<IfaceChardata><T1></T1>hi<T2></T2></IfaceChardata>`,
+		Value:          &IfaceChardata{Chardata: string("hi")},
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML:      `<IfaceChardata><T1></T1><![CDATA[hi]]><T2></T2></IfaceChardata>`,
+		Value:          &IfaceChardata{Chardata: string("hi")},
+		UnmarshalOnly:  true, // marshals without CDATA
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML:      `<IfaceChardata><T1></T1><T2></T2></IfaceChardata>`,
+		Value:          &IfaceChardata{Chardata: string("")},
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML:      `<IfaceChardata><T1></T1><T2></T2></IfaceChardata>`,
+		Value:          &IfaceChardata{Chardata: nil},
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML: `<DirectChardata><T1></T1>hi<T2></T2></DirectChardata>`,
+		Value:     &DirectChardata{Chardata: string("hi")},
+	},
+	{
+		ExpectXML:     `<DirectChardata><T1></T1><![CDATA[hi]]><T2></T2></DirectChardata>`,
+		Value:         &DirectChardata{Chardata: string("hi")},
+		UnmarshalOnly: true, // marshals without CDATA
+	},
+	{
+		ExpectXML: `<DirectChardata><T1></T1><T2></T2></DirectChardata>`,
+		Value:     &DirectChardata{Chardata: string("")},
+	},
+	{
+		ExpectXML: `<IndirCDATA><T1></T1><![CDATA[hi]]><T2></T2></IndirCDATA>`,
+		Value:     &IndirCDATA{CDATA: stringptr("hi")},
+	},
+	{
+		ExpectXML:     `<IndirCDATA><T1></T1>hi<T2></T2></IndirCDATA>`,
+		Value:         &IndirCDATA{CDATA: stringptr("hi")},
+		UnmarshalOnly: true, // marshals with CDATA
+	},
+	{
+		ExpectXML: `<IndirCDATA><T1></T1><T2></T2></IndirCDATA>`,
+		Value:     &IndirCDATA{CDATA: stringptr("")},
+	},
+	{
+		ExpectXML:   `<IndirCDATA><T1></T1><T2></T2></IndirCDATA>`,
+		Value:       &IndirCDATA{CDATA: nil},
+		MarshalOnly: true, // unmarshal leaves CDATA=stringptr("")
+	},
+	{
+		ExpectXML:      `<IfaceCDATA><T1></T1><![CDATA[hi]]><T2></T2></IfaceCDATA>`,
+		Value:          &IfaceCDATA{CDATA: string("hi")},
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML:      `<IfaceCDATA><T1></T1>hi<T2></T2></IfaceCDATA>`,
+		Value:          &IfaceCDATA{CDATA: string("hi")},
+		UnmarshalOnly:  true, // marshals with CDATA
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML:      `<IfaceCDATA><T1></T1><T2></T2></IfaceCDATA>`,
+		Value:          &IfaceCDATA{CDATA: string("")},
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML:      `<IfaceCDATA><T1></T1><T2></T2></IfaceCDATA>`,
+		Value:          &IfaceCDATA{CDATA: nil},
+		UnmarshalError: "cannot unmarshal into interface {}",
+	},
+	{
+		ExpectXML: `<DirectCDATA><T1></T1><![CDATA[hi]]><T2></T2></DirectCDATA>`,
+		Value:     &DirectCDATA{CDATA: string("hi")},
+	},
+	{
+		ExpectXML:     `<DirectCDATA><T1></T1>hi<T2></T2></DirectCDATA>`,
+		Value:         &DirectCDATA{CDATA: string("hi")},
+		UnmarshalOnly: true, // marshals with CDATA
+	},
+	{
+		ExpectXML: `<DirectCDATA><T1></T1><T2></T2></DirectCDATA>`,
+		Value:     &DirectCDATA{CDATA: string("")},
+	},
+	{
+		ExpectXML:   `<IndirInnerXML><T1></T1><hi/><T2></T2></IndirInnerXML>`,
+		Value:       &IndirInnerXML{InnerXML: stringptr("<hi/>")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:   `<IndirInnerXML><T1></T1><T2></T2></IndirInnerXML>`,
+		Value:       &IndirInnerXML{InnerXML: stringptr("")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IndirInnerXML><T1></T1><T2></T2></IndirInnerXML>`,
+		Value:     &IndirInnerXML{InnerXML: nil},
+	},
+	{
+		ExpectXML:     `<IndirInnerXML><T1></T1><hi/><T2></T2></IndirInnerXML>`,
+		Value:         &IndirInnerXML{InnerXML: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:   `<IfaceInnerXML><T1></T1><hi/><T2></T2></IfaceInnerXML>`,
+		Value:       &IfaceInnerXML{InnerXML: "<hi/>"},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceInnerXML><T1></T1><hi/><T2></T2></IfaceInnerXML>`,
+		Value:         &IfaceInnerXML{InnerXML: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IfaceInnerXML><T1></T1><T2></T2></IfaceInnerXML>`,
+		Value:     &IfaceInnerXML{InnerXML: nil},
+	},
+	{
+		ExpectXML:     `<IfaceInnerXML><T1></T1><T2></T2></IfaceInnerXML>`,
+		Value:         &IfaceInnerXML{InnerXML: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:   `<DirectInnerXML><T1></T1><hi/><T2></T2></DirectInnerXML>`,
+		Value:       &DirectInnerXML{InnerXML: string("<hi/>")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<DirectInnerXML><T1></T1><hi/><T2></T2></DirectInnerXML>`,
+		Value:         &DirectInnerXML{InnerXML: string("<T1></T1><hi/><T2></T2>")},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:   `<DirectInnerXML><T1></T1><T2></T2></DirectInnerXML>`,
+		Value:       &DirectInnerXML{InnerXML: string("")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<DirectInnerXML><T1></T1><T2></T2></DirectInnerXML>`,
+		Value:         &DirectInnerXML{InnerXML: string("<T1></T1><T2></T2>")},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IndirElement><T1></T1><Element>hi</Element><T2></T2></IndirElement>`,
+		Value:     &IndirElement{Element: stringptr("hi")},
+	},
+	{
+		ExpectXML: `<IndirElement><T1></T1><Element></Element><T2></T2></IndirElement>`,
+		Value:     &IndirElement{Element: stringptr("")},
+	},
+	{
+		ExpectXML: `<IndirElement><T1></T1><T2></T2></IndirElement>`,
+		Value:     &IndirElement{Element: nil},
+	},
+	{
+		ExpectXML:   `<IfaceElement><T1></T1><Element>hi</Element><T2></T2></IfaceElement>`,
+		Value:       &IfaceElement{Element: "hi"},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceElement><T1></T1><Element>hi</Element><T2></T2></IfaceElement>`,
+		Value:         &IfaceElement{Element: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IfaceElement><T1></T1><T2></T2></IfaceElement>`,
+		Value:     &IfaceElement{Element: nil},
+	},
+	{
+		ExpectXML:     `<IfaceElement><T1></T1><T2></T2></IfaceElement>`,
+		Value:         &IfaceElement{Element: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<DirectElement><T1></T1><Element>hi</Element><T2></T2></DirectElement>`,
+		Value:     &DirectElement{Element: string("hi")},
+	},
+	{
+		ExpectXML: `<DirectElement><T1></T1><Element></Element><T2></T2></DirectElement>`,
+		Value:     &DirectElement{Element: string("")},
+	},
+	{
+		ExpectXML: `<IndirOmitEmpty><T1></T1><OmitEmpty>hi</OmitEmpty><T2></T2></IndirOmitEmpty>`,
+		Value:     &IndirOmitEmpty{OmitEmpty: stringptr("hi")},
+	},
+	{
+		// Note: Changed in Go 1.8 to include <OmitEmpty> element (because x.OmitEmpty != nil).
+		ExpectXML:   `<IndirOmitEmpty><T1></T1><OmitEmpty></OmitEmpty><T2></T2></IndirOmitEmpty>`,
+		Value:       &IndirOmitEmpty{OmitEmpty: stringptr("")},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IndirOmitEmpty><T1></T1><OmitEmpty></OmitEmpty><T2></T2></IndirOmitEmpty>`,
+		Value:         &IndirOmitEmpty{OmitEmpty: stringptr("")},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IndirOmitEmpty><T1></T1><T2></T2></IndirOmitEmpty>`,
+		Value:     &IndirOmitEmpty{OmitEmpty: nil},
+	},
+	{
+		ExpectXML:   `<IfaceOmitEmpty><T1></T1><OmitEmpty>hi</OmitEmpty><T2></T2></IfaceOmitEmpty>`,
+		Value:       &IfaceOmitEmpty{OmitEmpty: "hi"},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceOmitEmpty><T1></T1><OmitEmpty>hi</OmitEmpty><T2></T2></IfaceOmitEmpty>`,
+		Value:         &IfaceOmitEmpty{OmitEmpty: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IfaceOmitEmpty><T1></T1><T2></T2></IfaceOmitEmpty>`,
+		Value:     &IfaceOmitEmpty{OmitEmpty: nil},
+	},
+	{
+		ExpectXML:     `<IfaceOmitEmpty><T1></T1><T2></T2></IfaceOmitEmpty>`,
+		Value:         &IfaceOmitEmpty{OmitEmpty: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<DirectOmitEmpty><T1></T1><OmitEmpty>hi</OmitEmpty><T2></T2></DirectOmitEmpty>`,
+		Value:     &DirectOmitEmpty{OmitEmpty: string("hi")},
+	},
+	{
+		ExpectXML: `<DirectOmitEmpty><T1></T1><T2></T2></DirectOmitEmpty>`,
+		Value:     &DirectOmitEmpty{OmitEmpty: string("")},
+	},
+	{
+		ExpectXML: `<IndirAny><T1></T1><Any>hi</Any><T2></T2></IndirAny>`,
+		Value:     &IndirAny{Any: stringptr("hi")},
+	},
+	{
+		ExpectXML: `<IndirAny><T1></T1><Any></Any><T2></T2></IndirAny>`,
+		Value:     &IndirAny{Any: stringptr("")},
+	},
+	{
+		ExpectXML: `<IndirAny><T1></T1><T2></T2></IndirAny>`,
+		Value:     &IndirAny{Any: nil},
+	},
+	{
+		ExpectXML:   `<IfaceAny><T1></T1><Any>hi</Any><T2></T2></IfaceAny>`,
+		Value:       &IfaceAny{Any: "hi"},
+		MarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceAny><T1></T1><Any>hi</Any><T2></T2></IfaceAny>`,
+		Value:         &IfaceAny{Any: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<IfaceAny><T1></T1><T2></T2></IfaceAny>`,
+		Value:     &IfaceAny{Any: nil},
+	},
+	{
+		ExpectXML:     `<IfaceAny><T1></T1><T2></T2></IfaceAny>`,
+		Value:         &IfaceAny{Any: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML: `<DirectAny><T1></T1><Any>hi</Any><T2></T2></DirectAny>`,
+		Value:     &DirectAny{Any: string("hi")},
+	},
+	{
+		ExpectXML: `<DirectAny><T1></T1><Any></Any><T2></T2></DirectAny>`,
+		Value:     &DirectAny{Any: string("")},
+	},
+	{
+		ExpectXML:     `<IndirFoo><T1></T1><Foo>hi</Foo><T2></T2></IndirFoo>`,
+		Value:         &IndirAny{Any: stringptr("hi")},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IndirFoo><T1></T1><Foo></Foo><T2></T2></IndirFoo>`,
+		Value:         &IndirAny{Any: stringptr("")},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IndirFoo><T1></T1><T2></T2></IndirFoo>`,
+		Value:         &IndirAny{Any: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceFoo><T1></T1><Foo>hi</Foo><T2></T2></IfaceFoo>`,
+		Value:         &IfaceAny{Any: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceFoo><T1></T1><T2></T2></IfaceFoo>`,
+		Value:         &IfaceAny{Any: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<IfaceFoo><T1></T1><T2></T2></IfaceFoo>`,
+		Value:         &IfaceAny{Any: nil},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<DirectFoo><T1></T1><Foo>hi</Foo><T2></T2></DirectFoo>`,
+		Value:         &DirectAny{Any: string("hi")},
+		UnmarshalOnly: true,
+	},
+	{
+		ExpectXML:     `<DirectFoo><T1></T1><Foo></Foo><T2></T2></DirectFoo>`,
+		Value:         &DirectAny{Any: string("")},
+		UnmarshalOnly: true,
+	},
 }
 
 func TestMarshal(t *testing.T) {
@@ -1142,7 +1654,17 @@
 		}
 		data, err := Marshal(test.Value)
 		if err != nil {
-			t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err)
+			if test.MarshalError == "" {
+				t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err)
+				continue
+			}
+			if !strings.Contains(err.Error(), test.MarshalError) {
+				t.Errorf("#%d: marshal(%#v): %s, want %q", idx, test.Value, err, test.MarshalError)
+			}
+			continue
+		}
+		if test.MarshalError != "" {
+			t.Errorf("#%d: Marshal succeeded, want error %q", idx, test.MarshalError)
 			continue
 		}
 		if got, want := string(data), test.ExpectXML; got != want {
@@ -1268,8 +1790,16 @@
 		}
 
 		if err != nil {
-			t.Errorf("#%d: unexpected error: %#v", i, err)
-		} else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
+			if test.UnmarshalError == "" {
+				t.Errorf("#%d: unmarshal(%#v): %s", i, test.ExpectXML, err)
+				continue
+			}
+			if !strings.Contains(err.Error(), test.UnmarshalError) {
+				t.Errorf("#%d: unmarshal(%#v): %s, want %q", i, test.ExpectXML, err, test.UnmarshalError)
+			}
+			continue
+		}
+		if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
 			t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want)
 		}
 	}
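
The CDATA cases above exercise the ",cdata" struct tag through pointer, interface, and plain string fields. As a minimal standalone sketch of the same behavior (the Note type and its Body field are made-up names, not from the test file): a string field tagged ",cdata" marshals inside a CDATA section, and Unmarshal accepts either plain character data or a CDATA section for it.

package main

import (
	"encoding/xml"
	"fmt"
)

// Hypothetical type; it mirrors the shape of the DirectCDATA test struct above.
type Note struct {
	XMLName xml.Name `xml:"Note"`
	Body    string   `xml:",cdata"`
}

func main() {
	out, _ := xml.Marshal(Note{Body: "hi"})
	fmt.Println(string(out)) // <Note><![CDATA[hi]]></Note>

	var n Note
	// Unmarshal accepts plain character data as well as a CDATA section.
	xml.Unmarshal([]byte(`<Note>hi</Note>`), &n)
	fmt.Println(n.Body) // hi
}
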
diff --git a/src/go/build/build.go b/src/go/build/build.go
index da12d50..f11bc0c 100644
--- a/src/go/build/build.go
+++ b/src/go/build/build.go
@@ -266,7 +266,7 @@
 	}
 	if home := os.Getenv(env); home != "" {
 		def := filepath.Join(home, "go")
-		if def == runtime.GOROOT() {
+		if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) {
 			// Don't set the default GOPATH to GOROOT,
 			// as that will trigger warnings from the go tool.
 			return ""
diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go
index 11f26d4..ea43286 100644
--- a/src/go/printer/nodes.go
+++ b/src/go/printer/nodes.go
@@ -733,7 +733,7 @@
 
 	case *ast.FuncLit:
 		p.expr(x.Type)
-		p.adjBlock(p.distanceFrom(x.Type.Pos()), blank, x.Body)
+		p.funcBody(p.distanceFrom(x.Type.Pos()), blank, x.Body)
 
 	case *ast.ParenExpr:
 		if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
@@ -825,6 +825,7 @@
 		if x.Type != nil {
 			p.expr1(x.Type, token.HighestPrec, depth)
 		}
+		p.level++
 		p.print(x.Lbrace, token.LBRACE)
 		p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace)
 		// do not insert extra line break following a /*-style comment
@@ -837,6 +838,7 @@
 			mode |= noExtraBlank
 		}
 		p.print(mode, x.Rbrace, token.RBRACE, mode)
+		p.level--
 
 	case *ast.Ellipsis:
 		p.print(token.ELLIPSIS)
@@ -1557,18 +1559,23 @@
 	return bodySize
 }
 
-// adjBlock prints an "adjacent" block (e.g., a for-loop or function body) following
-// a header (e.g., a for-loop control clause or function signature) of given headerSize.
+// funcBody prints a function body following a function header of given headerSize.
 // If the header's and block's size are "small enough" and the block is "simple enough",
 // the block is printed on the current line, without line breaks, spaced from the header
 // by sep. Otherwise the block's opening "{" is printed on the current line, followed by
 // lines for the block's statements and its closing "}".
 //
-func (p *printer) adjBlock(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
+func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
 	if b == nil {
 		return
 	}
 
+	// save/restore composite literal nesting level
+	defer func(level int) {
+		p.level = level
+	}(p.level)
+	p.level = 0
+
 	const maxSize = 100
 	if headerSize+p.bodySize(b, maxSize) <= maxSize {
 		p.print(sep, b.Lbrace, token.LBRACE)
@@ -1613,7 +1620,7 @@
 	}
 	p.expr(d.Name)
 	p.signature(d.Type.Params, d.Type.Results)
-	p.adjBlock(p.distanceFrom(d.Pos()), vtab, d.Body)
+	p.funcBody(p.distanceFrom(d.Pos()), vtab, d.Body)
 }
 
 func (p *printer) decl(decl ast.Decl) {
diff --git a/src/go/printer/printer.go b/src/go/printer/printer.go
index eabf23e..be61dad 100644
--- a/src/go/printer/printer.go
+++ b/src/go/printer/printer.go
@@ -58,6 +58,7 @@
 	// Current state
 	output      []byte       // raw printer result
 	indent      int          // current indentation
+	level       int          // level == 0: outside composite literal; level > 0: inside composite literal
 	mode        pmode        // current printer mode
 	impliedSemi bool         // if set, a linebreak implies a semicolon
 	lastTok     token.Token  // last token printed (token.ILLEGAL if it's whitespace)
@@ -744,15 +745,19 @@
 		// follows on the same line but is not a comma, and not a "closing"
 		// token immediately following its corresponding "opening" token,
 		// add an extra separator unless explicitly disabled. Use a blank
-		// as separator unless we have pending linebreaks and they are not
-		// disabled, in which case we want a linebreak (issue 15137).
+		// as separator unless we have pending linebreaks, they are not
+		// disabled, and we are outside a composite literal, in which case
+		// we want a linebreak (issue 15137).
+		// TODO(gri) This has become overly complicated. We should be able
+		// to track whether we're inside an expression or statement and
+		// use that information to decide more directly.
 		needsLinebreak := false
 		if p.mode&noExtraBlank == 0 &&
 			last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
 			tok != token.COMMA &&
 			(tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
 			(tok != token.RBRACK || p.prevOpen == token.LBRACK) {
-			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 {
+			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 {
 				needsLinebreak = true
 			} else {
 				p.writeByte(' ', 1)
diff --git a/src/go/printer/testdata/comments2.golden b/src/go/printer/testdata/comments2.golden
index 7676a26..8b3a94d 100644
--- a/src/go/printer/testdata/comments2.golden
+++ b/src/go/printer/testdata/comments2.golden
@@ -103,3 +103,62 @@
 	mask := uint64(1)<<c - 1		// Allocation mask
 	used := atomic.LoadUint64(&h.used)	// Current allocations
 }
+
+// Test cases for issue 18782
+var _ = [][]int{
+	/*       a, b, c, d, e */
+	/* a */ {0, 0, 0, 0, 0},
+	/* b */ {0, 5, 4, 4, 4},
+	/* c */ {0, 4, 5, 4, 4},
+	/* d */ {0, 4, 4, 5, 4},
+	/* e */ {0, 4, 4, 4, 5},
+}
+
+var _ = T{ /* a */ 0}
+
+var _ = T{ /* a */ /* b */ 0}
+
+var _ = T{	/* a */	/* b */
+	/* c */ 0,
+}
+
+var _ = T{	/* a */	/* b */
+	/* c */
+	/* d */ 0,
+}
+
+var _ = T{
+	/* a */
+	/* b */ 0,
+}
+
+var _ = T{ /* a */ {}}
+
+var _ = T{ /* a */ /* b */ {}}
+
+var _ = T{	/* a */	/* b */
+	/* c */ {},
+}
+
+var _ = T{	/* a */	/* b */
+	/* c */
+	/* d */ {},
+}
+
+var _ = T{
+	/* a */
+	/* b */ {},
+}
+
+var _ = []T{
+	func() {
+		var _ = [][]int{
+			/*       a, b, c, d, e */
+			/* a */ {0, 0, 0, 0, 0},
+			/* b */ {0, 5, 4, 4, 4},
+			/* c */ {0, 4, 5, 4, 4},
+			/* d */ {0, 4, 4, 5, 4},
+			/* e */ {0, 4, 4, 4, 5},
+		}
+	},
+}
diff --git a/src/go/printer/testdata/comments2.input b/src/go/printer/testdata/comments2.input
index 4a055c8..8d38c41 100644
--- a/src/go/printer/testdata/comments2.input
+++ b/src/go/printer/testdata/comments2.input
@@ -103,3 +103,66 @@
    mask := uint64(1)<<c - 1 // Allocation mask
    used := atomic.LoadUint64(&h.used) // Current allocations
 }
+
+// Test cases for issue 18782
+var _ = [][]int{
+   /*       a, b, c, d, e */
+   /* a */ {0, 0, 0, 0, 0},
+   /* b */ {0, 5, 4, 4, 4},
+   /* c */ {0, 4, 5, 4, 4},
+   /* d */ {0, 4, 4, 5, 4},
+   /* e */ {0, 4, 4, 4, 5},
+}
+
+var _ = T{ /* a */ 0,
+}
+
+var _ = T{ /* a */ /* b */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */
+   /* d */ 0,
+}
+
+var _ = T{
+   /* a */
+   /* b */ 0,
+}
+
+var _ = T{ /* a */ {},
+}
+
+var _ = T{ /* a */ /* b */ {},
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */ {},
+}
+
+var _ = T{ /* a */ /* b */
+   /* c */
+   /* d */ {},
+}
+
+var _ = T{
+   /* a */
+   /* b */ {},
+}
+
+var _ = []T{
+   func() {
+      var _ = [][]int{
+         /*       a, b, c, d, e */
+         /* a */ {0, 0, 0, 0, 0},
+         /* b */ {0, 5, 4, 4, 4},
+         /* c */ {0, 4, 5, 4, 4},
+         /* d */ {0, 4, 4, 5, 4},
+         /* e */ {0, 4, 4, 4, 5},
+      }
+   },
+}
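
The golden/input pairs above can be reproduced outside the test harness with go/format, which uses the same printer settings. A quick sketch (T is deliberately undeclared; parsing and printing do not require it to resolve):

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// With the printer change above, a /*-style comment inside a composite
	// literal no longer forces an extra line break after it.
	src := []byte("package p\n\nvar _ = T{ /* a */ 0,\n}\n")
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // per the golden file above: var _ = T{ /* a */ 0}
}
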
diff --git a/src/net/http/client.go b/src/net/http/client.go
index d368bae..0005538 100644
--- a/src/net/http/client.go
+++ b/src/net/http/client.go
@@ -413,11 +413,12 @@
 
 // redirectBehavior describes what should happen when the
 // client encounters a 3xx status code from the server
-func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect bool) {
+func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) {
 	switch resp.StatusCode {
 	case 301, 302, 303:
 		redirectMethod = reqMethod
 		shouldRedirect = true
+		includeBody = false
 
 		// RFC 2616 allowed automatic redirection only with GET and
 		// HEAD requests. RFC 7231 lifts this restriction, but we still
@@ -429,6 +430,7 @@
 	case 307, 308:
 		redirectMethod = reqMethod
 		shouldRedirect = true
+		includeBody = true
 
 		// Treat 307 and 308 specially, since they're new in
 		// Go 1.8, and they also require re-sending the request body.
@@ -449,7 +451,7 @@
 			shouldRedirect = false
 		}
 	}
-	return redirectMethod, shouldRedirect
+	return redirectMethod, shouldRedirect, includeBody
 }
 
 // Do sends an HTTP request and returns an HTTP response, following
@@ -492,11 +494,14 @@
 	}
 
 	var (
-		deadline       = c.deadline()
-		reqs           []*Request
-		resp           *Response
-		copyHeaders    = c.makeHeadersCopier(req)
+		deadline    = c.deadline()
+		reqs        []*Request
+		resp        *Response
+		copyHeaders = c.makeHeadersCopier(req)
+
+		// Redirect behavior:
 		redirectMethod string
+		includeBody    bool
 	)
 	uerr := func(err error) error {
 		req.closeBody()
@@ -534,7 +539,7 @@
 				Cancel:   ireq.Cancel,
 				ctx:      ireq.ctx,
 			}
-			if ireq.GetBody != nil {
+			if includeBody && ireq.GetBody != nil {
 				req.Body, err = ireq.GetBody()
 				if err != nil {
 					return nil, uerr(err)
@@ -598,7 +603,7 @@
 		}
 
 		var shouldRedirect bool
-		redirectMethod, shouldRedirect = redirectBehavior(req.Method, resp, reqs[0])
+		redirectMethod, shouldRedirect, includeBody = redirectBehavior(req.Method, resp, reqs[0])
 		if !shouldRedirect {
 			return resp, nil
 		}
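
In short, as the updated test expectations show: 301/302/303 redirects are followed without re-sending the request body (and a redirected POST becomes GET), while 307/308 keep the original method and, when ireq.GetBody is available, re-send the body. A rough standalone sketch of the observable difference, using throwaway httptest servers (all names here are local to the sketch):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// Final destination: print the method and body it actually receives.
	target := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b, _ := ioutil.ReadAll(r.Body)
		fmt.Printf("%s %q\n", r.Method, b)
	}))
	defer target.Close()

	redirect := func(code int) *httptest.Server {
		return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			http.Redirect(w, r, target.URL, code)
		}))
	}
	s307 := redirect(http.StatusTemporaryRedirect) // 307: method and body preserved
	defer s307.Close()
	s303 := redirect(http.StatusSeeOther) // 303: becomes GET, body not re-sent
	defer s303.Close()

	if resp, err := http.Post(s307.URL, "text/plain", strings.NewReader("hi")); err == nil {
		resp.Body.Close() // target sees: POST "hi"
	}
	if resp, err := http.Post(s303.URL, "text/plain", strings.NewReader("hi")); err == nil {
		resp.Body.Close() // target sees: GET ""
	}
}
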
diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go
index eaf2cdc..4f674dd 100644
--- a/src/net/http/client_test.go
+++ b/src/net/http/client_test.go
@@ -360,25 +360,25 @@
 	wantSegments := []string{
 		`POST / "first"`,
 		`POST /?code=301&next=302 "c301"`,
-		`GET /?code=302 "c301"`,
-		`GET / "c301"`,
+		`GET /?code=302 ""`,
+		`GET / ""`,
 		`POST /?code=302&next=302 "c302"`,
-		`GET /?code=302 "c302"`,
-		`GET / "c302"`,
+		`GET /?code=302 ""`,
+		`GET / ""`,
 		`POST /?code=303&next=301 "c303wc301"`,
-		`GET /?code=301 "c303wc301"`,
-		`GET / "c303wc301"`,
+		`GET /?code=301 ""`,
+		`GET / ""`,
 		`POST /?code=304 "c304"`,
 		`POST /?code=305 "c305"`,
 		`POST /?code=307&next=303,308,302 "c307"`,
 		`POST /?code=303&next=308,302 "c307"`,
-		`GET /?code=308&next=302 "c307"`,
+		`GET /?code=308&next=302 ""`,
 		`GET /?code=302 "c307"`,
-		`GET / "c307"`,
+		`GET / ""`,
 		`POST /?code=308&next=302,301 "c308"`,
 		`POST /?code=302&next=301 "c308"`,
-		`GET /?code=301 "c308"`,
-		`GET / "c308"`,
+		`GET /?code=301 ""`,
+		`GET / ""`,
 		`POST /?code=404 "c404"`,
 	}
 	want := strings.Join(wantSegments, "\n")
@@ -399,20 +399,20 @@
 	wantSegments := []string{
 		`DELETE / "first"`,
 		`DELETE /?code=301&next=302,308 "c301"`,
-		`GET /?code=302&next=308 "c301"`,
-		`GET /?code=308 "c301"`,
+		`GET /?code=302&next=308 ""`,
+		`GET /?code=308 ""`,
 		`GET / "c301"`,
 		`DELETE /?code=302&next=302 "c302"`,
-		`GET /?code=302 "c302"`,
-		`GET / "c302"`,
+		`GET /?code=302 ""`,
+		`GET / ""`,
 		`DELETE /?code=303 "c303"`,
-		`GET / "c303"`,
+		`GET / ""`,
 		`DELETE /?code=307&next=301,308,303,302,304 "c307"`,
 		`DELETE /?code=301&next=308,303,302,304 "c307"`,
-		`GET /?code=308&next=303,302,304 "c307"`,
+		`GET /?code=308&next=303,302,304 ""`,
 		`GET /?code=303&next=302,304 "c307"`,
-		`GET /?code=302&next=304 "c307"`,
-		`GET /?code=304 "c307"`,
+		`GET /?code=302&next=304 ""`,
+		`GET /?code=304 ""`,
 		`DELETE /?code=308&next=307 "c308"`,
 		`DELETE /?code=307 "c308"`,
 		`DELETE / "c308"`,
@@ -432,7 +432,11 @@
 	ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
 		log.Lock()
 		slurp, _ := ioutil.ReadAll(r.Body)
-		fmt.Fprintf(&log.Buffer, "%s %s %q\n", r.Method, r.RequestURI, slurp)
+		fmt.Fprintf(&log.Buffer, "%s %s %q", r.Method, r.RequestURI, slurp)
+		if cl := r.Header.Get("Content-Length"); r.Method == "GET" && len(slurp) == 0 && (r.ContentLength != 0 || cl != "") {
+			fmt.Fprintf(&log.Buffer, " (but with body=%T, content-length = %v, %q)", r.Body, r.ContentLength, cl)
+		}
+		log.WriteByte('\n')
 		log.Unlock()
 		urlQuery := r.URL.Query()
 		if v := urlQuery.Get("code"); v != "" {
@@ -475,7 +479,24 @@
 	want = strings.TrimSpace(want)
 
 	if got != want {
-		t.Errorf("Log differs.\n Got:\n%s\nWant:\n%s\n", got, want)
+		got, want, lines := removeCommonLines(got, want)
+		t.Errorf("Log differs after %d common lines.\n\nGot:\n%s\n\nWant:\n%s\n", lines, got, want)
+	}
+}
+
+func removeCommonLines(a, b string) (asuffix, bsuffix string, commonLines int) {
+	for {
+		nl := strings.IndexByte(a, '\n')
+		if nl < 0 {
+			return a, b, commonLines
+		}
+		line := a[:nl+1]
+		if !strings.HasPrefix(b, line) {
+			return a, b, commonLines
+		}
+		commonLines++
+		a = a[len(line):]
+		b = b[len(line):]
 	}
 }
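
For reference, a standalone sketch of what the new helper reports: it strips the longest common prefix of whole lines from both logs and counts them (the function body is copied from the hunk above so the snippet compiles on its own).

package main

import (
	"fmt"
	"strings"
)

// Copied from the test change above, for illustration only.
func removeCommonLines(a, b string) (asuffix, bsuffix string, commonLines int) {
	for {
		nl := strings.IndexByte(a, '\n')
		if nl < 0 {
			return a, b, commonLines
		}
		line := a[:nl+1]
		if !strings.HasPrefix(b, line) {
			return a, b, commonLines
		}
		commonLines++
		a = a[len(line):]
		b = b[len(line):]
	}
}

func main() {
	got, want, n := removeCommonLines("a\nb\nX\n", "a\nb\nY\n")
	fmt.Printf("%d common lines, then %q vs %q\n", n, got, want) // 2 common lines, then "X\n" vs "Y\n"
}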
 
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
index 681dff1..73dd56e 100644
--- a/src/net/http/serve_test.go
+++ b/src/net/http/serve_test.go
@@ -5277,7 +5277,7 @@
 		defer conn.Close()
 		slurp, err := ioutil.ReadAll(buf.Reader)
 		if err != nil {
-			t.Error("Copy: %v", err)
+			t.Errorf("Copy: %v", err)
 		}
 		allX := true
 		for _, v := range slurp {
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 022350b..3eca293 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -2478,17 +2478,24 @@
 }
 
 func TestPtrTo(t *testing.T) {
+	// This block of code ensures that the ptrToThis field of the
+	// reflect data for *unsafe.Pointer is non-zero; see
+	// https://golang.org/issue/19003
+	var x unsafe.Pointer
+	var y = &x
+	var z = &y
+
 	var i int
 
-	typ := TypeOf(i)
+	typ := TypeOf(z)
 	for i = 0; i < 100; i++ {
 		typ = PtrTo(typ)
 	}
 	for i = 0; i < 100; i++ {
 		typ = typ.Elem()
 	}
-	if typ != TypeOf(i) {
-		t.Errorf("after 100 PtrTo and Elem, have %s, want %s", typ, TypeOf(i))
+	if typ != TypeOf(z) {
+		t.Errorf("after 100 PtrTo and Elem, have %s, want %s", typ, TypeOf(z))
 	}
 }
 
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 9d6e7a6..5d3c5c6 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -1469,6 +1469,7 @@
 	pp := *prototype
 
 	pp.str = resolveReflectName(newName(s, "", "", false))
+	pp.ptrToThis = 0
 
 	// For the type structures linked into the binary, the
 	// compiler provides a good hash of the string.
diff --git a/src/runtime/internal/sys/zversion.go b/src/runtime/internal/sys/zversion.go
index 87e4d26..449729c 100644
--- a/src/runtime/internal/sys/zversion.go
+++ b/src/runtime/internal/sys/zversion.go
@@ -3,7 +3,7 @@
 package sys
 
 const DefaultGoroot = `./prebuilts/go/linux-x86`
-const TheVersion = `go1.8rc2`
+const TheVersion = `go1.8`
 const Goexperiment = ``
 const StackGuardMultiplier = 1
 
diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s
index 464f5fd..c2286d3 100644
--- a/src/runtime/memmove_amd64.s
+++ b/src/runtime/memmove_amd64.s
@@ -146,10 +146,16 @@
 move_0:
 	RET
 move_3or4:
+	CMPQ	BX, $4
+	JB	move_3
+	MOVL	(SI), AX
+	MOVL	AX, (DI)
+	RET
+move_3:
 	MOVW	(SI), AX
-	MOVW	-2(SI)(BX*1), CX
+	MOVB	2(SI), CX
 	MOVW	AX, (DI)
-	MOVW	CX, -2(DI)(BX*1)
+	MOVB	CX, 2(DI)
 	RET
 move_5through7:
 	MOVL	(SI), AX
diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go
index dbfa284..74b8753 100644
--- a/src/runtime/memmove_test.go
+++ b/src/runtime/memmove_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"crypto/rand"
+	"encoding/binary"
 	"fmt"
 	"internal/race"
 	. "runtime"
@@ -447,3 +448,22 @@
 		_ = y
 	}
 }
+
+func BenchmarkIssue18740(b *testing.B) {
+	// This tests that memmove uses one 4-byte load/store to move 4 bytes.
+	// It used to do two 2-byte load/stores, which caused a pipeline stall
+	// when the result was then read back with one 4-byte load.
+	var buf [4]byte
+	for j := 0; j < b.N; j++ {
+		s := uint32(0)
+		for i := 0; i < 4096; i += 4 {
+			copy(buf[:], g[i:])
+			s += binary.LittleEndian.Uint32(buf[:])
+		}
+		sink = uint64(s)
+	}
+}
+
+// TODO: 2 byte and 8 byte benchmarks also.
+
+var g [4096]byte
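
Outside the runtime, the pattern the benchmark measures looks like this: copy four bytes, then immediately read them back as one 32-bit value. With the assembly change above the copy is a single 4-byte store, so the subsequent 4-byte load no longer stalls on two smaller stores.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	src := []byte{0x01, 0x02, 0x03, 0x04}
	var buf [4]byte
	copy(buf[:], src) // 4-byte memmove
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(buf[:])) // 0x4030201
}
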
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 0b996d8..cd57720 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -628,10 +628,12 @@
 //go:nowritebarrier
 func (c *gcControllerState) enlistWorker() {
 	// If there are idle Ps, wake one so it will run an idle worker.
-	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
-		wakep()
-		return
-	}
+	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
+	//
+	//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
+	//		wakep()
+	//		return
+	//	}
 
 	// There are no idle Ps. If we need more dedicated workers,
 	// try to preempt a running P so it will switch to a worker.
diff --git a/src/runtime/msan.go b/src/runtime/msan.go
index 7177c8e..c0f3957 100644
--- a/src/runtime/msan.go
+++ b/src/runtime/msan.go
@@ -28,9 +28,11 @@
 // the runtime, but operations like a slice copy can call msanread
 // anyhow for values on the stack. Just ignore msanread when running
 // on the system stack. The other msan functions are fine.
+//
+//go:nosplit
 func msanread(addr unsafe.Pointer, sz uintptr) {
 	g := getg()
-	if g == g.m.g0 || g == g.m.gsignal {
+	if g == nil || g.m == nil || g == g.m.g0 || g == g.m.gsignal {
 		return
 	}
 	domsanread(addr, sz)
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index c284437..f886961 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"fmt"
+	"go/build"
 	"internal/testenv"
 	"io/ioutil"
 	"os"
@@ -67,7 +68,6 @@
 }
 
 const helloSource = `
-package main
 import "fmt"
 var gslice []string
 func main() {
@@ -85,9 +85,20 @@
 `
 
 func TestGdbPython(t *testing.T) {
+	testGdbPython(t, false)
+}
+
+func TestGdbPythonCgo(t *testing.T) {
+	testGdbPython(t, true)
+}
+
+func testGdbPython(t *testing.T, cgo bool) {
 	if runtime.GOARCH == "mips64" {
 		testenv.SkipFlaky(t, 18173)
 	}
+	if cgo && !build.Default.CgoEnabled {
+		t.Skip("skipping because cgo is not enabled")
+	}
 
 	t.Parallel()
 	checkGdbEnvironment(t)
@@ -100,8 +111,15 @@
 	}
 	defer os.RemoveAll(dir)
 
+	var buf bytes.Buffer
+	buf.WriteString("package main\n")
+	if cgo {
+		buf.WriteString(`import "C"` + "\n")
+	}
+	buf.WriteString(helloSource)
+
 	src := filepath.Join(dir, "main.go")
-	err = ioutil.WriteFile(src, []byte(helloSource), 0644)
+	err = ioutil.WriteFile(src, buf.Bytes(), 0644)
 	if err != nil {
 		t.Fatalf("failed to create file: %v", err)
 	}
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index f521906..ed82783 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -285,6 +285,25 @@
 			md.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(md.gcbss)), md.ebss-md.bss)
 		}
 	}
+
+	// Modules appear in the moduledata linked list in the order they are
+	// loaded by the dynamic loader, with one exception: the
+	// firstmoduledata itself is the module that contains the runtime. This
+	// is not always the first module (when using -buildmode=shared, it
+	// is typically libstd.so, the second module). The order matters for
+	// typelinksinit, so we swap the first module with whatever module
+	// contains the main function.
+	//
+	// See Issue #18729.
+	mainText := funcPC(main_main)
+	for i, md := range *modules {
+		if md.text <= mainText && mainText <= md.etext {
+			(*modules)[0] = md
+			(*modules)[i] = &firstmoduledata
+			break
+		}
+	}
+
 	atomicstorep(unsafe.Pointer(&modulesSlice), unsafe.Pointer(modules))
 }
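
A stripped-down sketch of that swap, using an illustrative module type (the names below are made up; the real code walks *moduledata entries and uses funcPC(main_main) for the PC): find the module whose text range contains main's PC and exchange it with the current first entry.

package main

import "fmt"

// Illustrative stand-in for the runtime's moduledata.
type module struct {
	name        string
	text, etext uintptr
}

// swapMainToFront mirrors the loop added above: the module containing
// mainPC trades places with the current first module.
func swapMainToFront(modules []*module, mainPC uintptr) {
	for i, md := range modules {
		if md.text <= mainPC && mainPC <= md.etext {
			modules[0], modules[i] = modules[i], modules[0]
			break
		}
	}
}

func main() {
	mods := []*module{
		{name: "libstd.so", text: 0x1000, etext: 0x2000},
		{name: "main-exe", text: 0x3000, etext: 0x4000},
	}
	swapMainToFront(mods, 0x3500)
	fmt.Println(mods[0].name, mods[1].name) // main-exe libstd.so
}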
 
diff --git a/src/runtime/testdata/testprogcgo/threadpprof.go b/src/runtime/testdata/testprogcgo/threadpprof.go
index 44afb91..3da8296 100644
--- a/src/runtime/testdata/testprogcgo/threadpprof.go
+++ b/src/runtime/testdata/testprogcgo/threadpprof.go
@@ -61,7 +61,7 @@
 	return 0;
 }
 
-void runCPUHogThread() {
+void runCPUHogThread(void) {
 	pthread_t tid;
 	pthread_create(&tid, 0, cpuHogDriver, 0);
 }
diff --git a/src/runtime/testdata/testprogcgo/traceback.go b/src/runtime/testdata/testprogcgo/traceback.go
index e8b0a04..2a023f6 100644
--- a/src/runtime/testdata/testprogcgo/traceback.go
+++ b/src/runtime/testdata/testprogcgo/traceback.go
@@ -15,16 +15,16 @@
 
 char *p;
 
-static int f3() {
+static int f3(void) {
 	*p = 0;
 	return 0;
 }
 
-static int f2() {
+static int f2(void) {
 	return f3();
 }
 
-static int f1() {
+static int f1(void) {
 	return f2();
 }
 
diff --git a/src/testing/testing.go b/src/testing/testing.go
index ddbdc25..bd19a31 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -821,6 +821,7 @@
 	haveExamples = len(m.examples) > 0
 	testRan, testOk := runTests(m.deps.MatchString, m.tests)
 	exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples)
+	stopAlarm()
 	if !testRan && !exampleRan && *matchBenchmarks == "" {
 		fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
 	}
diff --git a/src/vendor/golang_org/x/crypto/curve25519/const_amd64.h b/src/vendor/golang_org/x/crypto/curve25519/const_amd64.h
new file mode 100644
index 0000000..80ad222
--- /dev/null
+++ b/src/vendor/golang_org/x/crypto/curve25519/const_amd64.h
@@ -0,0 +1,8 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+#define REDMASK51     0x0007FFFFFFFFFFFF
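
REDMASK51 is 2^51 - 1, the mask for one limb of the radix-2^51 field-element representation this curve25519 code uses; defining it in a header lets the assembly below load it as a MOVQ immediate instead of from a global. A quick sanity check of the constant:

package main

import "fmt"

func main() {
	// REDMASK51 from const_amd64.h is the low-51-bit mask: 2^51 - 1.
	const redmask51 = 0x0007FFFFFFFFFFFF
	fmt.Println(redmask51 == 1<<51-1) // true
}
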
diff --git a/src/vendor/golang_org/x/crypto/curve25519/const_amd64.s b/src/vendor/golang_org/x/crypto/curve25519/const_amd64.s
index 797f9b0..0ad5398 100644
--- a/src/vendor/golang_org/x/crypto/curve25519/const_amd64.s
+++ b/src/vendor/golang_org/x/crypto/curve25519/const_amd64.s
@@ -7,8 +7,8 @@
 
 // +build amd64,!gccgo,!appengine
 
-DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF
-GLOBL ·REDMASK51(SB), 8, $8
+// These constants cannot be encoded in non-MOVQ immediates.
+// We access them directly from memory instead.
 
 DATA ·_121666_213(SB)/8, $996687872
 GLOBL ·_121666_213(SB), 8, $8
diff --git a/src/vendor/golang_org/x/crypto/curve25519/freeze_amd64.s b/src/vendor/golang_org/x/crypto/curve25519/freeze_amd64.s
index 932800b..536479b 100644
--- a/src/vendor/golang_org/x/crypto/curve25519/freeze_amd64.s
+++ b/src/vendor/golang_org/x/crypto/curve25519/freeze_amd64.s
@@ -7,6 +7,8 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func freeze(inout *[5]uint64)
 TEXT ·freeze(SB),7,$0-8
 	MOVQ inout+0(FP), DI
@@ -16,7 +18,7 @@
 	MOVQ 16(DI),CX
 	MOVQ 24(DI),R8
 	MOVQ 32(DI),R9
-	MOVQ ·REDMASK51(SB),AX
+	MOVQ $REDMASK51,AX
 	MOVQ AX,R10
 	SUBQ $18,R10
 	MOVQ $3,R11
diff --git a/src/vendor/golang_org/x/crypto/curve25519/ladderstep_amd64.s b/src/vendor/golang_org/x/crypto/curve25519/ladderstep_amd64.s
index ee7b36c..7074e5c 100644
--- a/src/vendor/golang_org/x/crypto/curve25519/ladderstep_amd64.s
+++ b/src/vendor/golang_org/x/crypto/curve25519/ladderstep_amd64.s
@@ -7,6 +7,8 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func ladderstep(inout *[5][5]uint64)
 TEXT ·ladderstep(SB),0,$296-8
 	MOVQ inout+0(FP),DI
@@ -118,7 +120,7 @@
 	MULQ 72(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -233,7 +235,7 @@
 	MULQ 32(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -438,7 +440,7 @@
 	MULQ 72(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -588,7 +590,7 @@
 	MULQ 32(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -728,7 +730,7 @@
 	MULQ 152(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -843,7 +845,7 @@
 	MULQ 192(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -993,7 +995,7 @@
 	MULQ 32(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -1143,7 +1145,7 @@
 	MULQ 112(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -1329,7 +1331,7 @@
 	MULQ 192(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
diff --git a/src/vendor/golang_org/x/crypto/curve25519/mul_amd64.s b/src/vendor/golang_org/x/crypto/curve25519/mul_amd64.s
index 33ce57d..b162e65 100644
--- a/src/vendor/golang_org/x/crypto/curve25519/mul_amd64.s
+++ b/src/vendor/golang_org/x/crypto/curve25519/mul_amd64.s
@@ -7,6 +7,8 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func mul(dest, a, b *[5]uint64)
 TEXT ·mul(SB),0,$16-24
 	MOVQ dest+0(FP), DI
@@ -121,7 +123,7 @@
 	MULQ 32(CX)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ ·REDMASK51(SB),SI
+	MOVQ $REDMASK51,SI
 	SHLQ $13,R9:R8
 	ANDQ SI,R8
 	SHLQ $13,R11:R10
diff --git a/src/vendor/golang_org/x/crypto/curve25519/square_amd64.s b/src/vendor/golang_org/x/crypto/curve25519/square_amd64.s
index 3a92804..4e864a8 100644
--- a/src/vendor/golang_org/x/crypto/curve25519/square_amd64.s
+++ b/src/vendor/golang_org/x/crypto/curve25519/square_amd64.s
@@ -7,6 +7,8 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func square(out, in *[5]uint64)
 TEXT ·square(SB),7,$0-16
 	MOVQ out+0(FP), DI
@@ -84,7 +86,7 @@
 	MULQ 32(SI)
 	ADDQ AX,R13
 	ADCQ DX,R14
-	MOVQ ·REDMASK51(SB),SI
+	MOVQ $REDMASK51,SI
 	SHLQ $13,R8:CX
 	ANDQ SI,CX
 	SHLQ $13,R10:R9
diff --git a/test/fixedbugs/issue18725.go b/test/fixedbugs/issue18725.go
new file mode 100644
index 0000000..c632dba
--- /dev/null
+++ b/test/fixedbugs/issue18725.go
@@ -0,0 +1,24 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+func panicWhenNot(cond bool) {
+	if cond {
+		os.Exit(0)
+	} else {
+		panic("nilcheck elim failed")
+	}
+}
+
+func main() {
+	e := (*string)(nil)
+	panicWhenNot(e == e)
+	// Should never reach this line.
+	panicWhenNot(*e == *e)
+}
diff --git a/test/fixedbugs/issue18808.go b/test/fixedbugs/issue18808.go
new file mode 100644
index 0000000..c98386e
--- /dev/null
+++ b/test/fixedbugs/issue18808.go
@@ -0,0 +1,63 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+const lim = 0x80000000
+
+//go:noinline
+func eq(x uint32) {
+	if x == lim {
+		return
+	}
+	panic("x == lim returned false")
+}
+
+//go:noinline
+func neq(x uint32) {
+	if x != lim {
+		panic("x != lim returned true")
+	}
+}
+
+//go:noinline
+func gt(x uint32) {
+	if x > lim {
+		return
+	}
+	panic("x > lim returned false")
+}
+
+//go:noinline
+func gte(x uint32) {
+	if x >= lim {
+		return
+	}
+	panic("x >= lim returned false")
+}
+
+//go:noinline
+func lt(x uint32) {
+	if x < lim {
+		panic("x < lim returned true")
+	}
+}
+
+//go:noinline
+func lte(x uint32) {
+	if x <= lim {
+		panic("x <= lim returned true")
+	}
+}
+
+func main() {
+	eq(lim)
+	neq(lim)
+	gt(lim+1)
+	gte(lim+1)
+	lt(lim+1)
+	lte(lim+1)
+}
diff --git a/test/fixedbugs/issue18906.go b/test/fixedbugs/issue18906.go
new file mode 100644
index 0000000..544400b
--- /dev/null
+++ b/test/fixedbugs/issue18906.go
@@ -0,0 +1,36 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+//go:noinline
+func f(x int) {
+}
+
+//go:noinline
+func val() int8 {
+	return -1
+}
+
+var (
+	array = [257]int{}
+	slice = array[1:]
+)
+
+func init() {
+	for i := range array {
+		array[i] = i - 1
+	}
+}
+
+func main() {
+	x := val()
+	y := int(uint8(x))
+	f(y) // try and force y to be calculated and spilled
+	if slice[y] != 255 {
+		panic("incorrect value")
+	}
+}
diff --git a/test/fixedbugs/issue18915.go b/test/fixedbugs/issue18915.go
new file mode 100644
index 0000000..a432bbc
--- /dev/null
+++ b/test/fixedbugs/issue18915.go
@@ -0,0 +1,21 @@
+// errorcheck
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure error messages for invalid conditions
+// or tags are consistent with earlier Go versions.
+
+package p
+
+func _() {
+	if a := 10 { // ERROR "a := 10 used as value"
+	}
+
+	for b := 10 { // ERROR "b := 10 used as value"
+	}
+
+	switch c := 10 { // ERROR "c := 10 used as value"
+	}
+}
diff --git a/test/fixedbugs/issue6036.go b/test/fixedbugs/issue6036.go
index 795b223..8ebef5a 100644
--- a/test/fixedbugs/issue6036.go
+++ b/test/fixedbugs/issue6036.go
@@ -1,4 +1,4 @@
-// +build amd64
+// +build !386,!arm,!mips,!mipsle,!amd64p32
 // compile
 
 // Copyright 2013 The Go Authors. All rights reserved.
diff --git a/test/nilptr3.go b/test/nilptr3.go
index 8fdae8c..c681cba 100644
--- a/test/nilptr3.go
+++ b/test/nilptr3.go
@@ -40,23 +40,23 @@
 )
 
 func f1() {
-	_ = *intp // ERROR "generated nil check"
+	_ = *intp // ERROR "removed nil check"
 
 	// This one should be removed but the block copy needs
 	// to be turned into its own pseudo-op in order to see
 	// the indirect.
-	_ = *arrayp // ERROR "generated nil check"
+	_ = *arrayp // ERROR "removed nil check"
 
 	// 0-byte indirect doesn't suffice.
 	// we don't registerize globals, so there are no removed.* nil checks.
-	_ = *array0p // ERROR "generated nil check"
 	_ = *array0p // ERROR "removed nil check"
+	_ = *array0p // ERROR "generated nil check"
 
-	_ = *intp    // ERROR "removed nil check"
+	_ = *intp    // ERROR "generated nil check"
 	_ = *arrayp  // ERROR "removed nil check"
 	_ = *structp // ERROR "generated nil check"
 	_ = *emptyp  // ERROR "generated nil check"
-	_ = *arrayp  // ERROR "removed nil check"
+	_ = *arrayp  // ERROR "generated nil check"
 }
 
 func f2() {
@@ -71,15 +71,15 @@
 		empty1p    *Empty1
 	)
 
-	_ = *intp       // ERROR "generated nil check"
-	_ = *arrayp     // ERROR "generated nil check"
-	_ = *array0p    // ERROR "generated nil check"
-	_ = *array0p    // ERROR "removed.* nil check"
 	_ = *intp       // ERROR "removed.* nil check"
 	_ = *arrayp     // ERROR "removed.* nil check"
+	_ = *array0p    // ERROR "removed.* nil check"
+	_ = *array0p    // ERROR "generated nil check"
+	_ = *intp       // ERROR "generated nil check"
+	_ = *arrayp     // ERROR "removed.* nil check"
 	_ = *structp    // ERROR "generated nil check"
 	_ = *emptyp     // ERROR "generated nil check"
-	_ = *arrayp     // ERROR "removed.* nil check"
+	_ = *arrayp     // ERROR "generated nil check"
 	_ = *bigarrayp  // ERROR "generated nil check" ARM removed nil check before indirect!!
 	_ = *bigstructp // ERROR "generated nil check"
 	_ = *empty1p    // ERROR "generated nil check"
@@ -122,16 +122,16 @@
 	// x wasn't going to change across the function call.
 	// But it's a little complex to do and in practice doesn't
 	// matter enough.
-	_ = x[9999] // ERROR "removed nil check"
+	_ = x[9999] // ERROR "generated nil check" // TODO: fix
 }
 
 func f3a() {
 	x := fx10k()
 	y := fx10k()
 	z := fx10k()
-	_ = &x[9] // ERROR "generated nil check"
-	y = z
 	_ = &x[9] // ERROR "removed.* nil check"
+	y = z
+	_ = &x[9] // ERROR "generated nil check"
 	x = y
 	_ = &x[9] // ERROR "generated nil check"
 }
@@ -139,11 +139,11 @@
 func f3b() {
 	x := fx10k()
 	y := fx10k()
-	_ = &x[9] // ERROR "generated nil check"
+	_ = &x[9] // ERROR "removed.* nil check"
 	y = x
 	_ = &x[9] // ERROR "removed.* nil check"
 	x = y
-	_ = &x[9] // ERROR "removed.* nil check"
+	_ = &x[9] // ERROR "generated nil check"
 }
 
 func fx10() *[10]int
@@ -179,15 +179,15 @@
 	_ = x[9] // ERROR "generated nil check"  // bug would like to remove before indirect
 
 	fx10()
-	_ = x[9] // ERROR "removed nil check"
+	_ = x[9] // ERROR "generated nil check"  // TODO: fix
 
 	x = fx10()
 	y := fx10()
-	_ = &x[9] // ERROR "generated nil check"
+	_ = &x[9] // ERROR "removed[a-z ]* nil check"
 	y = x
 	_ = &x[9] // ERROR "removed[a-z ]* nil check"
 	x = y
-	_ = &x[9] // ERROR "removed[a-z ]* nil check"
+	_ = &x[9] // ERROR "generated nil check"
 }
 
 func f5(p *float32, q *float64, r *float32, s *float64) float64 {
diff --git a/test/writebarrier.go b/test/writebarrier.go
index 6460a6f..13f7b54 100644
--- a/test/writebarrier.go
+++ b/test/writebarrier.go
@@ -220,3 +220,19 @@
 	*p = x // no barrier
 	return
 }
+
+type T23 struct {
+	p *int
+	a int
+}
+
+var t23 T23
+var i23 int
+
+func f23() {
+	// zeroing global needs write barrier for the hybrid barrier.
+	t23 = T23{} // ERROR "write barrier"
+	// also test partial assignments
+	t23 = T23{a: 1}    // ERROR "write barrier"
+	t23 = T23{p: &i23} // ERROR "write barrier"
+}